hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
9ca52c15895ba78c9ef0663b5eea63e5eae41cd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <CudaMathEngine.h>
#include <CudaMathEngineDnnConvs.h>
#include <MemoryHandleInternal.h>
#include <MathEngineCommon.h>
#include <Kernels/CudaDnnRleConvKernels.h>
namespace NeoML {
void CCudaMathEngine::blobConvertFromRle( const CCudaRleConvolutionDesc& desc, const CFloatHandle& sourceData,
const CFloatHandle& resultData )
{
const CCudaConvolutionDescInternal& convDesc = static_cast<const CCudaConvolutionDesc*>( desc.ConvDesc )->Internal;
const CCudaBlobDesc& source = convDesc.Source;
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
ASSERT_EXPR( source.Depth() == 1 );
ASSERT_EXPR( source.Channels() == 1 );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, source.ObjectCount(), source.Height());
hipLaunchKernelGGL(( BlobConvertFromRleKernel), dim3(blockCount), dim3(threadCount), 0, cudaStream, convDesc, desc.StrokeValue, desc.NonStrokeValue,
GetRaw( sourceData ), source.ObjectSize() * sizeof(float), GetRaw( resultData ) );
}
CRleConvolutionDesc* CCudaMathEngine::InitBlobRleConvolution( const CBlobDesc& source, float strokeValue,
float nonStrokeValue, int strideHeight, int strideWidth, const CBlobDesc& filter,
const CBlobDesc& result )
{
ASSERT_EXPR( strideHeight > 0 );
ASSERT_EXPR( strideWidth > 0 );
ASSERT_EXPR( source.Channels() == filter.Channels() );
ASSERT_EXPR( source.Depth() == filter.Depth() );
ASSERT_EXPR( filter.Height() <= source.Height() );
ASSERT_EXPR( filter.Width() <= source.Width() );
ASSERT_EXPR( filter.BatchLength() == 1 );
ASSERT_EXPR( result.BatchLength() == source.BatchLength() );
ASSERT_EXPR( result.BatchWidth() == source.BatchWidth() );
ASSERT_EXPR( result.Height() == 1 + ( source.Height() - filter.Height() ) / strideHeight );
ASSERT_EXPR( result.Width() == 1 + ( source.Width() - filter.Width() ) / strideWidth );
ASSERT_EXPR( result.Channels() == filter.BatchWidth() );
ASSERT_EXPR( result.Depth() == 1 );
ASSERT_EXPR( filter.Width() <= MaxRleConvFilterWidth );
ASSERT_EXPR( source.Width() <= MaxRleConvImageWidth );
ASSERT_EXPR( source.Channels() == 1 );
ASSERT_EXPR( ( filter.ObjectCount() % 4 ) == 0 );
CCudaRleConvolutionDesc* desc = new CCudaRleConvolutionDesc();
desc->StrokeValue = strokeValue;
desc->NonStrokeValue = nonStrokeValue;
desc->ConvDesc = static_cast<CCudaConvolutionDesc*>( InitBlobConvolution( source, 0, 0, strideHeight, strideWidth, 1, 1, filter, result ) );
return desc;
}
void CCudaMathEngine::BlobRleConvolution( const CRleConvolutionDesc& desc, const CFloatHandle& sourceData,
const CFloatHandle& filterData, const CFloatHandle* freeTermData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( filterData.GetMathEngine() == this );
ASSERT_EXPR( freeTermData == 0 || freeTermData->GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
const CCudaConvolutionDescInternal& convDesc = static_cast<const CCudaRleConvolutionDesc&>( desc ).ConvDesc->Internal;
CFloatHandleVar inputConverted( mathEngine(), convDesc.Source.BlobSize() );
blobConvertFromRle( static_cast<const CCudaRleConvolutionDesc&>(desc), sourceData, inputConverted );
BlobConvolution( *static_cast<const CCudaRleConvolutionDesc&>(desc).ConvDesc, inputConverted, filterData, freeTermData, resultData );
}
void CCudaMathEngine::BlobRleConvolutionLearnAdd( const CRleConvolutionDesc& desc,
const CFloatHandle& sourceData, const CFloatHandle& outputDiffData, const CFloatHandle& filterDiffData,
const CFloatHandle* freeTermDiffData )
{
const CCudaConvolutionDescInternal& convDesc = static_cast<const CCudaRleConvolutionDesc&>( desc ).ConvDesc->Internal;
CFloatHandleVar inputConverted( mathEngine(), convDesc.Source.BlobSize() );
blobConvertFromRle( static_cast<const CCudaRleConvolutionDesc&>(desc), sourceData, inputConverted );
BlobConvolutionLearnAdd( *static_cast<const CCudaRleConvolutionDesc&>(desc).ConvDesc, inputConverted, outputDiffData, filterDiffData,
freeTermDiffData, false );
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
| 9ca52c15895ba78c9ef0663b5eea63e5eae41cd0.cu | /* Copyright © 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
#include <NeoMathEngine/NeoMathEngineDefs.h>
#ifdef NEOML_USE_CUDA
#include <CudaMathEngine.h>
#include <CudaMathEngineDnnConvs.h>
#include <MemoryHandleInternal.h>
#include <MathEngineCommon.h>
#include <Kernels/CudaDnnRleConvKernels.h>
namespace NeoML {
void CCudaMathEngine::blobConvertFromRle( const CCudaRleConvolutionDesc& desc, const CFloatHandle& sourceData,
const CFloatHandle& resultData )
{
const CCudaConvolutionDescInternal& convDesc = static_cast<const CCudaConvolutionDesc*>( desc.ConvDesc )->Internal;
const CCudaBlobDesc& source = convDesc.Source;
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
ASSERT_EXPR( source.Depth() == 1 );
ASSERT_EXPR( source.Channels() == 1 );
dim3 blockCount;
dim3 threadCount;
getCudaTaskGrid2D(blockCount, threadCount, source.ObjectCount(), source.Height());
BlobConvertFromRleKernel<<<blockCount, threadCount, 0, cudaStream>>>( convDesc, desc.StrokeValue, desc.NonStrokeValue,
GetRaw( sourceData ), source.ObjectSize() * sizeof(float), GetRaw( resultData ) );
}
CRleConvolutionDesc* CCudaMathEngine::InitBlobRleConvolution( const CBlobDesc& source, float strokeValue,
float nonStrokeValue, int strideHeight, int strideWidth, const CBlobDesc& filter,
const CBlobDesc& result )
{
ASSERT_EXPR( strideHeight > 0 );
ASSERT_EXPR( strideWidth > 0 );
ASSERT_EXPR( source.Channels() == filter.Channels() );
ASSERT_EXPR( source.Depth() == filter.Depth() );
ASSERT_EXPR( filter.Height() <= source.Height() );
ASSERT_EXPR( filter.Width() <= source.Width() );
ASSERT_EXPR( filter.BatchLength() == 1 );
ASSERT_EXPR( result.BatchLength() == source.BatchLength() );
ASSERT_EXPR( result.BatchWidth() == source.BatchWidth() );
ASSERT_EXPR( result.Height() == 1 + ( source.Height() - filter.Height() ) / strideHeight );
ASSERT_EXPR( result.Width() == 1 + ( source.Width() - filter.Width() ) / strideWidth );
ASSERT_EXPR( result.Channels() == filter.BatchWidth() );
ASSERT_EXPR( result.Depth() == 1 );
ASSERT_EXPR( filter.Width() <= MaxRleConvFilterWidth );
ASSERT_EXPR( source.Width() <= MaxRleConvImageWidth );
ASSERT_EXPR( source.Channels() == 1 );
ASSERT_EXPR( ( filter.ObjectCount() % 4 ) == 0 );
CCudaRleConvolutionDesc* desc = new CCudaRleConvolutionDesc();
desc->StrokeValue = strokeValue;
desc->NonStrokeValue = nonStrokeValue;
desc->ConvDesc = static_cast<CCudaConvolutionDesc*>( InitBlobConvolution( source, 0, 0, strideHeight, strideWidth, 1, 1, filter, result ) );
return desc;
}
void CCudaMathEngine::BlobRleConvolution( const CRleConvolutionDesc& desc, const CFloatHandle& sourceData,
const CFloatHandle& filterData, const CFloatHandle* freeTermData, const CFloatHandle& resultData )
{
ASSERT_EXPR( sourceData.GetMathEngine() == this );
ASSERT_EXPR( filterData.GetMathEngine() == this );
ASSERT_EXPR( freeTermData == 0 || freeTermData->GetMathEngine() == this );
ASSERT_EXPR( resultData.GetMathEngine() == this );
const CCudaConvolutionDescInternal& convDesc = static_cast<const CCudaRleConvolutionDesc&>( desc ).ConvDesc->Internal;
CFloatHandleVar inputConverted( mathEngine(), convDesc.Source.BlobSize() );
blobConvertFromRle( static_cast<const CCudaRleConvolutionDesc&>(desc), sourceData, inputConverted );
BlobConvolution( *static_cast<const CCudaRleConvolutionDesc&>(desc).ConvDesc, inputConverted, filterData, freeTermData, resultData );
}
void CCudaMathEngine::BlobRleConvolutionLearnAdd( const CRleConvolutionDesc& desc,
const CFloatHandle& sourceData, const CFloatHandle& outputDiffData, const CFloatHandle& filterDiffData,
const CFloatHandle* freeTermDiffData )
{
const CCudaConvolutionDescInternal& convDesc = static_cast<const CCudaRleConvolutionDesc&>( desc ).ConvDesc->Internal;
CFloatHandleVar inputConverted( mathEngine(), convDesc.Source.BlobSize() );
blobConvertFromRle( static_cast<const CCudaRleConvolutionDesc&>(desc), sourceData, inputConverted );
BlobConvolutionLearnAdd( *static_cast<const CCudaRleConvolutionDesc&>(desc).ConvDesc, inputConverted, outputDiffData, filterDiffData,
freeTermDiffData, false );
}
} // namespace NeoML
#endif // NEOML_USE_CUDA
|
2a3a5732108d68a5b041d685ace6035cf42a2f0d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
#include <rocm_smi/rocm_smi.h>
#include <assert.h>
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void hypterm (double * __restrict__ flux_0, double * __restrict__ flux_1, double * __restrict__ flux_2, double * __restrict__ flux_3, double * __restrict__ flux_4, double * __restrict__ cons_1, double * __restrict__ cons_2, double * __restrict__ cons_3, double * __restrict__ cons_4, double * __restrict__ q_1, double * __restrict__ q_2, double * __restrict__ q_3, double * __restrict__ q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i-8);
int i = i0 + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j-8);
int j = j0 + (int)(threadIdx.y);
//Declarations
double reg_cons_1_m4=0, reg_cons_1_m3=0, reg_cons_1_m2=0, reg_cons_1_m1=0, __shared__ sh_cons_1_c0[16][16], reg_cons_1_p1=0, reg_cons_1_p2=0, reg_cons_1_p3=0, reg_cons_1_p4=0;
double reg_cons_2_m4=0, reg_cons_2_m3=0, reg_cons_2_m2=0, reg_cons_2_m1=0, __shared__ sh_cons_2_c0[16][16], reg_cons_2_p1=0, reg_cons_2_p2=0, reg_cons_2_p3=0, reg_cons_2_p4=0;
double reg_cons_3_m4=0, reg_cons_3_m3=0, reg_cons_3_m2=0, reg_cons_3_m1=0, __shared__ sh_cons_3_c0[16][16], reg_cons_3_p1=0, reg_cons_3_p2=0, reg_cons_3_p3=0, reg_cons_3_p4=0;
double reg_cons_4_m4=0, reg_cons_4_m3=0, reg_cons_4_m2=0, reg_cons_4_m1=0, __shared__ sh_cons_4_c0[16][16], reg_cons_4_p1=0, reg_cons_4_p2=0, reg_cons_4_p3=0, reg_cons_4_p4=0;
double __shared__ sh_q_1_c0[16][16];
double __shared__ sh_q_2_c0[16][16];
double reg_q_3_m4=0, reg_q_3_m3=0, reg_q_3_m2=0, reg_q_3_m1=0, reg_q_3_c0=0, reg_q_3_p1=0, reg_q_3_p2=0, reg_q_3_p3=0, reg_q_3_p4=0;
double reg_q_4_m4=0, reg_q_4_m3=0, reg_q_4_m2=0, reg_q_4_m1=0, __shared__ sh_q_4_c0[16][16], reg_q_4_p1=0, reg_q_4_p2=0, reg_q_4_p3=0, reg_q_4_p4=0;
//Value Initialization
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_m4 = cons_1[0 + j*N + i];
reg_cons_1_m3 = cons_1[1*M*N + j*N + i];
reg_cons_1_m2 = cons_1[2*M*N + j*N + i];
reg_cons_1_m1 = cons_1[3*M*N + j*N + i];
sh_cons_1_c0[j-j0][i-i0] = cons_1[4*M*N + j*N + i];
reg_cons_1_p1 = cons_1[5*M*N + j*N + i];
reg_cons_1_p2 = cons_1[6*M*N + j*N + i];
reg_cons_1_p3 = cons_1[7*M*N + j*N + i];
reg_cons_2_m4 = cons_2[0 + j*N + i];
reg_cons_2_m3 = cons_2[1*M*N + j*N + i];
reg_cons_2_m2 = cons_2[2*M*N + j*N + i];
reg_cons_2_m1 = cons_2[3*M*N + j*N + i];
sh_cons_2_c0[j-j0][i-i0] = cons_2[4*M*N + j*N + i];
reg_cons_2_p1 = cons_2[5*M*N + j*N + i];
reg_cons_2_p2 = cons_2[6*M*N + j*N + i];
reg_cons_2_p3 = cons_2[7*M*N + j*N + i];
reg_cons_3_m4 = cons_3[0 + j*N + i];
reg_cons_3_m3 = cons_3[1*M*N + j*N + i];
reg_cons_3_m2 = cons_3[2*M*N + j*N + i];
reg_cons_3_m1 = cons_3[3*M*N + j*N + i];
sh_cons_3_c0[j-j0][i-i0] = cons_3[4*M*N + j*N + i];
reg_cons_3_p1 = cons_3[5*M*N + j*N + i];
reg_cons_3_p2 = cons_3[6*M*N + j*N + i];
reg_cons_3_p3 = cons_3[7*M*N + j*N + i];
reg_cons_4_m4 = cons_4[0 + j*N + i];
reg_cons_4_m3 = cons_4[1*M*N + j*N + i];
reg_cons_4_m2 = cons_4[2*M*N + j*N + i];
reg_cons_4_m1 = cons_4[3*M*N + j*N + i];
sh_cons_4_c0[j-j0][i-i0] = cons_4[4*M*N + j*N + i];
reg_cons_4_p1 = cons_4[5*M*N + j*N + i];
reg_cons_4_p2 = cons_4[6*M*N + j*N + i];
reg_cons_4_p3 = cons_4[7*M*N + j*N + i];
reg_q_3_m4 = q_3[0 + j*N + i];
reg_q_3_m3 = q_3[1*M*N + j*N + i];
reg_q_3_m2 = q_3[2*M*N + j*N + i];
reg_q_3_m1 = q_3[3*M*N + j*N + i];
reg_q_3_c0 = q_3[4*M*N + j*N + i];
reg_q_3_p1 = q_3[5*M*N + j*N + i];
reg_q_3_p2 = q_3[6*M*N + j*N + i];
reg_q_3_p3 = q_3[7*M*N + j*N + i];
reg_q_4_m4 = q_4[0 + j*N + i];
reg_q_4_m3 = q_4[1*M*N + j*N + i];
reg_q_4_m2 = q_4[2*M*N + j*N + i];
reg_q_4_m1 = q_4[3*M*N + j*N + i];
sh_q_4_c0[j-j0][i-i0] = q_4[4*M*N + j*N + i];
reg_q_4_p1 = q_4[5*M*N + j*N + i];
reg_q_4_p2 = q_4[6*M*N + j*N + i];
reg_q_4_p3 = q_4[7*M*N + j*N + i];
}
//Rest of the computation
for (int k=4; k<=L-5; ++k) {
//Fetch new plane
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_p4 = cons_1[(k+4)*M*N + j*N + i];
reg_cons_2_p4 = cons_2[(k+4)*M*N + j*N + i];
reg_cons_3_p4 = cons_3[(k+4)*M*N + j*N + i];
reg_cons_4_p4 = cons_4[(k+4)*M*N + j*N + i];
sh_q_1_c0[j-j0][i-i0] = q_1[k*M*N + j*N + i];
sh_q_2_c0[j-j0][i-i0] = q_2[k*M*N + j*N + i];
reg_q_3_p4 = q_3[(k+4)*M*N + j*N + i];
reg_q_4_p4 = q_4[(k+4)*M*N + j*N + i];
}
__syncthreads ();
if (j >= j0+4 & j <= min (j0+blockdim_j-5, M-1) & i >= i0+4 & i <= min (i0+blockdim_i-5, N-5)) {
double f0 = -(((((0.8f * (sh_cons_1_c0[j-j0][i-i0+1] - sh_cons_1_c0[j-j0][i-i0-1])) - (0.2f * (sh_cons_1_c0[j-j0][i-i0+2] - sh_cons_1_c0[j-j0][i-i0-2]))) + (0.038f * (sh_cons_1_c0[j-j0][i-i0+3] - sh_cons_1_c0[j-j0][i-i0-3]))) - (0.0035f * (sh_cons_1_c0[j-j0][i-i0+4] - sh_cons_1_c0[j-j0][i-i0-4]))) * dxinv0);
f0 -= (((((0.8f * (sh_cons_2_c0[j-j0+1][i-i0] - sh_cons_2_c0[j-j0-1][i-i0])) - (0.2f * (sh_cons_2_c0[j-j0+2][i-i0] - sh_cons_2_c0[j-j0-2][i-i0]))) + (0.038f * (sh_cons_2_c0[j-j0+3][i-i0] - sh_cons_2_c0[j-j0-3][i-i0]))) - (0.0035f * (sh_cons_2_c0[j-j0+4][i-i0] - sh_cons_2_c0[j-j0-4][i-i0]))) * dxinv1);
f0 -= (((((0.8f * (reg_cons_3_p1 - reg_cons_3_m1)) - (0.2f * (reg_cons_3_p2 - reg_cons_3_m2))) + (0.038f * (reg_cons_3_p3 - reg_cons_3_m3))) - (0.0035f * (reg_cons_3_p4 - reg_cons_3_m4))) * dxinv2);
flux_0[k*M*N + j*N + i] = f0;
double f1 = -(((((0.8f * (((sh_cons_1_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_1_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1])) + (sh_q_4_c0[j-j0][i-i0+1] - sh_q_4_c0[j-j0][i-i0-1]))) - (0.2f * (((sh_cons_1_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_1_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])) + (sh_q_4_c0[j-j0][i-i0+2] - sh_q_4_c0[j-j0][i-i0-2])))) + (0.038f * (((sh_cons_1_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_1_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])) + (sh_q_4_c0[j-j0][i-i0+3] - sh_q_4_c0[j-j0][i-i0-3])))) - (0.0035f * (((sh_cons_1_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_1_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])) + (sh_q_4_c0[j-j0][i-i0+4] - sh_q_4_c0[j-j0][i-i0-4])))) * dxinv0);
f1 -= (((((0.8f * ((sh_cons_1_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_1_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0]))) - (0.2f * ((sh_cons_1_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_1_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])))) + (0.038f * ((sh_cons_1_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_1_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])))) - (0.0035f * ((sh_cons_1_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_1_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])))) * dxinv1);
f1 -= (((((0.8f * ((reg_cons_1_p1 * reg_q_3_p1) - (reg_cons_1_m1 * reg_q_3_m1))) - (0.2f * ((reg_cons_1_p2 * reg_q_3_p2) - (reg_cons_1_m2 * reg_q_3_m2)))) + (0.038f * ((reg_cons_1_p3 * reg_q_3_p3) - (reg_cons_1_m3 * reg_q_3_m3)))) - (0.0035f * ((reg_cons_1_p4 * reg_q_3_p4) - (reg_cons_1_m4 * reg_q_3_m4)))) * dxinv2);
flux_1[k*M*N + j*N + i] = f1;
double f2 = -(((((0.8f * ((sh_cons_2_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_2_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1]))) - (0.2f * ((sh_cons_2_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_2_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])))) + (0.038f * ((sh_cons_2_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_2_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])))) - (0.0035f * ((sh_cons_2_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_2_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])))) * dxinv0);
f2 -= (((((0.8f * (((sh_cons_2_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_2_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0])) + (sh_q_4_c0[j-j0+1][i-i0] - sh_q_4_c0[j-j0-1][i-i0]))) - (0.2f * (((sh_cons_2_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_2_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])) + (sh_q_4_c0[j-j0+2][i-i0] - sh_q_4_c0[j-j0-2][i-i0])))) + (0.038f * (((sh_cons_2_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_2_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])) + (sh_q_4_c0[j-j0+3][i-i0] - sh_q_4_c0[j-j0-3][i-i0])))) - (0.0035f * (((sh_cons_2_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_2_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])) + (sh_q_4_c0[j-j0+4][i-i0] - sh_q_4_c0[j-j0-4][i-i0])))) * dxinv1);
f2 -= (((((0.8f * ((reg_cons_2_p1 * reg_q_3_p1) - (reg_cons_2_m1 * reg_q_3_m1))) - (0.2f * ((reg_cons_2_p2 * reg_q_3_p2) - (reg_cons_2_m2 * reg_q_3_m2)))) + (0.038f * ((reg_cons_2_p3 * reg_q_3_p3) - (reg_cons_2_m3 * reg_q_3_m3)))) - (0.0035f * ((reg_cons_2_p4 * reg_q_3_p4) - (reg_cons_2_m4 * reg_q_3_m4)))) * dxinv2);
flux_2[k*M*N + j*N + i] = f2;
double f3 = -(((((0.8f * ((sh_cons_3_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_3_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1]))) - (0.2f * ((sh_cons_3_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_3_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])))) + (0.038f * ((sh_cons_3_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_3_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])))) - (0.0035f * ((sh_cons_3_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_3_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])))) * dxinv0);
f3 -= (((((0.8f * ((sh_cons_3_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_3_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0]))) - (0.2f * ((sh_cons_3_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_3_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])))) + (0.038f * ((sh_cons_3_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_3_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])))) - (0.0035f * ((sh_cons_3_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_3_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])))) * dxinv1);
f3 -= (((((0.8f * (((reg_cons_3_p1 * reg_q_3_p1) - (reg_cons_3_m1 * reg_q_3_m1)) + (reg_q_4_p1 - reg_q_4_m1))) - (0.2f * (((reg_cons_3_p2 * reg_q_3_p2) - (reg_cons_3_m2 * reg_q_3_m2)) + (reg_q_4_p2 - reg_q_4_m2)))) + (0.038f * (((reg_cons_3_p3 * reg_q_3_p3) - (reg_cons_3_m3 * reg_q_3_m3)) + (reg_q_4_p3 - reg_q_4_m3)))) - (0.0035f * (((reg_cons_3_p4 * reg_q_3_p4) - (reg_cons_3_m4 * reg_q_3_m4)) + (reg_q_4_p4 - reg_q_4_m4)))) * dxinv2);
flux_3[k*M*N + j*N + i] = f3;
double f4 = -(((((0.8f * (((sh_cons_4_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_4_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1])) + ((sh_q_4_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_q_4_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1])))) - (0.2f * (((sh_cons_4_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_4_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])) + ((sh_q_4_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_q_4_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2]))))) + (0.038f * (((sh_cons_4_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_4_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])) + ((sh_q_4_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_q_4_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3]))))) - (0.0035f * (((sh_cons_4_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_4_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])) + ((sh_q_4_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_q_4_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4]))))) * dxinv0);
f4 -= (((((0.8f * (((sh_cons_4_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_4_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0])) + ((sh_q_4_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_q_4_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0])))) - (0.2f * (((sh_cons_4_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_4_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])) + ((sh_q_4_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_q_4_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0]))))) + (0.038f * (((sh_cons_4_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_4_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])) + ((sh_q_4_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_q_4_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0]))))) - (0.0035f * (((sh_cons_4_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_4_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])) + ((sh_q_4_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_q_4_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0]))))) * dxinv1);
f4 -= (((((0.8f * (((reg_cons_4_p1 * reg_q_3_p1) - (reg_cons_4_m1 * reg_q_3_m1)) + ((reg_q_4_p1 * reg_q_3_p1) - (reg_q_4_m1 * reg_q_3_m1)))) - (0.2f * (((reg_cons_4_p2 * reg_q_3_p2) - (reg_cons_4_m2 * reg_q_3_m2)) + ((reg_q_4_p2 * reg_q_3_p2) - (reg_q_4_m2 * reg_q_3_m2))))) + (0.038f * (((reg_cons_4_p3 * reg_q_3_p3) - (reg_cons_4_m3 * reg_q_3_m3)) + ((reg_q_4_p3 * reg_q_3_p3) - (reg_q_4_m3 * reg_q_3_m3))))) - (0.0035f * (((reg_cons_4_p4 * reg_q_3_p4) - (reg_cons_4_m4 * reg_q_3_m4)) + ((reg_q_4_p4 * reg_q_3_p4) - (reg_q_4_m4 * reg_q_3_m4))))) * dxinv2);
flux_4[k*M*N + j*N + i] = f4;
}
__syncthreads ();
//Value rotation
reg_cons_1_m4 = reg_cons_1_m3;
reg_cons_1_m3 = reg_cons_1_m2;
reg_cons_1_m2 = reg_cons_1_m1;
reg_cons_1_m1 = sh_cons_1_c0[j-j0][i-i0];
sh_cons_1_c0[j-j0][i-i0] = reg_cons_1_p1;
reg_cons_1_p1 = reg_cons_1_p2;
reg_cons_1_p2 = reg_cons_1_p3;
reg_cons_1_p3 = reg_cons_1_p4;
reg_cons_2_m4 = reg_cons_2_m3;
reg_cons_2_m3 = reg_cons_2_m2;
reg_cons_2_m2 = reg_cons_2_m1;
reg_cons_2_m1 = sh_cons_2_c0[j-j0][i-i0];
sh_cons_2_c0[j-j0][i-i0] = reg_cons_2_p1;
reg_cons_2_p1 = reg_cons_2_p2;
reg_cons_2_p2 = reg_cons_2_p3;
reg_cons_2_p3 = reg_cons_2_p4;
reg_cons_3_m4 = reg_cons_3_m3;
reg_cons_3_m3 = reg_cons_3_m2;
reg_cons_3_m2 = reg_cons_3_m1;
reg_cons_3_m1 = sh_cons_3_c0[j-j0][i-i0];
sh_cons_3_c0[j-j0][i-i0] = reg_cons_3_p1;
reg_cons_3_p1 = reg_cons_3_p2;
reg_cons_3_p2 = reg_cons_3_p3;
reg_cons_3_p3 = reg_cons_3_p4;
reg_cons_4_m4 = reg_cons_4_m3;
reg_cons_4_m3 = reg_cons_4_m2;
reg_cons_4_m2 = reg_cons_4_m1;
reg_cons_4_m1 = sh_cons_4_c0[j-j0][i-i0];
sh_cons_4_c0[j-j0][i-i0] = reg_cons_4_p1;
reg_cons_4_p1 = reg_cons_4_p2;
reg_cons_4_p2 = reg_cons_4_p3;
reg_cons_4_p3 = reg_cons_4_p4;
reg_q_3_m4 = reg_q_3_m3;
reg_q_3_m3 = reg_q_3_m2;
reg_q_3_m2 = reg_q_3_m1;
reg_q_3_m1 = reg_q_3_c0;
reg_q_3_c0 = reg_q_3_p1;
reg_q_3_p1 = reg_q_3_p2;
reg_q_3_p2 = reg_q_3_p3;
reg_q_3_p3 = reg_q_3_p4;
reg_q_4_m4 = reg_q_4_m3;
reg_q_4_m3 = reg_q_4_m2;
reg_q_4_m2 = reg_q_4_m1;
reg_q_4_m1 = sh_q_4_c0[j-j0][i-i0];
sh_q_4_c0[j-j0][i-i0] = reg_q_4_p1;
reg_q_4_p1 = reg_q_4_p2;
reg_q_4_p2 = reg_q_4_p3;
reg_q_4_p3 = reg_q_4_p4;
}
}
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
hipMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
hipMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_1;
hipMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
hipMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_2;
hipMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
hipMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_3;
hipMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
hipMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_4;
hipMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
hipMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_1;
hipMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
hipMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_2;
hipMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
hipMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_3;
hipMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
hipMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_4;
hipMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
hipMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_1;
hipMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
hipMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_2;
hipMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
hipMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_3;
hipMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
hipMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_4;
hipMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
hipMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
dim3 blockconfig_1 (16, 16, 1);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x-8), ceil(M, blockconfig_1.y-8), 1);
unsigned int power1, power2;
rsmi_status_t result;
uint32_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(RSMI_STATUS_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(RSMI_STATUS_SUCCESS == result);
hipDeviceSynchronize();
for (int x=0; x<500; x++) {
hipLaunchKernelGGL(( hypterm) , dim3(gridconfig_1), dim3(blockconfig_1), 0, 0, flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
}
hipDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(RSMI_STATUS_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
hipMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
//Free allocated memory
hipFree (flux_0);
hipFree (flux_1);
hipFree (flux_2);
hipFree (flux_3);
hipFree (flux_4);
hipFree (cons_1);
hipFree (cons_2);
hipFree (cons_3);
hipFree (cons_4);
hipFree (q_1);
hipFree (q_2);
hipFree (q_3);
hipFree (q_4);
}
| 2a3a5732108d68a5b041d685ace6035cf42a2f0d.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
#include <nvml.h>
#include <assert.h>
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
// 8th-order (9-point) central-difference flux kernel ("hypterm" stencil).
// Each block computes a (blockDim-8)^2 tile of output points per k-plane;
// the outer 4-wide rim of threads only stages halo data.  The k (slowest)
// dimension is streamed: each input's 9-point column is kept in per-thread
// registers (m4..m1, c0, p1..p4), with the centre (c0) plane of several
// arrays staged in 16x16 __shared__ tiles so i/j neighbours can be read.
// NOTE(review): the stencil coefficients are single-precision literals
// (0.8f, 0.2f, 0.038f, 0.0035f) used inside double arithmetic — presumably
// truncated forms of 4/5, 1/5, 4/105 and 1/280; confirm against the
// reference implementation.
__global__ void hypterm (double * __restrict__ flux_0, double * __restrict__ flux_1, double * __restrict__ flux_2, double * __restrict__ flux_3, double * __restrict__ flux_4, double * __restrict__ cons_1, double * __restrict__ cons_2, double * __restrict__ cons_3, double * __restrict__ cons_4, double * __restrict__ q_1, double * __restrict__ q_2, double * __restrict__ q_3, double * __restrict__ q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
// Blocks overlap by 8 in i and j (4-wide halo on each side).
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i-8);
int i = i0 + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j-8);
int j = j0 + (int)(threadIdx.y);
//Declarations
// reg_*_{m4..p4}: the k-column of each input around the current plane;
// sh_*_c0: the centre plane shared across the block for i/j neighbours.
double reg_cons_1_m4=0, reg_cons_1_m3=0, reg_cons_1_m2=0, reg_cons_1_m1=0, __shared__ sh_cons_1_c0[16][16], reg_cons_1_p1=0, reg_cons_1_p2=0, reg_cons_1_p3=0, reg_cons_1_p4=0;
double reg_cons_2_m4=0, reg_cons_2_m3=0, reg_cons_2_m2=0, reg_cons_2_m1=0, __shared__ sh_cons_2_c0[16][16], reg_cons_2_p1=0, reg_cons_2_p2=0, reg_cons_2_p3=0, reg_cons_2_p4=0;
double reg_cons_3_m4=0, reg_cons_3_m3=0, reg_cons_3_m2=0, reg_cons_3_m1=0, __shared__ sh_cons_3_c0[16][16], reg_cons_3_p1=0, reg_cons_3_p2=0, reg_cons_3_p3=0, reg_cons_3_p4=0;
double reg_cons_4_m4=0, reg_cons_4_m3=0, reg_cons_4_m2=0, reg_cons_4_m1=0, __shared__ sh_cons_4_c0[16][16], reg_cons_4_p1=0, reg_cons_4_p2=0, reg_cons_4_p3=0, reg_cons_4_p4=0;
double __shared__ sh_q_1_c0[16][16];
double __shared__ sh_q_2_c0[16][16];
double reg_q_3_m4=0, reg_q_3_m3=0, reg_q_3_m2=0, reg_q_3_m1=0, reg_q_3_c0=0, reg_q_3_p1=0, reg_q_3_p2=0, reg_q_3_p3=0, reg_q_3_p4=0;
double reg_q_4_m4=0, reg_q_4_m3=0, reg_q_4_m2=0, reg_q_4_m1=0, __shared__ sh_q_4_c0[16][16], reg_q_4_p1=0, reg_q_4_p2=0, reg_q_4_p3=0, reg_q_4_p4=0;
//Value Initialization
// Preload planes k=0..7 (m4..p3 for the first iteration at k=4).
// Note: generated code uses bitwise '&' on booleans (same result as '&&'
// here, but both sides are always evaluated).
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_m4 = cons_1[0 + j*N + i];
reg_cons_1_m3 = cons_1[1*M*N + j*N + i];
reg_cons_1_m2 = cons_1[2*M*N + j*N + i];
reg_cons_1_m1 = cons_1[3*M*N + j*N + i];
sh_cons_1_c0[j-j0][i-i0] = cons_1[4*M*N + j*N + i];
reg_cons_1_p1 = cons_1[5*M*N + j*N + i];
reg_cons_1_p2 = cons_1[6*M*N + j*N + i];
reg_cons_1_p3 = cons_1[7*M*N + j*N + i];
reg_cons_2_m4 = cons_2[0 + j*N + i];
reg_cons_2_m3 = cons_2[1*M*N + j*N + i];
reg_cons_2_m2 = cons_2[2*M*N + j*N + i];
reg_cons_2_m1 = cons_2[3*M*N + j*N + i];
sh_cons_2_c0[j-j0][i-i0] = cons_2[4*M*N + j*N + i];
reg_cons_2_p1 = cons_2[5*M*N + j*N + i];
reg_cons_2_p2 = cons_2[6*M*N + j*N + i];
reg_cons_2_p3 = cons_2[7*M*N + j*N + i];
reg_cons_3_m4 = cons_3[0 + j*N + i];
reg_cons_3_m3 = cons_3[1*M*N + j*N + i];
reg_cons_3_m2 = cons_3[2*M*N + j*N + i];
reg_cons_3_m1 = cons_3[3*M*N + j*N + i];
sh_cons_3_c0[j-j0][i-i0] = cons_3[4*M*N + j*N + i];
reg_cons_3_p1 = cons_3[5*M*N + j*N + i];
reg_cons_3_p2 = cons_3[6*M*N + j*N + i];
reg_cons_3_p3 = cons_3[7*M*N + j*N + i];
reg_cons_4_m4 = cons_4[0 + j*N + i];
reg_cons_4_m3 = cons_4[1*M*N + j*N + i];
reg_cons_4_m2 = cons_4[2*M*N + j*N + i];
reg_cons_4_m1 = cons_4[3*M*N + j*N + i];
sh_cons_4_c0[j-j0][i-i0] = cons_4[4*M*N + j*N + i];
reg_cons_4_p1 = cons_4[5*M*N + j*N + i];
reg_cons_4_p2 = cons_4[6*M*N + j*N + i];
reg_cons_4_p3 = cons_4[7*M*N + j*N + i];
reg_q_3_m4 = q_3[0 + j*N + i];
reg_q_3_m3 = q_3[1*M*N + j*N + i];
reg_q_3_m2 = q_3[2*M*N + j*N + i];
reg_q_3_m1 = q_3[3*M*N + j*N + i];
reg_q_3_c0 = q_3[4*M*N + j*N + i];
reg_q_3_p1 = q_3[5*M*N + j*N + i];
reg_q_3_p2 = q_3[6*M*N + j*N + i];
reg_q_3_p3 = q_3[7*M*N + j*N + i];
reg_q_4_m4 = q_4[0 + j*N + i];
reg_q_4_m3 = q_4[1*M*N + j*N + i];
reg_q_4_m2 = q_4[2*M*N + j*N + i];
reg_q_4_m1 = q_4[3*M*N + j*N + i];
sh_q_4_c0[j-j0][i-i0] = q_4[4*M*N + j*N + i];
reg_q_4_p1 = q_4[5*M*N + j*N + i];
reg_q_4_p2 = q_4[6*M*N + j*N + i];
reg_q_4_p3 = q_4[7*M*N + j*N + i];
}
//Rest of the computation
// Stream over the interior k-planes, fetching the leading (+4) plane and
// rotating the register queues each iteration.
for (int k=4; k<=L-5; ++k) {
//Fetch new plane
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_p4 = cons_1[(k+4)*M*N + j*N + i];
reg_cons_2_p4 = cons_2[(k+4)*M*N + j*N + i];
reg_cons_3_p4 = cons_3[(k+4)*M*N + j*N + i];
reg_cons_4_p4 = cons_4[(k+4)*M*N + j*N + i];
sh_q_1_c0[j-j0][i-i0] = q_1[k*M*N + j*N + i];
sh_q_2_c0[j-j0][i-i0] = q_2[k*M*N + j*N + i];
reg_q_3_p4 = q_3[(k+4)*M*N + j*N + i];
reg_q_4_p4 = q_4[(k+4)*M*N + j*N + i];
}
// Barrier: shared tiles must be fully written before neighbours read them.
__syncthreads ();
// Only threads with a full 4-point halo in i and j compute output.
// NOTE(review): the j clamp uses M-1 while the i clamp uses N-5 — the
// asymmetry looks like a generator artifact; verify the intended bounds.
if (j >= j0+4 & j <= min (j0+blockdim_j-5, M-1) & i >= i0+4 & i <= min (i0+blockdim_i-5, N-5)) {
double f0 = -(((((0.8f * (sh_cons_1_c0[j-j0][i-i0+1] - sh_cons_1_c0[j-j0][i-i0-1])) - (0.2f * (sh_cons_1_c0[j-j0][i-i0+2] - sh_cons_1_c0[j-j0][i-i0-2]))) + (0.038f * (sh_cons_1_c0[j-j0][i-i0+3] - sh_cons_1_c0[j-j0][i-i0-3]))) - (0.0035f * (sh_cons_1_c0[j-j0][i-i0+4] - sh_cons_1_c0[j-j0][i-i0-4]))) * dxinv0);
f0 -= (((((0.8f * (sh_cons_2_c0[j-j0+1][i-i0] - sh_cons_2_c0[j-j0-1][i-i0])) - (0.2f * (sh_cons_2_c0[j-j0+2][i-i0] - sh_cons_2_c0[j-j0-2][i-i0]))) + (0.038f * (sh_cons_2_c0[j-j0+3][i-i0] - sh_cons_2_c0[j-j0-3][i-i0]))) - (0.0035f * (sh_cons_2_c0[j-j0+4][i-i0] - sh_cons_2_c0[j-j0-4][i-i0]))) * dxinv1);
f0 -= (((((0.8f * (reg_cons_3_p1 - reg_cons_3_m1)) - (0.2f * (reg_cons_3_p2 - reg_cons_3_m2))) + (0.038f * (reg_cons_3_p3 - reg_cons_3_m3))) - (0.0035f * (reg_cons_3_p4 - reg_cons_3_m4))) * dxinv2);
flux_0[k*M*N + j*N + i] = f0;
double f1 = -(((((0.8f * (((sh_cons_1_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_1_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1])) + (sh_q_4_c0[j-j0][i-i0+1] - sh_q_4_c0[j-j0][i-i0-1]))) - (0.2f * (((sh_cons_1_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_1_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])) + (sh_q_4_c0[j-j0][i-i0+2] - sh_q_4_c0[j-j0][i-i0-2])))) + (0.038f * (((sh_cons_1_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_1_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])) + (sh_q_4_c0[j-j0][i-i0+3] - sh_q_4_c0[j-j0][i-i0-3])))) - (0.0035f * (((sh_cons_1_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_1_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])) + (sh_q_4_c0[j-j0][i-i0+4] - sh_q_4_c0[j-j0][i-i0-4])))) * dxinv0);
f1 -= (((((0.8f * ((sh_cons_1_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_1_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0]))) - (0.2f * ((sh_cons_1_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_1_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])))) + (0.038f * ((sh_cons_1_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_1_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])))) - (0.0035f * ((sh_cons_1_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_1_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])))) * dxinv1);
f1 -= (((((0.8f * ((reg_cons_1_p1 * reg_q_3_p1) - (reg_cons_1_m1 * reg_q_3_m1))) - (0.2f * ((reg_cons_1_p2 * reg_q_3_p2) - (reg_cons_1_m2 * reg_q_3_m2)))) + (0.038f * ((reg_cons_1_p3 * reg_q_3_p3) - (reg_cons_1_m3 * reg_q_3_m3)))) - (0.0035f * ((reg_cons_1_p4 * reg_q_3_p4) - (reg_cons_1_m4 * reg_q_3_m4)))) * dxinv2);
flux_1[k*M*N + j*N + i] = f1;
double f2 = -(((((0.8f * ((sh_cons_2_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_2_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1]))) - (0.2f * ((sh_cons_2_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_2_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])))) + (0.038f * ((sh_cons_2_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_2_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])))) - (0.0035f * ((sh_cons_2_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_2_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])))) * dxinv0);
f2 -= (((((0.8f * (((sh_cons_2_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_2_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0])) + (sh_q_4_c0[j-j0+1][i-i0] - sh_q_4_c0[j-j0-1][i-i0]))) - (0.2f * (((sh_cons_2_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_2_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])) + (sh_q_4_c0[j-j0+2][i-i0] - sh_q_4_c0[j-j0-2][i-i0])))) + (0.038f * (((sh_cons_2_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_2_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])) + (sh_q_4_c0[j-j0+3][i-i0] - sh_q_4_c0[j-j0-3][i-i0])))) - (0.0035f * (((sh_cons_2_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_2_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])) + (sh_q_4_c0[j-j0+4][i-i0] - sh_q_4_c0[j-j0-4][i-i0])))) * dxinv1);
f2 -= (((((0.8f * ((reg_cons_2_p1 * reg_q_3_p1) - (reg_cons_2_m1 * reg_q_3_m1))) - (0.2f * ((reg_cons_2_p2 * reg_q_3_p2) - (reg_cons_2_m2 * reg_q_3_m2)))) + (0.038f * ((reg_cons_2_p3 * reg_q_3_p3) - (reg_cons_2_m3 * reg_q_3_m3)))) - (0.0035f * ((reg_cons_2_p4 * reg_q_3_p4) - (reg_cons_2_m4 * reg_q_3_m4)))) * dxinv2);
flux_2[k*M*N + j*N + i] = f2;
double f3 = -(((((0.8f * ((sh_cons_3_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_3_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1]))) - (0.2f * ((sh_cons_3_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_3_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])))) + (0.038f * ((sh_cons_3_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_3_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])))) - (0.0035f * ((sh_cons_3_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_3_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])))) * dxinv0);
f3 -= (((((0.8f * ((sh_cons_3_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_3_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0]))) - (0.2f * ((sh_cons_3_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_3_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])))) + (0.038f * ((sh_cons_3_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_3_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])))) - (0.0035f * ((sh_cons_3_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_3_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])))) * dxinv1);
f3 -= (((((0.8f * (((reg_cons_3_p1 * reg_q_3_p1) - (reg_cons_3_m1 * reg_q_3_m1)) + (reg_q_4_p1 - reg_q_4_m1))) - (0.2f * (((reg_cons_3_p2 * reg_q_3_p2) - (reg_cons_3_m2 * reg_q_3_m2)) + (reg_q_4_p2 - reg_q_4_m2)))) + (0.038f * (((reg_cons_3_p3 * reg_q_3_p3) - (reg_cons_3_m3 * reg_q_3_m3)) + (reg_q_4_p3 - reg_q_4_m3)))) - (0.0035f * (((reg_cons_3_p4 * reg_q_3_p4) - (reg_cons_3_m4 * reg_q_3_m4)) + (reg_q_4_p4 - reg_q_4_m4)))) * dxinv2);
flux_3[k*M*N + j*N + i] = f3;
double f4 = -(((((0.8f * (((sh_cons_4_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_4_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1])) + ((sh_q_4_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_q_4_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1])))) - (0.2f * (((sh_cons_4_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_4_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])) + ((sh_q_4_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_q_4_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2]))))) + (0.038f * (((sh_cons_4_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_4_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])) + ((sh_q_4_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_q_4_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3]))))) - (0.0035f * (((sh_cons_4_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_4_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])) + ((sh_q_4_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_q_4_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4]))))) * dxinv0);
f4 -= (((((0.8f * (((sh_cons_4_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_4_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0])) + ((sh_q_4_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_q_4_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0])))) - (0.2f * (((sh_cons_4_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_4_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])) + ((sh_q_4_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_q_4_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0]))))) + (0.038f * (((sh_cons_4_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_4_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])) + ((sh_q_4_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_q_4_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0]))))) - (0.0035f * (((sh_cons_4_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_4_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])) + ((sh_q_4_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_q_4_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0]))))) * dxinv1);
f4 -= (((((0.8f * (((reg_cons_4_p1 * reg_q_3_p1) - (reg_cons_4_m1 * reg_q_3_m1)) + ((reg_q_4_p1 * reg_q_3_p1) - (reg_q_4_m1 * reg_q_3_m1)))) - (0.2f * (((reg_cons_4_p2 * reg_q_3_p2) - (reg_cons_4_m2 * reg_q_3_m2)) + ((reg_q_4_p2 * reg_q_3_p2) - (reg_q_4_m2 * reg_q_3_m2))))) + (0.038f * (((reg_cons_4_p3 * reg_q_3_p3) - (reg_cons_4_m3 * reg_q_3_m3)) + ((reg_q_4_p3 * reg_q_3_p3) - (reg_q_4_m3 * reg_q_3_m3))))) - (0.0035f * (((reg_cons_4_p4 * reg_q_3_p4) - (reg_cons_4_m4 * reg_q_3_m4)) + ((reg_q_4_p4 * reg_q_3_p4) - (reg_q_4_m4 * reg_q_3_m4))))) * dxinv2);
flux_4[k*M*N + j*N + i] = f4;
}
// Barrier: all reads of the shared centre planes must finish before the
// rotation below overwrites them with the next plane.
__syncthreads ();
//Value rotation
// Shift every 9-point column down by one plane: m4 <- m3 <- ... <- p4.
reg_cons_1_m4 = reg_cons_1_m3;
reg_cons_1_m3 = reg_cons_1_m2;
reg_cons_1_m2 = reg_cons_1_m1;
reg_cons_1_m1 = sh_cons_1_c0[j-j0][i-i0];
sh_cons_1_c0[j-j0][i-i0] = reg_cons_1_p1;
reg_cons_1_p1 = reg_cons_1_p2;
reg_cons_1_p2 = reg_cons_1_p3;
reg_cons_1_p3 = reg_cons_1_p4;
reg_cons_2_m4 = reg_cons_2_m3;
reg_cons_2_m3 = reg_cons_2_m2;
reg_cons_2_m2 = reg_cons_2_m1;
reg_cons_2_m1 = sh_cons_2_c0[j-j0][i-i0];
sh_cons_2_c0[j-j0][i-i0] = reg_cons_2_p1;
reg_cons_2_p1 = reg_cons_2_p2;
reg_cons_2_p2 = reg_cons_2_p3;
reg_cons_2_p3 = reg_cons_2_p4;
reg_cons_3_m4 = reg_cons_3_m3;
reg_cons_3_m3 = reg_cons_3_m2;
reg_cons_3_m2 = reg_cons_3_m1;
reg_cons_3_m1 = sh_cons_3_c0[j-j0][i-i0];
sh_cons_3_c0[j-j0][i-i0] = reg_cons_3_p1;
reg_cons_3_p1 = reg_cons_3_p2;
reg_cons_3_p2 = reg_cons_3_p3;
reg_cons_3_p3 = reg_cons_3_p4;
reg_cons_4_m4 = reg_cons_4_m3;
reg_cons_4_m3 = reg_cons_4_m2;
reg_cons_4_m2 = reg_cons_4_m1;
reg_cons_4_m1 = sh_cons_4_c0[j-j0][i-i0];
sh_cons_4_c0[j-j0][i-i0] = reg_cons_4_p1;
reg_cons_4_p1 = reg_cons_4_p2;
reg_cons_4_p2 = reg_cons_4_p3;
reg_cons_4_p3 = reg_cons_4_p4;
reg_q_3_m4 = reg_q_3_m3;
reg_q_3_m3 = reg_q_3_m2;
reg_q_3_m2 = reg_q_3_m1;
reg_q_3_m1 = reg_q_3_c0;
reg_q_3_c0 = reg_q_3_p1;
reg_q_3_p1 = reg_q_3_p2;
reg_q_3_p2 = reg_q_3_p3;
reg_q_3_p3 = reg_q_3_p4;
reg_q_4_m4 = reg_q_4_m3;
reg_q_4_m3 = reg_q_4_m2;
reg_q_4_m2 = reg_q_4_m1;
reg_q_4_m1 = sh_q_4_c0[j-j0][i-i0];
sh_q_4_c0[j-j0][i-i0] = reg_q_4_p1;
reg_q_4_p1 = reg_q_4_p2;
reg_q_4_p2 = reg_q_4_p3;
reg_q_4_p3 = reg_q_4_p4;
}
}
// Host driver for the hypterm kernel benchmark.
// Copies all inputs to the device, launches the kernel 500 times while
// measuring GPU power draw via NVML, then copies the flux outputs back
// and releases device memory.  The improvement over the original is an
// explicit error check after the kernel runs: kernel launch/execution
// errors are asynchronous and would otherwise go unnoticed.
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
  double *flux_0;
  cudaMalloc (&flux_0, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for flux_0\n");
  cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *flux_1;
  cudaMalloc (&flux_1, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for flux_1\n");
  cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *flux_2;
  cudaMalloc (&flux_2, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for flux_2\n");
  cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *flux_3;
  cudaMalloc (&flux_3, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for flux_3\n");
  cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *flux_4;
  cudaMalloc (&flux_4, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for flux_4\n");
  cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *cons_1;
  cudaMalloc (&cons_1, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for cons_1\n");
  cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *cons_2;
  cudaMalloc (&cons_2, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for cons_2\n");
  cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *cons_3;
  cudaMalloc (&cons_3, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for cons_3\n");
  cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *cons_4;
  cudaMalloc (&cons_4, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for cons_4\n");
  cudaMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *q_1;
  cudaMalloc (&q_1, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for q_1\n");
  cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *q_2;
  cudaMalloc (&q_2, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for q_2\n");
  cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *q_3;
  cudaMalloc (&q_3, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for q_3\n");
  cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *q_4;
  cudaMalloc (&q_4, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for q_4\n");
  cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  // Blocks overlap by 8 points (4-wide halo on each side), so each block
  // only produces (16-8)^2 outputs.
  // NOTE(review): two-argument ceil(a, b) — presumably a ceiling-division
  // macro defined elsewhere in this file; confirm it is not std::ceil.
  dim3 blockconfig_1 (16, 16, 1);
  dim3 gridconfig_1 (ceil(N, blockconfig_1.x-8), ceil(M, blockconfig_1.y-8), 1);
  // Measure average GPU power over the 500 launches via NVML: sample
  // before and after, and report the difference.
  unsigned int power1, power2;
  nvmlReturn_t result;
  nvmlDevice_t device;
  nvmlEnableState_t mode;
  result=nvmlInit();
  result = nvmlDeviceGetHandleByIndex(0, &device);
  assert(NVML_SUCCESS == result);
  result=nvmlDeviceGetPowerManagementMode(device, &mode);
  printf("enabled = %d\n", mode);
  result=nvmlDeviceGetPowerUsage(device,&power1);
  assert(NVML_SUCCESS == result);
  cudaDeviceSynchronize();
  for (int x=0; x<500; x++) {
    hypterm <<<gridconfig_1, blockconfig_1>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
  }
  cudaDeviceSynchronize();
  // Kernel errors are reported asynchronously; surface them here instead
  // of silently copying back garbage.
  check_error ("hypterm kernel execution failed\n");
  result=nvmlDeviceGetPowerUsage(device,&power2);
  assert(NVML_SUCCESS == result);
  power2 -= power1;
  printf("%u\n", power2);
  nvmlShutdown();
  cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
  //Free allocated memory
  cudaFree (flux_0);
  cudaFree (flux_1);
  cudaFree (flux_2);
  cudaFree (flux_3);
  cudaFree (flux_4);
  cudaFree (cons_1);
  cudaFree (cons_2);
  cudaFree (cons_3);
  cudaFree (cons_4);
  cudaFree (q_1);
  cudaFree (q_2);
  cudaFree (q_3);
  cudaFree (q_4);
}
|
759319263f25ff9dbb5d4c466c6b27e39de4fdd1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/one_hot_ops.h"
namespace caffe2 {
// Scatter kernel for one-hot encoding: for each row i of the
// batch_size x index_size row-major `output` matrix, writes 1.0 at
// column indices[i].  CUDA_1D_KERNEL_LOOP iterates i over [0, batch_size).
// NOTE(review): indices[i] is not bounds-checked — assumes
// 0 <= indices[i] < index_size; out-of-range values corrupt memory.
__global__ void OneHotOpKernel(
    const TIndex batch_size,
    const TIndex index_size,
    const TIndex* indices,
    float* output) {
  CUDA_1D_KERNEL_LOOP(i, batch_size) {
    output[i * index_size + indices[i]] = 1.;
  }
}
// GPU one-hot: zero-fills the output tensor, then launches one thread per
// batch row to scatter 1.0 at each row's index.  Both the fill and the
// scatter run on the context's stream, so no extra synchronization is
// needed between them.
template <>
void OneHotOp<CUDAContext>::DoOneHotOp(
    TIndex batch_size,
    TIndex index_size,
    const Tensor<CUDAContext>& indices,
    Tensor<CUDAContext>* output) {
  float* output_ptr = output->mutable_data<float>();
  // Clear the whole batch_size x index_size matrix before scattering ones.
  math::Set<float, CUDAContext>(output->size(), 0., output_ptr, &context_);
  hipLaunchKernelGGL(( OneHotOpKernel),
      dim3(CAFFE_GET_BLOCKS(batch_size)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      batch_size, index_size, indices.data<TIndex>(), output_ptr);
}
REGISTER_CUDA_OPERATOR(OneHot, OneHotOp<CUDAContext>);
} // namespace
| 759319263f25ff9dbb5d4c466c6b27e39de4fdd1.cu | #include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/one_hot_ops.h"
namespace caffe2 {
// Scatter kernel for one-hot encoding: for each row i of the
// batch_size x index_size row-major `output` matrix, writes 1.0 at
// column indices[i].  CUDA_1D_KERNEL_LOOP iterates i over [0, batch_size).
// NOTE(review): indices[i] is not bounds-checked — assumes
// 0 <= indices[i] < index_size.
__global__ void OneHotOpKernel(
    const TIndex batch_size,
    const TIndex index_size,
    const TIndex* indices,
    float* output) {
  CUDA_1D_KERNEL_LOOP(i, batch_size) {
    output[i * index_size + indices[i]] = 1.;
  }
}
// GPU one-hot: zero-fills the output tensor, then launches one thread per
// batch row to scatter 1.0 at each row's index.  Both operations are
// enqueued on the context's CUDA stream.
template <>
void OneHotOp<CUDAContext>::DoOneHotOp(
    TIndex batch_size,
    TIndex index_size,
    const Tensor<CUDAContext>& indices,
    Tensor<CUDAContext>* output) {
  float* output_ptr = output->mutable_data<float>();
  // Clear the whole batch_size x index_size matrix before scattering ones.
  math::Set<float, CUDAContext>(output->size(), 0., output_ptr, &context_);
  OneHotOpKernel<<<
      CAFFE_GET_BLOCKS(batch_size),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      batch_size, index_size, indices.data<TIndex>(), output_ptr);
}
REGISTER_CUDA_OPERATOR(OneHot, OneHotOp<CUDAContext>);
} // namespace
|
b73fdf70796bcefc25b624af7bbe5646d496f2e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 512
// Bundles the three vector pointers (two inputs a, b and the output c) so
// a whole set can be passed to the kernel as one by-value argument.  Only
// the pointers are copied — the arrays they reference are not.
typedef struct Data {
  double* a;
  double* b;
  double* c;
} Data;
// Element-wise vector addition: data.c[i] = data.a[i] + data.b[i].
// One thread per element; threads past the end of the vectors return early.
__global__ void add( Data data, int vector_size ) {
  // Flat global index of this thread within the launch grid.
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  // Guard the partial block at the tail of the grid: the grid is rounded
  // up to whole blocks, so some threads have no element to process.
  if (idx >= vector_size)
    return;
  data.c[idx] = data.a[idx] + data.b[idx];
}
// Benchmark driver: computes c = a + b for a command-line-sized vector on
// both CPU and GPU, times each with HIP events, and verifies the results
// match.  Fixes over the original: host buffers allocated with new[] are
// now released with delete[] (mixing new[] with free() is undefined
// behaviour), and the timing events are destroyed before exit.
int main( int argc, char* argv[] ) {
  // Parse Input arguments
  // Check the number of arguments (we only receive command + vector size)
  if (argc != 2) {
    // Tell the user how to run the program
    printf ("Usage: %s vector_size\n", argv[0]);
    // "Usage messages" are a conventional way of telling the user
    // how to run a program if they enter the command incorrectly.
    return 1;
  }
  // Set GPU Variables based on input arguments
  int vector_size = atoi(argv[1]);
  int grid_size = ((vector_size-1)/BLOCK_SIZE) + 1;   // ceil(vector_size / BLOCK_SIZE)
  // Set device that we will use for our cuda code
  // It will be 0, 1, 2 or 3
  hipSetDevice(0);
  // Time Variables
  hipEvent_t start, stop;
  float time;
  hipEventCreate (&start);
  hipEventCreate (&stop);
  // CPU Struct
  Data data_cpu;
  data_cpu.a = new double [vector_size];
  data_cpu.b = new double [vector_size];
  data_cpu.c = new double [vector_size];
  Data data_gpu_on_cpu;
  data_gpu_on_cpu.c = new double [vector_size];
  // fill the arrays 'a' and 'b' on the CPU
  printf("Initializing input arrays.\n");
  for (int i = 0; i < vector_size; i++) {
    data_cpu.a[i] = rand()*cos(i);
    data_cpu.b[i] = rand()*sin(i);
    data_cpu.c[i] = 0.0;
  }
  // allocate the memory on the GPU
  Data data_gpu;
  hipMalloc (&data_gpu.a, vector_size*sizeof(double));
  hipMalloc (&data_gpu.b, vector_size*sizeof(double));
  hipMalloc (&data_gpu.c, vector_size*sizeof(double));
  // copy the input to the GPU
  hipMemcpy (data_gpu.a, data_cpu.a, vector_size*sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy (data_gpu.b, data_cpu.b, vector_size*sizeof(double), hipMemcpyHostToDevice);
  //
  // CPU Calculation
  //////////////////
  printf("Running sequential job.\n");
  hipEventRecord(start,0);
  // Calculate C in the CPU
  for (int i = 0; i < vector_size; i++) {
    data_cpu.c[i] = data_cpu.a[i] + data_cpu.b[i];
  }
  hipEventRecord(stop,0);
  hipEventSynchronize(stop);
  hipEventElapsedTime(&time, start, stop);
  printf("\tSequential Job Time: %.2f ms\n", time);
  //
  // GPU Calculation
  ////////////////////////
  printf("Running parallel job.\n");
  hipEventRecord(start,0);
  // call the kernel
  hipLaunchKernelGGL(( add), dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, data_gpu, vector_size);
  hipEventRecord(stop,0);
  hipEventSynchronize(stop);
  hipEventElapsedTime(&time, start, stop);
  printf("\tParallel Job Time: %.2f ms\n", time);
  // copy the array 'c' back from the GPU to the CPU
  // (blocking copy — also synchronizes with the kernel above)
  hipMemcpy (data_gpu_on_cpu.c, data_gpu.c, vector_size*sizeof(double), hipMemcpyDeviceToHost);
  // compare the results
  int error = 0;
  for (int i = 0; i < vector_size; i++) {
    if (data_cpu.c[i] != data_gpu_on_cpu.c[i]){
      error = 1;
      printf( "Error starting element %d, %f != %f\n", i, data_gpu_on_cpu.c[i], data_cpu.c[i] );
    }
    if (error) break;
  }
  if (error == 0){
    printf ("Correct result. No errors were found.\n");
  }
  // free CPU data — allocated with new[], so it must be released with
  // delete[] (the original called free(), which is undefined behaviour)
  delete[] data_cpu.a;
  delete[] data_cpu.b;
  delete[] data_cpu.c;
  delete[] data_gpu_on_cpu.c;
  // release the timing events
  hipEventDestroy (start);
  hipEventDestroy (stop);
  // free the memory allocated on the GPU
  hipFree (data_gpu.a);
  hipFree (data_gpu.b);
  hipFree (data_gpu.c);
  return 0;
}
| b73fdf70796bcefc25b624af7bbe5646d496f2e0.cu | #include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 512
// Bundles the three vector pointers (two inputs a, b and the output c) so
// a whole set can be passed to the kernel as one by-value argument.  Only
// the pointers are copied — the arrays they reference are not.
typedef struct Data {
  double* a;
  double* b;
  double* c;
} Data;
// Element-wise vector addition on the GPU: c[i] = a[i] + b[i].
__global__ void add( Data data, int vector_size ) {
  // Flat global element index computed from block and thread coordinates.
  int element = blockDim.x * blockIdx.x + threadIdx.x;
  // Only threads that map to a real element do any work; the grid is
  // rounded up to whole blocks, so the last block may overhang the data.
  if (element < vector_size) {
    double lhs = data.a[element];
    double rhs = data.b[element];
    data.c[element] = lhs + rhs;
  }
}
// Benchmark driver: computes c = a + b for a command-line-sized vector on
// both CPU and GPU, times each with CUDA events, and verifies the results
// match.  Fixes over the original: host buffers allocated with new[] are
// now released with delete[] (mixing new[] with free() is undefined
// behaviour), and the timing events are destroyed before exit.
int main( int argc, char* argv[] ) {
  // Parse Input arguments
  // Check the number of arguments (we only receive command + vector size)
  if (argc != 2) {
    // Tell the user how to run the program
    printf ("Usage: %s vector_size\n", argv[0]);
    // "Usage messages" are a conventional way of telling the user
    // how to run a program if they enter the command incorrectly.
    return 1;
  }
  // Set GPU Variables based on input arguments
  int vector_size = atoi(argv[1]);
  int grid_size = ((vector_size-1)/BLOCK_SIZE) + 1;   // ceil(vector_size / BLOCK_SIZE)
  // Set device that we will use for our cuda code
  // It will be 0, 1, 2 or 3
  cudaSetDevice(0);
  // Time Variables
  cudaEvent_t start, stop;
  float time;
  cudaEventCreate (&start);
  cudaEventCreate (&stop);
  // CPU Struct
  Data data_cpu;
  data_cpu.a = new double [vector_size];
  data_cpu.b = new double [vector_size];
  data_cpu.c = new double [vector_size];
  Data data_gpu_on_cpu;
  data_gpu_on_cpu.c = new double [vector_size];
  // fill the arrays 'a' and 'b' on the CPU
  printf("Initializing input arrays.\n");
  for (int i = 0; i < vector_size; i++) {
    data_cpu.a[i] = rand()*cos(i);
    data_cpu.b[i] = rand()*sin(i);
    data_cpu.c[i] = 0.0;
  }
  // allocate the memory on the GPU
  Data data_gpu;
  cudaMalloc (&data_gpu.a, vector_size*sizeof(double));
  cudaMalloc (&data_gpu.b, vector_size*sizeof(double));
  cudaMalloc (&data_gpu.c, vector_size*sizeof(double));
  // copy the input to the GPU
  cudaMemcpy (data_gpu.a, data_cpu.a, vector_size*sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy (data_gpu.b, data_cpu.b, vector_size*sizeof(double), cudaMemcpyHostToDevice);
  //
  // CPU Calculation
  //////////////////
  printf("Running sequential job.\n");
  cudaEventRecord(start,0);
  // Calculate C in the CPU
  for (int i = 0; i < vector_size; i++) {
    data_cpu.c[i] = data_cpu.a[i] + data_cpu.b[i];
  }
  cudaEventRecord(stop,0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&time, start, stop);
  printf("\tSequential Job Time: %.2f ms\n", time);
  //
  // GPU Calculation
  ////////////////////////
  printf("Running parallel job.\n");
  cudaEventRecord(start,0);
  // call the kernel
  add<<<grid_size, BLOCK_SIZE>>>(data_gpu, vector_size);
  cudaEventRecord(stop,0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&time, start, stop);
  printf("\tParallel Job Time: %.2f ms\n", time);
  // copy the array 'c' back from the GPU to the CPU
  // (blocking copy — also synchronizes with the kernel above)
  cudaMemcpy (data_gpu_on_cpu.c, data_gpu.c, vector_size*sizeof(double), cudaMemcpyDeviceToHost);
  // compare the results
  int error = 0;
  for (int i = 0; i < vector_size; i++) {
    if (data_cpu.c[i] != data_gpu_on_cpu.c[i]){
      error = 1;
      printf( "Error starting element %d, %f != %f\n", i, data_gpu_on_cpu.c[i], data_cpu.c[i] );
    }
    if (error) break;
  }
  if (error == 0){
    printf ("Correct result. No errors were found.\n");
  }
  // free CPU data — allocated with new[], so it must be released with
  // delete[] (the original called free(), which is undefined behaviour)
  delete[] data_cpu.a;
  delete[] data_cpu.b;
  delete[] data_cpu.c;
  delete[] data_gpu_on_cpu.c;
  // release the timing events
  cudaEventDestroy (start);
  cudaEventDestroy (stop);
  // free the memory allocated on the GPU
  cudaFree (data_gpu.a);
  cudaFree (data_gpu.b);
  cudaFree (data_gpu.c);
  return 0;
}
|
437fd061c8a039b62e87158880cfb52ec882cdcf.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/array.h>
#include <af/dim4.hpp>
#include <af/defines.h>
#include <Array.hpp>
#include <diagonal.hpp>
#include <math.hpp>
#include <err_cuda.hpp>
#include <kernel/diagonal.hpp>
namespace cuda
{
// Builds a batch of square matrices from diagonal vectors: each of the
// `batch` columns of `in` becomes one (d0+|num|) x (d0+|num|) matrix whose
// num-th diagonal it fills (presumably num > 0 shifts above the main
// diagonal and num < 0 below — the actual placement is done by
// kernel::diagCreate; confirm there).
template<typename T>
Array<T> diagCreate(const Array<T> &in, const int num)
{
    // Square output large enough to hold the diagonal shifted by |num|.
    int size = in.dims()[0] + std::abs(num);
    int batch = in.dims()[1];
    Array<T> out = createEmptyArray<T>(dim4(size, size, batch));
    kernel::diagCreate<T>(out, in, num);
    return out;
}
// Extracts the num-th diagonal of each matrix in a batched input into a
// column vector of length max(d0, d1) - |num| (dimensions 2 and 3 are the
// batch dimensions).  The element copy itself is done by
// kernel::diagExtract.
template<typename T>
Array<T> diagExtract(const Array<T> &in, const int num)
{
    const dim_type *idims = in.dims().get();
    // NOTE(review): unqualified ::max (the CUDA twin of this file uses
    // std::max) — presumably a hipify artifact that resolves to an
    // equivalent global overload; confirm it picks the intended function.
    dim_type size = ::max(idims[0], idims[1]) - std::abs(num);
    Array<T> out = createEmptyArray<T>(dim4(size, 1, idims[2], idims[3]));
    kernel::diagExtract<T>(out, in, num);
    return out;
}
#define INSTANTIATE_DIAGONAL(T) \
template Array<T> diagExtract<T> (const Array<T> &in, const int num); \
template Array<T> diagCreate <T> (const Array<T> &in, const int num);
INSTANTIATE_DIAGONAL(float)
INSTANTIATE_DIAGONAL(double)
INSTANTIATE_DIAGONAL(cfloat)
INSTANTIATE_DIAGONAL(cdouble)
INSTANTIATE_DIAGONAL(int)
INSTANTIATE_DIAGONAL(uint)
INSTANTIATE_DIAGONAL(char)
INSTANTIATE_DIAGONAL(uchar)
}
| 437fd061c8a039b62e87158880cfb52ec882cdcf.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/array.h>
#include <af/dim4.hpp>
#include <af/defines.h>
#include <Array.hpp>
#include <diagonal.hpp>
#include <math.hpp>
#include <err_cuda.hpp>
#include <kernel/diagonal.hpp>
namespace cuda
{
// Builds a batch of square matrices from diagonal vectors: each of the
// `batch` columns of `in` becomes one (d0+|num|) x (d0+|num|) matrix whose
// num-th diagonal it fills (the actual placement is done by
// kernel::diagCreate; confirm the sign convention there).
template<typename T>
Array<T> diagCreate(const Array<T> &in, const int num)
{
    // Square output large enough to hold the diagonal shifted by |num|.
    int size = in.dims()[0] + std::abs(num);
    int batch = in.dims()[1];
    Array<T> out = createEmptyArray<T>(dim4(size, size, batch));
    kernel::diagCreate<T>(out, in, num);
    return out;
}
// Extracts the num-th diagonal of each matrix in a batched input into a
// column vector of length max(d0, d1) - |num| (dimensions 2 and 3 are the
// batch dimensions).  The element copy itself is done by
// kernel::diagExtract.
template<typename T>
Array<T> diagExtract(const Array<T> &in, const int num)
{
    const dim_type *idims = in.dims().get();
    dim_type size = std::max(idims[0], idims[1]) - std::abs(num);
    Array<T> out = createEmptyArray<T>(dim4(size, 1, idims[2], idims[3]));
    kernel::diagExtract<T>(out, in, num);
    return out;
}
#define INSTANTIATE_DIAGONAL(T) \
template Array<T> diagExtract<T> (const Array<T> &in, const int num); \
template Array<T> diagCreate <T> (const Array<T> &in, const int num);
INSTANTIATE_DIAGONAL(float)
INSTANTIATE_DIAGONAL(double)
INSTANTIATE_DIAGONAL(cfloat)
INSTANTIATE_DIAGONAL(cdouble)
INSTANTIATE_DIAGONAL(int)
INSTANTIATE_DIAGONAL(uint)
INSTANTIATE_DIAGONAL(char)
INSTANTIATE_DIAGONAL(uchar)
}
|
7669023d68951f61196b11ced8ccfc29d6f2257f.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
// Pointwise KL-divergence term: y * (log(y) - x), where x is the input
// value and y the target; contributes zero wherever the target is not
// positive (avoids log of a non-positive number).
struct kl_functor
{
  __host__ __device__ float operator()(const float& x, const float& y) const
  {
    if (y > 0)
      return y * (log(y) - x);
    return 0;
  }
};
// Forward pass of the KL-divergence criterion:
//   output = sum_i target_i * (log(target_i) - input_i)
// with terms where target_i <= 0 dropped (see kl_functor), optionally
// averaged over the number of elements.  Computed as a single
// thrust::inner_product reduction over both tensors.
void THNN_CudaDistKLDivCriterion_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage)
{
  THCUNN_assertSameGPU(state, 2, input, target);
  THArgCheck(THCudaTensor_nElement(state, input) == THCudaTensor_nElement(state, target), 2,
             "input and target need to have the same number of elements");
  float sum;
  ptrdiff_t size = THCudaTensor_nElement(state, input);
  // The linear reduction below requires densely packed storage.
  input = THCudaTensor_newContiguous(state, input);
  target = THCudaTensor_newContiguous(state, target);
  thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
  thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
  sum = thrust::inner_product(input_data, input_data+size, target_data, (float) 0, thrust::plus<float>(), kl_functor());
  if (sizeAverage)
    sum /= size;
  // Release the temporary contiguous references taken above.
  THCudaTensor_free(state, input);
  THCudaTensor_free(state, target);
  THCudaTensor_set1d(state, output, 0, sum);
}
// Pointwise gradient of the KL-divergence term with respect to the input:
// d/dx [y * (log(y) - x)] = -y, scaled by `norm` (1/size when averaging),
// and zero wherever the forward term was masked out (target <= 0).
struct kl_updateGradInput_functor
{
  const float norm;
  kl_updateGradInput_functor(float norm_)
    : norm(norm_)
  {}
  __host__ __device__ float operator()(const float& x, const float& y) const
  {
    if (y > 0)
      return norm * (-y);
    return 0;
  }
};
// Backward pass of the KL-divergence criterion:
//   gradInput_i = -target_i * norm   (norm = 1/size when sizeAverage),
// and 0 where target_i <= 0, mirroring the forward masking.  Computed as
// an elementwise thrust::transform over input/target pairs.
void THNN_CudaDistKLDivCriterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage)
{
  THCUNN_assertSameGPU(state, 3, input, target, gradInput);
  THArgCheck(THCudaTensor_nElement(state, input) == THCudaTensor_nElement(state, target), 2,
             "input and target need to have the same number of elements");
  ptrdiff_t size = THCudaTensor_nElement(state, input);
  float norm = (sizeAverage ? 1./size : 1.);
  // The elementwise transform below requires densely packed storage.
  input = THCudaTensor_newContiguous(state, input);
  target = THCudaTensor_newContiguous(state, target);
  THCudaTensor_resizeAs(state, gradInput, input);
  thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
  thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
  thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput));
  thrust::transform(input_data, input_data+size, target_data, gradInput_data, kl_updateGradInput_functor(norm));
  // Release the temporary contiguous references taken above.
  THCudaTensor_free(state, input);
  THCudaTensor_free(state, target);
}
| 7669023d68951f61196b11ced8ccfc29d6f2257f.cu | #include "THCUNN.h"
#include "common.h"
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
// Pointwise KL-divergence term: y * (log(y) - x), where x is the input
// value and y the target; contributes zero wherever the target is not
// positive (avoids log of a non-positive number).
struct kl_functor
{
  __host__ __device__ float operator()(const float& x, const float& y) const
  {
    return y > 0 ? y * (log(y) - x) : 0;
  }
};
// Forward pass of the KL-divergence criterion:
//   output = sum_i target_i * (log(target_i) - input_i)
// with terms where target_i <= 0 dropped (see kl_functor), optionally
// averaged over the number of elements.  Computed as a single
// thrust::inner_product reduction over both tensors.
void THNN_CudaDistKLDivCriterion_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage)
{
  THCUNN_assertSameGPU(state, 2, input, target);
  THArgCheck(THCudaTensor_nElement(state, input) == THCudaTensor_nElement(state, target), 2,
             "input and target need to have the same number of elements");
  float sum;
  ptrdiff_t size = THCudaTensor_nElement(state, input);
  // The linear reduction below requires densely packed storage.
  input = THCudaTensor_newContiguous(state, input);
  target = THCudaTensor_newContiguous(state, target);
  thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
  thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
  sum = thrust::inner_product(input_data, input_data+size, target_data, (float) 0, thrust::plus<float>(), kl_functor());
  if (sizeAverage)
    sum /= size;
  // Release the temporary contiguous references taken above.
  THCudaTensor_free(state, input);
  THCudaTensor_free(state, target);
  THCudaTensor_set1d(state, output, 0, sum);
}
struct kl_updateGradInput_functor
{
  const float norm;

  kl_updateGradInput_functor(float norm_)
    : norm(norm_)
  {}

  // Gradient of y * (log(y) - x) w.r.t. x: -y, scaled by `norm`; zero for
  // non-positive targets (consistent with the forward pass).
  __host__ __device__ float operator()(const float& x, const float& y) const
  {
    if (y > 0)
      return -norm * y;
    return 0;
  }
};
// Backward pass of the KL-divergence criterion. Writes dL/dinput into
// gradInput: per element, grad = norm * (-target) when target > 0, else 0,
// where norm = 1/numElements when sizeAverage, else 1.
void THNN_CudaDistKLDivCriterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage)
{
  // All three tensors must reside on the same GPU.
  THCUNN_assertSameGPU(state, 3, input, target, gradInput);
  THArgCheck(THCudaTensor_nElement(state, input) == THCudaTensor_nElement(state, target), 2,
             "input and target need to have the same number of elements");
  ptrdiff_t size = THCudaTensor_nElement(state, input);
  // Normalization factor: mean over elements when size-averaging.
  float norm = (sizeAverage ? 1./size : 1.);
  // newContiguous may return a fresh tensor; the _free calls below release it.
  input = THCudaTensor_newContiguous(state, input);
  target = THCudaTensor_newContiguous(state, target);
  THCudaTensor_resizeAs(state, gradInput, input);
  thrust::device_ptr<float> input_data(THCudaTensor_data(state, input));
  thrust::device_ptr<float> target_data(THCudaTensor_data(state, target));
  thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput));
  // Elementwise GPU transform: gradInput[i] = f(input[i], target[i]).
  thrust::transform(input_data, input_data+size, target_data, gradInput_data, kl_updateGradInput_functor(norm));
  THCudaTensor_free(state, input);
  THCudaTensor_free(state, target);
}
|
4b4023d55291e3d3632954bda60fb58834c38f8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_LOG_SOFTMAX_LAYER_INSTANTIATE
#include "lbann/comm_impl.hpp"
#include "lbann/layers/activations/log_softmax.hpp"
#ifdef LBANN_HAS_DNN_LIB
#include "lbann/utils/dnn_lib/softmax.hpp"
#endif // LBANN_HAS_DNN_LIB
namespace lbann {
namespace {
/** @brief Max functor */
/** @brief Binary max functor for reductions.
 *
 *  Returns the larger of its two arguments. The return type is @c T
 *  (previously the global @c DataType alias), so instantiations with a
 *  non-default type (e.g. half precision) no longer round-trip their
 *  values through a different type.
 */
template <class T>
struct max_op {
  __device__ __forceinline__
  T operator()(const T& x1, const T& x2) const {
    return gpu_lib::max(x1, x2);
  }
};
/** @brief Kernel for max reduction on matrix columns
*
* Each CUDA block computes the max over a subset of matrix entries
* and outputs the result. This is repeated multiple times for
* column-wise max reduction.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param values (height x width) matrix
* @param max_values (nblocksx x width) matrix
*/
template <size_t bsize, typename TensorDataType>
__global__ void reduce_max_kernel(size_t height,
                                  size_t width,
                                  const TensorDataType* __restrict__ values,
                                  size_t values_ldim,
                                  TensorDataType* __restrict__ max_values) {

  // Indices
  const size_t tid = threadIdx.x;
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t bidx = blockIdx.x;
  const size_t bidy = blockIdx.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nblocksx = gridDim.x;
  const size_t nblocksy = gridDim.y;

  // Each y-block handles a subset of columns; within a column, threads
  // grid-stride over rows along x.
  for (size_t col = bidy; col < width; col += nblocksy) {

    // Find largest value for each thread
    TensorDataType thread_max_val{-gpu_lib::infinity<TensorDataType>()};
    for (size_t row = gidx; row < height; row += nthreadsx) {
      const auto& val = values[row+col*values_ldim];
      thread_max_val = gpu_lib::max(thread_max_val, val);
    }

    // Find largest value for each block. Reduce in TensorDataType (the
    // kernel's own data type) rather than the global DataType alias, so
    // low-precision instantiations (e.g. half) do not silently convert
    // through a different type during the reduction.
    const TensorDataType block_max_val
      = gpu_lib::block_reduce<bsize,1,1,TensorDataType,max_op<TensorDataType>>(thread_max_val);
    if (tid == 0) {
      max_values[bidx+col*nblocksx] = block_max_val;
    }

  }

}
/** @brief Kernel for matrix column sums
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param sums On input, array of zeros. On output, sum(x) for each
* column.
*/
template <size_t bsize, typename TensorDataType>
__global__ void reduce_sum_kernel(size_t height,
                                  size_t width,
                                  const TensorDataType* __restrict__ values,
                                  size_t values_ldim,
                                  TensorDataType* __restrict__ sums) {

  // Indices
  const size_t tid = threadIdx.x;
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t bidy = blockIdx.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nblocksy = gridDim.y;

  // Each y-block handles a subset of columns; threads grid-stride over rows.
  for (size_t col = bidy; col < width; col += nblocksy) {

    // Per-thread partial sum over this thread's subset of rows.
    TensorDataType thread_sum{0};
    for (size_t row = gidx; row < height; row += nthreadsx) {
      thread_sum += values[row+col*values_ldim];
    }

    // Block-level reduction; thread 0 publishes the block's partial sum.
    // `sums` must be zero-initialized by the caller, since multiple blocks
    // accumulate into the same column entry atomically.
    const TensorDataType block_sum = gpu_lib::block_reduce<bsize,1,1>(thread_sum);
    if (tid == 0) {
      gpu_lib::atomic_add(&sums[col], block_sum);
    }

  }

}
/** @brief Compute sum(exp(x-shift)) for each matrix column
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param shifts max(x) for each column
* @param sums On input, array of zeros. On output,
* sum(exp(x-shift)) for each column.
*/
template <size_t bsize, typename TensorDataType>
__global__ void fp_sumexp_kernel(size_t height,
                                 size_t width,
                                 const TensorDataType* __restrict__ input,
                                 size_t input_ldim,
                                 const TensorDataType* __restrict__ shifts,
                                 TensorDataType* __restrict__ sums) {

  // Indices
  const size_t tid = threadIdx.x;
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t bidy = blockIdx.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nblocksy = gridDim.y;

  // Each y-block handles a subset of columns; threads grid-stride over rows.
  for (size_t col = bidy; col < width; col += nblocksy) {
    // Subtracting the column max ("shift") before exponentiating keeps
    // exp() from overflowing.
    const auto& shift = shifts[col];

    // Exponentiate inputs and compute sum for each thread
    TensorDataType thread_sum{0};
    for (size_t row = gidx; row < height; row += nthreadsx) {
      const auto& x = input[row+col*input_ldim];
      thread_sum += gpu_lib::exp(x-shift);
    }

    // Block-level reduction; thread 0 accumulates into the column sum.
    // `sums` must be zero-initialized by the caller.
    const TensorDataType block_sum = gpu_lib::block_reduce<bsize,1,1>(thread_sum);
    if (tid == 0) {
      gpu_lib::atomic_add(&sums[col], block_sum);
    }

  }

}
/** @brief Compute layer output
*
* y = x - shift - log(sum(x-shift))
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param shifts max(x) for each column
* @param sums sum(exp(x-shift)) for each column
*/
template <typename TensorDataType>
__global__ void fp_output_kernel(size_t height,
                                 size_t width,
                                 const TensorDataType* __restrict__ input,
                                 size_t input_ldim,
                                 TensorDataType* __restrict__ output,
                                 size_t output_ldim,
                                 const TensorDataType* __restrict__ shifts,
                                 const TensorDataType* __restrict__ sums) {
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  // 2D grid-stride loops: y covers columns, x covers rows.
  for (size_t col = gidy; col < width; col += nthreadsy) {
    const auto& shift = shifts[col];
    const TensorDataType log_sum_exp = gpu_lib::log(sums[col]);
    for (size_t row = gidx; row < height; row += nthreadsx) {
      const auto& x = input[row+col*input_ldim];
      auto& y = output[row+col*output_ldim];
      // Numerically stable log-softmax: y = x - max - log(sum(exp(x-max))).
      y = x - shift - log_sum_exp;
    }
  }
}
/** @brief Compute gradient w.r.t. input
*
* dx = dy - softmax(x) * sum(dy)
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param sums Column sums of the gradient w.r.t. output
*/
template <typename TensorDataType>
__global__ void bp_kernel(size_t height,
                          size_t width,
                          const TensorDataType* __restrict__ output,
                          size_t output_ldim,
                          const TensorDataType* __restrict__ gradient_wrt_output,
                          size_t gradient_wrt_output_ldim,
                          const TensorDataType* __restrict__ sums,
                          TensorDataType* __restrict__ gradient_wrt_input,
                          size_t gradient_wrt_input_ldim) {
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  // 2D grid-stride loops: y covers columns, x covers rows.
  for (size_t col = gidy; col < width; col += nthreadsy) {
    const size_t& sum = sums[col];
    for (size_t row = gidx; row < height; row += nthreadsx) {
      const auto& y = output[row+col*output_ldim];
      const auto& dy = gradient_wrt_output[row+col*gradient_wrt_output_ldim];
      auto& dx = gradient_wrt_input[row+col*gradient_wrt_input_ldim];
      // dx = dy - softmax(x) * sum(dy); exp(y) recovers softmax(x)
      // because y = log_softmax(x).
      dx = dy - gpu_lib::exp(y) * sum;
    }
  }
}
} // namespace
// Data-parallel forward pass: each process owns full columns, so the whole
// log-softmax is delegated to the DNN library (cuDNN/MIOpen) in LOG mode.
template <typename TensorDataType>
void fp_compute_impl(log_softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) {
  const TensorDataType zero = 0;
  const TensorDataType one = 1;
  const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_activations());
  auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations());
  // y = alpha * log_softmax(x) + beta * y, with alpha = 1 and beta = 0.
  dnn_lib::softmax_forward(one,
                           l.m_tensors_dnn_desc.get_prev_activations(),
                           local_input,
                           zero,
                           l.m_tensors_dnn_desc.get_activations(),
                           local_output,
                           softmax_mode::INSTANCE,
                           softmax_alg::LOG);
}
// Data-parallel backward pass: delegate the log-softmax gradient to the
// DNN library in LOG mode.
template <typename TensorDataType>
void bp_compute_impl(log_softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) {
  using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;
  const TensorDataType zero = 0;
  const TensorDataType one = 1;
  const auto& local_output = dynamic_cast<const GPUMatType&>(l.get_local_activations());
  const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(l.get_local_prev_error_signals());
  auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(l.get_local_error_signals());
  // dx = alpha * d(log_softmax)/dx applied to dy + beta * dx, alpha=1, beta=0.
  dnn_lib::softmax_backward(one,
                            l.m_tensors_dnn_desc.get_activations(),
                            local_output,
                            l.m_tensors_dnn_desc.get_prev_error_signals(),
                            local_gradient_wrt_output,
                            zero,
                            l.m_tensors_dnn_desc.get_error_signals(),
                            local_gradient_wrt_input,
                            softmax_mode::INSTANCE,
                            softmax_alg::LOG);
}
// Model-parallel forward pass. Each process holds only a slice of every
// column, so the DNN library cannot be used; instead the log-softmax is
// computed manually in three passes with inter-process reductions:
//   1. per-column max (for numerical stability), MAX-allreduced
//   2. per-column sum(exp(x - max)), SUM-allreduced via the workspace
//   3. y = x - max - log(sum)
template <typename TensorDataType>
void fp_compute_impl(log_softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) {
  using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;

  // Local matrices
  const auto& local_input = dynamic_cast<const GPUMatType&>(l.get_local_prev_activations());
  auto& local_output = dynamic_cast<GPUMatType&>(l.get_local_activations());
  auto& local_workspace = dynamic_cast<GPUMatType&>(l.m_workspace->Matrix());
  const auto& local_height = local_input.Height();
  const auto& local_width = local_input.Width();

  // GPU objects
  auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input),
                                     gpu::get_sync_info(local_output),
                                     gpu::get_sync_info(local_workspace));
  // The comm templates will not convert the multisync, so cast the multisync
  // and use sync_info for comms.
  El::SyncInfo<El::Device::GPU> const& sync_info = multisync;

  // Find max value in each column
  gpu_lib::thrust::vector<TensorDataType> max_vals;
  if (local_input.IsEmpty()) {
    // No local rows: contribute -inf so the MAX allreduce is unaffected.
    // NOTE(review): fill value uses DataType rather than TensorDataType; the
    // conversion is lossless for infinity, but TensorDataType would be more
    // consistent -- confirm intent.
    max_vals.resize(local_width,
                    -std::numeric_limits<DataType>::infinity());
  }
  else {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    max_vals.resize(grid_dims.x * local_width);
    // Launch GPU Kernel (one partial max per block, per column)
    hydrogen::gpu::LaunchKernel(
      reduce_max_kernel<block_size, TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      local_height, local_width,
      local_input.LockedBuffer(), local_input.LDim(),
      max_vals.data().get());
    // Repeatedly reduce the per-block partial maxima until a single value
    // per column remains.
    while (grid_dims.x > 1) {
      const size_t prev_height = grid_dims.x;
      grid_dims.x = (prev_height + block_size - 1) / block_size;
      gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(max_vals));
      max_vals.resize(grid_dims.x * local_width);
      hydrogen::gpu::LaunchKernel(
        reduce_max_kernel<block_size, TensorDataType>,
        grid_dims, block_dims, 0, multisync,
        prev_height, local_width,
        prev_vals.data().get(), prev_height,
        max_vals.data().get());
    }
  }
  // Global column maxima across the redundant communicator.
  El::mpi::AllReduce(max_vals.data().get(), max_vals.size(),
                     El::mpi::MAX, l.m_workspace->RedundantComm(),
                     sync_info);

  // Compute sum(exp(x-max_val)) for each column
  // (workspace must be zeroed: kernel blocks accumulate atomically)
  El::Zero(*l.m_workspace);
  if (!local_input.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    hydrogen::gpu::LaunchKernel(
      fp_sumexp_kernel<block_size, TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      local_height, local_width,
      local_input.LockedBuffer(), local_input.LDim(),
      max_vals.data().get(),
      local_workspace.Buffer());
  }
  l.get_comm()->allreduce(*l.m_workspace, l.m_workspace->RedundantComm());

  // Compute output
  // Note: y = x - max_val - log(sum(exp(x-max_val)))
  if (!local_output.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    hydrogen::gpu::LaunchKernel(
      fp_output_kernel<TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      local_height, local_width,
      local_input.LockedBuffer(), local_input.LDim(),
      local_output.Buffer(), local_output.LDim(),
      max_vals.data().get(),
      local_workspace.LockedBuffer());
  }

}
// Model-parallel backward pass:
//   dx = dy - exp(y) * sum(dy)
// where sum(dy) is the per-column sum of the output gradient, computed
// locally and SUM-allreduced through the workspace matrix.
template <typename TensorDataType>
void bp_compute_impl(log_softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) {
  using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;

  // Local matrices
  const auto& local_output = dynamic_cast<const GPUMatType&>(l.get_local_activations());
  const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(l.get_local_prev_error_signals());
  auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(l.get_local_error_signals());
  auto& local_workspace = dynamic_cast<GPUMatType&>(l.m_workspace->Matrix());
  const auto& local_height = local_output.Height();
  const auto& local_width = local_output.Width();

  // GPU objects
  auto multisync = El::MakeMultiSync(
    gpu::get_sync_info(local_output),
    gpu::get_sync_info(local_gradient_wrt_output),
    gpu::get_sync_info(local_gradient_wrt_input),
    gpu::get_sync_info(local_workspace));

  // Compute sum of entries in gradient w.r.t. output
  // (workspace must be zeroed: kernel blocks accumulate atomically)
  El::Zero(local_workspace);
  if (!local_gradient_wrt_output.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    hydrogen::gpu::LaunchKernel(
      reduce_sum_kernel<block_size, TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      local_height, local_width,
      local_gradient_wrt_output.LockedBuffer(),
      local_gradient_wrt_output.LDim(),
      local_workspace.Buffer());
  }
  l.get_comm()->allreduce(*l.m_workspace, l.m_workspace->RedundantComm());

  // Compute gradient w.r.t. input
  if (!local_gradient_wrt_input.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    hydrogen::gpu::LaunchKernel(
      bp_kernel<TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      local_height, local_width,
      local_output.LockedBuffer(),
      local_output.LDim(),
      local_gradient_wrt_output.LockedBuffer(),
      local_gradient_wrt_output.LDim(),
      local_workspace.LockedBuffer(),
      local_gradient_wrt_input.Buffer(),
      local_gradient_wrt_input.LDim());
  }

}
// Forward prop entry point: dispatch to the layout-specific implementation
// via overload resolution on the layer's template parameters.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void log_softmax_layer<TensorDataType, Layout, Device>::fp_compute() {
  fp_compute_impl(*this);
}
// Backward prop entry point: dispatch to the layout-specific implementation
// via overload resolution on the layer's template parameters.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void log_softmax_layer<TensorDataType, Layout, Device>::bp_compute() {
  bp_compute_impl(*this);
}
// Template instantiation
#define PROTO(T) \
template class log_softmax_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class log_softmax_layer<T, data_layout::MODEL_PARALLEL, El::Device::GPU>; \
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| 4b4023d55291e3d3632954bda60fb58834c38f8a.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_LOG_SOFTMAX_LAYER_INSTANTIATE
#include "lbann/comm_impl.hpp"
#include "lbann/layers/activations/log_softmax.hpp"
#ifdef LBANN_HAS_DNN_LIB
#include "lbann/utils/dnn_lib/softmax.hpp"
#endif // LBANN_HAS_DNN_LIB
namespace lbann {
namespace {
/** @brief Max functor */
/** @brief Binary max functor for reductions.
 *
 *  Returns the larger of its two arguments. The return type is @c T
 *  (previously the global @c DataType alias), so instantiations with a
 *  non-default type (e.g. half precision) no longer round-trip their
 *  values through a different type.
 */
template <class T>
struct max_op {
  __device__ __forceinline__
  T operator()(const T& x1, const T& x2) const {
    return gpu_lib::max(x1, x2);
  }
};
/** @brief Kernel for max reduction on matrix columns
*
* Each CUDA block computes the max over a subset of matrix entries
* and outputs the result. This is repeated multiple times for
* column-wise max reduction.
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param values (height x width) matrix
* @param max_values (nblocksx x width) matrix
*/
template <size_t bsize, typename TensorDataType>
__global__ void reduce_max_kernel(size_t height,
                                  size_t width,
                                  const TensorDataType* __restrict__ values,
                                  size_t values_ldim,
                                  TensorDataType* __restrict__ max_values) {

  // Indices
  const size_t tid = threadIdx.x;
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t bidx = blockIdx.x;
  const size_t bidy = blockIdx.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nblocksx = gridDim.x;
  const size_t nblocksy = gridDim.y;

  // Each y-block handles a subset of columns; within a column, threads
  // grid-stride over rows along x.
  for (size_t col = bidy; col < width; col += nblocksy) {

    // Find largest value for each thread
    TensorDataType thread_max_val{-gpu_lib::infinity<TensorDataType>()};
    for (size_t row = gidx; row < height; row += nthreadsx) {
      const auto& val = values[row+col*values_ldim];
      thread_max_val = gpu_lib::max(thread_max_val, val);
    }

    // Find largest value for each block. Reduce in TensorDataType (the
    // kernel's own data type) rather than the global DataType alias, so
    // low-precision instantiations (e.g. half) do not silently convert
    // through a different type during the reduction.
    const TensorDataType block_max_val
      = gpu_lib::block_reduce<bsize,1,1,TensorDataType,max_op<TensorDataType>>(thread_max_val);
    if (tid == 0) {
      max_values[bidx+col*nblocksx] = block_max_val;
    }

  }

}
/** @brief Kernel for matrix column sums
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param sums On input, array of zeros. On output, sum(x) for each
* column.
*/
template <size_t bsize, typename TensorDataType>
__global__ void reduce_sum_kernel(size_t height,
                                  size_t width,
                                  const TensorDataType* __restrict__ values,
                                  size_t values_ldim,
                                  TensorDataType* __restrict__ sums) {

  // Indices
  const size_t tid = threadIdx.x;
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t bidy = blockIdx.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nblocksy = gridDim.y;

  // Each y-block handles a subset of columns; threads grid-stride over rows.
  for (size_t col = bidy; col < width; col += nblocksy) {

    // Per-thread partial sum over this thread's subset of rows.
    TensorDataType thread_sum{0};
    for (size_t row = gidx; row < height; row += nthreadsx) {
      thread_sum += values[row+col*values_ldim];
    }

    // Block-level reduction; thread 0 publishes the block's partial sum.
    // `sums` must be zero-initialized by the caller, since multiple blocks
    // accumulate into the same column entry atomically.
    const TensorDataType block_sum = gpu_lib::block_reduce<bsize,1,1>(thread_sum);
    if (tid == 0) {
      gpu_lib::atomic_add(&sums[col], block_sum);
    }

  }

}
/** @brief Compute sum(exp(x-shift)) for each matrix column
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param shifts max(x) for each column
* @param sums On input, array of zeros. On output,
* sum(exp(x-shift)) for each column.
*/
template <size_t bsize, typename TensorDataType>
__global__ void fp_sumexp_kernel(size_t height,
                                 size_t width,
                                 const TensorDataType* __restrict__ input,
                                 size_t input_ldim,
                                 const TensorDataType* __restrict__ shifts,
                                 TensorDataType* __restrict__ sums) {

  // Indices
  const size_t tid = threadIdx.x;
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t bidy = blockIdx.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nblocksy = gridDim.y;

  // Each y-block handles a subset of columns; threads grid-stride over rows.
  for (size_t col = bidy; col < width; col += nblocksy) {
    // Subtracting the column max ("shift") before exponentiating keeps
    // exp() from overflowing.
    const auto& shift = shifts[col];

    // Exponentiate inputs and compute sum for each thread
    TensorDataType thread_sum{0};
    for (size_t row = gidx; row < height; row += nthreadsx) {
      const auto& x = input[row+col*input_ldim];
      thread_sum += gpu_lib::exp(x-shift);
    }

    // Block-level reduction; thread 0 accumulates into the column sum.
    // `sums` must be zero-initialized by the caller.
    const TensorDataType block_sum = gpu_lib::block_reduce<bsize,1,1>(thread_sum);
    if (tid == 0) {
      gpu_lib::atomic_add(&sums[col], block_sum);
    }

  }

}
/** @brief Compute layer output
*
* y = x - shift - log(sum(x-shift))
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param shifts max(x) for each column
* @param sums sum(exp(x-shift)) for each column
*/
template <typename TensorDataType>
__global__ void fp_output_kernel(size_t height,
                                 size_t width,
                                 const TensorDataType* __restrict__ input,
                                 size_t input_ldim,
                                 TensorDataType* __restrict__ output,
                                 size_t output_ldim,
                                 const TensorDataType* __restrict__ shifts,
                                 const TensorDataType* __restrict__ sums) {
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  // 2D grid-stride loops: y covers columns, x covers rows.
  for (size_t col = gidy; col < width; col += nthreadsy) {
    const auto& shift = shifts[col];
    const TensorDataType log_sum_exp = gpu_lib::log(sums[col]);
    for (size_t row = gidx; row < height; row += nthreadsx) {
      const auto& x = input[row+col*input_ldim];
      auto& y = output[row+col*output_ldim];
      // Numerically stable log-softmax: y = x - max - log(sum(exp(x-max))).
      y = x - shift - log_sum_exp;
    }
  }
}
/** @brief Compute gradient w.r.t. input
*
* dx = dy - softmax(x) * sum(dy)
*
* Block dimensions: bsize x 1 x 1
*
* Grid dimension: (height / bsize) x width x 1
*
* @param sums Column sums of the gradient w.r.t. output
*/
template <typename TensorDataType>
__global__ void bp_kernel(size_t height,
                          size_t width,
                          const TensorDataType* __restrict__ output,
                          size_t output_ldim,
                          const TensorDataType* __restrict__ gradient_wrt_output,
                          size_t gradient_wrt_output_ldim,
                          const TensorDataType* __restrict__ sums,
                          TensorDataType* __restrict__ gradient_wrt_input,
                          size_t gradient_wrt_input_ldim) {
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  // 2D grid-stride loops: y covers columns, x covers rows.
  for (size_t col = gidy; col < width; col += nthreadsy) {
    const auto& sum = sums[col];
    for (size_t row = gidx; row < height; row += nthreadsx) {
      const auto& y = output[row+col*output_ldim];
      const auto& dy = gradient_wrt_output[row+col*gradient_wrt_output_ldim];
      auto& dx = gradient_wrt_input[row+col*gradient_wrt_input_ldim];
      // dx = dy - softmax(x) * sum(dy); exp(y) recovers softmax(x)
      // because y = log_softmax(x).
      dx = dy - gpu_lib::exp(y) * sum;
    }
  }
}
} // namespace
// Data-parallel forward pass: each process owns full columns, so the whole
// log-softmax is delegated to the DNN library (cuDNN) in LOG mode.
template <typename TensorDataType>
void fp_compute_impl(log_softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) {
  const TensorDataType zero = 0;
  const TensorDataType one = 1;
  const auto& local_input = dynamic_cast<const El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_prev_activations());
  auto& local_output = dynamic_cast<El::Matrix<TensorDataType, El::Device::GPU>&>(l.get_local_activations());
  // y = alpha * log_softmax(x) + beta * y, with alpha = 1 and beta = 0.
  dnn_lib::softmax_forward(one,
                           l.m_tensors_dnn_desc.get_prev_activations(),
                           local_input,
                           zero,
                           l.m_tensors_dnn_desc.get_activations(),
                           local_output,
                           softmax_mode::INSTANCE,
                           softmax_alg::LOG);
}
// Data-parallel backward pass: delegate the log-softmax gradient to the
// DNN library in LOG mode.
template <typename TensorDataType>
void bp_compute_impl(log_softmax_layer<TensorDataType, data_layout::DATA_PARALLEL, El::Device::GPU>& l) {
  using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;
  const TensorDataType zero = 0;
  const TensorDataType one = 1;
  const auto& local_output = dynamic_cast<const GPUMatType&>(l.get_local_activations());
  const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(l.get_local_prev_error_signals());
  auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(l.get_local_error_signals());
  // dx = alpha * d(log_softmax)/dx applied to dy + beta * dx, alpha=1, beta=0.
  dnn_lib::softmax_backward(one,
                            l.m_tensors_dnn_desc.get_activations(),
                            local_output,
                            l.m_tensors_dnn_desc.get_prev_error_signals(),
                            local_gradient_wrt_output,
                            zero,
                            l.m_tensors_dnn_desc.get_error_signals(),
                            local_gradient_wrt_input,
                            softmax_mode::INSTANCE,
                            softmax_alg::LOG);
}
// Model-parallel forward pass. Each process holds only a slice of every
// column, so the DNN library cannot be used; instead the log-softmax is
// computed manually in three passes with inter-process reductions:
//   1. per-column max (for numerical stability), MAX-allreduced
//   2. per-column sum(exp(x - max)), SUM-allreduced via the workspace
//   3. y = x - max - log(sum)
template <typename TensorDataType>
void fp_compute_impl(log_softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) {
  using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;

  // Local matrices
  const auto& local_input = dynamic_cast<const GPUMatType&>(l.get_local_prev_activations());
  auto& local_output = dynamic_cast<GPUMatType&>(l.get_local_activations());
  auto& local_workspace = dynamic_cast<GPUMatType&>(l.m_workspace->Matrix());
  const auto& local_height = local_input.Height();
  const auto& local_width = local_input.Width();

  // GPU objects
  auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_input),
                                     gpu::get_sync_info(local_output),
                                     gpu::get_sync_info(local_workspace));
  // The comm templates will not convert the multisync, so cast the multisync
  // and use sync_info for comms.
  El::SyncInfo<El::Device::GPU> const& sync_info = multisync;

  // Find max value in each column
  gpu_lib::thrust::vector<TensorDataType> max_vals;
  if (local_input.IsEmpty()) {
    // No local rows: contribute -inf so the MAX allreduce is unaffected.
    // NOTE(review): fill value uses DataType rather than TensorDataType; the
    // conversion is lossless for infinity, but TensorDataType would be more
    // consistent -- confirm intent.
    max_vals.resize(local_width,
                    -std::numeric_limits<DataType>::infinity());
  }
  else {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    max_vals.resize(grid_dims.x * local_width);
    // Launch GPU Kernel (one partial max per block, per column)
    hydrogen::gpu::LaunchKernel(
      reduce_max_kernel<block_size, TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      local_height, local_width,
      local_input.LockedBuffer(), local_input.LDim(),
      max_vals.data().get());
    // Repeatedly reduce the per-block partial maxima until a single value
    // per column remains.
    while (grid_dims.x > 1) {
      const size_t prev_height = grid_dims.x;
      grid_dims.x = (prev_height + block_size - 1) / block_size;
      gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(max_vals));
      max_vals.resize(grid_dims.x * local_width);
      hydrogen::gpu::LaunchKernel(
        reduce_max_kernel<block_size, TensorDataType>,
        grid_dims, block_dims, 0, multisync,
        prev_height, local_width,
        prev_vals.data().get(), prev_height,
        max_vals.data().get());
    }
  }
  // Global column maxima across the redundant communicator.
  El::mpi::AllReduce(max_vals.data().get(), max_vals.size(),
                     El::mpi::MAX, l.m_workspace->RedundantComm(),
                     sync_info);

  // Compute sum(exp(x-max_val)) for each column
  // (workspace must be zeroed: kernel blocks accumulate atomically)
  El::Zero(*l.m_workspace);
  if (!local_input.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    hydrogen::gpu::LaunchKernel(
      fp_sumexp_kernel<block_size, TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      local_height, local_width,
      local_input.LockedBuffer(), local_input.LDim(),
      max_vals.data().get(),
      local_workspace.Buffer());
  }
  l.get_comm()->allreduce(*l.m_workspace, l.m_workspace->RedundantComm());

  // Compute output
  // Note: y = x - max_val - log(sum(exp(x-max_val)))
  if (!local_output.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    hydrogen::gpu::LaunchKernel(
      fp_output_kernel<TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      local_height, local_width,
      local_input.LockedBuffer(), local_input.LDim(),
      local_output.Buffer(), local_output.LDim(),
      max_vals.data().get(),
      local_workspace.LockedBuffer());
  }

}
// Model-parallel backward pass:
//   dx = dy - exp(y) * sum(dy)
// where sum(dy) is the per-column sum of the output gradient, computed
// locally and SUM-allreduced through the workspace matrix.
template <typename TensorDataType>
void bp_compute_impl(log_softmax_layer<TensorDataType, data_layout::MODEL_PARALLEL, El::Device::GPU>& l) {
  using GPUMatType = El::Matrix<TensorDataType, El::Device::GPU>;

  // Local matrices
  const auto& local_output = dynamic_cast<const GPUMatType&>(l.get_local_activations());
  const auto& local_gradient_wrt_output = dynamic_cast<const GPUMatType&>(l.get_local_prev_error_signals());
  auto& local_gradient_wrt_input = dynamic_cast<GPUMatType&>(l.get_local_error_signals());
  auto& local_workspace = dynamic_cast<GPUMatType&>(l.m_workspace->Matrix());
  const auto& local_height = local_output.Height();
  const auto& local_width = local_output.Width();

  // GPU objects
  auto multisync = El::MakeMultiSync(
    gpu::get_sync_info(local_output),
    gpu::get_sync_info(local_gradient_wrt_output),
    gpu::get_sync_info(local_gradient_wrt_input),
    gpu::get_sync_info(local_workspace));

  // Compute sum of entries in gradient w.r.t. output
  // (workspace must be zeroed: kernel blocks accumulate atomically)
  El::Zero(local_workspace);
  if (!local_gradient_wrt_output.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    hydrogen::gpu::LaunchKernel(
      reduce_sum_kernel<block_size, TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      local_height, local_width,
      local_gradient_wrt_output.LockedBuffer(),
      local_gradient_wrt_output.LDim(),
      local_workspace.Buffer());
  }
  l.get_comm()->allreduce(*l.m_workspace, l.m_workspace->RedundantComm());

  // Compute gradient w.r.t. input
  if (!local_gradient_wrt_input.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (local_height + block_size - 1) / block_size;
    grid_dims.y = local_width;
    hydrogen::gpu::LaunchKernel(
      bp_kernel<TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      local_height, local_width,
      local_output.LockedBuffer(),
      local_output.LDim(),
      local_gradient_wrt_output.LockedBuffer(),
      local_gradient_wrt_output.LDim(),
      local_workspace.LockedBuffer(),
      local_gradient_wrt_input.Buffer(),
      local_gradient_wrt_input.LDim());
  }

}
// Forward prop entry point: dispatch to the layout-specific implementation
// via overload resolution on the layer's template parameters.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void log_softmax_layer<TensorDataType, Layout, Device>::fp_compute() {
  fp_compute_impl(*this);
}
// Backward prop entry point: dispatch to the layout-specific implementation
// via overload resolution on the layer's template parameters.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void log_softmax_layer<TensorDataType, Layout, Device>::bp_compute() {
  bp_compute_impl(*this);
}
// Template instantiation
#define PROTO(T) \
template class log_softmax_layer<T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class log_softmax_layer<T, data_layout::MODEL_PARALLEL, El::Device::GPU>; \
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
bb1f90614c16b9a433260f119ebdd2c8d4f99e66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include "Sender.hpp"
#include "../cudaErr.h"
__global__ void kernel(GPU_DATA_TYPE *);
__global__ void empty_kernel();
// No-op kernel. kernel_wrapper() launches this instead of the real kernel so
// that launch/synchronize latency can be measured without doing any work
// (see the commented-out real launch in kernel_wrapper).
__global__ void empty_kernel()
{
}
// Default constructor: no shared-memory segment or device buffer attached
// yet. mem_handle is heap-allocated and owned by this Sender.
Sender::Sender ()
{
    Sender::shmid = -1;
    Sender::ptr = 0;
    // NOTE(review): malloc result is not checked; a failed allocation would
    // crash later in set_GPUIPC_handle's memcpy.
    Sender::mem_handle = (hipIpcMemHandle_t *)malloc(sizeof (hipIpcMemHandle_t));
    Sender::d_data = 0;
}
// Constructor taking an existing shared-memory id/pointer and a device
// buffer to be processed by the kernel.
Sender::Sender (int id, TYPE *shm_ptr, GPU_DATA_TYPE *device_data)
{
    Sender::shmid = id;
    Sender::ptr = shm_ptr;
    Sender::mem_handle = (hipIpcMemHandle_t *)malloc(sizeof (hipIpcMemHandle_t));
    Sender::d_data = device_data;
}
// Releases the IPC-handle buffer that both constructors allocate with
// malloc; previously this destructor was empty, leaking one
// hipIpcMemHandle_t per Sender instance.
// NOTE(review): assumes Sender objects are not copied (the implicit copy
// constructor would alias mem_handle and cause a double free) — confirm
// against callers.
Sender::~Sender ()
{
    free(Sender::mem_handle);
    Sender::mem_handle = 0;
}
// Phase hooks driven by the owning run loop: update -> wait -> process ->
// notify, synchronized with a peer process through the shared-memory flag.

// No per-iteration state to refresh on the sender side.
void Sender::update()
{
    // printf ("sender: starting update()\n");
}
// Spin until the shared-memory flag equals WAIT_VALUE (written by the peer
// process — presumably the receiver; verify against the receiver code).
// The empty asm statement is a compiler barrier only: it forces *ptr to be
// re-read on every iteration but emits no CPU memory fence instruction.
void Sender::wait()
{
    // printf ("sender: starting wait()\n");
    while (*Sender::ptr != WAIT_VALUE)
    {
        asm volatile("" ::: "memory");
    }
}
// Run this iteration's GPU work (currently the empty kernel, via
// kernel_wrapper).
void Sender::process()
{
    // printf ("sender: starting process()\n");
    kernel_wrapper();
    // empty_kernel<<<1,1>>>(); // for latency measurement purpose
}
// Flip the shared flag so the peer's wait() can proceed.
void Sender::notify()
{
    // printf ("sender: starting notify()\n");
    *ptr = !WAIT_VALUE;
}
// Launches empty_kernel (1 block x 1024 threads) in place of the real data
// kernel — latency measurement; see the commented-out launch below. Blocks
// until the kernel finishes and checks for errors via gpuErrchk.
void Sender::kernel_wrapper()
{
    // kernel <<<128, 1024>>> (d_data);
    hipLaunchKernelGGL(( empty_kernel), dim3(1),dim3(1024), 0, 0, );
    gpuErrchk (hipDeviceSynchronize ());
}
// Decrements every element of d_ptr by one. The loop strides by the total
// thread count (blockDim.x * gridDim.x), so any launch configuration covers
// all GPU_SIZE elements.
__global__ void kernel(GPU_DATA_TYPE *d_ptr)
{
    for (int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
         idx < GPU_SIZE;
         idx += blockDim.x * gridDim.x)
    {
        d_ptr[idx] -= 1;
    }
}
// ---- Accessors ----

// Shared-memory segment id.
int Sender::get_SHM_id()
{
    return this->shmid;
}
// Shared-memory pointer used for the busy-wait handshake.
TYPE *Sender::get_SHM_ptr()
{
    return this->ptr;
}
// IPC memory handle describing the exported device allocation.
hipIpcMemHandle_t *Sender::get_GPUIPC_handle()
{
    return this->mem_handle;
}
// Device buffer processed by the kernel.
GPU_DATA_TYPE *Sender::get_d_data()
{
    return this->d_data;
}
// ---- Mutators ----
void Sender::set_SHM_id (int id)
{
    this->shmid = id;
}
void Sender::set_SHM_ptr (TYPE *ptr)
{
    this->ptr = ptr;
}
void Sender::set_GPUIPC_handle (hipIpcMemHandle_t *handle)
{
    // Deep-copy the handle bytes into the member-owned buffer.
    memcpy (this->mem_handle, handle, sizeof (hipIpcMemHandle_t));
}
void Sender::set_d_data (GPU_DATA_TYPE *device_data)
{
    this->d_data = device_data;
}
| bb1f90614c16b9a433260f119ebdd2c8d4f99e66.cu | #include <stdio.h>
#include <unistd.h>
#include <string.h>
#include "Sender.hpp"
#include "../cudaErr.h"
__global__ void kernel(GPU_DATA_TYPE *);
__global__ void empty_kernel();
// No-op kernel. kernel_wrapper() launches this instead of the real kernel so
// that launch/synchronize latency can be measured without doing any work
// (see the commented-out real launch in kernel_wrapper).
__global__ void empty_kernel()
{
}
// Default constructor: no shared-memory segment or device buffer attached
// yet. mem_handle is heap-allocated and owned by this Sender.
Sender::Sender ()
{
    Sender::shmid = -1;
    Sender::ptr = 0;
    // NOTE(review): malloc result is not checked; a failed allocation would
    // crash later in set_GPUIPC_handle's memcpy.
    Sender::mem_handle = (cudaIpcMemHandle_t *)malloc(sizeof (cudaIpcMemHandle_t));
    Sender::d_data = 0;
}
// Constructor taking an existing shared-memory id/pointer and a device
// buffer to be processed by the kernel.
Sender::Sender (int id, TYPE *shm_ptr, GPU_DATA_TYPE *device_data)
{
    Sender::shmid = id;
    Sender::ptr = shm_ptr;
    Sender::mem_handle = (cudaIpcMemHandle_t *)malloc(sizeof (cudaIpcMemHandle_t));
    Sender::d_data = device_data;
}
// Releases the IPC-handle buffer that both constructors allocate with
// malloc; previously this destructor was empty, leaking one
// cudaIpcMemHandle_t per Sender instance.
// NOTE(review): assumes Sender objects are not copied (the implicit copy
// constructor would alias mem_handle and cause a double free) — confirm
// against callers.
Sender::~Sender ()
{
    free(Sender::mem_handle);
    Sender::mem_handle = 0;
}
// Phase hooks driven by the owning run loop: update -> wait -> process ->
// notify, synchronized with a peer process through the shared-memory flag.

// No per-iteration state to refresh on the sender side.
void Sender::update()
{
    // printf ("sender: starting update()\n");
}
// Spin until the shared-memory flag equals WAIT_VALUE (written by the peer
// process — presumably the receiver; verify against the receiver code).
// The empty asm statement is a compiler barrier only: it forces *ptr to be
// re-read on every iteration but emits no CPU memory fence instruction.
void Sender::wait()
{
    // printf ("sender: starting wait()\n");
    while (*Sender::ptr != WAIT_VALUE)
    {
        asm volatile("" ::: "memory");
    }
}
// Run this iteration's GPU work (currently the empty kernel, via
// kernel_wrapper).
void Sender::process()
{
    // printf ("sender: starting process()\n");
    kernel_wrapper();
    // empty_kernel<<<1,1>>>(); // for latency measurement purpose
}
// Flip the shared flag so the peer's wait() can proceed.
void Sender::notify()
{
    // printf ("sender: starting notify()\n");
    *ptr = !WAIT_VALUE;
}
// Launches empty_kernel (1 block x 1024 threads) in place of the real data
// kernel — latency measurement; see the commented-out launch below. Blocks
// until the kernel finishes and checks for errors via gpuErrchk.
void Sender::kernel_wrapper()
{
    // kernel <<<128, 1024>>> (d_data);
    empty_kernel<<<1,1024>>>();
    gpuErrchk (cudaDeviceSynchronize ());
}
// Decrements every element of d_ptr by one. The loop strides by the total
// thread count (blockDim.x * gridDim.x), so any launch configuration covers
// all GPU_SIZE elements.
__global__ void kernel(GPU_DATA_TYPE *d_ptr)
{
    for (int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
         idx < GPU_SIZE;
         idx += blockDim.x * gridDim.x)
    {
        d_ptr[idx] -= 1;
    }
}
// ---- Accessors ----

// Shared-memory segment id.
int Sender::get_SHM_id()
{
    return this->shmid;
}
// Shared-memory pointer used for the busy-wait handshake.
TYPE *Sender::get_SHM_ptr()
{
    return this->ptr;
}
// IPC memory handle describing the exported device allocation.
cudaIpcMemHandle_t *Sender::get_GPUIPC_handle()
{
    return this->mem_handle;
}
// Device buffer processed by the kernel.
GPU_DATA_TYPE *Sender::get_d_data()
{
    return this->d_data;
}
// ---- Mutators ----
void Sender::set_SHM_id (int id)
{
    this->shmid = id;
}
void Sender::set_SHM_ptr (TYPE *ptr)
{
    this->ptr = ptr;
}
void Sender::set_GPUIPC_handle (cudaIpcMemHandle_t *handle)
{
    // Deep-copy the handle bytes into the member-owned buffer.
    memcpy (this->mem_handle, handle, sizeof (cudaIpcMemHandle_t));
}
void Sender::set_d_data (GPU_DATA_TYPE *device_data)
{
    this->d_data = device_data;
}
|
a6650f13f25f64e363ada0f5e516b201e7047b5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/diagonal_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
// Extracts (is_grad == false) or back-propagates (is_grad == true) the
// (offset_, axis1_, axis2_) diagonal. One loop iteration handles one flat
// index `idx` of the full-rank tensor. X_DIM_SIZE is the full tensor's rank,
// OUT_DIM_SIZE the diagonal tensor's rank; x_stride / out_stride are their
// row-major strides (device pointers).
template <typename T, int X_DIM_SIZE, int OUT_DIM_SIZE>
__global__ void Diagonal(const T* data1, T* data2, const int64_t offset_,
                         int64_t axis1_, int64_t axis2_, int64_t* x_stride,
                         int64_t* out_stride, int64_t numel, bool is_grad) {
  CUDA_KERNEL_LOOP(idx, numel) {
    // Decode the flat index into per-dimension coordinates of the full tensor.
    int64_t idx_dim[X_DIM_SIZE] = {0};
    int64_t temp = 0;
    for (size_t i = 0; i < X_DIM_SIZE - 1; i++) {
      idx_dim[i] = (idx - temp) / x_stride[i];
      temp = temp + idx_dim[i] * x_stride[i];
    }
    idx_dim[X_DIM_SIZE - 1] = idx - temp;
    int64_t axis1_dim = idx_dim[axis1_];
    int64_t axis2_dim = idx_dim[axis2_];
    // Output coordinates: every non-diagonal axis in order; the diagonal
    // index is appended last (at out_dim[temp_pos]).
    int64_t out_dim[OUT_DIM_SIZE] = {0};
    int temp_pos = 0;
    for (int i = 0; i < X_DIM_SIZE; i++) {
      if (i != axis1_ && i != axis2_) {
        out_dim[temp_pos] = idx_dim[i];
        temp_pos++;
      }
    }
    // flag is set when this element lies on the requested diagonal, i.e.
    // axis2_dim == axis1_dim + offset_.
    bool flag = false;
    if (offset_ == 0 && axis1_dim == axis2_dim) {
      out_dim[temp_pos] = axis1_dim;
      flag = true;
    } else if (offset_ > 0 && (axis1_dim + offset_) == axis2_dim) {
      out_dim[temp_pos] = axis1_dim;
      flag = true;
    } else if (offset_ < 0 && (axis1_dim + offset_) == axis2_dim) {
      out_dim[temp_pos] = axis2_dim;
      flag = true;
    }
    if (!is_grad) {
      // Forward: gather the diagonal element into the smaller output tensor.
      if (flag) {
        int64_t idx_output = 0;
        for (size_t i = 0; i < OUT_DIM_SIZE - 1; i++) {
          idx_output = idx_output + out_dim[i] * out_stride[i];
        }
        idx_output = idx_output + out_dim[OUT_DIM_SIZE - 1];
        data2[idx_output] = data1[idx];
      }
    } else {
      // Backward: scatter the output gradient back onto diagonal positions
      // of the input gradient and zero every off-diagonal element.
      if (flag) {
        int64_t idx_output = 0;
        for (size_t i = 0; i < OUT_DIM_SIZE - 1; i++) {
          idx_output = idx_output + out_dim[i] * out_stride[i];
        }
        idx_output = idx_output + out_dim[OUT_DIM_SIZE - 1];
        data2[idx] = data1[idx_output];
      } else {
        data2[idx] = static_cast<T>(0);
      }
    }
  }
}
// Forward GPU kernel for the `diagonal` op: extracts the
// (offset, axis1, axis2) diagonal of Input into Out. Tensor strides are
// staged into device memory via TensorFromVector so the Diagonal kernel can
// decode flat indices; a switch on the input rank dispatches the
// rank-templated kernel (ranks 2..9 supported).
template <typename T>
class DiagonalCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* input = context.Input<framework::Tensor>("Input");
    const auto* input_data = input->data<T>();
    auto input_dim = input->dims().Get();
    auto input_dim_size = input->dims().size();
    // Row-major strides of the input, copied to the device.
    std::vector<int64_t> res_in = vectorize(framework::stride(input->dims()));
    paddle::framework::Tensor input_stride_tensor;
    framework::TensorFromVector<int64_t>(res_in, context.device_context(),
                                         &input_stride_tensor);
    int64_t* input_stride = input_stride_tensor.data<int64_t>();
    auto* output = context.Output<framework::Tensor>("Out");
    auto* output_data = output->mutable_data<T>(context.GetPlace());
    auto output_dim = output->dims().Get();
    auto output_dim_size = output->dims().size();
    // Row-major strides of the output, copied to the device.
    std::vector<int64_t> res_out = vectorize(framework::stride(output->dims()));
    paddle::framework::Tensor output_stride_tensor;
    framework::TensorFromVector<int64_t>(res_out, context.device_context(),
                                         &output_stride_tensor);
    int64_t* output_stride = output_stride_tensor.data<int64_t>();
    const int64_t offset_ = context.Attr<int>("offset");
    const int64_t axis1 = context.Attr<int>("axis1");
    // Negative axes count from the end, numpy-style.
    int64_t axis1_ = axis1 < 0 ? input_dim_size + axis1 : axis1;
    const int64_t axis2 = context.Attr<int>("axis2");
    int64_t axis2_ = axis2 < 0 ? input_dim_size + axis2 : axis2;
    int64_t numel = input->numel();
    // Launch sized so every input element is covered (ceil-div).
    int threads = PADDLE_CUDA_NUM_THREADS;
    int blocks = (numel + threads - 1) / threads;
    // Dispatch on tensor rank: Diagonal<T, input rank, output rank>.
    switch (input_dim_size) {
      case 2:
        hipLaunchKernelGGL(( Diagonal<T, 2, 1>), dim3(blocks), dim3(threads), 0, 0, input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 3:
        hipLaunchKernelGGL(( Diagonal<T, 3, 2>), dim3(blocks), dim3(threads), 0, 0, input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 4:
        hipLaunchKernelGGL(( Diagonal<T, 4, 3>), dim3(blocks), dim3(threads), 0, 0, input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 5:
        hipLaunchKernelGGL(( Diagonal<T, 5, 4>), dim3(blocks), dim3(threads), 0, 0, input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 6:
        hipLaunchKernelGGL(( Diagonal<T, 6, 5>), dim3(blocks), dim3(threads), 0, 0, input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 7:
        hipLaunchKernelGGL(( Diagonal<T, 7, 6>), dim3(blocks), dim3(threads), 0, 0, input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 8:
        hipLaunchKernelGGL(( Diagonal<T, 8, 7>), dim3(blocks), dim3(threads), 0, 0, input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 9:
        hipLaunchKernelGGL(( Diagonal<T, 9, 8>), dim3(blocks), dim3(threads), 0, 0, input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      default:
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The rank of input should be less than 10, but received %d.",
            input_dim_size));
    }
  }
};
// Backward GPU kernel for the `diagonal` op: scatters the gradient w.r.t.
// the diagonal output (Out@GRAD) back onto diagonal positions of the input
// gradient (Input@GRAD), zero-filling every off-diagonal element. Dispatch
// mirrors the forward kernel: a switch on the input-gradient rank (2..9)
// selects the rank-templated Diagonal kernel with is_grad = true.
template <typename T>
class DiagonalGradCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const auto* dout =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    const auto* dout_data = dout->data<T>();
    auto dout_dim = dout->dims().Get();
    auto dout_dim_size = dout->dims().size();
    // Row-major strides of the output gradient, copied to the device.
    std::vector<int64_t> res_dout = vectorize(framework::stride(dout->dims()));
    paddle::framework::Tensor dout_stride_tensor;
    framework::TensorFromVector<int64_t>(res_dout, context.device_context(),
                                         &dout_stride_tensor);
    int64_t* dout_stride = dout_stride_tensor.data<int64_t>();
    auto* dx =
        context.Output<framework::Tensor>(framework::GradVarName("Input"));
    auto* dx_data = dx->mutable_data<T>(context.GetPlace());
    auto dx_dim = dx->dims().Get();
    auto dx_dim_size = dx->dims().size();
    // Row-major strides of the input gradient, copied to the device.
    std::vector<int64_t> res_dx = vectorize(framework::stride(dx->dims()));
    paddle::framework::Tensor dx_stride_tensor;
    framework::TensorFromVector<int64_t>(res_dx, context.device_context(),
                                         &dx_stride_tensor);
    int64_t* dx_stride = dx_stride_tensor.data<int64_t>();
    const int64_t offset_ = context.Attr<int>("offset");
    const int64_t axis1 = context.Attr<int>("axis1");
    // Negative axes count from the end, numpy-style.
    int64_t axis1_ = axis1 < 0 ? dx_dim_size + axis1 : axis1;
    const int64_t axis2 = context.Attr<int>("axis2");
    int64_t axis2_ = axis2 < 0 ? dx_dim_size + axis2 : axis2;
    int64_t numel = dx->numel();
    // Launch sized so every input-gradient element is covered (ceil-div).
    int threads = PADDLE_CUDA_NUM_THREADS;
    int blocks = (numel + threads - 1) / threads;
    switch (dx_dim_size) {
      case 2:
        hipLaunchKernelGGL(( Diagonal<T, 2, 1>), dim3(blocks), dim3(threads), 0, 0, dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 3:
        hipLaunchKernelGGL(( Diagonal<T, 3, 2>), dim3(blocks), dim3(threads), 0, 0, dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 4:
        hipLaunchKernelGGL(( Diagonal<T, 4, 3>), dim3(blocks), dim3(threads), 0, 0, dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 5:
        hipLaunchKernelGGL(( Diagonal<T, 5, 4>), dim3(blocks), dim3(threads), 0, 0, dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 6:
        hipLaunchKernelGGL(( Diagonal<T, 6, 5>), dim3(blocks), dim3(threads), 0, 0, dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 7:
        hipLaunchKernelGGL(( Diagonal<T, 7, 6>), dim3(blocks), dim3(threads), 0, 0, dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 8:
        hipLaunchKernelGGL(( Diagonal<T, 8, 7>), dim3(blocks), dim3(threads), 0, 0, dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 9:
        hipLaunchKernelGGL(( Diagonal<T, 9, 8>), dim3(blocks), dim3(threads), 0, 0, dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      default:
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The rank of output(input@Grad) should be less than 10, but "
            "received %d.",
            dx_dim_size));
    }
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(diagonal, ops::DiagonalCUDAKernel<int>,
ops::DiagonalCUDAKernel<int64_t>,
ops::DiagonalCUDAKernel<float>,
ops::DiagonalCUDAKernel<double>,
ops::DiagonalCUDAKernel<plat::float16>,
ops::DiagonalCUDAKernel<bool>);
REGISTER_OP_CUDA_KERNEL(diagonal_grad, ops::DiagonalGradCUDAKernel<int>,
ops::DiagonalGradCUDAKernel<int64_t>,
ops::DiagonalGradCUDAKernel<float>,
ops::DiagonalGradCUDAKernel<double>,
ops::DiagonalGradCUDAKernel<plat::float16>);
| a6650f13f25f64e363ada0f5e516b201e7047b5e.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/diagonal_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
// Extracts (is_grad == false) or back-propagates (is_grad == true) the
// (offset_, axis1_, axis2_) diagonal. One loop iteration handles one flat
// index `idx` of the full-rank tensor. X_DIM_SIZE is the full tensor's rank,
// OUT_DIM_SIZE the diagonal tensor's rank; x_stride / out_stride are their
// row-major strides (device pointers).
template <typename T, int X_DIM_SIZE, int OUT_DIM_SIZE>
__global__ void Diagonal(const T* data1, T* data2, const int64_t offset_,
                         int64_t axis1_, int64_t axis2_, int64_t* x_stride,
                         int64_t* out_stride, int64_t numel, bool is_grad) {
  CUDA_KERNEL_LOOP(idx, numel) {
    // Decode the flat index into per-dimension coordinates of the full tensor.
    int64_t idx_dim[X_DIM_SIZE] = {0};
    int64_t temp = 0;
    for (size_t i = 0; i < X_DIM_SIZE - 1; i++) {
      idx_dim[i] = (idx - temp) / x_stride[i];
      temp = temp + idx_dim[i] * x_stride[i];
    }
    idx_dim[X_DIM_SIZE - 1] = idx - temp;
    int64_t axis1_dim = idx_dim[axis1_];
    int64_t axis2_dim = idx_dim[axis2_];
    // Output coordinates: every non-diagonal axis in order; the diagonal
    // index is appended last (at out_dim[temp_pos]).
    int64_t out_dim[OUT_DIM_SIZE] = {0};
    int temp_pos = 0;
    for (int i = 0; i < X_DIM_SIZE; i++) {
      if (i != axis1_ && i != axis2_) {
        out_dim[temp_pos] = idx_dim[i];
        temp_pos++;
      }
    }
    // flag is set when this element lies on the requested diagonal, i.e.
    // axis2_dim == axis1_dim + offset_.
    bool flag = false;
    if (offset_ == 0 && axis1_dim == axis2_dim) {
      out_dim[temp_pos] = axis1_dim;
      flag = true;
    } else if (offset_ > 0 && (axis1_dim + offset_) == axis2_dim) {
      out_dim[temp_pos] = axis1_dim;
      flag = true;
    } else if (offset_ < 0 && (axis1_dim + offset_) == axis2_dim) {
      out_dim[temp_pos] = axis2_dim;
      flag = true;
    }
    if (!is_grad) {
      // Forward: gather the diagonal element into the smaller output tensor.
      if (flag) {
        int64_t idx_output = 0;
        for (size_t i = 0; i < OUT_DIM_SIZE - 1; i++) {
          idx_output = idx_output + out_dim[i] * out_stride[i];
        }
        idx_output = idx_output + out_dim[OUT_DIM_SIZE - 1];
        data2[idx_output] = data1[idx];
      }
    } else {
      // Backward: scatter the output gradient back onto diagonal positions
      // of the input gradient and zero every off-diagonal element.
      if (flag) {
        int64_t idx_output = 0;
        for (size_t i = 0; i < OUT_DIM_SIZE - 1; i++) {
          idx_output = idx_output + out_dim[i] * out_stride[i];
        }
        idx_output = idx_output + out_dim[OUT_DIM_SIZE - 1];
        data2[idx] = data1[idx_output];
      } else {
        data2[idx] = static_cast<T>(0);
      }
    }
  }
}
// Forward GPU kernel for the `diagonal` op: extracts the
// (offset, axis1, axis2) diagonal of Input into Out. Tensor strides are
// staged into device memory via TensorFromVector so the Diagonal kernel can
// decode flat indices; a switch on the input rank dispatches the
// rank-templated kernel (ranks 2..9 supported).
template <typename T>
class DiagonalCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* input = context.Input<framework::Tensor>("Input");
    const auto* input_data = input->data<T>();
    auto input_dim = input->dims().Get();
    auto input_dim_size = input->dims().size();
    // Row-major strides of the input, copied to the device.
    std::vector<int64_t> res_in = vectorize(framework::stride(input->dims()));
    paddle::framework::Tensor input_stride_tensor;
    framework::TensorFromVector<int64_t>(res_in, context.device_context(),
                                         &input_stride_tensor);
    int64_t* input_stride = input_stride_tensor.data<int64_t>();
    auto* output = context.Output<framework::Tensor>("Out");
    auto* output_data = output->mutable_data<T>(context.GetPlace());
    auto output_dim = output->dims().Get();
    auto output_dim_size = output->dims().size();
    // Row-major strides of the output, copied to the device.
    std::vector<int64_t> res_out = vectorize(framework::stride(output->dims()));
    paddle::framework::Tensor output_stride_tensor;
    framework::TensorFromVector<int64_t>(res_out, context.device_context(),
                                         &output_stride_tensor);
    int64_t* output_stride = output_stride_tensor.data<int64_t>();
    const int64_t offset_ = context.Attr<int>("offset");
    const int64_t axis1 = context.Attr<int>("axis1");
    // Negative axes count from the end, numpy-style.
    int64_t axis1_ = axis1 < 0 ? input_dim_size + axis1 : axis1;
    const int64_t axis2 = context.Attr<int>("axis2");
    int64_t axis2_ = axis2 < 0 ? input_dim_size + axis2 : axis2;
    int64_t numel = input->numel();
    // Launch sized so every input element is covered (ceil-div).
    int threads = PADDLE_CUDA_NUM_THREADS;
    int blocks = (numel + threads - 1) / threads;
    // Dispatch on tensor rank: Diagonal<T, input rank, output rank>.
    switch (input_dim_size) {
      case 2:
        Diagonal<T, 2, 1><<<blocks, threads>>>(input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 3:
        Diagonal<T, 3, 2><<<blocks, threads>>>(input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 4:
        Diagonal<T, 4, 3><<<blocks, threads>>>(input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 5:
        Diagonal<T, 5, 4><<<blocks, threads>>>(input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 6:
        Diagonal<T, 6, 5><<<blocks, threads>>>(input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 7:
        Diagonal<T, 7, 6><<<blocks, threads>>>(input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 8:
        Diagonal<T, 8, 7><<<blocks, threads>>>(input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      case 9:
        Diagonal<T, 9, 8><<<blocks, threads>>>(input_data, output_data, offset_,
                                               axis1_, axis2_, input_stride,
                                               output_stride, numel, false);
        break;
      default:
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The rank of input should be less than 10, but received %d.",
            input_dim_size));
    }
  }
};
// Backward GPU kernel for the `diagonal` op: scatters the gradient w.r.t.
// the diagonal output (Out@GRAD) back onto diagonal positions of the input
// gradient (Input@GRAD), zero-filling every off-diagonal element. Dispatch
// mirrors the forward kernel: a switch on the input-gradient rank (2..9)
// selects the rank-templated Diagonal kernel with is_grad = true.
template <typename T>
class DiagonalGradCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const auto* dout =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    const auto* dout_data = dout->data<T>();
    auto dout_dim = dout->dims().Get();
    auto dout_dim_size = dout->dims().size();
    // Row-major strides of the output gradient, copied to the device.
    std::vector<int64_t> res_dout = vectorize(framework::stride(dout->dims()));
    paddle::framework::Tensor dout_stride_tensor;
    framework::TensorFromVector<int64_t>(res_dout, context.device_context(),
                                         &dout_stride_tensor);
    int64_t* dout_stride = dout_stride_tensor.data<int64_t>();
    auto* dx =
        context.Output<framework::Tensor>(framework::GradVarName("Input"));
    auto* dx_data = dx->mutable_data<T>(context.GetPlace());
    auto dx_dim = dx->dims().Get();
    auto dx_dim_size = dx->dims().size();
    // Row-major strides of the input gradient, copied to the device.
    std::vector<int64_t> res_dx = vectorize(framework::stride(dx->dims()));
    paddle::framework::Tensor dx_stride_tensor;
    framework::TensorFromVector<int64_t>(res_dx, context.device_context(),
                                         &dx_stride_tensor);
    int64_t* dx_stride = dx_stride_tensor.data<int64_t>();
    const int64_t offset_ = context.Attr<int>("offset");
    const int64_t axis1 = context.Attr<int>("axis1");
    // Negative axes count from the end, numpy-style.
    int64_t axis1_ = axis1 < 0 ? dx_dim_size + axis1 : axis1;
    const int64_t axis2 = context.Attr<int>("axis2");
    int64_t axis2_ = axis2 < 0 ? dx_dim_size + axis2 : axis2;
    int64_t numel = dx->numel();
    // Launch sized so every input-gradient element is covered (ceil-div).
    int threads = PADDLE_CUDA_NUM_THREADS;
    int blocks = (numel + threads - 1) / threads;
    switch (dx_dim_size) {
      case 2:
        Diagonal<T, 2, 1><<<blocks, threads>>>(dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 3:
        Diagonal<T, 3, 2><<<blocks, threads>>>(dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 4:
        Diagonal<T, 4, 3><<<blocks, threads>>>(dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 5:
        Diagonal<T, 5, 4><<<blocks, threads>>>(dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 6:
        Diagonal<T, 6, 5><<<blocks, threads>>>(dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 7:
        Diagonal<T, 7, 6><<<blocks, threads>>>(dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 8:
        Diagonal<T, 8, 7><<<blocks, threads>>>(dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      case 9:
        Diagonal<T, 9, 8><<<blocks, threads>>>(dout_data, dx_data, offset_,
                                               axis1_, axis2_, dx_stride,
                                               dout_stride, numel, true);
        break;
      default:
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The rank of output(input@Grad) should be less than 10, but "
            "received %d.",
            dx_dim_size));
    }
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(diagonal, ops::DiagonalCUDAKernel<int>,
ops::DiagonalCUDAKernel<int64_t>,
ops::DiagonalCUDAKernel<float>,
ops::DiagonalCUDAKernel<double>,
ops::DiagonalCUDAKernel<plat::float16>,
ops::DiagonalCUDAKernel<bool>);
REGISTER_OP_CUDA_KERNEL(diagonal_grad, ops::DiagonalGradCUDAKernel<int>,
ops::DiagonalGradCUDAKernel<int64_t>,
ops::DiagonalGradCUDAKernel<float>,
ops::DiagonalGradCUDAKernel<double>,
ops::DiagonalGradCUDAKernel<plat::float16>);
|
d08da9484ca60c8db131beab737155ed22b0f554.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it acheives this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o pwd_crack pwd_crack.cu
To Run:
./pwd_crack > results.txt
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise,it returns 0.
*****************************************************************************/
/****************************************************************************
 Compares the attempt against each of the four stored plain text passwords.
 Any full match is reported with printf. Returns 1 when at least one
 password matched, otherwise 0. (Previously only a match on the fourth
 password returned 1, contradicting the documented contract; matches on the
 first three were printed but reported 0. The lone caller ignores the return
 value for its empty branches, so tightening this is safe here.)
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
	char password1[] = "CV78";
	char password2[] = "ES81";
	char password3[] = "GT34";
	char password4[] = "RD48";
	// Table of candidates so the comparison logic is written once instead of
	// four times.
	const char *candidates[4] = { password1, password2, password3, password4 };
	int found = 0;
	for (int p = 0; p < 4; p++) {
		const char *a = attempt;
		const char *b = candidates[p];
		// Walk both strings while they agree; reaching the terminator on the
		// attempt means every character (and the length) matched.
		while (*a == *b) {
			if (*a == '\0') {
				printf("Found password: %s\n", candidates[p]);
				found = 1;
				break;
			}
			a++;
			b++;
		}
	}
	return found;
}
/****************************************************************************
 The kernel is launched with a 26x26 configuration: blockIdx.x and
 threadIdx.x choose the two leading uppercase letters of each candidate
 password, and the nested loops enumerate the two trailing digits, testing
 every candidate against the hidden passwords.
*****************************************************************************/
__global__ void kernel() {
	char p,s;
	char password[5];
	password[4] = '\0';          // candidates are fixed-length 4-char C strings
	int i = blockIdx.x+65;       // 65 == 'A'; block picks the first letter
	int j = threadIdx.x+65;      // thread picks the second letter
	char firstValue = i;
	char secondValue = j;
	password[0] = firstValue;
	password[1] = secondValue;
	// Sweep both trailing digit positions '0'..'9'.
	for(p='0'; p<='9'; p++){
		for(s='0'; s<='9'; s++){
			password[2] = p;
			password[3] = s;
			// Matches are reported from inside is_a_match via printf.
			if(is_a_match(password)) {
				//printf("Success");
			}
			else {
				//printf("tried: %s\n", password);
			}
		}
	}
}
// Stores the elapsed time in nanoseconds between the two timestamps into
// *difference. Returns 0 when the elapsed time is positive, 1 otherwise.
int time_difference(struct timespec *start,
                    struct timespec *finish,
                    long long int *difference) {
  long long int seconds = finish->tv_sec - start->tv_sec;
  long long int nanos   = finish->tv_nsec - start->tv_nsec;
  if (nanos < 0) {            // borrow one second when nanoseconds underflow
    seconds--;
    nanos += 1000000000;
  }
  *difference = seconds * 1000000000 + nanos;
  return !(*difference > 0);
}
int main() {
	struct timespec start, finish;
	long long int time_elapsed;
	clock_gettime(CLOCK_MONOTONIC, &start);
	// 26x26 launch: block index and thread index select the two leading
	// uppercase letters of each candidate password.
	hipLaunchKernelGGL((
	kernel) , dim3(26),dim3(26), 0, 0, );
	// Wait for the kernel so the timing below covers the whole search.
	hipDeviceSynchronize();
	clock_gettime(CLOCK_MONOTONIC, &finish);
	time_difference(&start, &finish, &time_elapsed);
	printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
	return 0;
}
| d08da9484ca60c8db131beab737155ed22b0f554.cu | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it acheives this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o pwd_crack pwd_crack.cu
To Run:
./pwd_crack > results.txt
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise,it returns 0.
*****************************************************************************/
/****************************************************************************
 Compares the attempt against each of the four stored plain text passwords.
 Any full match is reported with printf. Returns 1 when at least one
 password matched, otherwise 0. (Previously only a match on the fourth
 password returned 1, contradicting the documented contract; matches on the
 first three were printed but reported 0. The lone caller ignores the return
 value for its empty branches, so tightening this is safe here.)
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
	char password1[] = "CV78";
	char password2[] = "ES81";
	char password3[] = "GT34";
	char password4[] = "RD48";
	// Table of candidates so the comparison logic is written once instead of
	// four times.
	const char *candidates[4] = { password1, password2, password3, password4 };
	int found = 0;
	for (int p = 0; p < 4; p++) {
		const char *a = attempt;
		const char *b = candidates[p];
		// Walk both strings while they agree; reaching the terminator on the
		// attempt means every character (and the length) matched.
		while (*a == *b) {
			if (*a == '\0') {
				printf("Found password: %s\n", candidates[p]);
				found = 1;
				break;
			}
			a++;
			b++;
		}
	}
	return found;
}
/****************************************************************************
 The kernel is launched with a 26x26 configuration: blockIdx.x and
 threadIdx.x choose the two leading uppercase letters of each candidate
 password, and the nested loops enumerate the two trailing digits, testing
 every candidate against the hidden passwords.
*****************************************************************************/
__global__ void kernel() {
	char p,s;
	char password[5];
	password[4] = '\0';          // candidates are fixed-length 4-char C strings
	int i = blockIdx.x+65;       // 65 == 'A'; block picks the first letter
	int j = threadIdx.x+65;      // thread picks the second letter
	char firstValue = i;
	char secondValue = j;
	password[0] = firstValue;
	password[1] = secondValue;
	// Sweep both trailing digit positions '0'..'9'.
	for(p='0'; p<='9'; p++){
		for(s='0'; s<='9'; s++){
			password[2] = p;
			password[3] = s;
			// Matches are reported from inside is_a_match via printf.
			if(is_a_match(password)) {
				//printf("Success");
			}
			else {
				//printf("tried: %s\n", password);
			}
		}
	}
}
// Stores the elapsed time in nanoseconds between the two timestamps into
// *difference. Returns 0 when the elapsed time is positive, 1 otherwise.
int time_difference(struct timespec *start,
                    struct timespec *finish,
                    long long int *difference) {
  long long int seconds = finish->tv_sec - start->tv_sec;
  long long int nanos   = finish->tv_nsec - start->tv_nsec;
  if (nanos < 0) {            // borrow one second when nanoseconds underflow
    seconds--;
    nanos += 1000000000;
  }
  *difference = seconds * 1000000000 + nanos;
  return !(*difference > 0);
}
int main() {
	struct timespec start, finish;
	long long int time_elapsed;
	clock_gettime(CLOCK_MONOTONIC, &start);
	// 26x26 launch: block index and thread index select the two leading
	// uppercase letters of each candidate password.
	kernel <<<26,26>>>();
	// cudaThreadSynchronize() is deprecated (removed in recent CUDA
	// toolkits); cudaDeviceSynchronize() is the documented drop-in
	// replacement and keeps the timing covering the whole search.
	cudaDeviceSynchronize();
	clock_gettime(CLOCK_MONOTONIC, &finish);
	time_difference(&start, &finish, &time_elapsed);
	printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
	return 0;
}
|
32a91cc93d7271b9ec5d0a6e4dc7721d44214286.hip | // !!! This is a file automatically generated by hipify!!!
//===----------------------------------------------------------------------===//
//
// KernelGen -- A prototype of LLVM-based auto-parallelizing Fortran/C
// compiler for NVIDIA GPUs, targeting numerical modeling code.
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include <hip/hip_runtime.h>
#include <map>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <string.h>
using namespace std;
#ifdef __cplusplus
extern "C" {
#endif
static char* wrapper_funcname = 0;
static long wrapper_lineno = 0;
map<string, int> regcounts;
// Arms the launch wrapper: while enabled, __wrap___pgi_uacc_cuda_launch
// reports the register count of the kernel whose name and line match
// (funcname, lineno). The funcname pointer is stored, not copied — the
// caller must keep it alive until kernelgen_disable_openacc_regcount().
// Always returns 0.
int kernelgen_enable_openacc_regcount(char* funcname, long lineno)
{
	wrapper_funcname = funcname;
	wrapper_lineno = lineno;
	return 0;
}
// Disarms the wrapper; launches pass straight through without reporting.
// Always returns 0.
int kernelgen_disable_openacc_regcount()
{
	wrapper_funcname = 0;
	return 0;
}
// NOTE(review): these records appear to mirror the PGI OpenACC runtime's
// internal layout (field order and padding must match the closed-source
// runtime exactly) — verify against the PGI version in use before changing
// anything.

// One embedded GPU binary image.
struct uaccbinrec_t
{
	int binaryid;
	int fill;          // padding/alignment
	size_t binlen;
	char** binary;     // image bytes; loaded below via hipModuleLoadData
};
// One offloaded function/region.
struct uaccfuncrec_t
{
	int flags;
	int fill;          // padding/alignment
	long int lineno;        // source line of the offloaded region
	char* functionname;     // generated kernel name
	// More args follow.
};
// Per-compilation-unit descriptor passed to __pgi_uacc_cuda_launch.
struct vinfo_t
{
	int magic;
	int flags;
	int pflags;
	int numfunctions;
	int numbinaries;
	int lock;
	void** handle;
	void** funchandle;
	uaccbinrec_t* bin;      // binaries for this unit
	uaccfuncrec_t* func;    // functions for this unit
};
void __real___pgi_uacc_cuda_launch(vinfo_t* vinfo, int funcnum, void* argptr,
long int* sargs, long int async, int dindex);
// Link-time interposer (ld --wrap) around the PGI runtime's kernel launch.
// When armed via kernelgen_enable_openacc_regcount and the launched region
// matches the requested (function name, line), look up — and cache in
// `regcounts` — the kernel's register count via the HIP/CUDA driver API,
// print it to stderr, then forward the launch to the real implementation
// unchanged. Any driver-API failure is fatal (exit(-1)).
void __wrap___pgi_uacc_cuda_launch(vinfo_t* vinfo, int funcnum, void* argptr,
	long int* sargs, long int async, int dindex)
{
	// Fast path: wrapper disarmed -> forward immediately.
	if (__builtin_expect(wrapper_funcname != NULL, 1))
	{
		char* funcname = vinfo->func->functionname;
		long int lineno = vinfo->func->lineno;
		if (!strcmp(wrapper_funcname, funcname) &&
			(wrapper_lineno == lineno))
		{
			map<string, int>::iterator it = regcounts.find(funcname);
			if (it == regcounts.end())
			{
				// First sighting of this kernel: load its module, query the
				// register count, cache it, and unload the module again.
				hipModule_t module;
				hipError_t curesult = hipModuleLoadData(&module, (char*)vinfo->bin->binary);
				if (curesult != hipSuccess)
				{
					fprintf(stderr, "Failed to load module from handle %p\n", vinfo->bin->binary);
					exit(-1);
				}
				hipFunction_t func;
				curesult = hipModuleGetFunction(&func, module, funcname);
				if (curesult != hipSuccess)
				{
					fprintf(stderr, "Failed to load function %s from module handle %p\n",
						funcname, vinfo->bin->binary);
					exit(-1);
				}
				int regcount = -1;
				curesult = hipFuncGetAttribute(&regcount, hipFuncAttributeNumRegs, func);
				if (curesult != hipSuccess)
				{
					fprintf(stderr, "Failed to determine regcount for function %s\n", funcname);
					exit(-1);
				}
				regcounts[funcname] = regcount;
				curesult = hipModuleUnload(module);
				if (curesult != hipSuccess)
				{
					fprintf(stderr, "Failed to unload module from handle %p\n", vinfo->bin->binary);
					exit(-1);
				}
				fprintf(stderr, "%s:%ld regcount = %d\n", wrapper_funcname,
					lineno, regcount);
			}
			else
			{
				// Cached: just report the previously measured value.
				fprintf(stderr, "%s:%ld regcount = %d\n", wrapper_funcname,
					lineno, it->second);
			}
		}
	}
	// Always forward to the real PGI launch routine.
	__real___pgi_uacc_cuda_launch(vinfo, funcnum, argptr, sargs, async, dindex);
}
#ifdef __cplusplus
}
#endif
| 32a91cc93d7271b9ec5d0a6e4dc7721d44214286.cu | //===----------------------------------------------------------------------===//
//
// KernelGen -- A prototype of LLVM-based auto-parallelizing Fortran/C
// compiler for NVIDIA GPUs, targeting numerical modeling code.
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include <cuda.h>
#include <map>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <string.h>
using namespace std;
#ifdef __cplusplus
extern "C" {
#endif
// Target selector set by kernelgen_enable_openacc_regcount(): only launches
// whose function name AND source line match this pair are reported.
static char* wrapper_funcname = 0;
static long wrapper_lineno = 0;
// Cache of register counts already resolved, keyed by kernel function name,
// so each kernel's module is loaded/queried at most once.
map<string, int> regcounts;
// Arm the launch wrapper: after this call, __wrap___pgi_uacc_cuda_launch
// reports the hardware register count for kernels originating at
// funcname:lineno.  Always returns 0.
int kernelgen_enable_openacc_regcount(char* funcname, long lineno)
{
	// The two assignments are independent; a non-NULL wrapper_funcname is
	// what switches reporting on.
	wrapper_lineno = lineno;
	wrapper_funcname = funcname;
	return 0;
}
// Disarm the launch wrapper; subsequent launches are forwarded without any
// register-count reporting.  Always returns 0.
int kernelgen_disable_openacc_regcount()
{
	wrapper_funcname = 0; // NULL selector == reporting off
	return 0;
}
// Mirror of the PGI OpenACC runtime's embedded-binary record.  Layout must
// stay ABI-identical to the runtime's definition; only comments added here.
struct uaccbinrec_t
{
int binaryid;
int fill; // presumably alignment padding -- TODO confirm against PGI headers
size_t binlen; // presumably the image length in bytes -- not read by this file
char** binary; // device binary image; passed to cuModuleLoadData by the launch wrapper
};
// Mirror of the PGI OpenACC runtime's per-function record.  Field order and
// sizes must stay ABI-identical to the runtime's own definition, so only
// comments may be added here.
struct uaccfuncrec_t
{
int flags;
int fill; // presumably alignment padding -- TODO confirm against PGI headers
long int lineno; // source line of the accelerator region (compared by the launch wrapper)
char* functionname; // host function name containing the region (compared by the launch wrapper)
// More args follow.
};
// Mirror of the PGI OpenACC runtime's launch bookkeeping record.  Only the
// `bin` and `func` members are read by the wrapper below; the rest exist
// solely to keep the layout ABI-identical to the runtime's definition.
struct vinfo_t
{
int magic; // presumably a runtime sanity marker -- TODO confirm
int flags;
int pflags;
int numfunctions; // presumably the entry count behind `func` -- TODO confirm
int numbinaries; // presumably the entry count behind `bin` -- TODO confirm
int lock;
void** handle;
void** funchandle;
uaccbinrec_t* bin; // embedded device binary record; bin->binary is fed to cuModuleLoadData
uaccfuncrec_t* func; // per-function record (name + source line)
};
// Resolved via the linker's --wrap mechanism: the real PGI OpenACC launch
// entry point that every intercepted launch is forwarded to.
void __real___pgi_uacc_cuda_launch(vinfo_t* vinfo, int funcnum, void* argptr,
	long int* sargs, long int async, int dindex);

// Interception point for PGI OpenACC kernel launches.
//
// When register counting is armed (wrapper_funcname set by
// kernelgen_enable_openacc_regcount) and this launch originates from exactly
// that function name and source line, the kernel's hardware register count is
// determined by reloading the embedded cubin through the CUDA driver API
// (cuModuleLoadData / cuFuncGetAttribute), cached in `regcounts`, and printed
// to stderr.  The launch itself is always forwarded to the real
// implementation.  Any driver-API failure is fatal (exit(-1)).
void __wrap___pgi_uacc_cuda_launch(vinfo_t* vinfo, int funcnum, void* argptr,
	long int* sargs, long int async, int dindex)
{
	if (__builtin_expect(wrapper_funcname != NULL, 1))
	{
		char* funcname = vinfo->func->functionname;
		long int lineno = vinfo->func->lineno;
		if (!strcmp(wrapper_funcname, funcname) &&
			(wrapper_lineno == lineno))
		{
			// Reuse a previously resolved count if we have one for this kernel.
			map<string, int>::iterator it = regcounts.find(funcname);
			if (it == regcounts.end())
			{
				// Get the register count for the underlying image.
				CUmodule module;
				CUresult curesult = cuModuleLoadData(&module, (char*)vinfo->bin->binary);
				if (curesult != CUDA_SUCCESS)
				{
					fprintf(stderr, "Failed to load module from handle %p\n", vinfo->bin->binary);
					exit(-1);
				}
				CUfunction func;
				curesult = cuModuleGetFunction(&func, module, funcname);
				if (curesult != CUDA_SUCCESS)
				{
					fprintf(stderr, "Failed to load function %s from module handle %p\n",
						funcname, vinfo->bin->binary);
					exit(-1);
				}
				int regcount = -1;
				// BUGFIX: the first argument had been corrupted to the
				// mis-encoded token "®count"; restored to &regcount.
				curesult = cuFuncGetAttribute(&regcount, CU_FUNC_ATTRIBUTE_NUM_REGS, func);
				if (curesult != CUDA_SUCCESS)
				{
					fprintf(stderr, "Failed to determine regcount for function %s\n", funcname);
					exit(-1);
				}
				regcounts[funcname] = regcount;
				curesult = cuModuleUnload(module);
				if (curesult != CUDA_SUCCESS)
				{
					fprintf(stderr, "Failed to unload module from handle %p\n", vinfo->bin->binary);
					exit(-1);
				}
				fprintf(stderr, "%s:%ld regcount = %d\n", wrapper_funcname,
					lineno, regcount);
			}
			else
			{
				fprintf(stderr, "%s:%ld regcount = %d\n", wrapper_funcname,
					lineno, it->second);
			}
		}
	}
	__real___pgi_uacc_cuda_launch(vinfo, funcnum, argptr, sargs, async, dindex);
}
#ifdef __cplusplus
}
#endif
|
8b53c40cb7cd0d24b4f49549a8a28c802e87336d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// In-place reversal of the backward (return) scan lines of the OCT volume:
// odd Y rows (blockIdx.y * 2 + 1) are addressed, and each thread swaps its
// element with an X-mirrored element in the same row.
// NOTE(review): the mirror index uses gridDim.y * 2 where the row layout is
// built from gridDim.x; this is only a correct single swap per pair if the
// launch configuration satisfies gridDim.y == gridDim.x (grid covering half
// the X range) -- confirm against the host-side launch parameters.
__global__ static void ReverseBackScanData(int* OCTData, int SizeX, int SizeY, int SizeZ)
{
// Reverse the backward-scan data in place.
int id = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9
blockIdx.x * gridDim.z * blockDim.x + // X => X * (125 * 2) * (2 * 1024)
blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2)
threadIdx.x;
int changeID = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9
(gridDim.y * 2 - blockIdx.x - 1) * gridDim.z * blockDim.x + // X => (250 - X - 1) * (125 * 2) * (2 * 1024)
blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2)
threadIdx.x;
// Three-step swap of the mirrored pair.
int value = OCTData[id];
OCTData[id] = OCTData[changeID];
OCTData[changeID] = value;
} | 8b53c40cb7cd0d24b4f49549a8a28c802e87336d.cu | #include "includes.h"
// In-place reversal of the backward (return) scan lines of the OCT volume:
// odd Y rows (blockIdx.y * 2 + 1) are addressed, and each thread swaps its
// element with an X-mirrored element in the same row.
// NOTE(review): the mirror index uses gridDim.y * 2 where the row layout is
// built from gridDim.x; this is only a correct single swap per pair if the
// launch configuration satisfies gridDim.y == gridDim.x (grid covering half
// the X range) -- confirm against the host-side launch parameters.
__global__ static void ReverseBackScanData(int* OCTData, int SizeX, int SizeY, int SizeZ)
{
// Reverse the backward-scan data here. (translated from Chinese)
int id = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9
blockIdx.x * gridDim.z * blockDim.x + // X => X * (125 * 2) * (2 * 1024)
blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2)
threadIdx.x;
int changeID = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9
(gridDim.y * 2 - blockIdx.x - 1) * gridDim.z * blockDim.x + // X => (250 - X - 1) * (125 * 2) * (2 * 1024)
blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2)
threadIdx.x;
// Three-step swap of the mirrored pair.
int value = OCTData[id];
OCTData[id] = OCTData[changeID];
OCTData[changeID] = value;
} |
5193e0a586ec75abab6f484b18d342e78ab8658b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#include <string>
// Root of the GPU-Joule power-monitor installation (empty => filesystem root).
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
// Number of chain elements allocated in device global memory.
#define GLOBAL_MEM_ELEMENTS 196608
// Benchmark configuration, populated from argv by main().
int num_blocks;
int num_threads_per_block;
int num_iterations; // per-thread iterations of the unrolled chase loop
int divergence; // number of active lanes per warp in shared_latency
// NOTE(review): the float buffers below are never referenced in the visible
// code -- presumably leftovers from an earlier variant of this benchmark.
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
// Build the pointer-chase chains in device memory.  Launched with a single
// thread (<<<1,1>>> from parametric_measure_shared); only tid 0 does any work.
// For each warp's slice of GLOBAL_MEM_ELEMENTS, ptr_array[i] is pointed at
// array[...] offset by a hard-coded +48 (mod slice size), and then each
// array[i] is overwritten with the address stored in ptr_array[i], so the
// chase in shared_latency walks `array` itself.  The `stride` parameter is
// unused here -- the +48 link distance is hard-coded.
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp); // base of this warp's slice
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
// Copy the link targets into `array` so the chase dereferences array itself.
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
// Pointer-chasing latency kernel (the measured workload).
// Each lane with warp_thread_id < divergence starts at its own link in the
// chain laid out by init_memory and, per loop iteration, performs a fixed,
// HAND-UNROLLED sequence of 192 accumulating loads of the current link plus
// 8 chain advances (tmp_ptr = *tmp_ptr).  The accumulation into f1 keeps the
// loads from being dead-code-eliminated; the fixed instruction mix is the
// quantity being measured, so this body must NOT be re-rolled into a loop.
// Lanes with warp_thread_id >= divergence idle, which is how the benchmark
// controls the number of active threads per warp.
// The final store to duration[tid] makes the result observable.
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
// --- 4 accumulating loads of the current link ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
// advance to the next link in the chain
tmp_ptr = (void**)(*tmp_ptr);
// --- 19 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 19 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 19 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 39 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 19 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 19 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 19 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 35 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
// Print command-line help.  The binary requires five positional arguments
// (main() checks argc == 6): grid size, block size, kernel iteration count,
// number of active threads per warp (divergence), and the chain stride.
// BUGFIX: the old message fused "<iterations>" and "threads active per warp"
// with no separator and omitted the required <divergence>/<stride> arguments.
void usage() {
	std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations> "
	             "<threads active per warp> <stride>" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l2_cache/fadd_l2d_96_4_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
// Entry point: parse the five benchmark parameters and run the pointer-chase
// measurement over GLOBAL_MEM_ELEMENTS chain elements.
int main(int argc, char **argv)
{
	// Exactly five positional arguments are required.
	if (argc != 6) {
		usage();
		exit(1);
	}
	num_blocks = atoi(argv[1]);
	num_threads_per_block = atoi(argv[2]);
	num_iterations = atoi(argv[3]);
	divergence = atoi(argv[4]);
	const int stride = atoi(argv[5]);
	// The literal 10 is the legacy `iterations` parameter, which
	// parametric_measure_shared ignores in favor of the global num_iterations.
	parametric_measure_shared(GLOBAL_MEM_ELEMENTS, 10, stride);
	return 0;
}
| 5193e0a586ec75abab6f484b18d342e78ab8658b.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#include <string>
// Root of the GPU-Joule power-monitor installation (empty => filesystem root).
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
// Number of chain elements allocated in device global memory.
#define GLOBAL_MEM_ELEMENTS 196608
// Benchmark configuration, populated from argv by main().
int num_blocks;
int num_threads_per_block;
int num_iterations; // per-thread iterations of the unrolled chase loop
int divergence; // number of active lanes per warp in shared_latency
// NOTE(review): the float buffers below are never referenced in the visible
// code -- presumably leftovers from an earlier variant of this benchmark.
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
// Build the pointer-chase chains in device memory.  Launched with a single
// thread by the host driver; only tid 0 does any work.
// For each warp's slice of GLOBAL_MEM_ELEMENTS, ptr_array[i] is pointed at
// array[...] offset by a hard-coded +48 (mod slice size), and then each
// array[i] is overwritten with the address stored in ptr_array[i], so the
// chase in shared_latency walks `array` itself.  The `stride` parameter is
// unused here -- the +48 link distance is hard-coded.
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp); // base of this warp's slice
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
// Copy the link targets into `array` so the chase dereferences array itself.
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
// Pointer-chasing latency kernel (the measured workload).
// Each lane with warp_thread_id < divergence starts at its own link in the
// chain laid out by init_memory and, per loop iteration, performs a fixed,
// HAND-UNROLLED sequence of 192 accumulating loads of the current link plus
// 8 chain advances (tmp_ptr = *tmp_ptr).  The accumulation into f1 keeps the
// loads from being dead-code-eliminated; the fixed instruction mix is the
// quantity being measured, so this body must NOT be re-rolled into a loop.
// Lanes with warp_thread_id >= divergence idle, which is how the benchmark
// controls the number of active threads per warp.
// The final store to duration[tid] makes the result observable.
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
// --- 4 accumulating loads of the current link ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
// advance to the next link in the chain
tmp_ptr = (void**)(*tmp_ptr);
// --- 19 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 19 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 19 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 39 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 19 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 19 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 19 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// --- 35 accumulating loads ---
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
// Print command-line usage.
// main() requires exactly five arguments (argc == 6): num_blocks,
// num_threads_per_block, iterations, divergence (threads active per warp)
// and stride. The previous message accidentally fused two adjacent string
// literals and omitted the last two required arguments.
void usage() {
    std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations> <divergence (threads active per warp)> <stride>" << std::endl;
}
// Build an N-element pointer-chase chain on the host (element i points to
// element (i+1+stride) mod N), upload it, run the shared_latency kernel while
// an external power monitor samples the GPU, and print the elapsed kernel
// time in milliseconds.
//
// Uses the file-level globals num_blocks, num_threads_per_block,
// num_iterations and divergence for the launch; the `iterations` parameter is
// unused (kept for interface compatibility — the kernel reads the global
// num_iterations instead).
//
// Fixes vs. the previous revision:
//  * deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize()
//  * CUDA events are now destroyed (they were leaked)
//  * the host `latency` buffer is zeroed before being copied to the device
//    (it was malloc'd and copied while uninitialized)
void parametric_measure_shared(int N, int iterations, int stride) {
    cudaProfilerStop();
    int i;
    unsigned long long int * h_a;
    unsigned long long int * d_a;
    unsigned long long ** h_ptr_a;
    unsigned long long ** d_ptr_a;
    unsigned long long * duration;
    unsigned long long * latency;
    cudaError_t error_id;

    /* allocate arrays on CPU */
    h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
    h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
    latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
    /* zero per-thread latency slots so the initial H2D copy below does not
       transfer uninitialized memory */
    for (i = 0; i < num_threads_per_block * num_blocks; i++) {
        latency[i] = 0;
    }

    /* initialize the pointer chase: h_a[i] holds the address of element
       (i + 1 + stride) mod N */
    for (i = 0; i < N; i++) {
        h_ptr_a[i] = (unsigned long long *)&h_a[i];
    }
    for (i = 0; i < N; i++) {
        h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
    }

    /* allocate arrays on GPU */
    cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
    cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
    cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
    cudaDeviceSynchronize ();
    error_id = cudaGetLastError();
    if (error_id != cudaSuccess) {
        printf("Error 1 is %s\n", cudaGetErrorString(error_id));
    }

    /* copy array elements from CPU to GPU */
    cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
    cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize ();
    error_id = cudaGetLastError();
    if (error_id != cudaSuccess) {
        printf("Error 2 is %s\n", cudaGetErrorString(error_id));
    }

    /* let the device rebuild the chase chain in device address space */
    init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
    cudaDeviceSynchronize();

    /* time the latency kernel while the external power monitor samples */
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l2_cache/fadd_l2d_96_4_64p_asm_power.txt &";
    std::system(cmd.c_str());
    std::system("sleep 5");
    cudaEventRecord(start, 0);
    cudaProfilerStart();
    cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
    shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
    cudaDeviceSynchronize();
    cudaProfilerStop();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    std::system("killall power_monitor");
    error_id = cudaGetLastError();
    if (error_id != cudaSuccess) {
        printf("Error 3 is %s\n", cudaGetErrorString(error_id));
    }

    /* copy results from GPU to CPU */
    cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
    cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize ();

    /* per-thread latency statistics; only the elapsed time is printed, the
       stats feed the commented-out diagnostic line below */
    unsigned long long max_dur = latency[0];
    unsigned long long min_dur = latency[0];
    unsigned long long avg_lat = latency[0];
    for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
        avg_lat += latency[i];
        if (latency[i] > max_dur) {
            max_dur = latency[i];
        } else if (latency[i] < min_dur) {
            min_dur = latency[i];
        }
    }
    // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
    printf("%f\n", time);

    /* release timing events (previously leaked) */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    /* free memory on GPU */
    cudaFree(d_a);
    cudaFree(d_ptr_a);
    cudaFree(duration);
    cudaDeviceSynchronize ();

    /* free memory on CPU */
    free(h_a);
    free(h_ptr_a);
    free(latency);
}
// Entry point: parse the five benchmark parameters into the file-level
// globals and run one measurement at the fixed global working-set size.
int main(int argc, char **argv)
{
    // Guard clause: exactly five user arguments are required.
    if (argc != 6) {
        usage();
        exit(1);
    }
    num_blocks            = atoi(argv[1]);
    num_threads_per_block = atoi(argv[2]);
    num_iterations        = atoi(argv[3]);
    divergence            = atoi(argv[4]);
    const int stride = atoi(argv[5]);
    const int N = GLOBAL_MEM_ELEMENTS;
    parametric_measure_shared(N, 10, stride);
    return 0;
}
|
98fffa11bb94d5d27421c6f64e23ffd772012a60.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "FindDesirableMergeSplits.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate launch configurations: each entry is a {BLOCKX, BLOCKY}
// thread-block shape swept by the benchmark loop in main().
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Problem sizes: each entry is an {XSIZE, YSIZE} shape; argv[1] selects how
// many of these are benchmarked.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated launch-latency benchmark for the FindDesirableMergeSplits
// kernel: for each matrix size (argv[1] selects how many) and each of the 20
// block shapes, launch once to warm up, 10 more times, then time 1000
// launches and print [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
    int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
    int size = XSIZE*YSIZE;
    int minSize = XSIZE*YSIZE;
    int maxSize = XSIZE*YSIZE;
    int desiredSize = XSIZE*YSIZE;
    // NOTE(review): every hipMalloc below requests XSIZE*YSIZE *bytes* for an
    // int* buffer — this looks like a missing "* sizeof(int)"; confirm
    // whether the generated kernel driver only needs byte-sized buffers.
    // NOTE(review): none of these buffers is ever hipFree'd, so each
    // (matrix, block) iteration leaks five device allocations.
    int *adjIndices = NULL;
    hipMalloc(&adjIndices, XSIZE*YSIZE);
    int *adjacency = NULL;
    hipMalloc(&adjacency, XSIZE*YSIZE);
    int *partSizes = NULL;
    hipMalloc(&partSizes, XSIZE*YSIZE);
    int *desiredMerges = NULL;
    hipMalloc(&desiredMerges, XSIZE*YSIZE);
    int *merging = NULL;
    hipMalloc(&merging, XSIZE*YSIZE);
    // Round the problem size up to a multiple of the block shape so the grid
    // covers it exactly.
    int iXSIZE= XSIZE;
    int iYSIZE= YSIZE;
    while(iXSIZE%BLOCKX!=0)
    {
    iXSIZE++;
    }
    while(iYSIZE%BLOCKY!=0)
    {
    iYSIZE++;
    }
    dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
    dim3 threadBlock(BLOCKX, BLOCKY);
    // Warm-up launch (hipFree(0) forces context creation first).
    // NOTE(review): launch errors are never checked.
    hipFree(0);hipLaunchKernelGGL((
    FindDesirableMergeSplits), dim3(gridBlock),dim3(threadBlock), 0, 0, size,minSize,maxSize,desiredSize,adjIndices,adjacency,partSizes,desiredMerges,merging);
    hipDeviceSynchronize();
    // Ten more untimed launches to settle clocks/caches.
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
    FindDesirableMergeSplits), dim3(gridBlock),dim3(threadBlock), 0, 0, size,minSize,maxSize,desiredSize,adjIndices,adjacency,partSizes,desiredMerges,merging);
    }
    // Timed region: 1000 launches. NOTE(review): there is no synchronize
    // before reading the clock, so this measures launch/enqueue time, not
    // kernel execution time — presumably intended for this benchmark.
    auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
    FindDesirableMergeSplits), dim3(gridBlock),dim3(threadBlock), 0, 0, size,minSize,maxSize,desiredSize,adjIndices,adjacency,partSizes,desiredMerges,merging);
    }
    auto end = steady_clock::now();
    auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
    cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
}} | 98fffa11bb94d5d27421c6f64e23ffd772012a60.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "FindDesirableMergeSplits.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate launch configurations: each entry is a {BLOCKX, BLOCKY}
// thread-block shape swept by the benchmark loop in main().
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Problem sizes: each entry is an {XSIZE, YSIZE} shape; argv[1] selects how
// many of these are benchmarked.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated launch-latency benchmark for the FindDesirableMergeSplits
// kernel (CUDA variant of the hipified file above): for each matrix size
// (argv[1] selects how many) and each of the 20 block shapes, launch once to
// warm up, 10 more times, then time 1000 launches and print
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
    int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
    int size = XSIZE*YSIZE;
    int minSize = XSIZE*YSIZE;
    int maxSize = XSIZE*YSIZE;
    int desiredSize = XSIZE*YSIZE;
    // NOTE(review): every cudaMalloc below requests XSIZE*YSIZE *bytes* for
    // an int* buffer — this looks like a missing "* sizeof(int)"; confirm
    // whether the generated kernel driver only needs byte-sized buffers.
    // NOTE(review): none of these buffers is ever cudaFree'd, so each
    // (matrix, block) iteration leaks five device allocations.
    int *adjIndices = NULL;
    cudaMalloc(&adjIndices, XSIZE*YSIZE);
    int *adjacency = NULL;
    cudaMalloc(&adjacency, XSIZE*YSIZE);
    int *partSizes = NULL;
    cudaMalloc(&partSizes, XSIZE*YSIZE);
    int *desiredMerges = NULL;
    cudaMalloc(&desiredMerges, XSIZE*YSIZE);
    int *merging = NULL;
    cudaMalloc(&merging, XSIZE*YSIZE);
    // Round the problem size up to a multiple of the block shape so the grid
    // covers it exactly.
    int iXSIZE= XSIZE;
    int iYSIZE= YSIZE;
    while(iXSIZE%BLOCKX!=0)
    {
    iXSIZE++;
    }
    while(iYSIZE%BLOCKY!=0)
    {
    iYSIZE++;
    }
    dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
    dim3 threadBlock(BLOCKX, BLOCKY);
    // Warm-up launch (cudaFree(0) forces context creation first).
    // NOTE(review): launch errors are never checked.
    cudaFree(0);
    FindDesirableMergeSplits<<<gridBlock,threadBlock>>>(size,minSize,maxSize,desiredSize,adjIndices,adjacency,partSizes,desiredMerges,merging);
    cudaDeviceSynchronize();
    // Ten more untimed launches to settle clocks/caches.
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    FindDesirableMergeSplits<<<gridBlock,threadBlock>>>(size,minSize,maxSize,desiredSize,adjIndices,adjacency,partSizes,desiredMerges,merging);
    }
    // Timed region: 1000 launches. NOTE(review): there is no synchronize
    // before reading the clock, so this measures launch/enqueue time, not
    // kernel execution time — presumably intended for this benchmark.
    auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    FindDesirableMergeSplits<<<gridBlock,threadBlock>>>(size,minSize,maxSize,desiredSize,adjIndices,adjacency,partSizes,desiredMerges,merging);
    }
    auto end = steady_clock::now();
    auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
    cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
}} |
1988fa7ca3fd76ff6bc699bd7c6156c9f24e4eba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "commoncu.cuh"
#include <stdlib.h>
#include <stdio.h>
#include <cutil_inline.h>
namespace eagleeye
{
    // Returns true when device `dev` satisfies the minimum runtime version
    // and minimum (major) compute capability. The device name is always
    // printed; the version details are printed only on failure (original
    // behavior, kept as-is).
    bool checkCUDAProfile(int dev, int min_runtime_ver, int min_compute)
    {
        int runtime_ver = 0;
        hipDeviceProp_t device_prop;
        hipGetDeviceProperties(&device_prop, dev);
        fprintf(stderr,"\nDevice %d: \"%s\"\n", dev, device_prop.name);
        hipRuntimeGetVersion(&runtime_ver);
        if (min_runtime_ver>runtime_ver||min_compute>device_prop.major)
        {
            fprintf(stderr," CUDA Runtime Version :\t%d.%d\n", runtime_ver/1000, (runtime_ver%100)/10);
            fprintf(stderr," CUDA Compute Capability :\t%d.%d\n", device_prop.major, device_prop.minor);
            return false;
        }
        return true;
    }

    // Scans all devices and returns the index of the one with the highest
    // major compute capability, or -1 when no device is available.
    // NOTE(review): minor versions are ignored — on a tie the first device
    // with the highest major CC wins; confirm this is intended.
    int findCapableDevice(int argc,char **argv)
    {
        int device_count=0;
        hipError_t error_id=hipGetDeviceCount(&device_count);
        if (error_id!=hipSuccess)
        {
            printf("hipGetDeviceCount returned %d\n->%s\n",(int)error_id,hipGetErrorString(error_id));
            return -1;
        }
        if (device_count==0)
        {
            fprintf(stderr,"There is no device supporting CUDA.\n");
            return -1;
        }
        else
        {
            fprintf(stderr,"Found %d CUDA Capable Device(s).\n",device_count);
        }
        int best_dev=-1;
        hipDeviceProp_t best_device_prop;
        for (int dev=0;dev<device_count;++dev)
        {
            hipDeviceProp_t device_prop;
            hipGetDeviceProperties(&device_prop,dev);
            if ((best_dev==-1)||(best_device_prop.major<device_prop.major))
            {
                best_dev=dev;
                best_device_prop=device_prop;
            }
        }
        if (best_dev!=-1)
        {
            fprintf(stderr,"Setting active device to %d\n",best_dev);
        }
        return best_dev;
    }

    // Selects the most capable device and makes it active; returns false
    // when no usable device was found.
    bool iniCuda(int argc,char ** argv)
    {
        int dev=findCapableDevice(argc,argv);
        if (dev!=-1)
        {
            hipSetDevice(dev);
            return true;
        }
        else
            return false;
    }

    //////////////////////////////////////////////////////////////////////////
    // In-place complex conjugation of d_data[0..count): negates the
    // imaginary part. One thread per element, guarded against overshoot.
    __global__ void conjugate_kernel(fComplex* d_data,unsigned int count)
    {
        const unsigned int x=blockDim.x*blockIdx.x+threadIdx.x;
        if (x<count)
        {
            d_data[x].y=-d_data[x].y;
        }
    }

    // Host wrapper: launches conjugate_kernel with 256-thread blocks.
    void conjugate(fComplex* d_data,unsigned int count)
    {
        dim3 threads(256,1);
        dim3 grid(iDivUp(count,threads.x),1);
        hipLaunchKernelGGL(( conjugate_kernel), dim3(grid),dim3(threads), 0, 0, d_data,count);
    }

    //////////////////////////////////////////////////////////////////////////
    // Subtracts `value` from every element of a row-major d_h x d_w matrix.
    __global__ void subtractScalar_kernel(float* d_data,unsigned int d_h,unsigned int d_w,float value)
    {
        const unsigned int y=blockDim.y*blockIdx.y+threadIdx.y;
        const unsigned int x=blockDim.x*blockIdx.x+threadIdx.x;
        if (y<d_h&&x<d_w)
        {
            // BUG FIX: the row stride of a row-major d_h x d_w matrix is the
            // row width d_w; the previous code indexed with y*d_h+x, which
            // corrupts data whenever d_h != d_w.
            d_data[y*d_w+x]-=value;
        }
    }

    // Host wrapper: launches subtractScalar_kernel with 32x8 blocks.
    void subtractScalar(float* d_data,unsigned int d_h,unsigned int d_w,float value)
    {
        dim3 threads(32,8);
        dim3 grid(iDivUp(d_w,threads.x),iDivUp(d_h,threads.y));
        hipLaunchKernelGGL(( subtractScalar_kernel), dim3(grid),dim3(threads), 0, 0, d_data,d_h,d_w,value);
    }

    //////////////////////////////////////////////////////////////////////////
    // Element-wise complex multiplication c[i] = a[i] * b[i].
    __global__ void multiply_kernel(fComplex* a,fComplex* b,fComplex* c,unsigned int count)
    {
        const unsigned int x=blockDim.x*blockIdx.x+threadIdx.x;
        if (x<count)
        {
            unsigned int index=x;
            float real_part,image_part;
            // (ar+i*ai)*(br+i*bi) = (ar*br - ai*bi) + i*(ar*bi + br*ai)
            real_part=a[index].x*b[index].x-a[index].y*b[index].y;
            image_part=a[index].x*b[index].y+b[index].x*a[index].y;
            c[index].x=real_part;
            c[index].y=image_part;
        }
    }

    // Host wrapper: launches multiply_kernel with 256-thread blocks.
    void multiply(fComplex* d_multiply_term1,fComplex* d_multiply_term2,fComplex* d_result,unsigned int count)
    {
        dim3 threads(256,1);
        dim3 grid(iDivUp(count,threads.x),1);
        hipLaunchKernelGGL(( multiply_kernel), dim3(grid),dim3(threads), 0, 0, d_multiply_term1,d_multiply_term2,d_result,count);
    }

    //////////////////////////////////////////////////////////////////////////
    // Element-wise complex multiplication with scaling:
    // c[i] = a[i] * b[i] * scale.
    __global__ void multiplyAndScale_kernel(fComplex* a,fComplex* b,float scale,fComplex* c,unsigned int count)
    {
        const unsigned int x=blockDim.x*blockIdx.x+threadIdx.x;
        if (x<count)
        {
            unsigned int index=x;
            float real_part,image_part;
            real_part=a[index].x*b[index].x-a[index].y*b[index].y;
            image_part=a[index].x*b[index].y+b[index].x*a[index].y;
            c[index].x=real_part*scale;
            c[index].y=image_part*scale;
        }
    }

    // Host wrapper: launches multiplyAndScale_kernel with 256-thread blocks.
    void multiplyAndScale(fComplex* a,fComplex* b,float scale,fComplex* c,unsigned int count)
    {
        dim3 threads(256,1);
        dim3 grid(iDivUp(count,threads.x),1);
        hipLaunchKernelGGL(( multiplyAndScale_kernel), dim3(grid),dim3(threads), 0, 0, a,b,scale,c,count);
    }

    //////////////////////////////////////////////////////////////////////////
    // Copies an src_h x src_w block (read via the LOAD_FLOAT texture macro)
    // into a dst_h x dst_w destination at (offset_h, offset_w). Only the
    // source extent is guarded; the caller must guarantee the offsets keep
    // the write inside the destination.
    __global__ void padData_kernel(float* d_dst,unsigned int dst_h,unsigned int dst_w,
        float* d_src,unsigned int src_h,unsigned int src_w,int offset_h,int offset_w)
    {
        const unsigned int y=blockDim.y*blockIdx.y+threadIdx.y;
        const unsigned int x=blockDim.x*blockIdx.x+threadIdx.x;
        if (y<src_h&&x<src_w)
        {
            d_dst[(y+offset_h)*dst_w+x+offset_w]=LOAD_FLOAT(y*src_w+x);
        }
    }

    // Host wrapper: binds the texture base (SET_FLOAT_BASE) and launches
    // padData_kernel with 32x8 blocks over the source extent.
    void padData(float *d_dst,unsigned int dst_h,unsigned int dst_w,float *d_src,unsigned int src_h,unsigned int src_w,int offset_h,int offset_w)
    {
        dim3 threads(32, 8);
        dim3 grid(iDivUp(src_w, threads.x), iDivUp(src_h, threads.y));
        SET_FLOAT_BASE;
        hipLaunchKernelGGL(( padData_kernel), dim3(grid),dim3(threads), 0, 0, d_dst,dst_h,dst_w,d_src,src_h,src_w,offset_h,offset_w);
    }
}
| 1988fa7ca3fd76ff6bc699bd7c6156c9f24e4eba.cu | #include "commoncu.cuh"
#include <stdlib.h>
#include <stdio.h>
#include <cutil_inline.h>
namespace eagleeye
{
    // Returns true when device `dev` satisfies the minimum runtime version
    // and minimum (major) compute capability. The device name is always
    // printed; the version details are printed only on failure (original
    // behavior, kept as-is).
    bool checkCUDAProfile(int dev, int min_runtime_ver, int min_compute)
    {
        int runtime_ver = 0;
        cudaDeviceProp device_prop;
        cudaGetDeviceProperties(&device_prop, dev);
        fprintf(stderr,"\nDevice %d: \"%s\"\n", dev, device_prop.name);
        cudaRuntimeGetVersion(&runtime_ver);
        if (min_runtime_ver>runtime_ver||min_compute>device_prop.major)
        {
            fprintf(stderr," CUDA Runtime Version :\t%d.%d\n", runtime_ver/1000, (runtime_ver%100)/10);
            fprintf(stderr," CUDA Compute Capability :\t%d.%d\n", device_prop.major, device_prop.minor);
            return false;
        }
        return true;
    }

    // Scans all devices and returns the index of the one with the highest
    // major compute capability, or -1 when no device is available.
    // NOTE(review): minor versions are ignored — on a tie the first device
    // with the highest major CC wins; confirm this is intended.
    int findCapableDevice(int argc,char **argv)
    {
        int device_count=0;
        cudaError_t error_id=cudaGetDeviceCount(&device_count);
        if (error_id!=cudaSuccess)
        {
            printf("cudaGetDeviceCount returned %d\n->%s\n",(int)error_id,cudaGetErrorString(error_id));
            return -1;
        }
        if (device_count==0)
        {
            fprintf(stderr,"There is no device supporting CUDA.\n");
            return -1;
        }
        else
        {
            fprintf(stderr,"Found %d CUDA Capable Device(s).\n",device_count);
        }
        int best_dev=-1;
        cudaDeviceProp best_device_prop;
        for (int dev=0;dev<device_count;++dev)
        {
            cudaDeviceProp device_prop;
            cudaGetDeviceProperties(&device_prop,dev);
            if ((best_dev==-1)||(best_device_prop.major<device_prop.major))
            {
                best_dev=dev;
                best_device_prop=device_prop;
            }
        }
        if (best_dev!=-1)
        {
            fprintf(stderr,"Setting active device to %d\n",best_dev);
        }
        return best_dev;
    }

    // Selects the most capable device and makes it active; returns false
    // when no usable device was found.
    bool iniCuda(int argc,char ** argv)
    {
        int dev=findCapableDevice(argc,argv);
        if (dev!=-1)
        {
            cudaSetDevice(dev);
            return true;
        }
        else
            return false;
    }

    //////////////////////////////////////////////////////////////////////////
    // In-place complex conjugation of d_data[0..count): negates the
    // imaginary part. One thread per element, guarded against overshoot.
    __global__ void conjugate_kernel(fComplex* d_data,unsigned int count)
    {
        const unsigned int x=blockDim.x*blockIdx.x+threadIdx.x;
        if (x<count)
        {
            d_data[x].y=-d_data[x].y;
        }
    }

    // Host wrapper: launches conjugate_kernel with 256-thread blocks.
    void conjugate(fComplex* d_data,unsigned int count)
    {
        dim3 threads(256,1);
        dim3 grid(iDivUp(count,threads.x),1);
        conjugate_kernel<<<grid,threads>>>(d_data,count);
    }

    //////////////////////////////////////////////////////////////////////////
    // Subtracts `value` from every element of a row-major d_h x d_w matrix.
    __global__ void subtractScalar_kernel(float* d_data,unsigned int d_h,unsigned int d_w,float value)
    {
        const unsigned int y=blockDim.y*blockIdx.y+threadIdx.y;
        const unsigned int x=blockDim.x*blockIdx.x+threadIdx.x;
        if (y<d_h&&x<d_w)
        {
            // BUG FIX: the row stride of a row-major d_h x d_w matrix is the
            // row width d_w; the previous code indexed with y*d_h+x, which
            // corrupts data whenever d_h != d_w.
            d_data[y*d_w+x]-=value;
        }
    }

    // Host wrapper: launches subtractScalar_kernel with 32x8 blocks.
    void subtractScalar(float* d_data,unsigned int d_h,unsigned int d_w,float value)
    {
        dim3 threads(32,8);
        dim3 grid(iDivUp(d_w,threads.x),iDivUp(d_h,threads.y));
        subtractScalar_kernel<<<grid,threads>>>(d_data,d_h,d_w,value);
    }

    //////////////////////////////////////////////////////////////////////////
    // Element-wise complex multiplication c[i] = a[i] * b[i].
    __global__ void multiply_kernel(fComplex* a,fComplex* b,fComplex* c,unsigned int count)
    {
        const unsigned int x=blockDim.x*blockIdx.x+threadIdx.x;
        if (x<count)
        {
            unsigned int index=x;
            float real_part,image_part;
            // (ar+i*ai)*(br+i*bi) = (ar*br - ai*bi) + i*(ar*bi + br*ai)
            real_part=a[index].x*b[index].x-a[index].y*b[index].y;
            image_part=a[index].x*b[index].y+b[index].x*a[index].y;
            c[index].x=real_part;
            c[index].y=image_part;
        }
    }

    // Host wrapper: launches multiply_kernel with 256-thread blocks.
    void multiply(fComplex* d_multiply_term1,fComplex* d_multiply_term2,fComplex* d_result,unsigned int count)
    {
        dim3 threads(256,1);
        dim3 grid(iDivUp(count,threads.x),1);
        multiply_kernel<<<grid,threads>>>(d_multiply_term1,d_multiply_term2,d_result,count);
    }

    //////////////////////////////////////////////////////////////////////////
    // Element-wise complex multiplication with scaling:
    // c[i] = a[i] * b[i] * scale.
    __global__ void multiplyAndScale_kernel(fComplex* a,fComplex* b,float scale,fComplex* c,unsigned int count)
    {
        const unsigned int x=blockDim.x*blockIdx.x+threadIdx.x;
        if (x<count)
        {
            unsigned int index=x;
            float real_part,image_part;
            real_part=a[index].x*b[index].x-a[index].y*b[index].y;
            image_part=a[index].x*b[index].y+b[index].x*a[index].y;
            c[index].x=real_part*scale;
            c[index].y=image_part*scale;
        }
    }

    // Host wrapper: launches multiplyAndScale_kernel with 256-thread blocks.
    void multiplyAndScale(fComplex* a,fComplex* b,float scale,fComplex* c,unsigned int count)
    {
        dim3 threads(256,1);
        dim3 grid(iDivUp(count,threads.x),1);
        multiplyAndScale_kernel<<<grid,threads>>>(a,b,scale,c,count);
    }

    //////////////////////////////////////////////////////////////////////////
    // Copies an src_h x src_w block (read via the LOAD_FLOAT texture macro)
    // into a dst_h x dst_w destination at (offset_h, offset_w). Only the
    // source extent is guarded; the caller must guarantee the offsets keep
    // the write inside the destination.
    __global__ void padData_kernel(float* d_dst,unsigned int dst_h,unsigned int dst_w,
        float* d_src,unsigned int src_h,unsigned int src_w,int offset_h,int offset_w)
    {
        const unsigned int y=blockDim.y*blockIdx.y+threadIdx.y;
        const unsigned int x=blockDim.x*blockIdx.x+threadIdx.x;
        if (y<src_h&&x<src_w)
        {
            d_dst[(y+offset_h)*dst_w+x+offset_w]=LOAD_FLOAT(y*src_w+x);
        }
    }

    // Host wrapper: binds the texture base (SET_FLOAT_BASE) and launches
    // padData_kernel with 32x8 blocks over the source extent.
    void padData(float *d_dst,unsigned int dst_h,unsigned int dst_w,float *d_src,unsigned int src_h,unsigned int src_w,int offset_h,int offset_w)
    {
        dim3 threads(32, 8);
        dim3 grid(iDivUp(src_w, threads.x), iDivUp(src_h, threads.y));
        SET_FLOAT_BASE;
        padData_kernel<<<grid,threads>>>(d_dst,dst_h,dst_w,d_src,src_h,src_w,offset_h,offset_w);
    }
}
|
1695628368a2c8137868b8b91cb15ade315fb29d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zparilut_candidates.cu, normal z -> c, Thu Oct 8 23:05:49 2020
*/
#include "magmasparse_internal.h"
#define PRECISION_c
// Candidate count, pass 1 — one thread per matrix row (host launches a 1-D
// grid of 128-thread blocks). For each row, walk the ILU(0) pattern (L0/U0)
// and the current pattern (L/U) as a sorted merge and count how many ILU(0)
// entries are missing from the current factors. The per-row counts are
// accumulated into L_new_row / U_new_row; the host later converts these
// counts into row pointers.
__global__ void
cparilut_candidates_count_1(
    const magma_int_t num_rows,
    const magma_index_t* L0_row,
    const magma_index_t* L0_col,
    const magma_index_t* U0_row,
    const magma_index_t* U0_col,
    const magma_index_t* L_row,
    const magma_index_t* L_col,
    const magma_index_t* U_row,
    const magma_index_t* U_col,
    magma_index_t* L_new_row,
    magma_index_t* U_new_row)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    //for(int row=0; row<num_rows; row++){
    if (row < num_rows) {
        // ---- lower factor: merge-walk L0 pattern against L pattern ----
        int numaddrowL = 0;
        int ilu0 = L0_row[row];
        int ilut = L_row[row];
        int endilu0 = L0_row[ row+1 ];
        int endilut = L_row[ row+1 ];
        int ilu0col;
        int ilutcol;
        do{
            ilu0col = L0_col[ ilu0 ];
            ilutcol = L_col[ ilut ];
            if(ilu0col == ilutcol ){
                ilu0++;
                ilut++;
            }
            else if(ilutcol<ilu0col ){
                ilut++;
            }
            else {
                // this element is missing in the current approximation
                // mark it as candidate
                numaddrowL++;
                ilu0++;
            }
        } while (ilut < endilut && ilu0 < endilu0);
        // do the rest if existing
        if(ilu0<endilu0 ){
            do{
                numaddrowL++;
                ilu0++;
            }while(ilu0<endilu0 );
        }
        L_new_row[ row ] = L_new_row[ row ]+numaddrowL;
        // ---- upper factor: same merge-walk for U0 against U ----
        magma_int_t numaddrowU = 0;
        ilu0 = U0_row[row];
        ilut = U_row[row];
        endilu0 = U0_row[ row+1 ];
        endilut = U_row[ row+1 ];
        do{
            ilu0col = U0_col[ ilu0 ];
            ilutcol = U_col[ ilut ];
            if(ilu0col == ilutcol ){
                ilu0++;
                ilut++;
            }
            else if(ilutcol<ilu0col ){
                ilut++;
            }
            else {
                // this element is missing in the current approximation
                // mark it as candidate
                numaddrowU++;
                ilu0++;
            }
        }while(ilut<endilut && ilu0<endilu0 );
        if(ilu0<endilu0 ){
            do{
                numaddrowU++;
                ilu0++;
            }while(ilu0<endilu0 );
        }
        U_new_row[ row ] = U_new_row[ row ]+numaddrowU;
    }
}
// Candidate count, pass 2 — one thread per matrix row. Counts potential
// fill-in locations generated by the product L*U: for each off-diagonal
// entry (row, col1) of L, every entry (col1, col2) of U with col2 beyond the
// diagonal yields candidate (row, col2), assigned to L or U depending on
// whether col2 lies below or above the diagonal.
// NOTE(review): the existence checks are commented out, so these counts are
// upper bounds that include entries already present; the insert pass marks
// such duplicates with zero values and the host removes them afterwards via
// the threshold step — confirm against the insert_2 kernel below.
__global__ void
cparilut_candidates_count_2(
    const magma_int_t num_rows,
    const magma_index_t* L0_row,
    const magma_index_t* L0_col,
    const magma_index_t* U0_row,
    const magma_index_t* U0_col,
    const magma_index_t* L_row,
    const magma_index_t* L_col,
    const magma_index_t* U_row,
    const magma_index_t* U_col,
    magma_index_t* L_new_row,
    magma_index_t* U_new_row)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    //for(int row=0; row<num_rows; row++){
    if (row < num_rows) {
        // how to determine candidates:
        // for each node i, look at any "intermediate" neighbor nodes numbered
        // less, and then see if this neighbor has another neighbor j numbered
        // more than the intermediate; if so, fill in is (i,j) if it is not
        // already nonzero
        int numaddrowL = 0, numaddrowU = 0;
        // loop first element over row - only for elements smaller the diagonal
        for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){
            int col1 = L_col[ el1 ];
            // now check the upper triangular
            // second loop first element over row - only for elements larger the intermediate
            for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){
                int col2 = U_col[ el2 ];
                int cand_col = col2;
                // check whether this element already exists
                // first case: part of L
                if(cand_col < row ){
                    // check whether this element already exists in L
                    // int exist = 0;
                    // for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){
                    //     if(L_col[ k ] == cand_col ){
                    //         exist = 1;
                    //         //break;
                    //     }
                    // }
                    // if it does not exist, increase counter for this location
                    // use the entry one further down to allow for parallel insertion
                    // if(exist == 0 ){
                    numaddrowL++;
                    // }
                } else {
                    // check whether this element already exists in U
                    // int exist = 0;
                    // for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){
                    //     if(U_col[ k ] == cand_col ){
                    //         exist = 1;
                    //         //break;
                    //     }
                    // }
                    // if(exist == 0 ){
                    //printf("checked row: %d this element does not yet exist in L: (%d,%d)\n", cand_row, cand_col);
                    numaddrowU++;
                    // }
                }
            }
        }
        U_new_row[ row ] = U_new_row[ row ]+numaddrowU;
        L_new_row[ row ] = L_new_row[ row ]+numaddrowL;
    }
}
// Candidate insert, pass 1 — one thread per matrix row. Re-runs the merge
// walk of count_1 and writes the candidates found in the ILU(0) pattern into
// the COO output (rowidx/col/val) starting at the row offset L_new_row[row]
// (resp. U_new_row[row]). The per-row number of inserted entries is recorded
// in insertedL/insertedU so that pass 2 can append after them. Candidate
// values are set to 3 = ONE+ONE+ONE, presumably a non-zero placeholder
// magnitude so they survive the later threshold removal — TODO confirm.
__global__ void
cparilut_candidates_insert_1(
    const magma_int_t num_rows,
    const magma_index_t* L0_row,
    const magma_index_t* L0_col,
    const magma_index_t* U0_row,
    const magma_index_t* U0_col,
    const magma_index_t* L_row,
    const magma_index_t* L_col,
    const magma_index_t* U_row,
    const magma_index_t* U_col,
    magma_index_t* L_new_row,
    magma_index_t* L_new_rowidx,
    magma_index_t* L_new_col,
    magmaFloatComplex* L_new_val,
    magma_index_t* insertedL,
    magma_index_t* U_new_row,
    magma_index_t* U_new_rowidx,
    magma_index_t* U_new_col,
    magmaFloatComplex* U_new_val,
    magma_index_t* insertedU)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    //for(int row=0; row<num_rows; row++){
    if (row < num_rows) {
        // ---- lower factor ----
        int laddL = 0;
        int offsetL = L_new_row[row];
        int ilu0 = L0_row[row];
        int ilut = L_row[row];
        int endilu0 = L0_row[ row+1 ];
        int endilut = L_row[ row+1 ];
        int ilu0col;
        int ilutcol;
        do{
            ilu0col = L0_col[ ilu0 ];
            ilutcol = L_col[ ilut ];
            if(ilu0col == ilutcol ){
                ilu0++;
                ilut++;
            }
            else if(ilutcol<ilu0col ){
                ilut++;
            }
            else {
                // this element is missing in the current approximation
                // mark it as candidate
                L_new_col[ offsetL + laddL ] = ilu0col;
                L_new_rowidx[ offsetL + laddL ] = row;
                L_new_val[ offsetL + laddL ] = MAGMA_C_ONE + MAGMA_C_ONE + MAGMA_C_ONE;
                laddL++;
                ilu0++;
            }
        } while(ilut<endilut && ilu0<endilu0 );
        // flush the remaining ILU(0) entries, all missing from L
        if (ilu0<endilu0){
            do{
                ilu0col = L0_col[ ilu0 ];
                L_new_col[ offsetL + laddL ] = ilu0col;
                L_new_rowidx[ offsetL + laddL ] = row;
                L_new_val[ offsetL + laddL ] = MAGMA_C_ONE + MAGMA_C_ONE + MAGMA_C_ONE;
                laddL++;
                ilu0++;
            }while(ilu0<endilu0 );
        }
        insertedL[row] = laddL;
        // ---- upper factor ----
        int laddU = 0;
        int offsetU = U_new_row[row];
        ilu0 = U0_row[row];
        ilut = U_row[row];
        endilu0 = U0_row[ row+1 ];
        endilut = U_row[ row+1 ];
        do{
            ilu0col = U0_col[ ilu0 ];
            ilutcol = U_col[ ilut ];
            if(ilu0col == ilutcol ){
                ilu0++;
                ilut++;
            }
            else if(ilutcol<ilu0col ){
                ilut++;
            }
            else {
                // this element is missing in the current approximation
                // mark it as candidate
                U_new_col[ offsetU + laddU ] = ilu0col;
                U_new_rowidx[ offsetU + laddU ] = row;
                U_new_val[ offsetU + laddU ] = MAGMA_C_ONE + MAGMA_C_ONE + MAGMA_C_ONE;
                laddU++;
                ilu0++;
            }
        }while(ilut<endilut && ilu0<endilu0 );
        if(ilu0<endilu0 ){
            do{
                ilu0col = U0_col[ ilu0 ];
                U_new_col[ offsetU + laddU ] = ilu0col;
                U_new_rowidx[ offsetU + laddU ] = row;
                U_new_val[ offsetU + laddU ] = MAGMA_C_ONE + MAGMA_C_ONE + MAGMA_C_ONE;
                laddU++;
                ilu0++;
            }while(ilu0<endilu0 );
        }
        insertedU[row] = laddU;
    }
}
// Candidate insert, pass 2 — one thread per matrix row. Writes the L*U
// fill-in candidates found by count_2 after the entries already written by
// insert_1 (offset = row pointer + insertedL/insertedU). Entries that turn
// out to already exist in the factors, or to have been written earlier into
// the candidate list, are stored with a negative column marker (-1: present
// in L/U, -2: already in the candidate list) and a zero value so the host's
// subsequent threshold-removal pass drops them.
__global__ void
cparilut_candidates_insert_2(
    const magma_int_t num_rows,
    const magma_index_t* L0_row,
    const magma_index_t* L0_col,
    const magma_index_t* U0_row,
    const magma_index_t* U0_col,
    const magma_index_t* L_row,
    const magma_index_t* L_col,
    const magma_index_t* U_row,
    const magma_index_t* U_col,
    magma_index_t* L_new_row,
    magma_index_t* L_new_rowidx,
    magma_index_t* L_new_col,
    magmaFloatComplex* L_new_val,
    magma_index_t* insertedL,
    magma_index_t* U_new_row,
    magma_index_t* U_new_rowidx,
    magma_index_t* U_new_col,
    magmaFloatComplex* U_new_val,
    magma_index_t* insertedU)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    //for(int row=0; row<num_rows; row++){
    if (row < num_rows) {
        int cand_row = row;
        int laddL = 0;
        int laddU = 0;
        // start after the entries insert_1 already wrote for this row
        int offsetL = L_new_row[row] + insertedL[row];
        int offsetU = U_new_row[row] + insertedU[row];
        // loop first element over row - only for elements smaller the diagonal
        for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){
            int col1 = L_col[ el1 ];
            // now check the upper triangular
            // second loop first element over row - only for elements larger the intermediate
            for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){
                int col2 = U_col[ el2 ];
                int cand_col = col2;
                // check whether this element already exists
                // first case: part of L
                if(cand_col < row ){
                    int exist = 0;
                    for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){
                        if(L_col[ k ] == cand_col ){
                            exist = -1;
                            // printf("already exists:(%d,%d\n", row, cand_col);
                            //break;
                        }
                    }
                    for(int k=L_new_row[cand_row]; k<L_new_row[cand_row+1]; k++){
                        if(L_new_col[ k ] == cand_col ){
                            // element included in LU and nonzero
                            // printf("already inserted:(%d,%d\n", row, cand_col);
                            exist = -2;
                            //break;
                        }
                    }
                    // duplicates get a negative column and a zero value
                    L_new_rowidx[ offsetL + laddL ] = cand_row;
                    L_new_col[ offsetL + laddL ] = (exist == 0) ? cand_col : exist;
                    L_new_val[ offsetL + laddL ] = (exist == 0) ? MAGMA_C_ONE : MAGMA_C_ZERO;
                    laddL++;
                } else {
                    // check whether this element already exists in U
                    int exist = 0;
                    for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){
                        if(U_col[ k ] == cand_col ){
                            // printf("already exists:(%d,%d\n", row, cand_col);
                            exist = -1;
                            //break;
                        }
                    }
                    for(int k=U_new_row[cand_row]; k<U_new_row[cand_row+1]; k++){
                        if(U_new_col[ k ] == cand_col ){
                            // element included in LU and nonzero
                            // printf("already inserted:(%d,%d==%d) k:%d -> %d -> %d\n", row, cand_col , U_new_col[ k ], U_new_row[cand_row], k, U_new_row[cand_row+1] );
                            exist = -2;
                            //break;
                        }
                    }
                    U_new_rowidx[ offsetU + laddU ] = cand_row;
                    U_new_col[ offsetU + laddU ] = (exist == 0) ? cand_col : exist;
                    U_new_val[ offsetU + laddU ] = (exist == 0) ? MAGMA_C_ONE : MAGMA_C_ZERO;
                    laddU++;
                }
            }
        }
    }
}
/***************************************************************************//**
Purpose
-------
This function identifies the locations with a potential nonzero ILU residual
R = A - L*U where L and U are the current incomplete factors.
Nonzero ILU residuals are possible
1 where A is nonzero but L and U have no nonzero entry
2 where the product L*U has fill-in but the location is not included
in L or U
We assume that the incomplete factors are exact for the elements included in
the current pattern.
This is the GPU implementation of the candidate search.
Four GPU kernels are used: the first two form a dry run assessing the memory
need, the next two then compute the candidate locations and eliminate
duplicate entries. A final threshold pass removes the zero-valued duplicate
markers, and the elements in a row are kept sorted by increasing column index.
Arguments
---------
@param[in]
L0 magma_c_matrix
tril(ILU(0) ) pattern of original system matrix.
@param[in]
U0 magma_c_matrix
triu(ILU(0) ) pattern of original system matrix.
@param[in]
L magma_c_matrix
Current lower triangular factor.
@param[in]
U magma_c_matrix
Current upper triangular factor.
@param[in,out]
L_new magma_c_matrix*
List of candidates for L in COO format.
@param[in,out]
U_new magma_c_matrix*
List of candidates for U in COO format.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
*******************************************************************************/
extern "C" magma_int_t
magma_cparilut_candidates_gpu(
    magma_c_matrix L0,
    magma_c_matrix U0,
    magma_c_matrix L,
    magma_c_matrix U,
    magma_c_matrix *L_new,
    magma_c_matrix *U_new,
    magma_queue_t queue )
{
    magma_int_t info = 0;
    int num_rows = L.num_rows;
    // threshold used at the end to purge the zero-valued duplicate markers
    // written by the insert kernels
    float thrs = 1e-8;
    // one thread per row: 1-D grid of 128-thread blocks
    int blocksize1 = 128;
    int blocksize2 = 1;
    int dimgrid11 = magma_ceildiv(num_rows, blocksize1 );
    int dimgrid12 = 1;
    int dimgrid13 = 1;
    dim3 grid1(dimgrid11, dimgrid12, dimgrid13 );
    dim3 block1(blocksize1, blocksize2, 1 );
    // scratch: per-row counts of entries written by the first insert pass
    magmaIndex_ptr insertedL = NULL;
    magmaIndex_ptr insertedU = NULL;
    // release any previous content of the output matrices
    magma_cmfree(L_new, queue);
    magma_cmfree(U_new, queue);
    // CHECK sets `info` and jumps to cleanup on failure
    CHECK(magma_index_malloc(&insertedL, num_rows));
    CHECK(magma_index_malloc(&insertedU, num_rows));
    CHECK(magma_index_malloc(&L_new->drow, num_rows+1));
    CHECK(magma_index_malloc(&U_new->drow, num_rows+1));
    CHECK(magma_cindexinit_gpu(num_rows+1, L_new->drow, queue));
    CHECK(magma_cindexinit_gpu(num_rows+1, U_new->drow, queue));
    CHECK(magma_cindexinit_gpu(num_rows, insertedL, queue));
    CHECK(magma_cindexinit_gpu(num_rows, insertedU, queue));
    L_new->num_rows = L.num_rows;
    L_new->num_cols = L.num_cols;
    L_new->storage_type = Magma_CSR;
    L_new->memory_location = Magma_DEV;
    U_new->num_rows = L.num_rows;
    U_new->num_cols = L.num_cols;
    U_new->storage_type = Magma_CSR;
    U_new->memory_location = Magma_DEV;
    // dry run: count candidates per row (ILU(0) pattern + L*U fill-in)
    hipLaunchKernelGGL(( cparilut_candidates_count_1), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
        L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
        L.drow, L.dcol, U.drow, U.dcol,
        insertedL, insertedU);
    hipLaunchKernelGGL(( cparilut_candidates_count_2), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
        L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
        L.drow, L.dcol, U.drow, U.dcol,
        insertedL, insertedU);
    // turn the per-row counts into CSR row pointers and total nnz
    CHECK(magma_cget_row_ptr(num_rows, &L_new->nnz, insertedL,
        L_new->drow, queue));
    CHECK(magma_cget_row_ptr(num_rows, &U_new->nnz, insertedU,
        U_new->drow, queue));
    // reset the counters for reuse by the insert kernels
    CHECK(magma_cindexinit_gpu(num_rows, insertedL, queue));
    CHECK(magma_cindexinit_gpu(num_rows, insertedU, queue));
    CHECK(magma_cmalloc(&L_new->dval, L_new->nnz));
    CHECK(magma_index_malloc(&L_new->drowidx, L_new->nnz));
    CHECK(magma_index_malloc(&L_new->dcol, L_new->nnz));
    CHECK(magma_cmalloc(&U_new->dval, U_new->nnz));
    CHECK(magma_index_malloc(&U_new->drowidx, U_new->nnz));
    CHECK(magma_index_malloc(&U_new->dcol, U_new->nnz));
    CHECK(magma_cvalinit_gpu(L_new->nnz, L_new->dval, queue));
    CHECK(magma_cvalinit_gpu(U_new->nnz, U_new->dval, queue));
    //CHECK(magma_cindexinit_gpu(L_new->nnz, L_new->dcol, queue));
    //CHECK(magma_cindexinit_gpu(U_new->nnz, U_new->dcol, queue));
    //CHECK(magma_cindexinit_gpu(L_new->nnz, L_new->drowidx, queue));
    //CHECK(magma_cindexinit_gpu(U_new->nnz, U_new->drowidx, queue));
    // we don't need to init rowidx and col
    // the uninitilazed values will be removed anyways
    // fill the candidate lists (pass 1: ILU(0) pattern, pass 2: L*U fill-in)
    hipLaunchKernelGGL(( cparilut_candidates_insert_1), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
        L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
        L.drow, L.dcol, U.drow, U.dcol,
        L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL,
        U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU);
    hipLaunchKernelGGL(( cparilut_candidates_insert_2), dim3(grid1), dim3(block1), 0, queue->cuda_stream(),
        L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
        L.drow, L.dcol, U.drow, U.dcol,
        L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL,
        U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU);
    // drop the zero-valued duplicate markers left by insert_2
    CHECK(magma_cthrsholdrm_gpu(1, L_new, &thrs, queue));
    CHECK(magma_cthrsholdrm_gpu(1, U_new, &thrs, queue));
cleanup:
    magma_free(insertedL);
    magma_free(insertedU);
    return info;
}
| 1695628368a2c8137868b8b91cb15ade315fb29d.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zparilut_candidates.cu, normal z -> c, Thu Oct 8 23:05:49 2020
*/
#include "magmasparse_internal.h"
#define PRECISION_c
// Per-row dry run (rule 1): counts, for each row, how many entries of the
// ILU(0) pattern (L0/U0) are missing from the current factors (L/U), by a
// merge-walk over the two sorted CSR column lists of the row. The counts are
// accumulated into L_new_row / U_new_row; no candidate is written yet.
// One thread per row; threads past num_rows do nothing.
__global__ void
cparilut_candidates_count_1(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* U_new_row)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int numaddrowL = 0;
// merge-walk over the sorted column lists of row `row` in L0 and L
int ilu0 = L0_row[row];
int ilut = L_row[row];
int endilu0 = L0_row[ row+1 ];
int endilut = L_row[ row+1 ];
int ilu0col;
int ilutcol;
do{
ilu0col = L0_col[ ilu0 ];
ilutcol = L_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
numaddrowL++;
ilu0++;
}
} while (ilut < endilut && ilu0 < endilu0);
// do the rest if existing
if(ilu0<endilu0 ){
do{
numaddrowL++;
ilu0++;
}while(ilu0<endilu0 );
}
L_new_row[ row ] = L_new_row[ row ]+numaddrowL;
// same merge-walk for the upper factor U0 vs. U
magma_int_t numaddrowU = 0;
ilu0 = U0_row[row];
ilut = U_row[row];
endilu0 = U0_row[ row+1 ];
endilut = U_row[ row+1 ];
do{
ilu0col = U0_col[ ilu0 ];
ilutcol = U_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
numaddrowU++;
ilu0++;
}
}while(ilut<endilut && ilu0<endilu0 );
if(ilu0<endilu0 ){
do{
numaddrowU++;
ilu0++;
}while(ilu0<endilu0 );
}
U_new_row[ row ] = U_new_row[ row ]+numaddrowU;
}
}
// Per-row dry run (rule 2): counts fill-in candidates produced by the
// symbolic product L*U. For row i, every pair (i,k) in L (k below the
// diagonal) combined with (k,j) in U (j past the diagonal of row k) yields a
// potential fill location (i,j); it is counted into L or U depending on
// whether j lies below or above the diagonal. Duplicate detection is
// deliberately skipped here (commented out) -- duplicates are inserted as
// zero-valued entries by the insert kernels and removed later.
// One thread per row; counts are added on top of the rule-1 counts.
__global__ void
cparilut_candidates_count_2(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* U_new_row)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
// how to determine candidates:
// for each node i, look at any "intermediate" neighbor nodes numbered
// less, and then see if this neighbor has another neighbor j numbered
// more than the intermediate; if so, fill in is (i,j) if it is not
// already nonzero
int numaddrowL = 0, numaddrowU = 0;
// loop first element over row - only for elements smaller the diagonal
for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){
int col1 = L_col[ el1 ];
// now check the upper triangular
// second loop first element over row - only for elements larger the intermediate
for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){
int col2 = U_col[ el2 ];
int cand_col = col2;
// check whether this element already exists
// first case: part of L
if(cand_col < row ){
// check whether this element already exists in L
// int exist = 0;
// for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){
// if(L_col[ k ] == cand_col ){
// exist = 1;
// //break;
// }
// }
// if it does not exist, increase counter for this location
// use the entry one further down to allow for parallel insertion
// if(exist == 0 ){
numaddrowL++;
// }
} else {
// check whether this element already exists in U
// int exist = 0;
// for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){
// if(U_col[ k ] == cand_col ){
// exist = 1;
// //break;
// }
// }
// if(exist == 0 ){
//printf("checked row: %d this element does not yet exist in L: (%d,%d)\n", cand_row, cand_col);
numaddrowU++;
// }
}
}
}
U_new_row[ row ] = U_new_row[ row ]+numaddrowU;
L_new_row[ row ] = L_new_row[ row ]+numaddrowL;
}
}
// Insert pass matching count_1: repeats the same L0-vs-L / U0-vs-U merge-walk
// and this time writes each missing ILU(0) entry into the candidate lists
// (rowidx / col / val) at the row's precomputed offset (L_new_row / U_new_row).
// Candidate values are set to 3 (ONE+ONE+ONE) as a placeholder magnitude.
// The per-row insertion counts are recorded in insertedL / insertedU so the
// second insert kernel can append after them.
// One thread per row; must be launched with the same row mapping as count_1.
__global__ void
cparilut_candidates_insert_1(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* L_new_rowidx,
magma_index_t* L_new_col,
magmaFloatComplex* L_new_val,
magma_index_t* insertedL,
magma_index_t* U_new_row,
magma_index_t* U_new_rowidx,
magma_index_t* U_new_col,
magmaFloatComplex* U_new_val,
magma_index_t* insertedU)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int laddL = 0;
int offsetL = L_new_row[row];
int ilu0 = L0_row[row];
int ilut = L_row[row];
int endilu0 = L0_row[ row+1 ];
int endilut = L_row[ row+1 ];
int ilu0col;
int ilutcol;
do{
ilu0col = L0_col[ ilu0 ];
ilutcol = L_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
L_new_col[ offsetL + laddL ] = ilu0col;
L_new_rowidx[ offsetL + laddL ] = row;
L_new_val[ offsetL + laddL ] = MAGMA_C_ONE + MAGMA_C_ONE + MAGMA_C_ONE;
laddL++;
ilu0++;
}
} while(ilut<endilut && ilu0<endilu0 );
// flush any remaining ILU(0) entries of this row
if (ilu0<endilu0){
do{
ilu0col = L0_col[ ilu0 ];
L_new_col[ offsetL + laddL ] = ilu0col;
L_new_rowidx[ offsetL + laddL ] = row;
L_new_val[ offsetL + laddL ] = MAGMA_C_ONE + MAGMA_C_ONE + MAGMA_C_ONE;
laddL++;
ilu0++;
}while(ilu0<endilu0 );
}
insertedL[row] = laddL;
// same walk for the upper factor
int laddU = 0;
int offsetU = U_new_row[row];
ilu0 = U0_row[row];
ilut = U_row[row];
endilu0 = U0_row[ row+1 ];
endilut = U_row[ row+1 ];
do{
ilu0col = U0_col[ ilu0 ];
ilutcol = U_col[ ilut ];
if(ilu0col == ilutcol ){
ilu0++;
ilut++;
}
else if(ilutcol<ilu0col ){
ilut++;
}
else {
// this element is missing in the current approximation
// mark it as candidate
U_new_col[ offsetU + laddU ] = ilu0col;
U_new_rowidx[ offsetU + laddU ] = row;
U_new_val[ offsetU + laddU ] = MAGMA_C_ONE + MAGMA_C_ONE + MAGMA_C_ONE;
laddU++;
ilu0++;
}
}while(ilut<endilut && ilu0<endilu0 );
if(ilu0<endilu0 ){
do{
ilu0col = U0_col[ ilu0 ];
U_new_col[ offsetU + laddU ] = ilu0col;
U_new_rowidx[ offsetU + laddU ] = row;
U_new_val[ offsetU + laddU ] = MAGMA_C_ONE + MAGMA_C_ONE + MAGMA_C_ONE;
laddU++;
ilu0++;
}while(ilu0<endilu0 );
}
insertedU[row] = laddU;
}
}
// Insert pass matching count_2: regenerates the L*U fill-in candidates and
// appends them after the rule-1 insertions (offset = row start + insertedL/U).
// Unlike count_2, duplicates ARE detected here: an entry already present in
// L/U gets column -1, one already inserted in the candidate list gets -2, and
// both get value ZERO so the later threshold-removal pass drops them. Fresh
// candidates get value ONE.
// One thread per row; must run after insert_1 on the same stream.
__global__ void
cparilut_candidates_insert_2(
const magma_int_t num_rows,
const magma_index_t* L0_row,
const magma_index_t* L0_col,
const magma_index_t* U0_row,
const magma_index_t* U0_col,
const magma_index_t* L_row,
const magma_index_t* L_col,
const magma_index_t* U_row,
const magma_index_t* U_col,
magma_index_t* L_new_row,
magma_index_t* L_new_rowidx,
magma_index_t* L_new_col,
magmaFloatComplex* L_new_val,
magma_index_t* insertedL,
magma_index_t* U_new_row,
magma_index_t* U_new_rowidx,
magma_index_t* U_new_col,
magmaFloatComplex* U_new_val,
magma_index_t* insertedU)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
//for(int row=0; row<num_rows; row++){
if (row < num_rows) {
int cand_row = row;
int laddL = 0;
int laddU = 0;
int offsetL = L_new_row[row] + insertedL[row];
int offsetU = U_new_row[row] + insertedU[row];
// loop first element over row - only for elements smaller the diagonal
for(int el1=L_row[row]; el1<L_row[row+1]-1; el1++ ){
int col1 = L_col[ el1 ];
// now check the upper triangular
// second loop first element over row - only for elements larger the intermediate
for(int el2 = U_row[ col1 ]+1; el2 < U_row[ col1+1 ]; el2++ ){
int col2 = U_col[ el2 ];
int cand_col = col2;
// check whether this element already exists
// first case: part of L
if(cand_col < row ){
int exist = 0;
for(int k=L_row[cand_row]; k<L_row[cand_row+1]; k++ ){
if(L_col[ k ] == cand_col ){
exist = -1;
// printf("already exists:(%d,%d\n", row, cand_col);
//break;
}
}
for(int k=L_new_row[cand_row]; k<L_new_row[cand_row+1]; k++){
if(L_new_col[ k ] == cand_col ){
// element included in LU and nonzero
// printf("already inserted:(%d,%d\n", row, cand_col);
exist = -2;
//break;
}
}
// write unconditionally; duplicates are tagged with a negative
// column and zero value so the threshold pass removes them
L_new_rowidx[ offsetL + laddL ] = cand_row;
L_new_col[ offsetL + laddL ] = (exist == 0) ? cand_col : exist;
L_new_val[ offsetL + laddL ] = (exist == 0) ? MAGMA_C_ONE : MAGMA_C_ZERO;
laddL++;
} else {
// check whether this element already exists in U
int exist = 0;
for(int k=U_row[cand_row]; k<U_row[cand_row+1]; k++ ){
if(U_col[ k ] == cand_col ){
// printf("already exists:(%d,%d\n", row, cand_col);
exist = -1;
//break;
}
}
for(int k=U_new_row[cand_row]; k<U_new_row[cand_row+1]; k++){
if(U_new_col[ k ] == cand_col ){
// element included in LU and nonzero
// printf("already inserted:(%d,%d==%d) k:%d -> %d -> %d\n", row, cand_col , U_new_col[ k ], U_new_row[cand_row], k, U_new_row[cand_row+1] );
exist = -2;
//break;
}
}
U_new_rowidx[ offsetU + laddU ] = cand_row;
U_new_col[ offsetU + laddU ] = (exist == 0) ? cand_col : exist;
U_new_val[ offsetU + laddU ] = (exist == 0) ? MAGMA_C_ONE : MAGMA_C_ZERO;
laddU++;
}
}
}
}
}
/***************************************************************************//**
Purpose
-------
This function identifies the locations with a potential nonzero ILU residual
R = A - L*U where L and U are the current incomplete factors.
Nonzero ILU residuals are possible
1 where A is nonzero but L and U have no nonzero entry
2 where the product L*U has fill-in but the location is not included
in L or U
We assume that the incomplete factors are exact for the elements included
in the current pattern.
This is the GPU implementation of the candidate search.
4 GPU kernels are used: the first is a dry run assessing the memory need,
the second then computes the candidate locations, the third eliminates
duplicate entries. The fourth kernel ensures the elements in a row are sorted
for increasing column index.
Arguments
---------
@param[in]
L0 magma_c_matrix
tril(ILU(0) ) pattern of original system matrix.
@param[in]
U0 magma_c_matrix
triu(ILU(0) ) pattern of original system matrix.
@param[in]
L magma_c_matrix
Current lower triangular factor.
@param[in]
U magma_c_matrix
Current upper triangular factor.
@param[in,out]
L_new magma_c_matrix*
List of candidates for L in COO format.
@param[in,out]
U_new magma_c_matrix*
List of candidates for U in COO format.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
*******************************************************************************/
extern "C" magma_int_t
magma_cparilut_candidates_gpu(
magma_c_matrix L0,
magma_c_matrix U0,
magma_c_matrix L,
magma_c_matrix U,
magma_c_matrix *L_new,
magma_c_matrix *U_new,
magma_queue_t queue )
{
magma_int_t info = 0;
int num_rows = L.num_rows;
// magnitude threshold used to strip the zero-valued duplicate markers
float thrs = 1e-8;
// one thread per row, 1-D launch
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid11 = magma_ceildiv(num_rows, blocksize1 );
int dimgrid12 = 1;
int dimgrid13 = 1;
dim3 grid1(dimgrid11, dimgrid12, dimgrid13 );
dim3 block1(blocksize1, blocksize2, 1 );
magmaIndex_ptr insertedL = NULL;
magmaIndex_ptr insertedU = NULL;
// release any previous content; L_new/U_new are rebuilt from scratch
magma_cmfree(L_new, queue);
magma_cmfree(U_new, queue);
// NOTE: CHECK() jumps to cleanup on failure, so only insertedL/insertedU
// are freed on the error path; L_new/U_new members are left for the caller.
CHECK(magma_index_malloc(&insertedL, num_rows));
CHECK(magma_index_malloc(&insertedU, num_rows));
CHECK(magma_index_malloc(&L_new->drow, num_rows+1));
CHECK(magma_index_malloc(&U_new->drow, num_rows+1));
CHECK(magma_cindexinit_gpu(num_rows+1, L_new->drow, queue));
CHECK(magma_cindexinit_gpu(num_rows+1, U_new->drow, queue));
CHECK(magma_cindexinit_gpu(num_rows, insertedL, queue));
CHECK(magma_cindexinit_gpu(num_rows, insertedU, queue));
L_new->num_rows = L.num_rows;
L_new->num_cols = L.num_cols;
L_new->storage_type = Magma_CSR;
L_new->memory_location = Magma_DEV;
U_new->num_rows = L.num_rows;
U_new->num_cols = L.num_cols;
U_new->storage_type = Magma_CSR;
U_new->memory_location = Magma_DEV;
// dry run: per-row candidate counts into insertedL/insertedU
cparilut_candidates_count_1<<<grid1, block1, 0, queue->cuda_stream()>>>(
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
insertedL, insertedU);
cparilut_candidates_count_2<<<grid1, block1, 0, queue->cuda_stream()>>>(
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
insertedL, insertedU);
// prefix-sum the counts into CSR row pointers and total nnz
CHECK(magma_cget_row_ptr(num_rows, &L_new->nnz, insertedL,
L_new->drow, queue));
CHECK(magma_cget_row_ptr(num_rows, &U_new->nnz, insertedU,
U_new->drow, queue));
CHECK(magma_cindexinit_gpu(num_rows, insertedL, queue));
CHECK(magma_cindexinit_gpu(num_rows, insertedU, queue));
CHECK(magma_cmalloc(&L_new->dval, L_new->nnz));
CHECK(magma_index_malloc(&L_new->drowidx, L_new->nnz));
CHECK(magma_index_malloc(&L_new->dcol, L_new->nnz));
CHECK(magma_cmalloc(&U_new->dval, U_new->nnz));
CHECK(magma_index_malloc(&U_new->drowidx, U_new->nnz));
CHECK(magma_index_malloc(&U_new->dcol, U_new->nnz));
CHECK(magma_cvalinit_gpu(L_new->nnz, L_new->dval, queue));
CHECK(magma_cvalinit_gpu(U_new->nnz, U_new->dval, queue));
//CHECK(magma_cindexinit_gpu(L_new->nnz, L_new->dcol, queue));
//CHECK(magma_cindexinit_gpu(U_new->nnz, U_new->dcol, queue));
//CHECK(magma_cindexinit_gpu(L_new->nnz, L_new->drowidx, queue));
//CHECK(magma_cindexinit_gpu(U_new->nnz, U_new->drowidx, queue));
// we don't need to init rowidx and col
// the uninitialized values will be removed anyway
cparilut_candidates_insert_1<<<grid1, block1, 0, queue->cuda_stream()>>>(
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL,
U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU);
cparilut_candidates_insert_2<<<grid1, block1, 0, queue->cuda_stream()>>>(
L0.num_rows, L0.drow, L0.dcol, U0.drow, U0.dcol,
L.drow, L.dcol, U.drow, U.dcol,
L_new->drow, L_new->drowidx, L_new->dcol, L_new->dval, insertedL,
U_new->drow, U_new->drowidx, U_new->dcol, U_new->dval, insertedU);
// drop the zero-valued duplicate markers (|val| < thrs)
CHECK(magma_cthrsholdrm_gpu(1, L_new, &thrs, queue));
CHECK(magma_cthrsholdrm_gpu(1, U_new, &thrs, queue));
cleanup:
magma_free(insertedL);
magma_free(insertedU);
return info;
}
|
ace355c58d32e1d06da9bbb2cfee9375c8a447b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <common/grid_sync.cuh>
#include <rmm/device_uvector.hpp>
#include "test_utils.h"
namespace MLCommon {
// Validates GridSync: one designated "master" thread per sync scope writes 1
// into its slot of `out`, every participating thread reads the slot back
// after a grid sync, then atomically adds the value it read. The host checks
// the accumulated total, which is only correct if the sync actually ordered
// the write before all reads.
__global__ void gridSyncTestKernel(void* workspace, int* out, SyncType type)
{
GridSync gs(workspace, type, true);
bool master;
int updatePosition;
if (type == ACROSS_ALL) {
// ACROSS_ALL: a single master for the whole grid, one shared slot
master = threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 &&
blockIdx.y == 0 && blockIdx.z == 0;
updatePosition = 0;
} else {
// otherwise: one master (and one slot) per (blockIdx.y, blockIdx.z) slice
master = threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0;
updatePosition = blockIdx.y + blockIdx.z * gridDim.y;
}
if (master) {
out[updatePosition] = 1;
__threadfence();  // publish the store before other blocks pass the sync
}
gs.sync();
int val = out[updatePosition];
// make sure everybody has read the updated value!
gs.sync();
raft::myAtomicAdd(out + updatePosition, val);
}
// Parameters for one GridSync test case: launch shape, whether the same
// workspace is reused for a second launch, and the sync scope under test.
struct GridSyncInputs {
dim3 gridDim, blockDim;
bool checkWorkspaceReuse;
SyncType type;
};
// Launches the test kernel once into `out`, and optionally a second time
// into `out1` reusing the same (zeroed-once) workspace, to verify the
// workspace is left in a reusable state.
// NOTE(review): the memset and both launches run on the default stream while
// the workspace is allocated on `stream` -- presumably fine because the
// caller synchronizes afterwards, but confirm against device_uvector's
// stream-ordering requirements.
void gridSyncTest(int* out, int* out1, const GridSyncInputs& params, hipStream_t stream)
{
size_t workspaceSize = GridSync::computeWorkspaceSize(params.gridDim, params.type, true);
rmm::device_uvector<char> workspace(workspaceSize, stream);
CUDA_CHECK(hipMemset(workspace.data(), 0, workspace.size()));
hipLaunchKernelGGL(( gridSyncTestKernel), dim3(params.gridDim), dim3(params.blockDim), 0, 0, workspace.data(), out, params.type);
CUDA_CHECK(hipPeekAtLastError());
if (params.checkWorkspaceReuse) {
// wait for the first launch before reusing the same workspace
CUDA_CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( gridSyncTestKernel), dim3(params.gridDim), dim3(params.blockDim), 0, 0, workspace.data(), out1, params.type);
CUDA_CHECK(hipPeekAtLastError());
}
}
// gtest needs the parameter type to be printable; nothing useful to print.
::std::ostream& operator<<(::std::ostream& os, const GridSyncInputs& dims) { return os; }
// Parameterized fixture: allocates the two result buffers and runs the
// kernel(s) in SetUp(); the TEST_P body only verifies the accumulated values.
class GridSyncTest : public ::testing::TestWithParam<GridSyncInputs> {
protected:
// out/out1 start empty on the (not yet created) default stream handle
GridSyncTest() : out(0, stream), out1(0, stream) {}
void SetUp() override
{
params = ::testing::TestWithParam<GridSyncInputs>::GetParam();
size_t len = computeOutLen();
CUDA_CHECK(hipStreamCreate(&stream));
out.resize(len, stream);
out1.resize(len, stream);
gridSyncTest(out.data(), out1.data(), params, stream);
}
// one result slot for ACROSS_ALL, one per (y, z) grid slice otherwise
size_t computeOutLen() const
{
size_t len;
if (params.type == ACROSS_ALL) {
len = 1;
} else {
len = params.gridDim.y * params.gridDim.z;
}
return len;
}
protected:
hipStream_t stream = 0;
GridSyncInputs params;
rmm::device_uvector<int> out, out1;
};
// Grid/block shapes exercised for each sync scope, with and without
// workspace reuse.
const std::vector<GridSyncInputs> inputs = {
{{2, 1, 1}, {32, 1, 1}, false, ACROSS_ALL}, {{2, 1, 1}, {32, 2, 1}, false, ACROSS_ALL},
{{2, 1, 1}, {32, 2, 4}, false, ACROSS_ALL}, {{2, 1, 1}, {32, 1, 1}, true, ACROSS_ALL},
{{2, 1, 1}, {32, 2, 1}, true, ACROSS_ALL}, {{2, 1, 1}, {32, 2, 4}, true, ACROSS_ALL},
{{2, 1, 1}, {32, 1, 1}, false, ACROSS_X}, {{2, 2, 1}, {32, 1, 1}, false, ACROSS_X},
{{2, 2, 2}, {32, 1, 1}, false, ACROSS_X}, {{2, 1, 1}, {32, 2, 1}, false, ACROSS_X},
{{2, 2, 1}, {32, 2, 1}, false, ACROSS_X}, {{2, 2, 2}, {32, 2, 1}, false, ACROSS_X},
{{2, 1, 1}, {32, 2, 4}, false, ACROSS_X}, {{2, 2, 1}, {32, 2, 4}, false, ACROSS_X},
{{2, 2, 2}, {32, 2, 4}, false, ACROSS_X}, {{32, 256, 1}, {1, 1, 1}, false, ACROSS_X},
{{2, 1, 1}, {32, 1, 1}, true, ACROSS_X}, {{2, 2, 1}, {32, 1, 1}, true, ACROSS_X},
{{2, 2, 2}, {32, 1, 1}, true, ACROSS_X}, {{2, 1, 1}, {32, 2, 1}, true, ACROSS_X},
{{2, 2, 1}, {32, 2, 1}, true, ACROSS_X}, {{2, 2, 2}, {32, 2, 1}, true, ACROSS_X},
{{2, 1, 1}, {32, 2, 4}, true, ACROSS_X}, {{2, 2, 1}, {32, 2, 4}, true, ACROSS_X},
{{2, 2, 2}, {32, 2, 4}, true, ACROSS_X}, {{32, 256, 1}, {1, 1, 1}, true, ACROSS_X}};
// Each participating thread adds the master's stored 1 exactly once, on top
// of the master's initial store, so every slot must equal nblks*nthreads + 1.
TEST_P(GridSyncTest, Result)
{
size_t len = computeOutLen();
// number of blocks raft::myAtomicAdd'ing the same location
int nblks = params.type == ACROSS_X ? params.gridDim.x
: params.gridDim.x * params.gridDim.y * params.gridDim.z;
int nthreads = params.blockDim.x * params.blockDim.y * params.blockDim.z;
int expected = (nblks * nthreads) + 1;
ASSERT_TRUE(raft::devArrMatch(expected, out.data(), len, raft::Compare<int>()));
if (params.checkWorkspaceReuse) {
ASSERT_TRUE(raft::devArrMatch(expected, out1.data(), len, raft::Compare<int>()));
}
}
INSTANTIATE_TEST_CASE_P(GridSyncTests, GridSyncTest, ::testing::ValuesIn(inputs));
} // end namespace MLCommon
| ace355c58d32e1d06da9bbb2cfee9375c8a447b3.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <common/grid_sync.cuh>
#include <rmm/device_uvector.hpp>
#include "test_utils.h"
namespace MLCommon {
// GridSync validation kernel (CUDA original of the hipified version above):
// the per-scope master thread stores 1 into its slot of `out`; after a grid
// sync every participating thread reads that slot and atomically adds the
// value it observed. A wrong total on the host means the sync failed.
__global__ void gridSyncTestKernel(void* workspace, int* out, SyncType type)
{
GridSync gs(workspace, type, true);
bool master;
int updatePosition;
if (type == ACROSS_ALL) {
// whole-grid scope: a single master, a single slot
master = threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 &&
blockIdx.y == 0 && blockIdx.z == 0;
updatePosition = 0;
} else {
// per-slice scope: one master and one slot per (blockIdx.y, blockIdx.z)
master = threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0;
updatePosition = blockIdx.y + blockIdx.z * gridDim.y;
}
if (master) {
out[updatePosition] = 1;
__threadfence();  // make the store visible grid-wide before the sync
}
gs.sync();
int val = out[updatePosition];
// make sure everybody has read the updated value!
gs.sync();
raft::myAtomicAdd(out + updatePosition, val);
}
// One test case: launch geometry, whether to re-launch with the same
// workspace, and the sync scope being exercised.
struct GridSyncInputs {
dim3 gridDim, blockDim;
bool checkWorkspaceReuse;
SyncType type;
};
// Runs the kernel once into `out` and, when requested, a second time into
// `out1` with the same workspace to prove the workspace is reusable.
// NOTE(review): cudaMemset and the launches use the default stream while the
// workspace lives on `stream`; looks intentional since the fixture
// synchronizes, but verify against device_uvector stream semantics.
void gridSyncTest(int* out, int* out1, const GridSyncInputs& params, cudaStream_t stream)
{
size_t workspaceSize = GridSync::computeWorkspaceSize(params.gridDim, params.type, true);
rmm::device_uvector<char> workspace(workspaceSize, stream);
CUDA_CHECK(cudaMemset(workspace.data(), 0, workspace.size()));
gridSyncTestKernel<<<params.gridDim, params.blockDim>>>(workspace.data(), out, params.type);
CUDA_CHECK(cudaPeekAtLastError());
if (params.checkWorkspaceReuse) {
// drain the first launch before reusing the workspace
CUDA_CHECK(cudaDeviceSynchronize());
gridSyncTestKernel<<<params.gridDim, params.blockDim>>>(workspace.data(), out1, params.type);
CUDA_CHECK(cudaPeekAtLastError());
}
}
// required by gtest for printing the parameter; intentionally empty output
::std::ostream& operator<<(::std::ostream& os, const GridSyncInputs& dims) { return os; }
// Fixture: SetUp() sizes the result buffers and performs the launches; the
// test body only inspects the device results.
class GridSyncTest : public ::testing::TestWithParam<GridSyncInputs> {
protected:
// buffers start empty; resized in SetUp() once the stream exists
GridSyncTest() : out(0, stream), out1(0, stream) {}
void SetUp() override
{
params = ::testing::TestWithParam<GridSyncInputs>::GetParam();
size_t len = computeOutLen();
CUDA_CHECK(cudaStreamCreate(&stream));
out.resize(len, stream);
out1.resize(len, stream);
gridSyncTest(out.data(), out1.data(), params, stream);
}
// ACROSS_ALL uses a single slot; otherwise one slot per (y, z) slice
size_t computeOutLen() const
{
size_t len;
if (params.type == ACROSS_ALL) {
len = 1;
} else {
len = params.gridDim.y * params.gridDim.z;
}
return len;
}
protected:
cudaStream_t stream = 0;
GridSyncInputs params;
rmm::device_uvector<int> out, out1;
};
// Launch shapes covered per scope, each with and without workspace reuse.
const std::vector<GridSyncInputs> inputs = {
{{2, 1, 1}, {32, 1, 1}, false, ACROSS_ALL}, {{2, 1, 1}, {32, 2, 1}, false, ACROSS_ALL},
{{2, 1, 1}, {32, 2, 4}, false, ACROSS_ALL}, {{2, 1, 1}, {32, 1, 1}, true, ACROSS_ALL},
{{2, 1, 1}, {32, 2, 1}, true, ACROSS_ALL}, {{2, 1, 1}, {32, 2, 4}, true, ACROSS_ALL},
{{2, 1, 1}, {32, 1, 1}, false, ACROSS_X}, {{2, 2, 1}, {32, 1, 1}, false, ACROSS_X},
{{2, 2, 2}, {32, 1, 1}, false, ACROSS_X}, {{2, 1, 1}, {32, 2, 1}, false, ACROSS_X},
{{2, 2, 1}, {32, 2, 1}, false, ACROSS_X}, {{2, 2, 2}, {32, 2, 1}, false, ACROSS_X},
{{2, 1, 1}, {32, 2, 4}, false, ACROSS_X}, {{2, 2, 1}, {32, 2, 4}, false, ACROSS_X},
{{2, 2, 2}, {32, 2, 4}, false, ACROSS_X}, {{32, 256, 1}, {1, 1, 1}, false, ACROSS_X},
{{2, 1, 1}, {32, 1, 1}, true, ACROSS_X}, {{2, 2, 1}, {32, 1, 1}, true, ACROSS_X},
{{2, 2, 2}, {32, 1, 1}, true, ACROSS_X}, {{2, 1, 1}, {32, 2, 1}, true, ACROSS_X},
{{2, 2, 1}, {32, 2, 1}, true, ACROSS_X}, {{2, 2, 2}, {32, 2, 1}, true, ACROSS_X},
{{2, 1, 1}, {32, 2, 4}, true, ACROSS_X}, {{2, 2, 1}, {32, 2, 4}, true, ACROSS_X},
{{2, 2, 2}, {32, 2, 4}, true, ACROSS_X}, {{32, 256, 1}, {1, 1, 1}, true, ACROSS_X}};
// Every thread in scope contributes the master's 1 once via atomic add, on
// top of the master's initial store: each slot must be nblks*nthreads + 1.
TEST_P(GridSyncTest, Result)
{
size_t len = computeOutLen();
// number of blocks raft::myAtomicAdd'ing the same location
int nblks = params.type == ACROSS_X ? params.gridDim.x
: params.gridDim.x * params.gridDim.y * params.gridDim.z;
int nthreads = params.blockDim.x * params.blockDim.y * params.blockDim.z;
int expected = (nblks * nthreads) + 1;
ASSERT_TRUE(raft::devArrMatch(expected, out.data(), len, raft::Compare<int>()));
if (params.checkWorkspaceReuse) {
ASSERT_TRUE(raft::devArrMatch(expected, out1.data(), len, raft::Compare<int>()));
}
}
INSTANTIATE_TEST_CASE_P(GridSyncTests, GridSyncTest, ::testing::ValuesIn(inputs));
} // end namespace MLCommon
|
2c00776a0529e25795ed86b40018a25c3a548356.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#define N 20
#define M 3
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// One thread per element; threads mapped past the end of the arrays exit.
__global__ void sum(int *a, int *b, int *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    c[i] = a[i] + b[i];
}
// Populate the first n entries of a with pseudo-random values in [0, 98],
// taken from the C library rand() sequence (seed the generator beforehand
// if reproducibility is needed).
void fill_matrix(int *a, int n){
    for (int i = 0; i < n; ++i)
        a[i] = rand() % 99;
}
// Print the first n entries of a to stdout, each value followed by a single
// space, and terminate the row with a newline.
void print_matrix(int *a, int n){
    for (int i = 0; i < n; ++i)
        printf("%d ", a[i]);
    printf("\n");
}
// Driver: fill two N-element host vectors with random values, add them on
// the device with ceil(N/M) blocks of M threads, and print inputs + result.
// NOTE(review): every hipMalloc/hipMemcpy return code and the launch status
// are ignored here -- on failure the program prints whatever is in c.
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
// device buffers
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// host buffers, a and b filled with values in [0, 98]
a = (int *)malloc(size);
fill_matrix(a, N);
b = (int *)malloc(size);
fill_matrix(b, N);
c = (int *)malloc(size);
print_matrix(a,N);
print_matrix(b,N);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// ceil(N/M) blocks of M threads cover all N elements
hipLaunchKernelGGL(( sum), dim3((N + M-1) / M),dim3(M), 0, 0, d_a, d_b, d_c, N);
// blocking copy also synchronizes with the kernel
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
print_matrix(c,N);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 2c00776a0529e25795ed86b40018a25c3a548356.cu | #include<stdio.h>
#include<stdlib.h>
#define N 20
#define M 3
// Element-wise vector addition: c[index] = a[index] + b[index] for
// index in [0, n). One thread per element; the guard handles the partial
// last block when n is not a multiple of the block size.
__global__ void sum(int *a, int *b, int *c, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n)
c[index] = a[index] + b[index];
}
// Fill the first n entries of a with pseudo-random values in [0, 98] from
// the C library rand() sequence (unseeded here, so the sequence repeats
// across runs).
void fill_matrix(int *a, int n){
int i;
for(i=0;i<n;i++){
a[i]=rand()%99;
}
}
// Print the first n entries of a on one stdout line, each followed by a
// space, then a trailing newline.
void print_matrix(int *a, int n){
int i;
for(i=0;i<n;i++){
printf("%d ",a[i]);
}
printf("\n");
}
/*
 * Driver: fill two N-element host vectors with random values, add them on
 * the GPU with ceil(N/M) blocks of M threads, and print inputs and result.
 *
 * Improvement over the original: device allocations, host allocations and
 * the kernel launch are now checked. Previously every return code was
 * ignored, so a failed cudaMalloc or a bad launch silently printed
 * uninitialized memory as the "result".
 */
int main()
{
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = N * sizeof(int);
    /* device buffers */
    if (cudaMalloc((void **)&d_a, size) != cudaSuccess ||
        cudaMalloc((void **)&d_b, size) != cudaSuccess ||
        cudaMalloc((void **)&d_c, size) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n",
                cudaGetErrorString(cudaGetLastError()));
        return 1;
    }
    /* host buffers; a and b are filled in the same order as before so the
       rand() sequence (and therefore the printed values) is unchanged */
    a = (int *)malloc(size);
    fill_matrix(a, N);
    b = (int *)malloc(size);
    fill_matrix(b, N);
    c = (int *)malloc(size);
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "host malloc failed\n");
        return 1;
    }
    print_matrix(a,N);
    print_matrix(b,N);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    /* ceil(N/M) blocks of M threads cover all N elements */
    sum<<<(N + M-1) / M,M>>>(d_a, d_b, d_c, N);
    cudaError_t err = cudaGetLastError();  /* catches bad launch config */
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    /* blocking copy also synchronizes with the kernel */
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    print_matrix(c,N);
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
5a13d9f64a8f59f2be6fd59013b93e3b6b1d5934.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hipcub/hipcub.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/scatter.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/partitioning.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace {
// Launch configuration for optimized hash partition
constexpr size_type OPTIMIZED_BLOCK_SIZE = 512;
constexpr size_type OPTIMIZED_ROWS_PER_THREAD = 8;
constexpr size_type ELEMENTS_PER_THREAD = 2;
// partition counts up to this value take the optimized kernel path
// (presumably; the dispatch site is outside this excerpt -- confirm)
constexpr size_type THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL = 1024;
// Launch configuration for fallback hash partition
constexpr size_type FALLBACK_BLOCK_SIZE = 256;
constexpr size_type FALLBACK_ROWS_PER_THREAD = 1;
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
template <typename hash_value_t>
class modulo_partitioner {
 public:
  // Maps hash values onto [0, num_partitions) via the remainder operation;
  // works for any positive partition count.
  modulo_partitioner(size_type num_partitions) : mod{num_partitions} {}

  __device__ size_type operator()(hash_value_t hash_value) const
  {
    return hash_value % mod;
  }

 private:
  const size_type mod;  // number of partitions, i.e. the modulus
};
// Returns true iff `number` is a positive power of two.
// A power of two has exactly one bit set; `n & (n - 1)` clears the lowest
// set bit, so the result is zero iff at most one bit is set. The explicit
// `number > 0` guard fixes the original form, which wrongly classified 0
// (and, for signed T, never rejected non-positive inputs) as a power of two
// because 0 & -1 == 0.
template <typename T>
bool is_power_two(T number)
{
  return (number > 0) && (0 == (number & (number - 1)));
}
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses a bitwise mask. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently
* via a single bitwise AND as: n & (d - 1)
*/
// Maps hash values onto [0, num_partitions) with a single bitwise AND
// against (num_partitions - 1); valid only when num_partitions is a power
// of two, which the constructor asserts.
template <typename hash_value_t>
class bitwise_partitioner {
public:
bitwise_partitioner(size_type num_partitions) : mask{(num_partitions - 1)}
{
assert(is_power_two(num_partitions));
}
__device__ size_type operator()(hash_value_t hash_value) const
{
return hash_value & mask; // hash_value & (num_partitions - 1)
}
private:
const size_type mask;
};
/* --------------------------------------------------------------------------*/
/**
* @brief Computes which partition each row of a device_table will belong to
based on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the
global size of each partition across all thread blocks.
*
* @param[in] the_table The table whose rows will be partitioned
* @param[in] num_rows The number of rows in the table
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] the_partitioner The functor that maps a rows hash value to a
partition number
* @param[out] row_partition_numbers Array that holds which partition each row
belongs to
* @param[out] row_partition_offset Array that holds the offset of each row in
its partition of
* the thread block
* @param[out] block_partition_sizes Array that holds the size of each partition
for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1
partition(num_partitions -1) size, ...} }
* @param[out] global_partition_sizes The number of rows in each partition.
*/
/* ----------------------------------------------------------------------------*/
// Grid-stride kernel (row_number advances by blockDim.x * gridDim.x) that
// hashes each row, maps the hash to a partition, and records per-row and
// per-block partition statistics.
// Requires dynamic shared memory for num_partitions size_type counters
// (the extern shared histogram below); the launcher must size it
// accordingly. block_partition_sizes is laid out partition-major:
// entry [partition * gridDim.x + block].
// global_partition_sizes is accumulated with atomics, so it must be
// zero-initialized by the caller before launch -- TODO confirm at call site.
template <class row_hasher_t, typename partitioner_type>
__global__ void compute_row_partition_numbers(row_hasher_t the_hasher,
const size_type num_rows,
const size_type num_partitions,
const partitioner_type the_partitioner,
size_type* __restrict__ row_partition_numbers,
size_type* __restrict__ row_partition_offset,
size_type* __restrict__ block_partition_sizes,
size_type* __restrict__ global_partition_sizes)
{
// Accumulate histogram of the size of each partition in shared memory
extern __shared__ size_type shared_partition_sizes[];
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize local histogram
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_sizes[partition_number] = 0;
partition_number += blockDim.x;
}
__syncthreads();
// Compute the hash value for each row, store it to the array of hash values
// and compute the partition to which the hash value belongs and increment
// the shared memory counter for that partition
while (row_number < num_rows) {
const hash_value_type row_hash_value = the_hasher(row_number);
const size_type partition_number = the_partitioner(row_hash_value);
row_partition_numbers[row_number] = partition_number;
// the atomic's old value doubles as this row's intra-block offset
row_partition_offset[row_number] =
atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
row_number += blockDim.x * gridDim.x;
}
__syncthreads();
// Flush shared memory histogram to global memory
partition_number = threadIdx.x;
while (partition_number < num_partitions) {
const size_type block_partition_size = shared_partition_sizes[partition_number];
// Update global size of each partition
atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
// Record the size of this partition in this block
const size_type write_location = partition_number * gridDim.x + blockIdx.x;
block_partition_sizes[write_location] = block_partition_size;
partition_number += blockDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Given an array of partition numbers, computes the final output
location for each element in the output such that all rows with the same
partition are contiguous in memory.
*
* @param row_partition_numbers The array that records the partition number for
each row
* @param num_rows The number of rows
* @param num_partitions The number of partitions
* @param[out] block_partition_offsets Array that holds the offset of each
partition for each thread block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1
partition(num_partitions -1) offset, ...} }
*/
/* ----------------------------------------------------------------------------*/
__global__ void compute_row_output_locations(size_type* __restrict__ row_partition_numbers,
                                             const size_type num_rows,
                                             const size_type num_partitions,
                                             size_type* __restrict__ block_partition_offsets)
{
  // Dynamic shared memory holding this block's running output offset for every
  // partition (launch must supply num_partitions * sizeof(size_type) bytes)
  extern __shared__ size_type shared_partition_offsets[];

  // Seed the shared counters from the partition-major global offset array:
  // block_partition_offsets[partition * gridDim.x + block]
  for (size_type part = threadIdx.x; part < num_partitions; part += blockDim.x) {
    shared_partition_offsets[part] = block_partition_offsets[part * gridDim.x + blockIdx.x];
  }
  __syncthreads();  // counters must be initialized before any row claims a slot

  // Grid-stride loop: each row claims the next free slot of its partition by
  // atomically bumping this block's counter, then its partition number is
  // overwritten in-place with the final output location.
  const size_type stride = blockDim.x * gridDim.x;
  for (size_type row = threadIdx.x + blockIdx.x * blockDim.x; row < num_rows; row += stride) {
    const size_type part = row_partition_numbers[row];
    const size_type output_location =
      atomicAdd(&(shared_partition_offsets[part]), size_type(1));
    row_partition_numbers[row] = output_location;
  }
}
/* --------------------------------------------------------------------------*/
/**
* @brief Move one column from the input table to the hashed table.
*
* @param[in] input_buf Data buffer of the column in the input table
* @param[out] output_buf Preallocated data buffer of the column in the output
* table
* @param[in] num_rows The number of rows in each column
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] row_partition_numbers Array that holds which partition each row
* belongs to
* @param[in] row_partition_offset Array that holds the offset of each row in
* its partition of the thread block.
* @param[in] block_partition_sizes Array that holds the size of each partition
* for each block
* @param[in] scanned_block_partition_sizes The scan of block_partition_sizes
*/
/* ----------------------------------------------------------------------------*/
// Kernel: stages this block's rows in shared memory grouped by partition, then
// streams each partition's contiguous run out to its global output location.
// Launch contract (see copy_block_partitions_impl): OPTIMIZED_BLOCK_SIZE threads
// per block, dynamic shared memory sized for OPTIMIZED_BLOCK_SIZE *
// OPTIMIZED_ROWS_PER_THREAD elements of DataType plus 2 * (num_partitions + 1)
// size_type entries.
template <typename InputIter, typename DataType>
__global__ void copy_block_partitions(InputIter input_iter,
                                      DataType* __restrict__ output_buf,
                                      const size_type num_rows,
                                      const size_type num_partitions,
                                      size_type const* __restrict__ row_partition_numbers,
                                      size_type const* __restrict__ row_partition_offset,
                                      size_type const* __restrict__ block_partition_sizes,
                                      size_type const* __restrict__ scanned_block_partition_sizes)
{
  // Carve the dynamic shared memory into three regions:
  //   1. block_output: staging area for this block's rows, grouped by partition
  //   2. partition_offset_shared: start of each partition within block_output
  //      (num_partitions + 1 entries; entry i+1 holds the inclusive scan)
  //   3. partition_offset_global: start of this block's slice of each partition
  //      in the global output buffer
  extern __shared__ char shared_memory[];
  auto block_output = reinterpret_cast<DataType*>(shared_memory);
  auto partition_offset_shared =
    reinterpret_cast<size_type*>(block_output + OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD);
  auto partition_offset_global = partition_offset_shared + num_partitions + 1;

  typedef hipcub::BlockScan<size_type, OPTIMIZED_BLOCK_SIZE> BlockScan;
  __shared__ typename BlockScan::TempStorage temp_storage;

  // use ELEMENTS_PER_THREAD=2 to support up to 1024 partitions
  // (OPTIMIZED_BLOCK_SIZE threads * 2 histogram entries per thread)
  size_type temp_histo[ELEMENTS_PER_THREAD];

  // Gather this block's per-partition sizes from the partition-major layout
  // block_partition_sizes[block + partition * gridDim.x]
  for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
    if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
      temp_histo[i] =
        block_partition_sizes[blockIdx.x + (ELEMENTS_PER_THREAD * threadIdx.x + i) * gridDim.x];
    } else {
      temp_histo[i] = 0;
    }
  }

  __syncthreads();

  // Inclusive scan of the per-partition sizes gives the end offset of each
  // partition within this block's staging area
  BlockScan(temp_storage).InclusiveSum(temp_histo, temp_histo);

  __syncthreads();

  if (threadIdx.x == 0) { partition_offset_shared[0] = 0; }

  // Calculate the offset in shared memory of each partition in this thread
  // block (shifted by one so entry p is the start of partition p)
  for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
    if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
      partition_offset_shared[ELEMENTS_PER_THREAD * threadIdx.x + i + 1] = temp_histo[i];
    }
  }

  // Fetch the offset in the output buffer of each partition in this thread
  // block from the scanned (partition-major) block sizes
  for (size_type ipartition = threadIdx.x; ipartition < num_partitions; ipartition += blockDim.x) {
    partition_offset_global[ipartition] =
      scanned_block_partition_sizes[ipartition * gridDim.x + blockIdx.x];
  }

  __syncthreads();

  // Fetch the input data to shared memory: each row lands at its partition's
  // start plus the row's precomputed offset within this block's slice
  for (size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; row_number < num_rows;
       row_number += blockDim.x * gridDim.x) {
    size_type const ipartition = row_partition_numbers[row_number];

    block_output[partition_offset_shared[ipartition] + row_partition_offset[row_number]] =
      input_iter[row_number];
  }

  __syncthreads();

  // Copy data from shared memory to output using 32 threads for each partition
  // so that each partition's contiguous run is written with coalesced accesses
  constexpr int nthreads_partition = 32;
  static_assert(OPTIMIZED_BLOCK_SIZE % nthreads_partition == 0,
                "BLOCK_SIZE must be divisible by number of threads");

  for (size_type ipartition = threadIdx.x / nthreads_partition; ipartition < num_partitions;
       ipartition += OPTIMIZED_BLOCK_SIZE / nthreads_partition) {
    size_type const nelements_partition =
      partition_offset_shared[ipartition + 1] - partition_offset_shared[ipartition];

    for (size_type row_offset = threadIdx.x % nthreads_partition; row_offset < nelements_partition;
         row_offset += nthreads_partition) {
      output_buf[partition_offset_global[ipartition] + row_offset] =
        block_output[partition_offset_shared[ipartition] + row_offset];
    }
  }
}
// Host wrapper: sizes the dynamic shared memory and launches the
// copy_block_partitions kernel on `stream` with OPTIMIZED_BLOCK_SIZE threads.
template <typename InputIter, typename OutputIter>
void copy_block_partitions_impl(InputIter const input,
                                OutputIter output,
                                size_type num_rows,
                                size_type num_partitions,
                                size_type const* row_partition_numbers,
                                size_type const* row_partition_offset,
                                size_type const* block_partition_sizes,
                                size_type const* scanned_block_partition_sizes,
                                size_type grid_size,
                                rmm::cuda_stream_view stream)
{
  // We need 3 chunks of shared memory:
  // 1. BLOCK_SIZE * ROWS_PER_THREAD elements of the output's value type
  //    (hence sizeof(*output), not sizeof(size_type)) for staging rows
  // 2. num_partitions + 1 elements of size_type for per-block partition offsets
  // 3. num_partitions + 1 elements of size_type for global partition offsets
  int const smem = OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD * sizeof(*output) +
                   (num_partitions + 1) * sizeof(size_type) * 2;

  hipLaunchKernelGGL(( copy_block_partitions), dim3(grid_size), dim3(OPTIMIZED_BLOCK_SIZE), smem, stream.value(),
    input,
    output,
    num_rows,
    num_partitions,
    row_partition_numbers,
    row_partition_offset,
    block_partition_sizes,
    scanned_block_partition_sizes);
}
// Builds a gather map equivalent to the block-partition reordering: routing the
// identity sequence 0..num_rows-1 through copy_block_partitions yields, at each
// output position, the index of the source row that belongs there.
rmm::device_vector<size_type> compute_gather_map(size_type num_rows,
                                                 size_type num_partitions,
                                                 size_type const* row_partition_numbers,
                                                 size_type const* row_partition_offset,
                                                 size_type const* block_partition_sizes,
                                                 size_type const* scanned_block_partition_sizes,
                                                 size_type grid_size,
                                                 rmm::cuda_stream_view stream)
{
  // One output slot per input row
  rmm::device_vector<size_type> gather_map(num_rows);

  // Partition the counting sequence exactly as the data columns are partitioned
  copy_block_partitions_impl(thrust::make_counting_iterator(0),
                             gather_map.data().get(),
                             num_rows,
                             num_partitions,
                             row_partition_numbers,
                             row_partition_offset,
                             block_partition_sizes,
                             scanned_block_partition_sizes,
                             grid_size,
                             stream);

  return gather_map;
}
// Type dispatcher functor that copies one column into its partitioned layout.
// Fixed-width element types take the fast shared-memory copy path; all other
// types fall back to a gather.
struct copy_block_partitions_dispatcher {
  // Overload selected (via SFINAE) for fixed-width element types
  template <typename DataType, std::enable_if_t<is_fixed_width<DataType>()>* = nullptr>
  std::unique_ptr<column> operator()(column_view const& input,
                                     const size_type num_partitions,
                                     size_type const* row_partition_numbers,
                                     size_type const* row_partition_offset,
                                     size_type const* block_partition_sizes,
                                     size_type const* scanned_block_partition_sizes,
                                     size_type grid_size,
                                     rmm::cuda_stream_view stream,
                                     rmm::mr::device_memory_resource* mr)
  {
    // Allocate the output buffer up front, then copy by partition directly
    rmm::device_buffer output(input.size() * sizeof(DataType), stream, mr);

    copy_block_partitions_impl(input.data<DataType>(),
                               static_cast<DataType*>(output.data()),
                               input.size(),
                               num_partitions,
                               row_partition_numbers,
                               row_partition_offset,
                               block_partition_sizes,
                               scanned_block_partition_sizes,
                               grid_size,
                               stream);

    return std::make_unique<column>(input.type(), input.size(), std::move(output));
  }

  // Overload selected for non-fixed-width element types (e.g. strings)
  template <typename DataType, std::enable_if_t<not is_fixed_width<DataType>()>* = nullptr>
  std::unique_ptr<column> operator()(column_view const& input,
                                     const size_type num_partitions,
                                     size_type const* row_partition_numbers,
                                     size_type const* row_partition_offset,
                                     size_type const* block_partition_sizes,
                                     size_type const* scanned_block_partition_sizes,
                                     size_type grid_size,
                                     rmm::cuda_stream_view stream,
                                     rmm::mr::device_memory_resource* mr)
  {
    // Use compute_gather_map (built on copy_block_partitions) to create an
    // equivalent gather map for the partition reordering
    auto gather_map = compute_gather_map(input.size(),
                                         num_partitions,
                                         row_partition_numbers,
                                         row_partition_offset,
                                         block_partition_sizes,
                                         scanned_block_partition_sizes,
                                         grid_size,
                                         stream);

    // Use gather instead for non-fixed width types
    return type_dispatcher(input.type(),
                           detail::column_gatherer{},
                           input,
                           gather_map.begin(),
                           gather_map.end(),
                           false,
                           stream,
                           mr);
  }
};
// NOTE hash_has_nulls must be true if table_to_hash has nulls
// Partitions `input` into `num_partitions` pieces keyed by the hash of each row
// of `table_to_hash`, returning the reordered table and the host-side starting
// offset of every partition. `hash_has_nulls` must be true if table_to_hash
// contains nulls (selects the null-aware row hasher at compile time).
template <template <typename> class hash_function, bool hash_has_nulls>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition_table(
  table_view const& input,
  table_view const& table_to_hash,
  size_type num_partitions,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  auto const num_rows = table_to_hash.num_rows();

  // The shared-memory copy optimization only applies when the partition count
  // is small enough (per-block partition bookkeeping must fit in shared memory)
  bool const use_optimization{num_partitions <= THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL};
  auto const block_size = use_optimization ? OPTIMIZED_BLOCK_SIZE : FALLBACK_BLOCK_SIZE;
  auto const rows_per_thread =
    use_optimization ? OPTIMIZED_ROWS_PER_THREAD : FALLBACK_ROWS_PER_THREAD;
  auto const rows_per_block = block_size * rows_per_thread;

  // NOTE grid_size is non-const to workaround lambda capture bug in gcc 5.4
  auto grid_size = util::div_rounding_up_safe(num_rows, rows_per_block);

  // Allocate array to hold which partition each row belongs to
  auto row_partition_numbers = rmm::device_vector<size_type>(num_rows);

  // Array to hold the size of each partition computed by each block
  //  i.e., { {block0 partition0 size, block1 partition0 size, ...},
  //          {block0 partition1 size, block1 partition1 size, ...},
  //          ...
  //          {block0 partition(num_partitions-1) size, block1
  //           partition(num_partitions -1) size, ...} }
  auto block_partition_sizes = rmm::device_vector<size_type>(grid_size * num_partitions);

  auto scanned_block_partition_sizes = rmm::device_vector<size_type>(grid_size * num_partitions);

  // Holds the total number of rows in each partition
  auto global_partition_sizes = rmm::device_vector<size_type>(num_partitions, size_type{0});

  auto row_partition_offset = rmm::device_vector<size_type>(num_rows);

  auto const device_input = table_device_view::create(table_to_hash, stream);
  auto const hasher       = row_hasher<hash_function, hash_has_nulls>(*device_input);

  // If the number of partitions is a power of two, we can compute the partition
  // number of each row more efficiently with bitwise operations
  if (is_power_two(num_partitions)) {
    // Determines how the mapping between hash value and partition number is
    // computed
    using partitioner_type = bitwise_partitioner<hash_value_type>;

    // Computes which partition each row belongs to by hashing the row and
    // performing a partitioning operator on the hash value. Also computes the
    // number of rows in each partition both for each thread block as well as
    // across all blocks
    hipLaunchKernelGGL(( compute_row_partition_numbers), dim3(grid_size),
                       dim3(block_size),
                       num_partitions * sizeof(size_type),
                       stream.value(), hasher,
                       num_rows,
                       num_partitions,
                       partitioner_type(num_partitions),
                       row_partition_numbers.data().get(),
                       row_partition_offset.data().get(),
                       block_partition_sizes.data().get(),
                       global_partition_sizes.data().get());
  } else {
    // Determines how the mapping between hash value and partition number is
    // computed
    using partitioner_type = modulo_partitioner<hash_value_type>;

    // Computes which partition each row belongs to by hashing the row and
    // performing a partitioning operator on the hash value. Also computes the
    // number of rows in each partition both for each thread block as well as
    // across all blocks
    hipLaunchKernelGGL(( compute_row_partition_numbers), dim3(grid_size),
                       dim3(block_size),
                       num_partitions * sizeof(size_type),
                       stream.value(), hasher,
                       num_rows,
                       num_partitions,
                       partitioner_type(num_partitions),
                       row_partition_numbers.data().get(),
                       row_partition_offset.data().get(),
                       block_partition_sizes.data().get(),
                       global_partition_sizes.data().get());
  }

  // Compute exclusive scan of all blocks' partition sizes in-place to determine
  // the starting point for each blocks portion of each partition in the output
  thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream.value()),
                         block_partition_sizes.begin(),
                         block_partition_sizes.end(),
                         scanned_block_partition_sizes.data().get());

  // Compute exclusive scan of size of each partition to determine offset
  // location of each partition in final output.
  // TODO This can be done independently on a separate stream
  size_type* scanned_global_partition_sizes{global_partition_sizes.data().get()};
  thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream.value()),
                         global_partition_sizes.begin(),
                         global_partition_sizes.end(),
                         scanned_global_partition_sizes);

  // Copy the result of the exclusive scan to the output offsets array
  // to indicate the starting point for each partition in the output
  // NOTE(review): the destination is pageable host memory; per the CUDA docs a
  // device-to-pageable-host async copy completes before returning, so no
  // explicit sync is needed here — confirm the same holds for HIP, and add a
  // stream sync if partition_offsets is ever changed to pinned memory.
  std::vector<size_type> partition_offsets(num_partitions);
  CUDA_TRY(hipMemcpyAsync(partition_offsets.data(),
                          scanned_global_partition_sizes,
                          num_partitions * sizeof(size_type),
                          hipMemcpyDeviceToHost,
                          stream.value()));

  // When the number of partitions is less than a threshold, we can apply an
  // optimization using shared memory to copy values to the output buffer.
  // Otherwise, fallback to using scatter.
  if (use_optimization) {
    std::vector<std::unique_ptr<column>> output_cols(input.num_columns());

    // NOTE these pointers are non-const to workaround lambda capture bug in
    // gcc 5.4
    auto row_partition_numbers_ptr{row_partition_numbers.data().get()};
    auto row_partition_offset_ptr{row_partition_offset.data().get()};
    auto block_partition_sizes_ptr{block_partition_sizes.data().get()};
    auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data().get()};

    // Copy input to output by partition per column
    std::transform(input.begin(), input.end(), output_cols.begin(), [=](auto const& col) {
      return cudf::type_dispatcher(col.type(),
                                   copy_block_partitions_dispatcher{},
                                   col,
                                   num_partitions,
                                   row_partition_numbers_ptr,
                                   row_partition_offset_ptr,
                                   block_partition_sizes_ptr,
                                   scanned_block_partition_sizes_ptr,
                                   grid_size,
                                   stream,
                                   mr);
    });

    if (has_nulls(input)) {
      // Use copy_block_partitions to compute a gather map
      auto gather_map = compute_gather_map(num_rows,
                                           num_partitions,
                                           row_partition_numbers_ptr,
                                           row_partition_offset_ptr,
                                           block_partition_sizes_ptr,
                                           scanned_block_partition_sizes_ptr,
                                           grid_size,
                                           stream);

      // Handle bitmask using gather to take advantage of ballot_sync
      detail::gather_bitmask(
        input, gather_map.begin(), output_cols, detail::gather_bitmask_op::DONT_CHECK, stream, mr);
    }

    auto output{std::make_unique<table>(std::move(output_cols))};
    return std::make_pair(std::move(output), std::move(partition_offsets));
  } else {
    // Compute a scatter map from input to output such that the output rows are
    // sorted by partition number
    auto row_output_locations{row_partition_numbers.data().get()};
    auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data().get()};
    hipLaunchKernelGGL(( compute_row_output_locations), dim3(grid_size),
                       dim3(block_size),
                       num_partitions * sizeof(size_type),
                       stream.value(),
                       row_output_locations, num_rows, num_partitions, scanned_block_partition_sizes_ptr);

    // Use the resulting scatter map to materialize the output
    auto output = detail::scatter(
      input, row_partition_numbers.begin(), row_partition_numbers.end(), input, false, stream, mr);

    return std::make_pair(std::move(output), std::move(partition_offsets));
  }
}
struct dispatch_map_type {
  /**
   * @brief Partitions the table `t` according to the `partition_map`.
   *
   * Algorithm:
   * - Compute the histogram of the size each partition
   * - Compute the exclusive scan of the histogram to get the offset for each
   *   partition in the final partitioned output
   * - Use a transform iterator to materialize the scatter map of the rows from
   *   `t` into the final output.
   *
   * @note JH: It would likely be more efficient to avoid the atomic increments
   * in the transform iterator. It would probably be faster to compute a
   * per-thread block histogram and compute an exclusive scan of all of the
   * per-block histograms (like in hash partition). But I'm purposefully trying
   * to reduce memory pressure by avoiding intermediate materializations. Plus,
   * atomics resolve in L2 and should be pretty fast since all the offsets will
   * fit in L2.
   */
  // Enabled (via SFINAE) only for integral index map types
  template <typename MapType>
  std::enable_if_t<is_index_type<MapType>(),
                   std::pair<std::unique_ptr<table>, std::vector<size_type>>>
  operator()(table_view const& t,
             column_view const& partition_map,
             size_type num_partitions,
             rmm::cuda_stream_view stream,
             rmm::mr::device_memory_resource* mr) const
  {
    // Build a histogram of the number of rows in each partition; the extra
    // trailing entry lets the exclusive scan below append the total row count
    rmm::device_vector<size_type> histogram(num_partitions + 1);
    std::size_t temp_storage_bytes{};
    std::size_t const num_levels = num_partitions + 1;
    size_type const lower_level  = 0;
    size_type const upper_level  = num_partitions;

    // First call (nullptr storage) only queries the required temp storage size.
    // Fixed: use hipcub:: rather than the unconverted cub:: so this file builds
    // on the ROCm backend, consistent with hipcub::BlockScan used above.
    hipcub::DeviceHistogram::HistogramEven(nullptr,
                                           temp_storage_bytes,
                                           partition_map.begin<MapType>(),
                                           histogram.data().get(),
                                           num_levels,
                                           lower_level,
                                           upper_level,
                                           partition_map.size(),
                                           stream.value());

    rmm::device_buffer temp_storage(temp_storage_bytes, stream);

    // Second call computes the histogram
    hipcub::DeviceHistogram::HistogramEven(temp_storage.data(),
                                           temp_storage_bytes,
                                           partition_map.begin<MapType>(),
                                           histogram.data().get(),
                                           num_levels,
                                           lower_level,
                                           upper_level,
                                           partition_map.size(),
                                           stream.value());

    // `histogram` was created with an extra entry at the end such that an
    // exclusive scan will put the total number of rows at the end.
    // Fixed: pass `stream` to exec_policy so temporary allocations are
    // stream-ordered, matching every other exec_policy use in this file.
    thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream.value()),
                           histogram.begin(),
                           histogram.end(),
                           histogram.begin());

    // Copy offsets to host
    std::vector<size_type> partition_offsets(histogram.size());
    thrust::copy(histogram.begin(), histogram.end(), partition_offsets.begin());

    // Unfortunately need to materialize the scatter map because
    // `detail::scatter` requires multiple passes through the iterator
    rmm::device_vector<MapType> scatter_map(partition_map.size());

    // For each `partition_map[i]`, atomically increment the corresponding
    // partition offset to determine `i`s location in the output
    thrust::transform(rmm::exec_policy(stream)->on(stream.value()),
                      partition_map.begin<MapType>(),
                      partition_map.end<MapType>(),
                      scatter_map.begin(),
                      [offsets = histogram.data().get()] __device__(auto partition_number) {
                        return atomicAdd(&offsets[partition_number], 1);
                      });

    // Scatter the rows into their partitions
    auto scattered =
      cudf::detail::scatter(t, scatter_map.begin(), scatter_map.end(), t, false, stream, mr);

    return std::make_pair(std::move(scattered), std::move(partition_offsets));
  }

  // Rejects non-integral partition maps with a runtime failure
  template <typename MapType>
  std::enable_if_t<not is_index_type<MapType>(),
                   std::pair<std::unique_ptr<table>, std::vector<size_type>>>
  operator()(table_view const& t,
             column_view const& partition_map,
             size_type num_partitions,
             rmm::cuda_stream_view stream,
             rmm::mr::device_memory_resource* mr) const
  {
    CUDF_FAIL("Unexpected, non-integral partition map.");
  }
};
} // namespace
namespace detail {
namespace local {
// Validates/normalizes inputs, then forwards to hash_partition_table with the
// null-handling path chosen at compile time.
template <template <typename> class hash_function>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
  table_view const& input,
  std::vector<size_type> const& columns_to_hash,
  int num_partitions,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  // Restrict hashing to the requested key columns
  auto table_to_hash = input.select(columns_to_hash);

  // Degenerate inputs (no partitions, no rows, or no key columns) yield an
  // empty table and no partition offsets
  bool const nothing_to_do =
    num_partitions <= 0 || input.num_rows() == 0 || table_to_hash.num_columns() == 0;
  if (nothing_to_do) { return std::make_pair(empty_like(input), std::vector<size_type>{}); }

  // Row hashers are templated on whether the key columns may contain nulls
  return has_nulls(table_to_hash)
           ? hash_partition_table<hash_function, true>(
               input, table_to_hash, num_partitions, stream, mr)
           : hash_partition_table<hash_function, false>(
               input, table_to_hash, num_partitions, stream, mr);
}
} // namespace local
// Validates the partition map against the table, handles trivial cases, and
// dispatches on the map column's runtime type.
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
  table_view const& t,
  column_view const& partition_map,
  size_type num_partitions,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  // Preconditions: one map entry per row, and a map without nulls
  CUDF_EXPECTS(t.num_rows() == partition_map.size(),
               "Size mismatch between table and partition map.");
  CUDF_EXPECTS(not partition_map.has_nulls(), "Unexpected null values in partition_map.");

  // Trivial cases produce an empty table and no offsets
  bool const trivial = (num_partitions == 0) or (t.num_rows() == 0);
  if (trivial) { return std::make_pair(empty_like(t), std::vector<size_type>{}); }

  return cudf::type_dispatcher(
    partition_map.type(), dispatch_map_type{}, t, partition_map, num_partitions, stream, mr);
}
} // namespace detail
// Partition based on hash values
// Public entry point: selects the hashing scheme and forwards to the templated
// detail implementation.
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
  table_view const& input,
  std::vector<size_type> const& columns_to_hash,
  int num_partitions,
  hash_id hash_function,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();

  if (hash_function == hash_id::HASH_IDENTITY) {
    // Identity hashing is only defined for numeric key columns
    for (size_type const column_id : columns_to_hash) {
      if (!is_numeric(input.column(column_id).type()))
        CUDF_FAIL("IdentityHash does not support this data type");
    }
    return detail::local::hash_partition<IdentityHash>(
      input, columns_to_hash, num_partitions, stream, mr);
  }

  if (hash_function == hash_id::HASH_MURMUR3) {
    return detail::local::hash_partition<MurmurHash3_32>(
      input, columns_to_hash, num_partitions, stream, mr);
  }

  CUDF_FAIL("Unsupported hash function in hash_partition");
}
// Partition based on an explicit partition map
// Public entry point: partitions rows of `t` according to the explicit
// `partition_map` column, forwarding to the detail implementation on the
// default stream.
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
  table_view const& t,
  column_view const& partition_map,
  size_type num_partitions,
  rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();  // NVTX range for profiling
  return detail::partition(t, partition_map, num_partitions, rmm::cuda_stream_default, mr);
}
} // namespace cudf
| 5a13d9f64a8f59f2be6fd59013b93e3b6b1d5934.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cub/cub.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/scatter.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/partitioning.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace {
// Launch configuration for optimized hash partition
constexpr size_type OPTIMIZED_BLOCK_SIZE = 512;  // threads per block
constexpr size_type OPTIMIZED_ROWS_PER_THREAD = 8;  // rows staged per thread in shared memory
constexpr size_type ELEMENTS_PER_THREAD = 2;  // histogram entries scanned per thread in copy_block_partitions
// Partition-count cutoff for taking the shared-memory optimized copy path
constexpr size_type THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL = 1024;
// Launch configuration for fallback hash partition
constexpr size_type FALLBACK_BLOCK_SIZE = 256;
constexpr size_type FALLBACK_ROWS_PER_THREAD = 1;
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
template <typename hash_value_t>
class modulo_partitioner {
 public:
  // Stores the partition count as the modulo divisor
  modulo_partitioner(size_type num_partitions) : divisor{num_partitions} {}

  // Maps a hash value to a partition in [0, divisor) via modulo
  __device__ size_type operator()(hash_value_t hash_value) const { return hash_value % divisor; }

 private:
  const size_type divisor;  // number of partitions
};
/**
 * @brief Checks whether `number` is a positive power of two.
 *
 * A power of two has exactly one bit set, so `number & (number - 1)` clears the
 * lowest set bit and yields zero. The explicit `number > 0` guard rejects zero
 * (and negatives for signed types), which the bit trick alone would
 * misclassify: `0 & (0 - 1) == 0`, so the original test returned true for 0.
 * All existing callers pass a positive partition count, so their behavior is
 * unchanged.
 */
template <typename T>
bool is_power_two(T number)
{
  return (number > 0) && (0 == (number & (number - 1)));
}
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses a bitwise mask. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently
* via a single bitwise AND as: n & (d - 1)
*/
template <typename hash_value_t>
class bitwise_partitioner {
 public:
  // Precomputes the bitmask; the partition count must be a power of two for
  // `hash & (n - 1)` to equal `hash % n`
  bitwise_partitioner(size_type num_partitions) : mask{(num_partitions - 1)}
  {
    assert(is_power_two(num_partitions));
  }

  // Maps a hash value to a partition in [0, num_partitions) with a single AND
  __device__ size_type operator()(hash_value_t hash_value) const
  {
    return hash_value & mask;  // hash_value & (num_partitions - 1)
  }

 private:
  const size_type mask;  // num_partitions - 1
};
/* --------------------------------------------------------------------------*/
/**
* @brief Computes which partition each row of a device_table will belong to
based on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the
global size of each partition across all thread blocks.
*
* @param[in] the_table The table whose rows will be partitioned
* @param[in] num_rows The number of rows in the table
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] the_partitioner The functor that maps a row's hash value to a
partition number
* @param[out] row_partition_numbers Array that holds which partition each row
belongs to
* @param[out] row_partition_offset Array that holds the offset of each row in
its partition of
* the thread block
* @param[out] block_partition_sizes Array that holds the size of each partition
for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1
partition(num_partitions -1) size, ...} }
* @param[out] global_partition_sizes The number of rows in each partition.
*/
/* ----------------------------------------------------------------------------*/
template <class row_hasher_t, typename partitioner_type>
__global__ void compute_row_partition_numbers(row_hasher_t the_hasher,
                                              const size_type num_rows,
                                              const size_type num_partitions,
                                              const partitioner_type the_partitioner,
                                              size_type* __restrict__ row_partition_numbers,
                                              size_type* __restrict__ row_partition_offset,
                                              size_type* __restrict__ block_partition_sizes,
                                              size_type* __restrict__ global_partition_sizes)
{
  // Per-block histogram of partition sizes, kept in dynamic shared memory.
  // The launch must supply num_partitions * sizeof(size_type) bytes.
  extern __shared__ size_type shared_partition_sizes[];

  // Zero the block-local histogram cooperatively
  for (size_type part = threadIdx.x; part < num_partitions; part += blockDim.x) {
    shared_partition_sizes[part] = 0;
  }
  __syncthreads();  // histogram must be fully zeroed before counting

  // Grid-stride loop: hash each row, map the hash to a partition, record the
  // partition number, and claim the row's running offset within this block's
  // slice of that partition by atomically bumping the shared counter.
  const size_type stride = blockDim.x * gridDim.x;
  for (size_type row = threadIdx.x + blockIdx.x * blockDim.x; row < num_rows; row += stride) {
    const hash_value_type hash_value = the_hasher(row);
    const size_type part             = the_partitioner(hash_value);
    row_partition_numbers[row]       = part;
    row_partition_offset[row] =
      atomicAdd(&(shared_partition_sizes[part]), size_type(1));
  }
  __syncthreads();  // all counts must be final before publishing

  // Publish the block-local histogram: accumulate into the global totals and
  // store this block's contribution in partition-major layout, i.e.
  // block_partition_sizes[partition * gridDim.x + block]
  for (size_type part = threadIdx.x; part < num_partitions; part += blockDim.x) {
    const size_type local_size = shared_partition_sizes[part];
    atomicAdd(&global_partition_sizes[part], local_size);
    block_partition_sizes[part * gridDim.x + blockIdx.x] = local_size;
  }
}
/* --------------------------------------------------------------------------*/
/**
* @brief Given an array of partition numbers, computes the final output
location for each element in the output such that all rows with the same
partition are contiguous in memory.
*
* @param row_partition_numbers The array that records the partition number for
each row
* @param num_rows The number of rows
* @param num_partitions The number of partitions
* @param[out] block_partition_offsets Array that holds the offset of each
partition for each thread block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1
partition(num_partitions -1) offset, ...} }
*/
/* ----------------------------------------------------------------------------*/
__global__ void compute_row_output_locations(size_type* __restrict__ row_partition_numbers,
                                             const size_type num_rows,
                                             const size_type num_partitions,
                                             size_type* __restrict__ block_partition_offsets)
{
  // Dynamic shared memory holding this block's running output offset for every
  // partition (launch must supply num_partitions * sizeof(size_type) bytes)
  extern __shared__ size_type shared_partition_offsets[];

  // Seed the shared counters from the partition-major global offset array:
  // block_partition_offsets[partition * gridDim.x + block]
  for (size_type part = threadIdx.x; part < num_partitions; part += blockDim.x) {
    shared_partition_offsets[part] = block_partition_offsets[part * gridDim.x + blockIdx.x];
  }
  __syncthreads();  // counters must be initialized before any row claims a slot

  // Grid-stride loop: each row claims the next free slot of its partition by
  // atomically bumping this block's counter, then its partition number is
  // overwritten in-place with the final output location.
  const size_type stride = blockDim.x * gridDim.x;
  for (size_type row = threadIdx.x + blockIdx.x * blockDim.x; row < num_rows; row += stride) {
    const size_type part = row_partition_numbers[row];
    const size_type output_location =
      atomicAdd(&(shared_partition_offsets[part]), size_type(1));
    row_partition_numbers[row] = output_location;
  }
}
/* --------------------------------------------------------------------------*/
/**
* @brief Move one column from the input table to the hashed table.
*
* @param[in] input_buf Data buffer of the column in the input table
* @param[out] output_buf Preallocated data buffer of the column in the output
* table
* @param[in] num_rows The number of rows in each column
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] row_partition_numbers Array that holds which partition each row
* belongs to
* @param[in] row_partition_offset Array that holds the offset of each row in
* its partition of the thread block.
* @param[in] block_partition_sizes Array that holds the size of each partition
* for each block
* @param[in] scanned_block_partition_sizes The scan of block_partition_sizes
*/
/* ----------------------------------------------------------------------------*/
template <typename InputIter, typename DataType>
__global__ void copy_block_partitions(InputIter input_iter,
                                      DataType* __restrict__ output_buf,
                                      const size_type num_rows,
                                      const size_type num_partitions,
                                      size_type const* __restrict__ row_partition_numbers,
                                      size_type const* __restrict__ row_partition_offset,
                                      size_type const* __restrict__ block_partition_sizes,
                                      size_type const* __restrict__ scanned_block_partition_sizes)
{
  // Dynamic shared memory is carved into three chunks (sized by the host
  // launcher in copy_block_partitions_impl):
  //   1. staging buffer for this block's output rows
  //   2. each partition's offset within the staging buffer (num_partitions+1)
  //   3. each partition's offset in the global output buffer
  extern __shared__ char shared_memory[];
  auto block_output = reinterpret_cast<DataType*>(shared_memory);
  auto partition_offset_shared =
    reinterpret_cast<size_type*>(block_output + OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD);
  auto partition_offset_global = partition_offset_shared + num_partitions + 1;
  typedef cub::BlockScan<size_type, OPTIMIZED_BLOCK_SIZE> BlockScan;
  __shared__ typename BlockScan::TempStorage temp_storage;
  // use ELEMENTS_PER_THREAD=2 to support upto 1024 partitions
  size_type temp_histo[ELEMENTS_PER_THREAD];
  // Load this block's per-partition sizes into thread-local registers
  // (zero-padded past num_partitions) for the block-wide scan
  for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
    if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
      temp_histo[i] =
        block_partition_sizes[blockIdx.x + (ELEMENTS_PER_THREAD * threadIdx.x + i) * gridDim.x];
    } else {
      temp_histo[i] = 0;
    }
  }
  __syncthreads();
  BlockScan(temp_storage).InclusiveSum(temp_histo, temp_histo);
  __syncthreads();
  if (threadIdx.x == 0) { partition_offset_shared[0] = 0; }
  // Calculate the offset in shared memory of each partition in this thread
  // block (inclusive scan shifted by one == exclusive scan)
  for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
    if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
      partition_offset_shared[ELEMENTS_PER_THREAD * threadIdx.x + i + 1] = temp_histo[i];
    }
  }
  // Fetch the offset in the output buffer of each partition in this thread
  // block
  for (size_type ipartition = threadIdx.x; ipartition < num_partitions; ipartition += blockDim.x) {
    partition_offset_global[ipartition] =
      scanned_block_partition_sizes[ipartition * gridDim.x + blockIdx.x];
  }
  __syncthreads();
  // Fetch the input data to shared memory, grouped by partition
  for (size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; row_number < num_rows;
       row_number += blockDim.x * gridDim.x) {
    size_type const ipartition = row_partition_numbers[row_number];
    block_output[partition_offset_shared[ipartition] + row_partition_offset[row_number]] =
      input_iter[row_number];
  }
  __syncthreads();
  // Copy data from shared memory to output using 32 threads for each partition
  // so that global writes within a partition are contiguous
  constexpr int nthreads_partition = 32;
  static_assert(OPTIMIZED_BLOCK_SIZE % nthreads_partition == 0,
                "BLOCK_SIZE must be divisible by number of threads");
  for (size_type ipartition = threadIdx.x / nthreads_partition; ipartition < num_partitions;
       ipartition += OPTIMIZED_BLOCK_SIZE / nthreads_partition) {
    size_type const nelements_partition =
      partition_offset_shared[ipartition + 1] - partition_offset_shared[ipartition];
    for (size_type row_offset = threadIdx.x % nthreads_partition; row_offset < nelements_partition;
         row_offset += nthreads_partition) {
      output_buf[partition_offset_global[ipartition] + row_offset] =
        block_output[partition_offset_shared[ipartition] + row_offset];
    }
  }
}
/**
 * @brief Host launcher for the copy_block_partitions kernel.
 *
 * Computes the dynamic shared-memory requirement and launches the kernel on
 * the given stream with OPTIMIZED_BLOCK_SIZE threads per block.
 */
template <typename InputIter, typename OutputIter>
void copy_block_partitions_impl(InputIter const input,
                                OutputIter output,
                                size_type num_rows,
                                size_type num_partitions,
                                size_type const* row_partition_numbers,
                                size_type const* row_partition_offset,
                                size_type const* block_partition_sizes,
                                size_type const* scanned_block_partition_sizes,
                                size_type grid_size,
                                rmm::cuda_stream_view stream)
{
  // Shared-memory layout expected by the kernel, three consecutive chunks:
  //   1. BLOCK_SIZE * ROWS_PER_THREAD output elements (staging buffer)
  //   2. num_partitions + 1 size_type entries (per-block partition offsets)
  //   3. num_partitions + 1 size_type entries (global partition offsets)
  auto const staging_bytes = OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD * sizeof(*output);
  auto const offsets_bytes = (num_partitions + 1) * sizeof(size_type) * 2;
  int const smem           = staging_bytes + offsets_bytes;
  copy_block_partitions<<<grid_size, OPTIMIZED_BLOCK_SIZE, smem, stream.value()>>>(
    input,
    output,
    num_rows,
    num_partitions,
    row_partition_numbers,
    row_partition_offset,
    block_partition_sizes,
    scanned_block_partition_sizes);
}
/**
 * @brief Builds a gather map equivalent to the block-partition copy.
 *
 * Feeding a counting iterator through copy_block_partitions_impl writes, for
 * each output slot, the index of the source row that lands there — i.e. a
 * gather map usable by cudf's gather machinery.
 */
rmm::device_vector<size_type> compute_gather_map(size_type num_rows,
                                                 size_type num_partitions,
                                                 size_type const* row_partition_numbers,
                                                 size_type const* row_partition_offset,
                                                 size_type const* block_partition_sizes,
                                                 size_type const* scanned_block_partition_sizes,
                                                 size_type grid_size,
                                                 rmm::cuda_stream_view stream)
{
  rmm::device_vector<size_type> gather_map(num_rows);
  auto const row_indices = thrust::make_counting_iterator(0);
  copy_block_partitions_impl(row_indices,
                             gather_map.data().get(),
                             num_rows,
                             num_partitions,
                             row_partition_numbers,
                             row_partition_offset,
                             block_partition_sizes,
                             scanned_block_partition_sizes,
                             grid_size,
                             stream);
  return gather_map;
}
// Type dispatcher that materializes one output column per input column.
// Fixed-width types are copied directly with the shared-memory kernel;
// all other types go through a gather map instead.
struct copy_block_partitions_dispatcher {
  // Fixed-width path: copy element data straight into a preallocated buffer.
  template <typename DataType, std::enable_if_t<is_fixed_width<DataType>()>* = nullptr>
  std::unique_ptr<column> operator()(column_view const& input,
                                     const size_type num_partitions,
                                     size_type const* row_partition_numbers,
                                     size_type const* row_partition_offset,
                                     size_type const* block_partition_sizes,
                                     size_type const* scanned_block_partition_sizes,
                                     size_type grid_size,
                                     rmm::cuda_stream_view stream,
                                     rmm::mr::device_memory_resource* mr)
  {
    rmm::device_buffer output(input.size() * sizeof(DataType), stream, mr);
    copy_block_partitions_impl(input.data<DataType>(),
                               static_cast<DataType*>(output.data()),
                               input.size(),
                               num_partitions,
                               row_partition_numbers,
                               row_partition_offset,
                               block_partition_sizes,
                               scanned_block_partition_sizes,
                               grid_size,
                               stream);
    return std::make_unique<column>(input.type(), input.size(), std::move(output));
  }
  // Variable-width path: build a gather map and let the column gatherer
  // handle type-specific reconstruction (e.g. strings).
  template <typename DataType, std::enable_if_t<not is_fixed_width<DataType>()>* = nullptr>
  std::unique_ptr<column> operator()(column_view const& input,
                                     const size_type num_partitions,
                                     size_type const* row_partition_numbers,
                                     size_type const* row_partition_offset,
                                     size_type const* block_partition_sizes,
                                     size_type const* scanned_block_partition_sizes,
                                     size_type grid_size,
                                     rmm::cuda_stream_view stream,
                                     rmm::mr::device_memory_resource* mr)
  {
    // Use move_to_output_buffer to create an equivalent gather map
    auto gather_map = compute_gather_map(input.size(),
                                         num_partitions,
                                         row_partition_numbers,
                                         row_partition_offset,
                                         block_partition_sizes,
                                         scanned_block_partition_sizes,
                                         grid_size,
                                         stream);
    // Use gather instead for non-fixed width types
    return type_dispatcher(input.type(),
                           detail::column_gatherer{},
                           input,
                           gather_map.begin(),
                           gather_map.end(),
                           false,
                           stream,
                           mr);
  }
};
// Core hash-partition implementation.
// NOTE hash_has_nulls must be true if table_to_hash has nulls
template <template <typename> class hash_function, bool hash_has_nulls>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition_table(
  table_view const& input,
  table_view const& table_to_hash,
  size_type num_partitions,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  auto const num_rows = table_to_hash.num_rows();
  // Few partitions -> shared-memory copy kernel; many -> scatter fallback
  bool const use_optimization{num_partitions <= THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL};
  auto const block_size = use_optimization ? OPTIMIZED_BLOCK_SIZE : FALLBACK_BLOCK_SIZE;
  auto const rows_per_thread =
    use_optimization ? OPTIMIZED_ROWS_PER_THREAD : FALLBACK_ROWS_PER_THREAD;
  auto const rows_per_block = block_size * rows_per_thread;
  // NOTE grid_size is non-const to workaround lambda capture bug in gcc 5.4
  auto grid_size = util::div_rounding_up_safe(num_rows, rows_per_block);
  // Allocate array to hold which partition each row belongs to
  auto row_partition_numbers = rmm::device_vector<size_type>(num_rows);
  // Array to hold the size of each partition computed by each block
  //  i.e., { {block0 partition0 size, block1 partition0 size, ...},
  //          {block0 partition1 size, block1 partition1 size, ...},
  //          ...
  //          {block0 partition(num_partitions-1) size, block1
  //          partition(num_partitions -1) size, ...} }
  auto block_partition_sizes = rmm::device_vector<size_type>(grid_size * num_partitions);
  auto scanned_block_partition_sizes = rmm::device_vector<size_type>(grid_size * num_partitions);
  // Holds the total number of rows in each partition
  auto global_partition_sizes = rmm::device_vector<size_type>(num_partitions, size_type{0});
  auto row_partition_offset = rmm::device_vector<size_type>(num_rows);
  auto const device_input = table_device_view::create(table_to_hash, stream);
  auto const hasher = row_hasher<hash_function, hash_has_nulls>(*device_input);
  // If the number of partitions is a power of two, we can compute the partition
  // number of each row more efficiently with bitwise operations
  if (is_power_two(num_partitions)) {
    // Determines how the mapping between hash value and partition number is
    // computed
    using partitioner_type = bitwise_partitioner<hash_value_type>;
    // Computes which partition each row belongs to by hashing the row and
    // performing a partitioning operator on the hash value. Also computes the
    // number of rows in each partition both for each thread block as well as
    // across all blocks
    compute_row_partition_numbers<<<grid_size,
                                    block_size,
                                    num_partitions * sizeof(size_type),
                                    stream.value()>>>(hasher,
                                                      num_rows,
                                                      num_partitions,
                                                      partitioner_type(num_partitions),
                                                      row_partition_numbers.data().get(),
                                                      row_partition_offset.data().get(),
                                                      block_partition_sizes.data().get(),
                                                      global_partition_sizes.data().get());
  } else {
    // Determines how the mapping between hash value and partition number is
    // computed
    using partitioner_type = modulo_partitioner<hash_value_type>;
    // Computes which partition each row belongs to by hashing the row and
    // performing a partitioning operator on the hash value. Also computes the
    // number of rows in each partition both for each thread block as well as
    // across all blocks
    compute_row_partition_numbers<<<grid_size,
                                    block_size,
                                    num_partitions * sizeof(size_type),
                                    stream.value()>>>(hasher,
                                                      num_rows,
                                                      num_partitions,
                                                      partitioner_type(num_partitions),
                                                      row_partition_numbers.data().get(),
                                                      row_partition_offset.data().get(),
                                                      block_partition_sizes.data().get(),
                                                      global_partition_sizes.data().get());
  }
  // Compute exclusive scan of all blocks' partition sizes in-place to determine
  // the starting point for each blocks portion of each partition in the output
  thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream.value()),
                         block_partition_sizes.begin(),
                         block_partition_sizes.end(),
                         scanned_block_partition_sizes.data().get());
  // Compute exclusive scan of size of each partition to determine offset
  // location of each partition in final output.
  // TODO This can be done independently on a separate stream
  size_type* scanned_global_partition_sizes{global_partition_sizes.data().get()};
  thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream.value()),
                         global_partition_sizes.begin(),
                         global_partition_sizes.end(),
                         scanned_global_partition_sizes);
  // Copy the result of the exclusive scan to the output offsets array
  // to indicate the starting point for each partition in the output
  // NOTE(review): the async D2H copy targets pageable host memory, which the
  // CUDA runtime documents as returning only after the copy completes; an
  // explicit stream synchronize before reading partition_offsets would make
  // the dependency explicit -- TODO confirm against project stream policy.
  std::vector<size_type> partition_offsets(num_partitions);
  CUDA_TRY(cudaMemcpyAsync(partition_offsets.data(),
                           scanned_global_partition_sizes,
                           num_partitions * sizeof(size_type),
                           cudaMemcpyDeviceToHost,
                           stream.value()));
  // When the number of partitions is less than a threshold, we can apply an
  // optimization using shared memory to copy values to the output buffer.
  // Otherwise, fallback to using scatter.
  if (use_optimization) {
    std::vector<std::unique_ptr<column>> output_cols(input.num_columns());
    // NOTE these pointers are non-const to workaround lambda capture bug in
    // gcc 5.4
    auto row_partition_numbers_ptr{row_partition_numbers.data().get()};
    auto row_partition_offset_ptr{row_partition_offset.data().get()};
    auto block_partition_sizes_ptr{block_partition_sizes.data().get()};
    auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data().get()};
    // Copy input to output by partition per column
    std::transform(input.begin(), input.end(), output_cols.begin(), [=](auto const& col) {
      return cudf::type_dispatcher(col.type(),
                                   copy_block_partitions_dispatcher{},
                                   col,
                                   num_partitions,
                                   row_partition_numbers_ptr,
                                   row_partition_offset_ptr,
                                   block_partition_sizes_ptr,
                                   scanned_block_partition_sizes_ptr,
                                   grid_size,
                                   stream,
                                   mr);
    });
    if (has_nulls(input)) {
      // Use copy_block_partitions to compute a gather map
      auto gather_map = compute_gather_map(num_rows,
                                           num_partitions,
                                           row_partition_numbers_ptr,
                                           row_partition_offset_ptr,
                                           block_partition_sizes_ptr,
                                           scanned_block_partition_sizes_ptr,
                                           grid_size,
                                           stream);
      // Handle bitmask using gather to take advantage of ballot_sync
      detail::gather_bitmask(
        input, gather_map.begin(), output_cols, detail::gather_bitmask_op::DONT_CHECK, stream, mr);
    }
    auto output{std::make_unique<table>(std::move(output_cols))};
    return std::make_pair(std::move(output), std::move(partition_offsets));
  } else {
    // Compute a scatter map from input to output such that the output rows are
    // sorted by partition number
    auto row_output_locations{row_partition_numbers.data().get()};
    auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data().get()};
    compute_row_output_locations<<<grid_size,
                                   block_size,
                                   num_partitions * sizeof(size_type),
                                   stream.value()>>>(
      row_output_locations, num_rows, num_partitions, scanned_block_partition_sizes_ptr);
    // Use the resulting scatter map to materialize the output
    auto output = detail::scatter(
      input, row_partition_numbers.begin(), row_partition_numbers.end(), input, false, stream, mr);
    return std::make_pair(std::move(output), std::move(partition_offsets));
  }
}
struct dispatch_map_type {
  /**
   * @brief Partitions the table `t` according to the `partition_map`.
   *
   * Algorithm:
   * - Compute the histogram of the size each partition
   * - Compute the exclusive scan of the histogram to get the offset for each
   *   partition in the final partitioned output
   * - Use a transform iterator to materialize the scatter map of the rows from
   *   `t` into the final output.
   *
   * @note JH: It would likely be more efficient to avoid the atomic increments
   * in the transform iterator. It would probably be faster to compute a
   * per-thread block histogram and compute an exclusive scan of all of the
   * per-block histograms (like in hash partition). But I'm purposefully trying
   * to reduce memory pressure by avoiding intermediate materializations. Plus,
   * atomics resolve in L2 and should be pretty fast since all the offsets will
   * fit in L2.
   *
   */
  template <typename MapType>
  std::enable_if_t<is_index_type<MapType>(),
                   std::pair<std::unique_ptr<table>, std::vector<size_type>>>
  operator()(table_view const& t,
             column_view const& partition_map,
             size_type num_partitions,
             rmm::cuda_stream_view stream,
             rmm::mr::device_memory_resource* mr) const
  {
    // Build a histogram of the number of rows in each partition
    rmm::device_vector<size_type> histogram(num_partitions + 1);
    std::size_t temp_storage_bytes{};
    std::size_t const num_levels = num_partitions + 1;
    size_type const lower_level  = 0;
    size_type const upper_level  = num_partitions;
    // First call only sizes the required temporary storage...
    cub::DeviceHistogram::HistogramEven(nullptr,
                                        temp_storage_bytes,
                                        partition_map.begin<MapType>(),
                                        histogram.data().get(),
                                        num_levels,
                                        lower_level,
                                        upper_level,
                                        partition_map.size(),
                                        stream.value());
    rmm::device_buffer temp_storage(temp_storage_bytes, stream);
    // ...second call actually computes the histogram
    cub::DeviceHistogram::HistogramEven(temp_storage.data(),
                                        temp_storage_bytes,
                                        partition_map.begin<MapType>(),
                                        histogram.data().get(),
                                        num_levels,
                                        lower_level,
                                        upper_level,
                                        partition_map.size(),
                                        stream.value());
    // `histogram` was created with an extra entry at the end such that an
    // exclusive scan will put the total number of rows at the end
    // FIX: pass `stream` to exec_policy (was rmm::exec_policy()) so thrust's
    // temporary allocations use the same stream the algorithm executes on,
    // consistent with every other thrust call in this file.
    thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream.value()),
                           histogram.begin(),
                           histogram.end(),
                           histogram.begin());
    // Copy offsets to host (thrust::copy from device memory synchronizes)
    std::vector<size_type> partition_offsets(histogram.size());
    thrust::copy(histogram.begin(), histogram.end(), partition_offsets.begin());
    // Unfortunately need to materialize the scatter map because
    // `detail::scatter` requires multiple passes through the iterator
    rmm::device_vector<MapType> scatter_map(partition_map.size());
    // For each `partition_map[i]`, atomically increment the corresponding
    // partition offset to determine `i`s location in the output
    thrust::transform(rmm::exec_policy(stream)->on(stream.value()),
                      partition_map.begin<MapType>(),
                      partition_map.end<MapType>(),
                      scatter_map.begin(),
                      [offsets = histogram.data().get()] __device__(auto partition_number) {
                        return atomicAdd(&offsets[partition_number], 1);
                      });
    // Scatter the rows into their partitions
    auto scattered =
      cudf::detail::scatter(t, scatter_map.begin(), scatter_map.end(), t, false, stream, mr);
    return std::make_pair(std::move(scattered), std::move(partition_offsets));
  }
  // Non-index partition-map types are invalid; fail loudly.
  template <typename MapType>
  std::enable_if_t<not is_index_type<MapType>(),
                   std::pair<std::unique_ptr<table>, std::vector<size_type>>>
  operator()(table_view const& t,
             column_view const& partition_map,
             size_type num_partitions,
             rmm::cuda_stream_view stream,
             rmm::mr::device_memory_resource* mr) const
  {
    CUDF_FAIL("Unexpected, non-integral partition map.");
  }
};
} // namespace
namespace detail {
namespace local {
/**
 * @brief Dispatches to the null-aware or null-free hash partition
 * implementation, short-circuiting trivially empty requests.
 */
template <template <typename> class hash_function>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
  table_view const& input,
  std::vector<size_type> const& columns_to_hash,
  int num_partitions,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  auto const table_to_hash = input.select(columns_to_hash);
  // Return empty result if there are no partitions or nothing to hash
  bool const nothing_to_do =
    num_partitions <= 0 || input.num_rows() == 0 || table_to_hash.num_columns() == 0;
  if (nothing_to_do) { return std::make_pair(empty_like(input), std::vector<size_type>{}); }
  // hash_has_nulls must match the actual null state of the hashed columns
  return has_nulls(table_to_hash) ? hash_partition_table<hash_function, true>(
                                      input, table_to_hash, num_partitions, stream, mr)
                                  : hash_partition_table<hash_function, false>(
                                      input, table_to_hash, num_partitions, stream, mr);
}
} // namespace local
/**
 * @brief Validates inputs and dispatches on the partition map's type to
 * scatter `t` into `num_partitions` contiguous groups.
 */
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
  table_view const& t,
  column_view const& partition_map,
  size_type num_partitions,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  CUDF_EXPECTS(t.num_rows() == partition_map.size(),
               "Size mismatch between table and partition map.");
  CUDF_EXPECTS(not partition_map.has_nulls(), "Unexpected null values in partition_map.");
  // An empty request or an empty table yields an empty result
  bool const empty_result = (num_partitions == 0) or (t.num_rows() == 0);
  if (empty_result) { return std::make_pair(empty_like(t), std::vector<size_type>{}); }
  return cudf::type_dispatcher(
    partition_map.type(), dispatch_map_type{}, t, partition_map, num_partitions, stream, mr);
}
} // namespace detail
// Partition based on hash values
// Partition based on hash values.
// Validates the hash function choice (identity hashing requires numeric
// columns) and forwards to the templated detail implementation.
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
  table_view const& input,
  std::vector<size_type> const& columns_to_hash,
  int num_partitions,
  hash_id hash_function,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  if (hash_function == hash_id::HASH_IDENTITY) {
    // Identity hashing is only defined for numeric columns
    for (size_type const& column_id : columns_to_hash) {
      if (!is_numeric(input.column(column_id).type()))
        CUDF_FAIL("IdentityHash does not support this data type");
    }
    return detail::local::hash_partition<IdentityHash>(
      input, columns_to_hash, num_partitions, stream, mr);
  }
  if (hash_function == hash_id::HASH_MURMUR3) {
    return detail::local::hash_partition<MurmurHash3_32>(
      input, columns_to_hash, num_partitions, stream, mr);
  }
  CUDF_FAIL("Unsupported hash function in hash_partition");
}
// Partition based on an explicit partition map
// Partition based on an explicit partition map.
// Public overload: forwards to detail::partition on the default stream.
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
  table_view const& t,
  column_view const& partition_map,
  size_type num_partitions,
  rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  auto const stream = rmm::cuda_stream_default;
  return detail::partition(t, partition_map, num_partitions, stream, mr);
}
} // namespace cudf
|
39e0e3a843fdd421919869d54846f4c2bb9e1610.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "histogram_common.h"
#include "../benchmark_common.h"
////////////////////////////////////////////////////////////////////////////////
// Shortcut shared memory atomic addition functions
////////////////////////////////////////////////////////////////////////////////
#define USE_SMEM_ATOMICS 0
#if(!USE_SMEM_ATOMICS)
// Software emulation of a shared-memory increment for targets without
// shared-memory atomics: the upper LOG2_WARP_SIZE bits of each counter hold
// the tag of the last lane that wrote it; a lane retries until its tagged
// value survives the warp's racy writes.
#define TAG_MASK ( (1U << (UINT_BITS - LOG2_WARP_SIZE)) - 1U )
inline __device__ void addByte(volatile uint *s_WarpHist, uint data, uint threadTag){
    uint count;
    do{
        // Strip the old tag, bump the count, re-stamp with this lane's tag
        count = s_WarpHist[data] & TAG_MASK;
        count = threadTag | (count + 1);
        s_WarpHist[data] = count;
    }while(s_WarpHist[data] != count);  // retry if another lane overwrote us
}
#else
#ifdef CUDA_NO_SM12_ATOMIC_INTRINSICS
#error Compilation target does not support shared-memory atomics
#endif
// Hardware shared-memory atomics available: no tagging needed
#define TAG_MASK 0xFFFFFFFFU
inline __device__ void addByte(uint *s_WarpHist, uint data, uint threadTag){
    atomicAdd(s_WarpHist + data, 1);
}
#endif
// Split a 32-bit word into its four bytes (low to high) and add each one to
// the warp's sub-histogram.
inline __device__ void addWord(uint *s_WarpHist, uint data, uint tag){
    for(uint shift = 0; shift < 32; shift += 8)
        addByte(s_WarpHist, (data >> shift) & 0xFFU, tag);
}
// Computes one partial 256-bin histogram per thread block; each warp keeps
// its own sub-histogram in shared memory to reduce contention.
__global__ void histogram256Kernel(uint *d_PartialHistograms, uint *d_Data, uint dataCount){
    //Per-warp subhistogram storage
    __shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
    uint *s_WarpHist= s_Hist + (threadIdx.x >> LOG2_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
    //Clear shared memory storage for current threadblock before processing
#pragma unroll
    for(uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
        s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
    //Cycle through the entire data set, update subhistograms for each warp
    //(grid-stride loop; tag identifies this lane for the software-atomic path)
    const uint tag = threadIdx.x << (UINT_BITS - LOG2_WARP_SIZE);
    __syncthreads();
    for(uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x)){
        uint data = d_Data[pos];
        addWord(s_WarpHist, data, tag);
    }
    //Merge per-warp histograms into per-block and write to global memory
    //(TAG_MASK strips the lane tags stored in the counters' high bits)
    __syncthreads();
    for(uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE){
        uint sum = 0;
        for(uint i = 0; i < WARP_COUNT; i++)
            sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
        d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
    }
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram256() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram256
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADBLOCK_SIZE 256
// One block per bin: sums that bin across all partial histograms, then does a
// block-wide tree reduction in shared memory.
__global__ void mergeHistogram256Kernel(
    uint *d_Histogram,
    uint *d_PartialHistograms,
    uint histogramCount
){
    // Each thread accumulates a strided subset of the partial histograms
    uint sum = 0;
    for(uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
        sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
    __shared__ uint data[MERGE_THREADBLOCK_SIZE];
    data[threadIdx.x] = sum;
    // Tree reduction; barrier at loop top separates writes from reads
    for(uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1){
        __syncthreads();
        if(threadIdx.x < stride)
            data[threadIdx.x] += data[threadIdx.x + stride];
    }
    if(threadIdx.x == 0)
        d_Histogram[blockIdx.x] = data[0];
}
////////////////////////////////////////////////////////////////////////////////
// Host interface to GPU histogram
////////////////////////////////////////////////////////////////////////////////
//histogram256kernel() intermediate results buffer
static const uint PARTIAL_HISTOGRAM256_COUNT = 240; // 120; //240;
static uint *d_PartialHistograms;
//Internal memory allocation
//Internal memory allocation: one 256-bin partial histogram per launch block
extern "C" void initHistogram256(void){
    cutilSafeCall( hipMalloc((void **)&d_PartialHistograms, PARTIAL_HISTOGRAM256_COUNT * HISTOGRAM256_BIN_COUNT * sizeof(uint)) );
}
//Internal memory deallocation
extern "C" void closeHistogram256(void){
cutilSafeCall( hipFree(d_PartialHistograms) );
}
uchar *d_Data;
uint *d_Histogram;
uint byteCount = 1920 * 1080 * 4;
// Launches the partial-histogram and merge kernels on stream_app.
// Reads the file-scope globals d_Data (input words), d_Histogram (output
// bins), and byteCount; requires byteCount to be word-aligned.
extern "C" void histogram256(
    hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag
){
    assert( byteCount % sizeof(uint) == 0 );
    hipLaunchKernelGGL(( histogram256Kernel), dim3(PARTIAL_HISTOGRAM256_COUNT), dim3(HISTOGRAM256_THREADBLOCK_SIZE), 0, stream_app,
        d_PartialHistograms,
        (uint *)d_Data,
        byteCount / sizeof(uint)
    );
    cutilCheckMsg("histogram256Kernel() execution failed\n");
    // One merge block per bin reduces the partial histograms into d_Histogram
    hipLaunchKernelGGL(( mergeHistogram256Kernel), dim3(HISTOGRAM256_BIN_COUNT), dim3(MERGE_THREADBLOCK_SIZE), 0, stream_app,
        d_Histogram,
        d_PartialHistograms,
        PARTIAL_HISTOGRAM256_COUNT
    );
    cutilCheckMsg("mergeHistogram256Kernel() execution failed\n");
}
uchar *h_Data;
uint *h_HistogramCPU, *h_HistogramGPU;
// Allocates host and device buffers, fills the host input with random bytes,
// and enqueues the upload on stream_app.
// NOTE(review): h_Data is pageable, so hipMemcpyAsync may not overlap; the
// buffer stays alive (global) until main_histo frees it — TODO confirm the
// upload completes before the kernels read d_Data on a different path.
extern "C" void
initBuffer_HIST(hipStream_t stream_app) {
    // allocating CPU memory
    h_Data = (uchar*)malloc(byteCount);
    h_HistogramCPU = (uint*)malloc(HISTOGRAM256_BIN_COUNT * sizeof(uint));
    h_HistogramGPU = (uint*)malloc(HISTOGRAM256_BIN_COUNT * sizeof(uint));
    // data fill-in: uniform random bytes (rand() is unseeded here)
    for(uint i = 0; i < byteCount; i++)
        h_Data[i] = rand() % 256;
    // allocating GPU memory
    cutilSafeCall(hipMalloc((void **)&d_Data, byteCount));
    cutilSafeCall(hipMalloc((void **)&d_Histogram, HISTOGRAM256_BIN_COUNT * sizeof(uint)));
    cutilSafeCall(hipMemcpyAsync(d_Data, h_Data, byteCount, hipMemcpyHostToDevice, stream_app));
}
// Driver for the histogram benchmark: allocates buffers, runs the GPU
// histogram on stream_app, copies the result back, and releases all
// resources. `flag` selects per-stream vs. whole-device synchronization.
int main_histo(hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag){
    initBuffer_HIST(stream_app);
    initHistogram256();
    // Ensure the input upload issued by initBuffer_HIST has completed
    if(flag)
        cutilSafeCall(hipStreamSynchronize(stream_app));
    else
        cutilSafeCall(cutilDeviceSynchronize());
    histogram256(stream_app, mutexapp, flag);
    // Benchmark-harness lock can be released once the kernels are queued
    pthread_mutex_unlock (mutexapp);
    if(flag)
        cutilSafeCall( hipStreamSynchronize(stream_app) );
    cutilSafeCall(hipMemcpyAsync(h_HistogramGPU, d_Histogram, HISTOGRAM256_BIN_COUNT * sizeof(uint), hipMemcpyDeviceToHost, stream_app) );
    if(flag)
        cutilSafeCall( hipStreamSynchronize(stream_app) );
    closeHistogram256();
    cutilSafeCall( hipFree(d_Histogram) );
    cutilSafeCall( hipFree(d_Data) );
    free(h_HistogramGPU);
    free(h_HistogramCPU);
    free(h_Data);
    // Fix: terminate the status message with a newline so subsequent output
    // does not run on the same line and stdout flushes cleanly.
    printf("Execution of Histogram is successful\n");
    return 0;
}
| 39e0e3a843fdd421919869d54846f4c2bb9e1610.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "histogram_common.h"
#include "../benchmark_common.h"
////////////////////////////////////////////////////////////////////////////////
// Shortcut shared memory atomic addition functions
////////////////////////////////////////////////////////////////////////////////
#define USE_SMEM_ATOMICS 0
#if(!USE_SMEM_ATOMICS)
// Software emulation of a shared-memory increment for targets without
// shared-memory atomics: the upper LOG2_WARP_SIZE bits of each counter hold
// the tag of the last lane that wrote it; a lane retries until its tagged
// value survives the warp's racy writes.
#define TAG_MASK ( (1U << (UINT_BITS - LOG2_WARP_SIZE)) - 1U )
inline __device__ void addByte(volatile uint *s_WarpHist, uint data, uint threadTag){
    uint count;
    do{
        // Strip the old tag, bump the count, re-stamp with this lane's tag
        count = s_WarpHist[data] & TAG_MASK;
        count = threadTag | (count + 1);
        s_WarpHist[data] = count;
    }while(s_WarpHist[data] != count);  // retry if another lane overwrote us
}
#else
#ifdef CUDA_NO_SM12_ATOMIC_INTRINSICS
#error Compilation target does not support shared-memory atomics
#endif
// Hardware shared-memory atomics available: no tagging needed
#define TAG_MASK 0xFFFFFFFFU
inline __device__ void addByte(uint *s_WarpHist, uint data, uint threadTag){
    atomicAdd(s_WarpHist + data, 1);
}
#endif
// Split a 32-bit word into its four bytes (low to high) and add each one to
// the warp's sub-histogram.
inline __device__ void addWord(uint *s_WarpHist, uint data, uint tag){
    for(uint shift = 0; shift < 32; shift += 8)
        addByte(s_WarpHist, (data >> shift) & 0xFFU, tag);
}
// Computes one partial 256-bin histogram per thread block; each warp keeps
// its own sub-histogram in shared memory to reduce contention.
__global__ void histogram256Kernel(uint *d_PartialHistograms, uint *d_Data, uint dataCount){
    //Per-warp subhistogram storage
    __shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
    uint *s_WarpHist= s_Hist + (threadIdx.x >> LOG2_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
    //Clear shared memory storage for current threadblock before processing
#pragma unroll
    for(uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
        s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
    //Cycle through the entire data set, update subhistograms for each warp
    //(grid-stride loop; tag identifies this lane for the software-atomic path)
    const uint tag = threadIdx.x << (UINT_BITS - LOG2_WARP_SIZE);
    __syncthreads();
    for(uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x)){
        uint data = d_Data[pos];
        addWord(s_WarpHist, data, tag);
    }
    //Merge per-warp histograms into per-block and write to global memory
    //(TAG_MASK strips the lane tags stored in the counters' high bits)
    __syncthreads();
    for(uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE){
        uint sum = 0;
        for(uint i = 0; i < WARP_COUNT; i++)
            sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
        d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
    }
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram256() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram256
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADBLOCK_SIZE 256
// One block per bin: sums that bin across all partial histograms, then does a
// block-wide tree reduction in shared memory.
__global__ void mergeHistogram256Kernel(
    uint *d_Histogram,
    uint *d_PartialHistograms,
    uint histogramCount
){
    // Each thread accumulates a strided subset of the partial histograms
    uint sum = 0;
    for(uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
        sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
    __shared__ uint data[MERGE_THREADBLOCK_SIZE];
    data[threadIdx.x] = sum;
    // Tree reduction; barrier at loop top separates writes from reads
    for(uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1){
        __syncthreads();
        if(threadIdx.x < stride)
            data[threadIdx.x] += data[threadIdx.x + stride];
    }
    if(threadIdx.x == 0)
        d_Histogram[blockIdx.x] = data[0];
}
////////////////////////////////////////////////////////////////////////////////
// Host interface to GPU histogram
////////////////////////////////////////////////////////////////////////////////
//histogram256kernel() intermediate results buffer
static const uint PARTIAL_HISTOGRAM256_COUNT = 240; // 120; //240;
static uint *d_PartialHistograms;
//Internal memory allocation
//Internal memory allocation: one 256-bin partial histogram per launch block
extern "C" void initHistogram256(void){
    cutilSafeCall( cudaMalloc((void **)&d_PartialHistograms, PARTIAL_HISTOGRAM256_COUNT * HISTOGRAM256_BIN_COUNT * sizeof(uint)) );
}
//Internal memory deallocation
// Releases the partial-histogram buffer allocated by initHistogram256().
extern "C" void closeHistogram256(void){
    cutilSafeCall( cudaFree(d_PartialHistograms) );
}
uchar *d_Data;
uint *d_Histogram;
uint byteCount = 1920 * 1080 * 4;
// Launches the two-pass 256-bin histogram on stream_app: pass 1 builds one
// partial histogram per block, pass 2 merges them into d_Histogram.
// NOTE(review): mutexapp and flag are unused in this function — the caller
// (main_histo) handles locking and synchronization; confirm before removing
// them from the signature.
extern "C" void histogram256(
    cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag
){
    // The kernel consumes the input a 32-bit word at a time.
    assert( byteCount % sizeof(uint) == 0 );
    histogram256Kernel<<<PARTIAL_HISTOGRAM256_COUNT, HISTOGRAM256_THREADBLOCK_SIZE, 0, stream_app>>>(
        d_PartialHistograms,
        (uint *)d_Data,
        byteCount / sizeof(uint)
    );
    cutilCheckMsg("histogram256Kernel() execution failed\n");
    mergeHistogram256Kernel<<<HISTOGRAM256_BIN_COUNT, MERGE_THREADBLOCK_SIZE, 0, stream_app>>>(
        d_Histogram,
        d_PartialHistograms,
        PARTIAL_HISTOGRAM256_COUNT
    );
    cutilCheckMsg("mergeHistogram256Kernel() execution failed\n");
}
uchar *h_Data;
uint *h_HistogramCPU, *h_HistogramGPU;
// Allocates host and device buffers for the histogram run, fills the input
// with random bytes and starts the H2D upload on stream_app.
// NOTE(review): h_Data is pageable (malloc), so cudaMemcpyAsync cannot
// actually overlap with host work — cudaMallocHost would be needed for that.
extern "C" void
initBuffer_HIST(cudaStream_t stream_app) {
    // allocating CPU memory
    h_Data = (uchar*)malloc(byteCount);
    h_HistogramCPU = (uint*)malloc(HISTOGRAM256_BIN_COUNT * sizeof(uint));
    h_HistogramGPU = (uint*)malloc(HISTOGRAM256_BIN_COUNT * sizeof(uint));
    // data fill-in
    for(uint i = 0; i < byteCount; i++)
        h_Data[i] = rand() % 256;
    // allocating GPU memory
    cutilSafeCall(cudaMalloc((void **)&d_Data, byteCount));
    cutilSafeCall(cudaMalloc((void **)&d_Histogram, HISTOGRAM256_BIN_COUNT * sizeof(uint)));
    cutilSafeCall(cudaMemcpyAsync(d_Data, h_Data, byteCount, cudaMemcpyHostToDevice, stream_app));
}
// Drives one full histogram computation: allocate buffers, run the kernels on
// stream_app, copy the result back, release everything.
// flag == true  -> synchronize only stream_app at each checkpoint;
// flag == false -> synchronize the whole device.
// Returns 0 on success (errors abort inside cutilSafeCall).
int main_histo(cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag){
    initBuffer_HIST(stream_app);
    initHistogram256();
    if(flag)
        cutilSafeCall(cudaStreamSynchronize(stream_app));
    else
        cutilSafeCall(cutilDeviceSynchronize());
    histogram256(stream_app, mutexapp, flag);
    pthread_mutex_unlock (mutexapp);
    if(flag)
        cutilSafeCall( cudaStreamSynchronize(stream_app) );
    cutilSafeCall(cudaMemcpyAsync(h_HistogramGPU, d_Histogram, HISTOGRAM256_BIN_COUNT * sizeof(uint), cudaMemcpyDeviceToHost, stream_app) );
    // The copy above is asynchronous: wait for it on BOTH paths before the
    // buffers it touches are freed below. Previously the flag==false path
    // freed h_HistogramGPU/d_Histogram while the copy could still be in
    // flight (use-after-free race).
    if(flag)
        cutilSafeCall( cudaStreamSynchronize(stream_app) );
    else
        cutilSafeCall( cutilDeviceSynchronize() );
    closeHistogram256();
    cutilSafeCall( cudaFree(d_Histogram) );
    cutilSafeCall( cudaFree(d_Data) );
    free(h_HistogramGPU);
    free(h_HistogramCPU);
    free(h_Data);
    printf("Execution of Histogram is successful");
    return 0;
}
|
011c2f0ff51ef4984c5a8a3dd457cbc49854874a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <amgx_lapack.h>
#include <algorithm>
#include <sstream>
#include <vector>
#ifdef AMGX_USE_MAGMA
#define ADD_ 1
#define HAVE_CUBLAS 1
#include <magma.h>
#endif
#include <amgx_cublas.h>
namespace amgx
{
// Translate a LAPACK/MAGMA `info` result into a FatalError: negative means an
// illegal argument (its index is -info), positive means an internal failure.
// NOTE: the macros must NOT end with a line continuation after the closing
// brace — as written, the dangling `\` spliced the following line into the
// macro definition.
#define lapackCheckError(status)                                        \
    {                                                                   \
        if (status < 0)                                                 \
        {                                                               \
            std::stringstream ss;                                       \
            ss << "Lapack error: argument number "                      \
               << -status << " had an illegal value.";                  \
            FatalError(ss.str(), AMGX_ERR_INTERNAL);                    \
        }                                                               \
        else if (status > 0)                                            \
            FatalError("Lapack error: internal error.",                 \
                       AMGX_ERR_INTERNAL);                              \
    }

#define magmaCheckError(status)                                         \
    {                                                                   \
        if (status < 0)                                                 \
        {                                                               \
            std::stringstream ss;                                       \
            ss << "Magma error: argument number "                       \
               << -status << " had an illegal value.";                  \
            FatalError(ss.str(), AMGX_ERR_INTERNAL);                    \
        }                                                               \
        else if (status > 0)                                            \
            FatalError("Magma error: internal error.",                  \
                       AMGX_ERR_INTERNAL);                              \
    }
// Aborts with a configuration error when the build has no LAPACK support;
// no-op otherwise. Called at the top of every host routine that needs LAPACK.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::check_lapack_enabled()
{
#ifndef AMGX_USE_LAPACK
    FatalError("Error: LAPACK not enabled.", AMGX_ERR_CONFIGURATION);
#endif
}
// Host-side guard: aborts when the build has no MAGMA support.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::check_magma_enabled()
{
#ifndef AMGX_USE_MAGMA
    FatalError("Error: MAGMA not enabled.", AMGX_ERR_CONFIGURATION);
#endif
}
// Device-side guard: aborts when the build has no MAGMA support.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::check_magma_enabled()
{
#ifndef AMGX_USE_MAGMA
    FatalError("Error: MAGMA not enabled.", AMGX_ERR_CONFIGURATION);
#endif
}
// Stub for operations that have no host implementation; always fatal.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::not_implemented()
{
    FatalError("Error: LAPACK operation not implemented on host.", AMGX_ERR_CONFIGURATION);
}
// Stub for operations that have no device implementation; always fatal.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::not_implemented()
{
    FatalError("Error: LAPACK operation not implemented on device.", AMGX_ERR_CONFIGURATION);
}
namespace
{
#ifdef AMGX_USE_LAPACK
struct _fcomplex { float re, im; };
typedef struct _fcomplex fcomplex;
struct _dcomplex { double re, im; };
typedef struct _dcomplex dcomplex;
extern "C"
int dgeev_(char *jobvl, char *jobvr, int *n, double *a,
int *lda, double *wr, double *wi, double *vl,
int *ldvl, double *vr, int *ldvr, double *work,
int *lwork, int *info);
extern "C"
int sgeev_(char *jobvl, char *jobvr, int *n, float *a,
int *lda, float *wr, float *wi, float *vl,
int *ldvl, float *vr, int *ldvr, float *work,
int *lwork, int *info);
extern "C"
int cgeev_(char *jobvl, char *jobvr, int *n, fcomplex *a,
int *lda, fcomplex *wr, fcomplex *wi, fcomplex *vl,
int *ldvl, fcomplex *vr, int *ldvr, fcomplex *work,
int *lwork, int *info);
extern "C"
int zgeev_(char *jobvl, char *jobvr, int *n, dcomplex *a,
int *lda, dcomplex *wr, dcomplex *wi, dcomplex *vl,
int *ldvl, dcomplex *vr, int *ldvr, dcomplex *work,
int *lwork, int *info);
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, double *a,
int *lda, double *wr, double *wi, double *vl,
int *ldvl, double *vr, int *ldvr, double *work,
int *lwork, int *info)
{
return dgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info);
}
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, float *a,
int *lda, float *wr, float *wi, float *vl,
int *ldvl, float *vr, int *ldvr, float *work,
int *lwork, int *info)
{
return sgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info);
}
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, hipComplex *a,
int *lda, hipComplex *wr, hipComplex *wi, hipComplex *vl,
int *ldvl, hipComplex *vr, int *ldvr, hipComplex *work,
int *lwork, int *info)
{
return cgeev_(jobvl, jobvr, n,
reinterpret_cast<fcomplex *>(a),
lda,
reinterpret_cast<fcomplex *>(wr),
reinterpret_cast<fcomplex *>(wi),
reinterpret_cast<fcomplex *>(vl),
ldvl,
reinterpret_cast<fcomplex *>(vr),
ldvr,
reinterpret_cast<fcomplex *>(work),
lwork,
info);
}
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, hipDoubleComplex *a,
int *lda, hipDoubleComplex *wr, hipDoubleComplex *wi, hipDoubleComplex *vl,
int *ldvl, hipDoubleComplex *vr, int *ldvr, hipDoubleComplex *work,
int *lwork, int *info)
{
return zgeev_(jobvl, jobvr, n,
reinterpret_cast<dcomplex *>(a),
lda,
reinterpret_cast<dcomplex *>(wr),
reinterpret_cast<dcomplex *>(wi),
reinterpret_cast<dcomplex *>(vl),
ldvl,
reinterpret_cast<dcomplex *>(vr),
ldvr,
reinterpret_cast<dcomplex *>(work),
lwork,
info);
}
// Eigenvalues of a general dim x dim matrix via ?geev, jobvl = jobvr = 'N'.
// A is overwritten by LAPACK; real parts land in `eigenvalues`, imaginary
// parts are computed into a scratch array and discarded.
template <typename T>
void lapack_geev(T *A, T *eigenvalues, int dim, int lda)
{
    char job = 'N';
    std::vector<T> WI(dim);      // imaginary parts (?geev requires dim entries)
    int ldv = 1;
    T *vl = 0;                   // no left/right eigenvectors requested
    int work_size = 6 * dim;     // >= the 4*dim minimum documented for ?geev
    std::vector<T> work(work_size);
    int info;
    lapack_geev_dispatch(&job, &job, &dim, A, &lda, eigenvalues, WI.data(), vl, &ldv,
                         vl, &ldv, work.data(), &work_size, &info);
    // std::vector frees the scratch buffers even if lapackCheckError raises
    // FatalError (the previous raw new[] arrays leaked on that path).
    lapackCheckError(info);
}
// Eigenvalues and right eigenvectors of a general dim x dim matrix via ?geev
// (jobvr = 'V'). A is overwritten; right eigenvectors go to `eigenvectors`.
template <typename T>
void lapack_geev(T *A, T *eigenvalues, T *eigenvectors, int dim, int lda, int ldvr)
{
    char jobvl = 'N';
    char jobvr = 'V';
    std::vector<T> WI(dim);      // ?geev's WI has dimension dim (was dim*dim)
    int work_size = 6 * dim;
    T *vl = 0;
    int ldvl = 1;
    std::vector<T> work(work_size);
    int info;
    lapack_geev_dispatch(&jobvl, &jobvr, &dim, A, &lda, eigenvalues, WI.data(), vl, &ldvl,
                         eigenvectors, &ldvr, work.data(), &work_size, &info);
    // RAII scratch buffers: no leak when lapackCheckError raises FatalError.
    lapackCheckError(info);
}
#endif
} // end anonymous namespace
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::geev(const Vector<TConfig> &A, Vector<TConfig> &eigenvalues)
{
check_lapack_enabled();
typedef typename Vector<TConfig>::value_type value_type;
// It is possible the matrix has an extra row (e.g. Arnoldi).
int dim = ::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
value_type *A_ptr = const_cast<value_type *>(A.raw());
#ifdef AMGX_USE_LAPACK
lapack_geev(A_ptr, eigenvalues.raw(), dim, lda);
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::geev(const Vector<TConfig> &A,
Vector<TConfig> &eigenvalues,
Vector<TConfig> &eigenvector)
{
check_lapack_enabled();
typedef typename Vector<TConfig>::value_type value_type;
// It is possible the matrix has an extra row (e.g. Arnoldi).
int dim = ::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
value_type *A_ptr = const_cast<value_type *>(A.raw());
#ifdef AMGX_USE_LAPACK
lapack_geev(A_ptr, eigenvalues.raw(), eigenvector.raw(), dim, lda, eigenvector.get_lda());
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::geev(const Vector<TConfig> &A, Vector<TConfig> &eigenvalues)
{
not_implemented();
}
namespace
{
#ifdef AMGX_USE_LAPACK
extern "C"
int dtrtri_(char *uplo, char *diag, int *n, double *
a, int *lda, int *info);
extern "C"
int strtri_(char *uplo, char *diag, int *n, float *
a, int *lda, int *info);
extern "C"
int ctrtri_(char *uplo, char *diag, int *n, fcomplex *
a, int *lda, int *info);
extern "C"
int ztrtri_(char *uplo, char *diag, int *n, dcomplex *
a, int *lda, int *info);
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, float *a,
int *lda, int *info)
{
return strtri_(uplo, diag, n, a, lda, info);
}
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, double *a,
int *lda, int *info)
{
return dtrtri_(uplo, diag, n, a, lda, info);
}
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, fcomplex *a,
int *lda, int *info)
{
return ctrtri_(uplo, diag, n, a, lda, info);
}
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, dcomplex *a,
int *lda, int *info)
{
return ztrtri_(uplo, diag, n, a, lda, info);
}
// In-place inverse of the upper (non-unit) triangle of A via LAPACK ?trtri.
template <typename T>
void lapack_trtri(T *A, int dim, int lda)
{
    char uplo = 'U';   // upper triangle is referenced
    char diag = 'N';   // non-unit diagonal
    int info;
    lapack_trtri_dispatch(&uplo, &diag, &dim, A, &lda, &info);
    lapackCheckError(info);
}
#endif
}
// Inverts the upper triangle of A in place on the host (LAPACK ?trtri).
// Only the leading square part is used if A carries extra rows.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::trtri(Vector<TConfig> &A)
{
    check_lapack_enabled();
    typedef typename Vector<TConfig>::value_type value_type;
    int dim = ::min(A.get_num_rows(), A.get_num_cols());
    int lda = A.get_lda();
#ifdef AMGX_USE_LAPACK
    lapack_trtri(A.raw(), dim, lda);
#else
    FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
namespace
{
#ifdef AMGX_USE_MAGMA
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, float *a,
int lda, int *info)
{
return magma_strtri_gpu(uplo, diag, n, a, lda, info);
}
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, double *a,
int lda, int *info)
{
return magma_dtrtri_gpu(uplo, diag, n, a, lda, info);
}
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, hipComplex *a,
int lda, int *info)
{
return magma_ctrtri_gpu(uplo, diag, n, a, lda, info);
}
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, hipDoubleComplex *a,
int lda, int *info)
{
return magma_ztrtri_gpu(uplo, diag, n, a, lda, info);
}
// In-place inverse of the upper (non-unit) triangle of a device matrix via
// MAGMA ?trtri_gpu.
template <typename T>
void magma_trtri(T *A, int dim, int lda)
{
    magma_uplo_t uplo = MagmaUpper;
    magma_diag_t diag = MagmaNonUnit;
    int info;
    magma_trtri_dispatch(uplo, diag, dim, A, lda, &info);
    magmaCheckError(info);
}
#endif
}
// Inverts the upper triangle of A in place on the device (MAGMA ?trtri_gpu).
// Only the leading square part is used if A carries extra rows.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::trtri(Vector<TConfig> &A)
{
    check_magma_enabled();
    typedef typename Vector<TConfig>::value_type value_type;
    int dim = ::min(A.get_num_rows(), A.get_num_cols());
    int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
    magma_trtri(A.raw(), dim, lda);
#else
    FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
namespace
{
#ifdef AMGX_USE_LAPACK
extern "C"
int dsygv_(int *itype, char *jobz, char *uplo, int *n,
double *a, int *lda, double *b, int *ldb,
double *w, double *work, int *lwork, int *info);
extern "C"
int ssygv_(int *itype, char *jobz, char *uplo, int *n,
float *a, int *lda, float *b, int *ldb,
float *w, float *work, int *lwork, int *info);
extern "C"
int chegv_(int *itype, char *jobz, char *uplo, int *n,
fcomplex *a, int *lda, fcomplex *b, int *ldb,
fcomplex *w, fcomplex *work, int *lwork, int *info);
extern "C"
int zhegv_(int *itype, char *jobz, char *uplo, int *n,
dcomplex *a, int *lda, dcomplex *b, int *ldb,
dcomplex *w, dcomplex *work, int *lwork, int *info);
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
double *a, int *lda, double *b, int *ldb,
double *w, double *work, int *lwork, int *info)
{
return dsygv_(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, info);
}
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
float *a, int *lda, float *b, int *ldb,
float *w, float *work, int *lwork, int *info)
{
return ssygv_(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, info);
}
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
hipComplex *a, int *lda, hipComplex *b, int *ldb,
hipComplex *w, hipComplex *work, int *lwork, int *info)
{
return chegv_(itype, jobz, uplo, n,
reinterpret_cast<fcomplex *>(a), lda, reinterpret_cast<fcomplex *>(b), ldb,
reinterpret_cast<fcomplex *>(w), reinterpret_cast<fcomplex *>(work), lwork, info);
}
// Double-complex generalized Hermitian eigensolver (zhegv). All operand
// pointers must be hipDoubleComplex: the first parameter was previously
// declared hipComplex*, which broke the homogeneous overload set used by the
// lapack_sygv<T> template (and contradicted the dcomplex reinterpret_cast).
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
                         hipDoubleComplex *a, int *lda, hipDoubleComplex *b, int *ldb,
                         hipDoubleComplex *w, hipDoubleComplex *work, int *lwork, int *info)
{
    return zhegv_(itype, jobz, uplo, n,
                  reinterpret_cast<dcomplex *>(a), lda, reinterpret_cast<dcomplex *>(b), ldb,
                  reinterpret_cast<dcomplex *>(w), reinterpret_cast<dcomplex *>(work), lwork, info);
}
// Generalized symmetric(-definite) eigenproblem A*x = lambda*B*x (itype = 1)
// with eigenvectors, upper triangles referenced. On exit gramA holds the
// eigenvectors and gramB the Cholesky factor; `eigenvector` is passed as
// ?sygv's W array, i.e. it actually receives the eigenvalues.
// NOTE(review): lwork is hard-coded to 1024 — the caller-supplied `work`
// buffer must hold at least 1024 elements, and 1024 must satisfy ?sygv's
// minimum (>= 3*N-1); confirm for large dim.
template <typename T>
void lapack_sygv(T *gramA, T *gramB, T *eigenvector, int dim, int lda, T *work)
{
    int itype = 1;     // problem type 1: A*x = lambda*B*x
    char jobz = 'V';   // compute eigenvectors
    char uplo = 'U';
    int ldb = lda;
    int lwork = 1024;
    int info = 0;
    lapack_sygv_dispatch(&itype, &jobz, &uplo, &dim, gramA, &lda, gramB, &ldb, eigenvector, work, &lwork, &info);
    lapackCheckError(info);
}
#endif
}
// Host generalized symmetric eigensolve A*x = lambda*B*x. A receives the
// eigenvectors, `eigenvalues` the eigenvalues; `work` is caller-provided
// LAPACK workspace (see lapack_sygv's size requirement).
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::sygv(Vector<TConfig> &A, Vector<TConfig> &B,
        Vector<TConfig> &eigenvalues, Vector<TConfig> &work)
{
    check_lapack_enabled();
    typedef typename Vector<TConfig>::value_type value_type;
    int dim = ::min(A.get_num_rows(), A.get_num_cols());
    int lda = A.get_lda();
#ifdef AMGX_USE_LAPACK
    lapack_sygv(A.raw(), B.raw(), eigenvalues.raw(), dim, lda, work.raw());
#else
    FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
namespace
{
#ifdef AMGX_USE_MAGMA
void magma_trsm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
float alpha, float const *dA, magma_int_t lda,
float *dB, magma_int_t ldb)
{
return magma_strsm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
void magma_trsm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
double alpha, double const *dA, magma_int_t lda,
double *dB, magma_int_t ldb)
{
return magma_dtrsm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
void magma_trmm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
float alpha, float const *dA, magma_int_t lda,
float *dB, magma_int_t ldb)
{
return magma_strmm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
void magma_trmm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
double alpha, double const *dA, magma_int_t lda,
double *dB, magma_int_t ldb)
{
return magma_dtrmm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
int magma_potrf_gpu_dispatch(magma_uplo_t uplo, int n, float *A, int lda, int *info)
{
return magma_spotrf_gpu(uplo, n, A, lda, info);
}
int magma_potrf_gpu_dispatch(magma_uplo_t uplo, int n, double *A, int lda, int *info)
{
return magma_dpotrf_gpu(uplo, n, A, lda, info);
}
int magma_sygst_gpu_dispatch(int itype, magma_uplo_t uplo, magma_int_t n, float *da,
int ldda, float *B, int lddb, int *info)
{
return magma_ssygst_gpu(itype, uplo, n, da, ldda, B, lddb, info);
}
int magma_sygst_gpu_dispatch(int itype, magma_uplo_t uplo, magma_int_t n, double *da,
int ldda, double *B, int lddb, int *info)
{
return magma_dsygst_gpu(itype, uplo, n, da, ldda, B, lddb, info);
}
int magma_syevd_gpu_dispatch(magma_vec_t jobz, magma_uplo_t uplo, int n, double *da, int ldda,
double *w, double *wa, int ldwa, double *work,
int lwork, int *iwork, int liwork, int *info)
{
return magma_dsyevd_gpu(jobz, uplo, n, da, ldda, w, wa, ldwa, work, lwork, iwork, liwork, info);
}
int magma_syevd_gpu_dispatch(magma_vec_t jobz, magma_uplo_t uplo, int n, float *da, int ldda,
float *w, float *wa, int ldwa, float *work,
int lwork, int *iwork, int liwork, int *info)
{
return magma_ssyevd_gpu(jobz, uplo, n, da, ldda, w, wa, ldwa, work, lwork, iwork, liwork, info);
}
// This is a simple modification of the magma_?sygvd() source code
// from magma where the matrices are already on the device.
template <typename T>
magma_int_t magma_sygvd_gpu_impl(magma_int_t itype, magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n,
T *da, magma_int_t lda, T *db, magma_int_t ldb, T *w,
T *work, magma_int_t lwork, magma_int_t *iwork, magma_int_t liwork,
T *wa, magma_int_t *info)
{
magma_uplo_t uplo_[2] = {uplo, MagmaLower}; // {uplo, 0}
magma_vec_t jobz_[2] = {jobz, MagmaVec};//{jobz, 0};
T d_one = MAGMA_D_ONE;
magma_int_t ldda = n;
magma_int_t lddb = n;
static magma_int_t lower;
static char trans[1];
static magma_int_t wantz, lquery;
static magma_int_t lopt, lwmin, liopt, liwmin;
static hipStream_t stream;
magma_queue_create( &stream );
wantz = jobz_[0] == MagmaVec;
lower = uplo_[0] == MagmaLower;
lquery = lwork == -1 || liwork == -1;
*info = 0;
if (itype < 1 || itype > 3)
{
*info = -1;
}
else if (! (wantz || jobz_[0] == MagmaNoVec))
{
*info = -2;
}
else if (! (lower || uplo_[0] == MagmaUpper))
{
*info = -3;
}
else if (n < 0)
{
*info = -4;
}
else if (lda < max(1, n))
{
*info = -6;
}
else if (ldb < max(1, n))
{
*info = -8;
}
magma_int_t nb = magma_get_dsytrd_nb(n);
if (n < 1)
{
liwmin = 1;
lwmin = 1;
}
else if (wantz)
{
lwmin = 1 + 6 * n * nb + 2 * n * n;
liwmin = 5 * n + 3;
}
else
{
lwmin = 2 * n * nb + 1;
liwmin = 1;
}
lopt = lwmin;
liopt = liwmin;
work[ 0] = lopt;
iwork[0] = liopt;
if (lwork < lwmin && ! lquery)
{
*info = -11;
}
else if (liwork < liwmin && ! lquery)
{
*info = -13;
}
if (*info != 0)
{
magma_xerbla( __func__, -(*info) );
return MAGMA_ERR_ILLEGAL_VALUE;
}
else if (lquery)
{
return MAGMA_SUCCESS;
}
/* Quick return if possible */
if (n == 0)
{
return 0;
}
magma_potrf_gpu_dispatch(uplo_[0], n, db, lddb, info);
if (*info != 0)
{
*info = n + *info;
return 0;
}
/* Transform problem to standard eigenvalue problem and solve. */
magma_sygst_gpu_dispatch(itype, uplo_[0], n, da, ldda, db, lddb, info);
magma_syevd_gpu_dispatch(jobz_[0], uplo_[0], n, da, ldda, w, wa, lda,
work, lwork, iwork, liwork, info);
lopt = max( lopt, (magma_int_t) work[0]);
liopt = max(liopt, iwork[0]);
if (wantz && *info == 0)
{
/* Backtransform eigenvectors to the original problem. */
if (itype == 1 || itype == 2)
{
/* For A*x=(lambda)*B*x and A*B*x=(lambda)*x;
backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
if (lower)
{
*(unsigned char *)trans = MagmaTrans;
}
else
{
*(unsigned char *)trans = MagmaNoTrans;
}
magma_trsm_dispatch(MagmaLeft, uplo_[0], *trans, MagmaNonUnit,
n, n, d_one, db, lddb, da, ldda);
}
else if (itype == 3)
{
/* For B*A*x=(lambda)*x;
backtransform eigenvectors: x = L*y or U'*y */
if (lower)
{
*(unsigned char *)trans = MagmaNoTrans;
}
else
{
*(unsigned char *)trans = MagmaTrans;
}
magma_trmm_dispatch(MagmaLeft, uplo_[0], *trans, MagmaNonUnit,
n, n, d_one, db, lddb, da, ldda);
}
}
magma_queue_sync( stream );
magma_queue_destroy( stream );
work[0] = (T) lopt;
iwork[0] = liopt;
return MAGMA_SUCCESS;
}
hipblasStatus_t cublas_trsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb)
{
return hipblasStrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
}
hipblasStatus_t cublas_trsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb)
{
return hipblasDtrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
}
// Device generalized symmetric-definite eigensolve A*x = lambda*B*x (itype 1)
// done step by step: Cholesky of B (potrf), reduction to a standard problem
// (sygst), standard eigensolve (syevd), then back-transform of the
// eigenvectors with a triangular solve. A and B are device pointers and are
// overwritten; `eigenvalues` is written on the host by MAGMA's syevd.
// NOTE(review): workspaces are function-local statics, so this is not safe
// for concurrent calls from multiple threads — confirm single-threaded use.
template <typename T>
void magma_sygvd_gpu(T *A, T *B, T *eigenvalues, int dim, int lda)
{
    int itype = 1;
    magma_vec_t jobz = MagmaVec;     // eigenvectors requested
    magma_uplo_t uplo = MagmaUpper;
    int N = dim;
    int ldb = lda;
    int nb = 32;                     // assumed block size for the lwork formula
    int lwork = 1 + 6 * N * nb + 2 * N * N;
    static std::vector<T> s_work;
    s_work.resize(lwork);
    T *work = &s_work[0];
    int liwork = 3 + 5 * N;
    static std::vector<int> s_iwork;
    s_iwork.resize(liwork);
    int *iwork = &s_iwork[0];
    static std::vector<T> s_wa;      // host mirror required by magma_?syevd_gpu
    s_wa.resize(lda * N);
    T *wa = &s_wa[0];
    int ldwa = N;
    int info;
    /*
      magma_sygvd_gpu_impl(itype, jobz, uplo, N, A, lda, B, ldb, eigenvalues, work, lwork, iwork, liwork, wa, &info);
    */
    magma_potrf_gpu_dispatch(uplo, N, B, lda, &info);
    magmaCheckError(info);
    magma_sygst_gpu_dispatch(itype, uplo, N, A, lda, B, ldb, &info);
    magmaCheckError(info);
    magma_syevd_gpu_dispatch(jobz, uplo, N, A, lda, eigenvalues, wa, ldwa, work, lwork, iwork, liwork, &info);
    magmaCheckError(info);
    // Back-transform: x = inv(U) * y, where B = U'*U from the Cholesky step.
    T one = 1;
    hipblasHandle_t handle = Cublas::get_handle();
    cublas_trsm(handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, N, N, &one, B, ldb, A, lda);
}
#endif
}
// Device generalized symmetric eigensolve A*x = lambda*B*x. A receives the
// eigenvectors (device); `eigenvalues` is a host vector filled by MAGMA.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::sygv(Vector<TConfig> &A, Vector<TConfig> &B,
        Vector<TConfig_h> &eigenvalues, Vector<TConfig> &work)
{
    // Fail loudly when MAGMA is absent: previously this silently returned,
    // leaving `eigenvalues` untouched (every sibling routine guards here).
    check_magma_enabled();
    typedef typename Vector<TConfig>::value_type value_type;
    int dim = ::min(A.get_num_rows(), A.get_num_cols());
    int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
    magma_sygvd_gpu(A.raw(), B.raw(), eigenvalues.raw(), dim, lda);
#endif
}
namespace
{
#ifdef AMGX_USE_MAGMA
template <typename T>
void magma_syevd_gpu(T *A, T *eigenvalues, int dim, int lda)
{
magma_vec_t jobz = MagmaVec;
magma_uplo_t uplo = MagmaUpper;
int N = dim;
int nb = 32;
int lwork = 1 + 6 * N * nb + 2 * N * N;
static std::vector<T> s_work;
s_work.resize(lwork);
T *work = &s_work[0];
int liwork = 3 + 5 * N;
static std::vector<int> s_iwork;
s_iwork.resize(liwork);
int *iwork = &s_iwork[0];
static std::vector<T> s_wa;
s_wa.resize(lda * N);
T *wa = &s_wa[0];
int ldwa = N;
int info;
magma_syevd_gpu_dispatch(jobz, uplo, N, A, lda, eigenvalues, wa, ldwa, work, lwork, iwork, liwork, &info);
magmaCheckError(info);
}
#endif
}
// Device standard symmetric eigensolve: A (device) is overwritten with the
// eigenvectors, `eigenvalues` (host vector) receives the eigenvalues.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::syevd(Vector<TConfig> &A,
        Vector<TConfig_h> &eigenvalues)
{
    check_magma_enabled();
    typedef typename Vector<TConfig>::value_type value_type;
    int dim = ::min(A.get_num_rows(), A.get_num_cols());
    int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
    magma_syevd_gpu(A.raw(), eigenvalues.raw(), dim, lda);
#endif
}
// syevd has no host implementation; always fatal.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::syevd(Vector<TConfig> &A,
        Vector<TConfig> &eigenvalues)
{
    not_implemented();
}
namespace
{
#ifdef AMGX_USE_MAGMA
int magma_stedx_dispatch(magma_range_t range, int n,
double vl, double vu,
int il, int iu,
double *d, double *e, double *z, int ldz,
double *work, int lwork, int *iwork, int liwork,
double *dwork, int *info)
{
return magma_dstedx(range, n, vl, vu, il, iu, d, e, z, ldz, work, lwork, iwork, liwork, dwork, info);
}
int magma_stedx_dispatch(magma_range_t range, int n,
float vl, float vu,
int il, int iu,
float *d, float *e, float *z, int ldz,
float *work, int lwork, int *iwork, int liwork,
float *dwork, int *info)
{
return magma_sstedx(range, n, vl, vu, il, iu, d, e, z, ldz, work, lwork, iwork, liwork, dwork, info);
}
template <typename T>
void magma_stedx(T *diagonal, T *subdiagonal, T *eigenvectors,
int lower, int upper, int dim, int ldz, T *dwork, int dwork_size)
{
magma_range_t range = MagmaRangeI;
int N = dim;
T vl = 0;
T vu = 0;
int il = lower;
int iu = upper;
int lwork = 1 + 4 * N + 2 * N * N;
static std::vector<T> s_work;
s_work.resize(lwork);
int liwork = 3 + 6 * N;
static std::vector<int> s_iwork;
s_iwork.resize(liwork);
int info;
magma_stedx_dispatch(range, N, vl, vu, il, iu, diagonal, subdiagonal, eigenvectors, ldz,
&s_work[0], lwork, &s_iwork[0], liwork, dwork, &info);
}
#endif
}
// Tridiagonal eigensolve wrapper: computes eigenpairs 1..dim of the
// tridiagonal matrix given by host vectors diagonal/subdiagonal, using MAGMA
// with the caller-provided device workspace `dwork`.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::stedx(Vector<TConfig> &diagonal,
        Vector<TConfig> &subdiagonal,
        Vector<TConfig> &eigenvectors,
        int dim,
        Vector<TConfig_d> &dwork)
{
    check_magma_enabled();
#ifdef AMGX_USE_MAGMA
    // lower = upper = dim would select only index dim; here the call passes
    // (dim, dim) for (il, iu) per the original interface.
    magma_stedx(diagonal.raw(), subdiagonal.raw(), eigenvectors.raw(),
                dim, dim, dim, eigenvectors.get_lda(),
                dwork.raw(), dwork.size());
#endif
}
namespace
{
// Applies an elementary reflector H = I - tau*v*v' to the m-by-n matrix C
// from the left, using cuBLAS (device pointers): w = C'*v, then the rank-1
// update C -= v*w' (the negation is folded into tau, see compute_tau_host).
// `work` must hold at least n elements.
template <typename T>
void larf(int m, int n, T *v,
          int incv, T *tau, T *c, int ldc,
          T *work)
{
    /* Table of constant values */
    static T c_b4 = 1.;
    static T c_b5 = 0.;
    static int c1 = 1;
    /* Form H * C */
    /* w := C' * v */
    Cublas::gemv(true, m, n, &c_b4, c, ldc,
                 v, incv, &c_b5, work, c1);
    /* C := C - v * w' */
    Cublas::ger(m, n, tau, v, incv, work, c1, c, ldc);
}
// Single-thread kernel (<<<1,1>>>): writes 1.0 to *a. Used to temporarily
// overwrite the diagonal entry of a reflector column before applying larf.
template <typename T>
__global__
void set1(T *a)
{
    *a = 1.;
}
// Single-thread kernel (<<<1,1>>>): writes 1 + tau to *a. Restores the
// diagonal entry of the reflector column in gpu_orgqr.
template <typename T>
__global__
void add_tau(T *a, T tau)
{
    *a = 1 + tau;
}
// Builds the explicit m-by-n orthogonal factor Q from the k reflectors
// produced by gpu_geqrf (LAPACK ?orgqr algorithm), entirely with cuBLAS calls
// and tiny helper kernels. `a` is a device pointer; `work` needs >= n
// elements; `lwork` is unused.
// NOTE(review): tau is read both as tau[i] on the host (add_tau argument) and
// passed by pointer to Cublas::scal/larf — this only works if tau is
// host-accessible (e.g. mapped/managed); confirm with the callers.
template <typename T>
void gpu_orgqr(int m, int n, int k,
               T *a, int lda, T *tau, T *work, int lwork)
{
    int i1, i2;
    // Process reflectors from last to first, as in LAPACK.
    for (int i = k - 1; i >= 0; --i)
    {
        /* Apply H(i) to A(i:m,i:n) from the left */
        if (i < n - 1)
        {
            // Temporarily set the diagonal entry to 1 so column i holds v.
            hipLaunchKernelGGL(( set1) , dim3(1), dim3(1), 0, 0, &a[i + i * lda]);
            i1 = m - i;
            i2 = n - i - 1;
            larf(i1, i2, &a[i + i * lda], 1, &tau[i],
                 &a[i + (i + 1) * lda], lda, work);
        }
        if (i < m - 1)
        {
            // Scale the below-diagonal part of column i by tau(i).
            i1 = m - i - 1;
            Cublas::scal(i1, &tau[i], &a[i + 1 + i * lda], 1);
        }
        // Diagonal entry of Q's column i becomes 1 + tau(i) (tau negated
        // upstream, see compute_tau_host).
        hipLaunchKernelGGL(( add_tau) , dim3(1), dim3(1), 0, 0, &a[i + i * lda], tau[i]);
        /* Set A(1:i-1,i) to zero */
        hipMemset(&a[i * lda], 0, sizeof(T) * i);
    }
    cudaCheckError();
}
// Hypotenuse sqrt(a^2 + b^2), computed directly (no overflow/underflow
// guard, unlike LAPACK's dlapy2 scaling).
template <typename T>
__device__ __host__
T lapy2_(T *a, T *b)
{
    T x = *a;
    T y = *b;
    return sqrt(x * x + y * y);
}
// LAPACK-style sign transfer: magnitude of a, sign of b.
template <typename T>
__device__ __host__
T d_sign(T a, T b)
{
    T mag = (a >= 0) ? a : -a;
    return (b >= 0) ? mag : -mag;
}
// Host scalar part of ?larfg: given the pivot value *alpha and the norm of
// the rest of the column, computes beta, the reflector coefficient *tau
// (negated relative to LAPACK so the device-side rank-1 update needs no extra
// negation), the column scaling factor *d1 = 1/(alpha - beta), and overwrites
// *alpha with beta.
template <typename T>
void compute_tau_host(T *alpha, T *norm,
                      T *tau, T *d1)
{
    *d1 = lapy2_(alpha, norm);
    T beta = -d_sign(*d1, *alpha);
    // LAPACK: skipped part about scaling.
    // Negated compared to LAPACK code, avoid negating value on device later.
    *tau = -(beta - *alpha) / beta;
    *d1 = 1. / (*alpha - beta);
    *alpha = beta;
}
// Generates an elementary Householder reflector (LAPACK ?larfg):
// alpha (device pointer, the pivot) and x (device, the n-1 trailing entries)
// are updated in place; *tau is written on the host. The scalar bookkeeping
// is done on the host after copying alpha down (one D2H + one H2D memcpy).
template <typename T>
void larfg(int n, T *alpha, T *x,
           int incx, T *tau)
{
    // Degenerate column: H = I.
    if (n <= 1)
    {
        *tau = 0.;
        return;
    }
    int i1 = n - 1;
    T xnorm;
    Cublas::nrm2(i1, x, incx, &xnorm);
    T h_alpha;
    hipMemcpy(&h_alpha, alpha, sizeof(T), hipMemcpyDeviceToHost);
    T d1;
    compute_tau_host(&h_alpha, &xnorm, tau, &d1);
    // Scale the tail of the column by 1/(alpha - beta) to form v.
    Cublas::scal(i1, d1, x, incx);
    // Update the diagonal value on the device.
    hipMemcpy(alpha, &h_alpha, sizeof(T), hipMemcpyHostToDevice);
}
// Unblocked QR factorization of the m-by-n device matrix a (LAPACK ?geqr2
// algorithm): for each column generate a reflector with larfg and apply it to
// the trailing submatrix with larf. `tau` receives the k = min(m,n) reflector
// coefficients (written on the host by larfg); `work` needs >= n elements.
// NOTE(review): hipMalloc/hipMemcpy return codes are not checked here.
template <typename T>
void gpu_geqrf(int m, int n, T *a, int lda,
               T *tau, T *work)
{
    int k = ::min(m, n);
    T *aii;   // device scratch holding the true diagonal entry during larf
    hipMalloc(&aii, sizeof(T));
    for (int i = 0; i < k; ++i)
    {
        /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
        int i2 = m - i;
        /* Computing MIN */
        int i3 = i + 1;
        larfg(i2, &a[i + i * lda],
              &a[::min(i3, m - 1) + i * lda],
              1, &tau[i]);
        if (i < n - 1)
        {
            /* Apply H(i) to A(i:m,i+1:n) from the left */
            // Save the diagonal, overwrite it with 1 so column i holds v,
            // apply the reflector, then restore the diagonal.
            hipMemcpy(aii, &a[i + i * lda], sizeof(T), hipMemcpyDeviceToDevice);
            hipLaunchKernelGGL(( set1) , dim3(1), dim3(1), 0, 0, &a[i + i * lda]);
            cudaCheckError();
            i2 = m - i;
            i3 = n - i - 1;
            larf(i2, i3, &a[i + i * lda], 1,
                 &tau[i], &a[i + (i + 1) * lda], lda, work);
            hipMemcpy(&a[i + i * lda], aii, sizeof(T), hipMemcpyDeviceToDevice);
        }
    }
    hipFree(aii);
}
} // end anonymous namespace
namespace
{
#ifdef AMGX_USE_MAGMA
int magma_geqrf_dispatch(int m, int n, float *A, int lda,
float *tau, float *work, int *info)
{
return magma_sgeqrf_gpu(m, n, A, lda, tau, work, info);
}
int magma_geqrf_dispatch(int m, int n, double *A, int lda,
double *tau, double *work, int *info)
{
return magma_dgeqrf_gpu(m, n, A, lda, tau, work, info);
}
// QR factorization of a device matrix via magma_?geqrf_gpu; checks the MAGMA
// status code.
template <typename T>
void magma_geqrf(int m, int n, T *A, int lda,
                 T *tau, T *work)
{
    int info;
    magma_geqrf_dispatch(m, n, A, lda, tau, work, &info);
    magmaCheckError(info);
}
int magma_orgqr_dispatch(int m, int n, int k, float *A, int lda,
float *tau, float *work, int lwork, int *info)
{
return magma_sorgqr_gpu(m, n, k, A, lda, tau, work, lwork, info);
}
int magma_orgqr_dispatch(int m, int n, int k, double *A, int lda,
double *tau, double *work, int lwork, int *info)
{
return magma_dorgqr_gpu(m, n, k, A, lda, tau, work, lwork, info);
}
// Forms the explicit Q of a prior magma_geqrf via magma_?orgqr_gpu; checks
// the MAGMA status code.
template <typename T>
void magma_orgqr(int m, int n, int k, T *A, int lda,
                 T *tau, T *work, int lwork)
{
    int info;
    magma_orgqr_dispatch(m, n, k, A, lda, tau, work, lwork, &info);
    magmaCheckError(info);
}
#endif
}
// geqrf has no host implementation; always fatal.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::geqrf(Vector<TConfig> &A,
        Vector<TConfig> &tau,
        Vector<TConfig> &work)
{
    not_implemented();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::geqrf(Vector<TConfig> &A,
Vector<TConfig_h> &tau,
Vector<TConfig> &work)
{
int rows = A.get_num_rows();
int cols = A.get_num_cols();
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_geqrf(rows, cols, A.raw(), lda, tau.raw(), work.raw());
#else
gpu_geqrf(rows, cols, A.raw(), lda, tau.raw(), work.raw());
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::orgqr(Vector<TConfig> &A,
Vector<TConfig> &tau,
Vector<TConfig> &work)
{
not_implemented();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::orgqr(Vector<TConfig> &A,
Vector<TConfig_h> &tau,
Vector<TConfig> &work)
{
int rows = A.get_num_rows();
int cols = A.get_num_cols();
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_orgqr(rows, cols, cols, A.raw(), lda, tau.raw(), work.raw(), 1);
#else
gpu_orgqr(rows, cols, cols, A.raw(), lda, tau.raw(), work.raw(), 1);
#endif
}
#define AMGX_CASE_LINE(CASE) \
template class Lapack<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
| 011c2f0ff51ef4984c5a8a3dd457cbc49854874a.cu | /* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <amgx_lapack.h>
#include <algorithm>
#include <sstream>
#include <vector>
#ifdef AMGX_USE_MAGMA
#define ADD_ 1
#define HAVE_CUBLAS 1
#include <magma.h>
#endif
#include <amgx_cublas.h>
namespace amgx
{
// Validate a LAPACK return status: a negative value means argument -status was
// illegal; a positive value means an internal error.
// NOTE: the final line of each macro must NOT end in a line-continuation
// backslash — a trailing '\' splices the next source line into the macro body
// (here it would have absorbed the following #define / template declaration).
#define lapackCheckError(status) \
    { \
        if (status < 0) \
        { \
            std::stringstream ss; \
            ss << "Lapack error: argument number " \
               << -status << " had an illegal value."; \
            FatalError(ss.str(), AMGX_ERR_INTERNAL); \
        } \
        else if (status > 0) \
            FatalError("Lapack error: internal error.", \
                       AMGX_ERR_INTERNAL); \
    }
// Validate a MAGMA return status with the same convention as lapackCheckError.
#define magmaCheckError(status) \
    { \
        if (status < 0) \
        { \
            std::stringstream ss; \
            ss << "Magma error: argument number " \
               << -status << " had an illegal value."; \
            FatalError(ss.str(), AMGX_ERR_INTERNAL); \
        } \
        else if (status > 0) \
            FatalError("Magma error: internal error.", \
                       AMGX_ERR_INTERNAL); \
    }
// Host backend: abort with a configuration error unless LAPACK support was compiled in.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::check_lapack_enabled()
{
#ifndef AMGX_USE_LAPACK
    FatalError("Error: LAPACK not enabled.", AMGX_ERR_CONFIGURATION);
#endif
}
// Host backend: abort with a configuration error unless MAGMA support was compiled in.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::check_magma_enabled()
{
#ifndef AMGX_USE_MAGMA
    FatalError("Error: MAGMA not enabled.", AMGX_ERR_CONFIGURATION);
#endif
}
// Device backend: abort with a configuration error unless MAGMA support was compiled in.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::check_magma_enabled()
{
#ifndef AMGX_USE_MAGMA
    FatalError("Error: MAGMA not enabled.", AMGX_ERR_CONFIGURATION);
#endif
}
// Stub used by host-side entry points that have no CPU implementation.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::not_implemented()
{
    FatalError("Error: LAPACK operation not implemented on host.", AMGX_ERR_CONFIGURATION);
}
// Stub used by device-side entry points that have no GPU implementation.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::not_implemented()
{
    FatalError("Error: LAPACK operation not implemented on device.", AMGX_ERR_CONFIGURATION);
}
namespace
{
#ifdef AMGX_USE_LAPACK
struct _fcomplex { float re, im; };
typedef struct _fcomplex fcomplex;
struct _dcomplex { double re, im; };
typedef struct _dcomplex dcomplex;
extern "C"
int dgeev_(char *jobvl, char *jobvr, int *n, double *a,
int *lda, double *wr, double *wi, double *vl,
int *ldvl, double *vr, int *ldvr, double *work,
int *lwork, int *info);
extern "C"
int sgeev_(char *jobvl, char *jobvr, int *n, float *a,
int *lda, float *wr, float *wi, float *vl,
int *ldvl, float *vr, int *ldvr, float *work,
int *lwork, int *info);
extern "C"
int cgeev_(char *jobvl, char *jobvr, int *n, fcomplex *a,
int *lda, fcomplex *wr, fcomplex *wi, fcomplex *vl,
int *ldvl, fcomplex *vr, int *ldvr, fcomplex *work,
int *lwork, int *info);
extern "C"
int zgeev_(char *jobvl, char *jobvr, int *n, dcomplex *a,
int *lda, dcomplex *wr, dcomplex *wi, dcomplex *vl,
int *ldvl, dcomplex *vr, int *ldvr, dcomplex *work,
int *lwork, int *info);
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, double *a,
int *lda, double *wr, double *wi, double *vl,
int *ldvl, double *vr, int *ldvr, double *work,
int *lwork, int *info)
{
return dgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info);
}
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, float *a,
int *lda, float *wr, float *wi, float *vl,
int *ldvl, float *vr, int *ldvr, float *work,
int *lwork, int *info)
{
return sgeev_(jobvl, jobvr, n, a, lda, wr, wi, vl, ldvl, vr, ldvr, work, lwork, info);
}
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, cuComplex *a,
int *lda, cuComplex *wr, cuComplex *wi, cuComplex *vl,
int *ldvl, cuComplex *vr, int *ldvr, cuComplex *work,
int *lwork, int *info)
{
return cgeev_(jobvl, jobvr, n,
reinterpret_cast<fcomplex *>(a),
lda,
reinterpret_cast<fcomplex *>(wr),
reinterpret_cast<fcomplex *>(wi),
reinterpret_cast<fcomplex *>(vl),
ldvl,
reinterpret_cast<fcomplex *>(vr),
ldvr,
reinterpret_cast<fcomplex *>(work),
lwork,
info);
}
int lapack_geev_dispatch(char *jobvl, char *jobvr, int *n, cuDoubleComplex *a,
int *lda, cuDoubleComplex *wr, cuDoubleComplex *wi, cuDoubleComplex *vl,
int *ldvl, cuDoubleComplex *vr, int *ldvr, cuDoubleComplex *work,
int *lwork, int *info)
{
return zgeev_(jobvl, jobvr, n,
reinterpret_cast<dcomplex *>(a),
lda,
reinterpret_cast<dcomplex *>(wr),
reinterpret_cast<dcomplex *>(wi),
reinterpret_cast<dcomplex *>(vl),
ldvl,
reinterpret_cast<dcomplex *>(vr),
ldvr,
reinterpret_cast<dcomplex *>(work),
lwork,
info);
}
// Compute the eigenvalues of the dim-by-dim general matrix A (host memory)
// via LAPACK ?geev. Only real parts are kept: the imaginary parts (WI) are
// computed by LAPACK but discarded. A is overwritten by LAPACK.
template <typename T>
void lapack_geev(T *A, T *eigenvalues, int dim, int lda)
{
    char job = 'N';                 // no left or right eigenvectors
    std::vector<T> WI(dim);         // imaginary parts, discarded by callers
    int ldv = 1;
    T *vl = 0;
    int work_size = 6 * dim;        // >= LAPACK's minimum workspace for ?geev
    std::vector<T> work(work_size);
    int info;
    lapack_geev_dispatch(&job, &job, &dim, A, &lda, eigenvalues, WI.data(), vl, &ldv,
                         vl, &ldv, work.data(), &work_size, &info);
    lapackCheckError(info);
    // std::vector frees WI/work automatically even if lapackCheckError throws,
    // fixing the leak of the raw new[] buffers in the previous implementation.
}
// Compute the eigenvalues and right eigenvectors of the dim-by-dim general
// matrix A (host memory) via LAPACK ?geev. A is overwritten by LAPACK;
// imaginary parts (WI) are computed but discarded.
template <typename T>
void lapack_geev(T *A, T *eigenvalues, T *eigenvectors, int dim, int lda, int ldvr)
{
    char jobvl = 'N';               // no left eigenvectors
    char jobvr = 'V';               // compute right eigenvectors
    // ?geev writes at most dim imaginary parts; the previous new[dim*dim]
    // over-allocated by a factor of dim.
    std::vector<T> WI(dim);
    int work_size = 6 * dim;        // >= LAPACK's minimum workspace for ?geev
    T *vl = 0;
    int ldvl = 1;
    std::vector<T> work(work_size);
    int info;
    lapack_geev_dispatch(&jobvl, &jobvr, &dim, A, &lda, eigenvalues, WI.data(), vl, &ldvl,
                         eigenvectors, &ldvr, work.data(), &work_size, &info);
    lapackCheckError(info);
    // std::vector frees WI/work automatically even if lapackCheckError throws,
    // fixing the leak of the raw new[] buffers in the previous implementation.
}
#endif
} // end anonymous namespace
// Host: eigenvalues of a general (square) matrix via LAPACK ?geev.
// NOTE(review): ?geev overwrites its input matrix; because of the const_cast
// below, A is modified despite being taken by const reference — confirm callers
// do not rely on A afterwards.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::geev(const Vector<TConfig> &A, Vector<TConfig> &eigenvalues)
{
    check_lapack_enabled();
    typedef typename Vector<TConfig>::value_type value_type;
    // It is possible the matrix has an extra row (e.g. Arnoldi), so operate on
    // the leading square part only.
    int dim = std::min(A.get_num_rows(), A.get_num_cols());
    int lda = A.get_lda();
    value_type *A_ptr = const_cast<value_type *>(A.raw());
#ifdef AMGX_USE_LAPACK
    lapack_geev(A_ptr, eigenvalues.raw(), dim, lda);
#else
    FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
// Host: eigenvalues and right eigenvectors of a general (square) matrix.
// Same const_cast caveat as above: A is overwritten by LAPACK.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::geev(const Vector<TConfig> &A,
        Vector<TConfig> &eigenvalues,
        Vector<TConfig> &eigenvector)
{
    check_lapack_enabled();
    typedef typename Vector<TConfig>::value_type value_type;
    // It is possible the matrix has an extra row (e.g. Arnoldi).
    int dim = std::min(A.get_num_rows(), A.get_num_cols());
    int lda = A.get_lda();
    value_type *A_ptr = const_cast<value_type *>(A.raw());
#ifdef AMGX_USE_LAPACK
    lapack_geev(A_ptr, eigenvalues.raw(), eigenvector.raw(), dim, lda, eigenvector.get_lda());
#else
    FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
// Device: general eigensolve has no GPU implementation.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::geev(const Vector<TConfig> &A, Vector<TConfig> &eigenvalues)
{
    not_implemented();
}
namespace
{
#ifdef AMGX_USE_LAPACK
extern "C"
int dtrtri_(char *uplo, char *diag, int *n, double *
a, int *lda, int *info);
extern "C"
int strtri_(char *uplo, char *diag, int *n, float *
a, int *lda, int *info);
extern "C"
int ctrtri_(char *uplo, char *diag, int *n, fcomplex *
a, int *lda, int *info);
extern "C"
int ztrtri_(char *uplo, char *diag, int *n, dcomplex *
a, int *lda, int *info);
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, float *a,
int *lda, int *info)
{
return strtri_(uplo, diag, n, a, lda, info);
}
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, double *a,
int *lda, int *info)
{
return dtrtri_(uplo, diag, n, a, lda, info);
}
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, fcomplex *a,
int *lda, int *info)
{
return ctrtri_(uplo, diag, n, a, lda, info);
}
int lapack_trtri_dispatch(char *uplo, char *diag, int *n, dcomplex *a,
int *lda, int *info)
{
return ztrtri_(uplo, diag, n, a, lda, info);
}
// Invert an upper-triangular, non-unit-diagonal matrix in place via LAPACK ?trtri.
template <typename T>
void lapack_trtri(T *A, int dim, int lda)
{
    char uplo = 'U';   // upper-triangular storage
    char diag = 'N';   // non-unit diagonal
    int info;
    lapack_trtri_dispatch(&uplo, &diag, &dim, A, &lda, &info);
    lapackCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::trtri(Vector<TConfig> &A)
{
check_lapack_enabled();
typedef typename Vector<TConfig>::value_type value_type;
int dim = std::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
#ifdef AMGX_USE_LAPACK
lapack_trtri(A.raw(), dim, lda);
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
namespace
{
#ifdef AMGX_USE_MAGMA
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, float *a,
int lda, int *info)
{
return magma_strtri_gpu(uplo, diag, n, a, lda, info);
}
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, double *a,
int lda, int *info)
{
return magma_dtrtri_gpu(uplo, diag, n, a, lda, info);
}
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, cuComplex *a,
int lda, int *info)
{
return magma_ctrtri_gpu(uplo, diag, n, a, lda, info);
}
int magma_trtri_dispatch(magma_uplo_t uplo, magma_diag_t diag, int n, cuDoubleComplex *a,
int lda, int *info)
{
return magma_ztrtri_gpu(uplo, diag, n, a, lda, info);
}
// Invert an upper-triangular, non-unit-diagonal matrix in place on the device
// via MAGMA ?trtri_gpu.
template <typename T>
void magma_trtri(T *A, int dim, int lda)
{
    magma_uplo_t uplo = MagmaUpper;     // upper-triangular storage
    magma_diag_t diag = MagmaNonUnit;   // non-unit diagonal
    int info;
    magma_trtri_dispatch(uplo, diag, dim, A, lda, &info);
    magmaCheckError(info);
}
#endif
}
// Device: in-place inverse of the upper-triangular matrix stored in A.
// A may carry an extra row (e.g. from Arnoldi); only the leading square part
// is inverted.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::trtri(Vector<TConfig> &A)
{
    check_magma_enabled();
    typedef typename Vector<TConfig>::value_type value_type;
    int dim = std::min(A.get_num_rows(), A.get_num_cols());
    int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
    magma_trtri(A.raw(), dim, lda);   // stray double semicolon removed
#else
    FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
namespace
{
#ifdef AMGX_USE_LAPACK
extern "C"
int dsygv_(int *itype, char *jobz, char *uplo, int *n,
double *a, int *lda, double *b, int *ldb,
double *w, double *work, int *lwork, int *info);
extern "C"
int ssygv_(int *itype, char *jobz, char *uplo, int *n,
float *a, int *lda, float *b, int *ldb,
float *w, float *work, int *lwork, int *info);
extern "C"
int chegv_(int *itype, char *jobz, char *uplo, int *n,
fcomplex *a, int *lda, fcomplex *b, int *ldb,
fcomplex *w, fcomplex *work, int *lwork, int *info);
extern "C"
int zhegv_(int *itype, char *jobz, char *uplo, int *n,
dcomplex *a, int *lda, dcomplex *b, int *ldb,
dcomplex *w, dcomplex *work, int *lwork, int *info);
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
double *a, int *lda, double *b, int *ldb,
double *w, double *work, int *lwork, int *info)
{
return dsygv_(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, info);
}
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
float *a, int *lda, float *b, int *ldb,
float *w, float *work, int *lwork, int *info)
{
return ssygv_(itype, jobz, uplo, n, a, lda, b, ldb, w, work, lwork, info);
}
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
cuComplex *a, int *lda, cuComplex *b, int *ldb,
cuComplex *w, cuComplex *work, int *lwork, int *info)
{
return chegv_(itype, jobz, uplo, n,
reinterpret_cast<fcomplex *>(a), lda, reinterpret_cast<fcomplex *>(b), ldb,
reinterpret_cast<fcomplex *>(w), reinterpret_cast<fcomplex *>(work), lwork, info);
}
// Double-complex variant: forwards to LAPACK zhegv_. The first matrix must be
// cuDoubleComplex — the previous `cuComplex *a` made the overload both
// type-inconsistent with the other arguments and unreachable from
// lapack_sygv<cuDoubleComplex> (no implicit pointer conversion exists).
int lapack_sygv_dispatch(int *itype, char *jobz, char *uplo, int *n,
                         cuDoubleComplex *a, int *lda, cuDoubleComplex *b, int *ldb,
                         cuDoubleComplex *w, cuDoubleComplex *work, int *lwork, int *info)
{
    return zhegv_(itype, jobz, uplo, n,
                  reinterpret_cast<dcomplex *>(a), lda, reinterpret_cast<dcomplex *>(b), ldb,
                  reinterpret_cast<dcomplex *>(w), reinterpret_cast<dcomplex *>(work), lwork, info);
}
// Generalized symmetric-definite eigensolve via LAPACK ?sygv/?hegv
// (itype = 1: A*x = lambda*B*x) with eigenvectors (jobz = 'V').
// gramA is overwritten with the eigenvectors; gramB with its Cholesky factor.
// NOTE(review): despite its name, `eigenvector` is passed as the W argument,
// i.e. it receives the eigenvalues — confirm against callers.
template <typename T>
void lapack_sygv(T *gramA, T *gramB, T *eigenvector, int dim, int lda, T *work)
{
    int itype = 1;
    char jobz = 'V';
    char uplo = 'U';
    int ldb = lda;
    // NOTE(review): lwork is hard-coded. LAPACK requires lwork >= 3*n-1 for
    // ?sygv, so 1024 is insufficient for dim > 341; also verify the caller's
    // `work` buffer actually holds 1024 entries.
    int lwork = 1024;
    int info = 0;
    lapack_sygv_dispatch(&itype, &jobz, &uplo, &dim, gramA, &lda, gramB, &ldb, eigenvector, work, &lwork, &info);
    lapackCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::sygv(Vector<TConfig> &A, Vector<TConfig> &B,
Vector<TConfig> &eigenvalues, Vector<TConfig> &work)
{
check_lapack_enabled();
typedef typename Vector<TConfig>::value_type value_type;
int dim = std::min(A.get_num_rows(), A.get_num_cols());
int lda = A.get_lda();
#ifdef AMGX_USE_LAPACK
lapack_sygv(A.raw(), B.raw(), eigenvalues.raw(), dim, lda, work.raw());
#else
FatalError("Lapack is not supported in this build", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
namespace
{
#ifdef AMGX_USE_MAGMA
void magma_trsm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
float alpha, float const *dA, magma_int_t lda,
float *dB, magma_int_t ldb)
{
return magma_strsm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
void magma_trsm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
double alpha, double const *dA, magma_int_t lda,
double *dB, magma_int_t ldb)
{
return magma_dtrsm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
void magma_trmm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
float alpha, float const *dA, magma_int_t lda,
float *dB, magma_int_t ldb)
{
return magma_strmm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
void magma_trmm_dispatch(magma_side_t side, magma_uplo_t uplo, magma_trans_t trans,
magma_diag_t diag, magma_int_t m, magma_int_t n,
double alpha, double const *dA, magma_int_t lda,
double *dB, magma_int_t ldb)
{
return magma_dtrmm(side, uplo, trans, diag, m, n, alpha, dA, lda, dB, ldb);
}
int magma_potrf_gpu_dispatch(magma_uplo_t uplo, int n, float *A, int lda, int *info)
{
return magma_spotrf_gpu(uplo, n, A, lda, info);
}
int magma_potrf_gpu_dispatch(magma_uplo_t uplo, int n, double *A, int lda, int *info)
{
return magma_dpotrf_gpu(uplo, n, A, lda, info);
}
int magma_sygst_gpu_dispatch(int itype, magma_uplo_t uplo, magma_int_t n, float *da,
int ldda, float *B, int lddb, int *info)
{
return magma_ssygst_gpu(itype, uplo, n, da, ldda, B, lddb, info);
}
int magma_sygst_gpu_dispatch(int itype, magma_uplo_t uplo, magma_int_t n, double *da,
int ldda, double *B, int lddb, int *info)
{
return magma_dsygst_gpu(itype, uplo, n, da, ldda, B, lddb, info);
}
int magma_syevd_gpu_dispatch(magma_vec_t jobz, magma_uplo_t uplo, int n, double *da, int ldda,
double *w, double *wa, int ldwa, double *work,
int lwork, int *iwork, int liwork, int *info)
{
return magma_dsyevd_gpu(jobz, uplo, n, da, ldda, w, wa, ldwa, work, lwork, iwork, liwork, info);
}
int magma_syevd_gpu_dispatch(magma_vec_t jobz, magma_uplo_t uplo, int n, float *da, int ldda,
float *w, float *wa, int ldwa, float *work,
int lwork, int *iwork, int liwork, int *info)
{
return magma_ssyevd_gpu(jobz, uplo, n, da, ldda, w, wa, ldwa, work, lwork, iwork, liwork, info);
}
// This is a simple modification of the magma_?sygvd() source code
// from magma where the matrices are already on the device.
// Port of MAGMA's ?sygvd driver where all matrices already live on the device:
// solves the generalized symmetric-definite eigenproblem (itype selects
// A*x=lambda*B*x, A*B*x=lambda*x or B*A*x=lambda*x) by Cholesky-factoring db,
// reducing to a standard problem (sygst), solving it (syevd), then
// back-transforming the eigenvectors with trsm/trmm.
// Returns a MAGMA status; *info carries the LAPACK-style error code.
// NOTE(review): the `static` locals below make this function non-reentrant and
// not thread-safe; the stream is created and destroyed on every call.
template <typename T>
magma_int_t magma_sygvd_gpu_impl(magma_int_t itype, magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n,
                                 T *da, magma_int_t lda, T *db, magma_int_t ldb, T *w,
                                 T *work, magma_int_t lwork, magma_int_t *iwork, magma_int_t liwork,
                                 T *wa, magma_int_t *info)
{
    magma_uplo_t uplo_[2] = {uplo, MagmaLower}; // {uplo, 0}
    magma_vec_t jobz_[2] = {jobz, MagmaVec};//{jobz, 0};
    T d_one = MAGMA_D_ONE;
    magma_int_t ldda = n;
    magma_int_t lddb = n;
    static magma_int_t lower;
    static char trans[1];
    static magma_int_t wantz, lquery;
    static magma_int_t lopt, lwmin, liopt, liwmin;
    static cudaStream_t stream;
    magma_queue_create( &stream );
    wantz = jobz_[0] == MagmaVec;
    lower = uplo_[0] == MagmaLower;
    lquery = lwork == -1 || liwork == -1;    // LAPACK workspace-query convention
    /* Argument validation, LAPACK style: *info = -k flags argument k. */
    *info = 0;
    if (itype < 1 || itype > 3)
    {
        *info = -1;
    }
    else if (! (wantz || jobz_[0] == MagmaNoVec))
    {
        *info = -2;
    }
    else if (! (lower || uplo_[0] == MagmaUpper))
    {
        *info = -3;
    }
    else if (n < 0)
    {
        *info = -4;
    }
    else if (lda < max(1, n))
    {
        *info = -6;
    }
    else if (ldb < max(1, n))
    {
        *info = -8;
    }
    /* Minimum workspace sizes, depending on whether eigenvectors are wanted. */
    magma_int_t nb = magma_get_dsytrd_nb(n);
    if (n < 1)
    {
        liwmin = 1;
        lwmin = 1;
    }
    else if (wantz)
    {
        lwmin = 1 + 6 * n * nb + 2 * n * n;
        liwmin = 5 * n + 3;
    }
    else
    {
        lwmin = 2 * n * nb + 1;
        liwmin = 1;
    }
    lopt = lwmin;
    liopt = liwmin;
    work[ 0] = lopt;      // report optimal sizes back to the caller
    iwork[0] = liopt;
    if (lwork < lwmin && ! lquery)
    {
        *info = -11;
    }
    else if (liwork < liwmin && ! lquery)
    {
        *info = -13;
    }
    if (*info != 0)
    {
        magma_xerbla( __func__, -(*info) );
        return MAGMA_ERR_ILLEGAL_VALUE;
    }
    else if (lquery)
    {
        return MAGMA_SUCCESS;
    }
    /* Quick return if possible */
    if (n == 0)
    {
        return 0;
    }
    /* Cholesky-factor B; a positive *info means B is not positive definite. */
    magma_potrf_gpu_dispatch(uplo_[0], n, db, lddb, info);
    if (*info != 0)
    {
        *info = n + *info;
        return 0;
    }
    /* Transform problem to standard eigenvalue problem and solve. */
    magma_sygst_gpu_dispatch(itype, uplo_[0], n, da, ldda, db, lddb, info);
    magma_syevd_gpu_dispatch(jobz_[0], uplo_[0], n, da, ldda, w, wa, lda,
                             work, lwork, iwork, liwork, info);
    lopt = max( lopt, (magma_int_t) work[0]);
    liopt = max(liopt, iwork[0]);
    if (wantz && *info == 0)
    {
        /* Backtransform eigenvectors to the original problem. */
        if (itype == 1 || itype == 2)
        {
            /* For A*x=(lambda)*B*x and A*B*x=(lambda)*x;
               backtransform eigenvectors: x = inv(L)'*y or inv(U)*y */
            if (lower)
            {
                *(unsigned char *)trans = MagmaTrans;
            }
            else
            {
                *(unsigned char *)trans = MagmaNoTrans;
            }
            magma_trsm_dispatch(MagmaLeft, uplo_[0], *trans, MagmaNonUnit,
                                n, n, d_one, db, lddb, da, ldda);
        }
        else if (itype == 3)
        {
            /* For B*A*x=(lambda)*x;
               backtransform eigenvectors: x = L*y or U'*y */
            if (lower)
            {
                *(unsigned char *)trans = MagmaNoTrans;
            }
            else
            {
                *(unsigned char *)trans = MagmaTrans;
            }
            magma_trmm_dispatch(MagmaLeft, uplo_[0], *trans, MagmaNonUnit,
                                n, n, d_one, db, lddb, da, ldda);
        }
    }
    magma_queue_sync( stream );
    magma_queue_destroy( stream );
    work[0] = (T) lopt;
    iwork[0] = liopt;
    return MAGMA_SUCCESS;
}
cublasStatus_t cublas_trsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb)
{
return cublasStrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
}
cublasStatus_t cublas_trsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb)
{
return cublasDtrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb);
}
// Generalized symmetric-definite eigensolve on the device (itype = 1,
// A*x = lambda*B*x) composed from individual MAGMA calls:
// Cholesky(B) -> sygst reduction -> syevd on A -> cuBLAS trsm back-transform.
// A and B are device pointers; `eigenvalues` is a host pointer (MAGMA's
// *_gpu eigensolvers take host W/work arrays).
// NOTE(review): the `static` scratch vectors make this non-thread-safe but
// avoid reallocating workspace on every call.
template <typename T>
void magma_sygvd_gpu(T *A, T *B, T *eigenvalues, int dim, int lda)
{
    int itype = 1;                    // A*x = lambda*B*x
    magma_vec_t jobz = MagmaVec;      // compute eigenvectors (into A)
    magma_uplo_t uplo = MagmaUpper;
    int N = dim;
    int ldb = lda;
    int nb = 32;
    int lwork = 1 + 6 * N * nb + 2 * N * N;   // sizes mirror magma_sygvd_gpu_impl
    static std::vector<T> s_work;
    s_work.resize(lwork);
    T *work = &s_work[0];
    int liwork = 3 + 5 * N;
    static std::vector<int> s_iwork;
    s_iwork.resize(liwork);
    int *iwork = &s_iwork[0];
    static std::vector<T> s_wa;
    s_wa.resize(lda * N);
    T *wa = &s_wa[0];
    int ldwa = N;
    int info;
    /*
       magma_sygvd_gpu_impl(itype, jobz, uplo, N, A, lda, B, ldb, eigenvalues, work, lwork, iwork, liwork, wa, &info);
    */
    magma_potrf_gpu_dispatch(uplo, N, B, lda, &info);
    magmaCheckError(info);
    magma_sygst_gpu_dispatch(itype, uplo, N, A, lda, B, ldb, &info);
    magmaCheckError(info);
    magma_syevd_gpu_dispatch(jobz, uplo, N, A, lda, eigenvalues, wa, ldwa, work, lwork, iwork, liwork, &info);
    magmaCheckError(info);
    // Back-transform eigenvectors: A := inv(U) * A, where U = chol(B).
    T one = 1;
    cublasHandle_t handle = Cublas::get_handle();
    cublas_trsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, N, N, &one, B, ldb, A, lda);
}
#endif
}
// Device: generalized symmetric-definite eigensolve A*x = lambda*B*x.
// A and B are device matrices; `eigenvalues` is a host vector. Per the MAGMA
// calls in magma_sygvd_gpu, A is overwritten with the eigenvectors and B with
// its Cholesky factor.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::sygv(Vector<TConfig> &A, Vector<TConfig> &B,
        Vector<TConfig_h> &eigenvalues, Vector<TConfig> &work)
{
    // Fail loudly in non-MAGMA builds instead of silently returning with the
    // outputs untouched (matches syevd/trtri above).
    check_magma_enabled();
    typedef typename Vector<TConfig>::value_type value_type;
    int dim = std::min(A.get_num_rows(), A.get_num_cols());
    int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
    magma_sygvd_gpu(A.raw(), B.raw(), eigenvalues.raw(), dim, lda);
#endif
}
namespace
{
#ifdef AMGX_USE_MAGMA
// Standard symmetric eigensolve on the device via MAGMA ?syevd_gpu.
// A is a device pointer (overwritten per jobz = MagmaVec); `eigenvalues`,
// and the work/iwork/wa scratch arrays, are host memory as MAGMA requires.
// NOTE(review): `static` scratch vectors => not thread-safe (reused across calls).
template <typename T>
void magma_syevd_gpu(T *A, T *eigenvalues, int dim, int lda)
{
    magma_vec_t jobz = MagmaVec;      // compute eigenvectors
    magma_uplo_t uplo = MagmaUpper;
    int N = dim;
    int nb = 32;
    int lwork = 1 + 6 * N * nb + 2 * N * N;   // divide-and-conquer workspace size
    static std::vector<T> s_work;
    s_work.resize(lwork);
    T *work = &s_work[0];
    int liwork = 3 + 5 * N;
    static std::vector<int> s_iwork;
    s_iwork.resize(liwork);
    int *iwork = &s_iwork[0];
    static std::vector<T> s_wa;
    s_wa.resize(lda * N);
    T *wa = &s_wa[0];
    int ldwa = N;
    int info;
    magma_syevd_gpu_dispatch(jobz, uplo, N, A, lda, eigenvalues, wa, ldwa, work, lwork, iwork, liwork, &info);
    magmaCheckError(info);
}
#endif
}
// Device: standard symmetric eigensolve of the leading square part of A
// (device matrix); eigenvalues land in a host vector.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::syevd(Vector<TConfig> &A,
        Vector<TConfig_h> &eigenvalues)
{
    check_magma_enabled();
    typedef typename Vector<TConfig>::value_type value_type;
    int dim = std::min(A.get_num_rows(), A.get_num_cols());
    int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
    magma_syevd_gpu(A.raw(), eigenvalues.raw(), dim, lda);
#endif
}
// Host: no CPU implementation of syevd.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::syevd(Vector<TConfig> &A,
        Vector<TConfig> &eigenvalues)
{
    not_implemented();
}
namespace
{
#ifdef AMGX_USE_MAGMA
int magma_stedx_dispatch(magma_range_t range, int n,
double vl, double vu,
int il, int iu,
double *d, double *e, double *z, int ldz,
double *work, int lwork, int *iwork, int liwork,
double *dwork, int *info)
{
return magma_dstedx(range, n, vl, vu, il, iu, d, e, z, ldz, work, lwork, iwork, liwork, dwork, info);
}
int magma_stedx_dispatch(magma_range_t range, int n,
float vl, float vu,
int il, int iu,
float *d, float *e, float *z, int ldz,
float *work, int lwork, int *iwork, int liwork,
float *dwork, int *info)
{
return magma_sstedx(range, n, vl, vu, il, iu, d, e, z, ldz, work, lwork, iwork, liwork, dwork, info);
}
// Selected eigenpairs (index range [lower, upper]) of a symmetric tridiagonal
// matrix via MAGMA's divide-and-conquer ?stedx. `dwork` is caller-provided
// device scratch; `dwork_size` is currently unused (kept for interface
// compatibility).
// NOTE(review): `static` scratch vectors => not thread-safe.
template <typename T>
void magma_stedx(T *diagonal, T *subdiagonal, T *eigenvectors,
                 int lower, int upper, int dim, int ldz, T *dwork, int dwork_size)
{
    magma_range_t range = MagmaRangeI;   // select eigenpairs by index range
    int N = dim;
    T vl = 0;                            // value bounds unused for MagmaRangeI
    T vu = 0;
    int il = lower;
    int iu = upper;
    int lwork = 1 + 4 * N + 2 * N * N;   // workspace sizes required by ?stedx
    static std::vector<T> s_work;
    s_work.resize(lwork);
    int liwork = 3 + 6 * N;
    static std::vector<int> s_iwork;
    s_iwork.resize(liwork);
    int info;
    magma_stedx_dispatch(range, N, vl, vu, il, iu, diagonal, subdiagonal, eigenvectors, ldz,
                         &s_work[0], lwork, &s_iwork[0], liwork, dwork, &info);
    // The original discarded `info`; check it like every other MAGMA wrapper here.
    magmaCheckError(info);
}
#endif
}
// Host-facing entry for the tridiagonal eigensolve (data passed through to MAGMA).
// NOTE(review): lower = upper = dim requests the single eigenpair with index
// `dim` (il = iu), not the full spectrum — confirm this is the intended use.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::stedx(Vector<TConfig> &diagonal,
        Vector<TConfig> &subdiagonal,
        Vector<TConfig> &eigenvectors,
        int dim,
        Vector<TConfig_d> &dwork)
{
    check_magma_enabled();
#ifdef AMGX_USE_MAGMA
    magma_stedx(diagonal.raw(), subdiagonal.raw(), eigenvectors.raw(),
                dim, dim, dim, eigenvectors.get_lda(),
                dwork.raw(), dwork.size());
#endif
}
namespace
{
// Apply an elementary Householder reflector to the m-by-n matrix C from the
// left via cuBLAS:  w := C' * v ;  C := C + tau * v * w'.
// `tau` here is expected pre-negated (see compute_tau_host), so the rank-1
// update uses +tau where LAPACK's dlarf uses -tau. All pointers except `tau`
// are device memory.
template <typename T>
void larf(int m, int n, T *v,
          int incv, T *tau, T *c, int ldc,
          T *work)
{
    /* Table of constant values */
    static T c_b4 = 1.;
    static T c_b5 = 0.;
    static int c1 = 1;
    /* Form H * C */
    /* w := C' * v */
    Cublas::gemv(true, m, n, &c_b4, c, ldc,
                 v, incv, &c_b5, work, c1);
    /* C := C - v * w' */
    Cublas::ger(m, n, tau, v, incv, work, c1, c, ldc);
}
// Single-thread kernel: overwrite *a with 1. Used to temporarily place a unit
// diagonal element before applying a reflector with larf().
template <typename T>
__global__
void set1(T *a)
{
    *a = 1.;
}
// Single-thread kernel: overwrite *a with 1 + tau (restores the diagonal entry
// of Q during gpu_orgqr).
template <typename T>
__global__
void add_tau(T *a, T tau)
{
    *a = 1 + tau;
}
// Form the explicit m-by-n Q factor from the Householder reflectors produced
// by gpu_geqrf (LAPACK ?orgqr equivalent), accumulating reflectors back to
// front. `a` and `work` are device pointers; `tau` is a host array (tau[i] is
// read on the host below). `lwork` is unused.
template <typename T>
void gpu_orgqr(int m, int n, int k,
               T *a, int lda, T *tau, T *work, int lwork)
{
    int i1, i2;
    for (int i = k - 1; i >= 0; --i)
    {
        /* Apply H(i) to A(i:m,i:n) from the left */
        if (i < n - 1)
        {
            // Place a unit on the diagonal so column i acts as the reflector v.
            set1 <<< 1, 1>>>(&a[i + i * lda]);
            i1 = m - i;
            i2 = n - i - 1;
            larf(i1, i2, &a[i + i * lda], 1, &tau[i],
                 &a[i + (i + 1) * lda], lda, work);
        }
        if (i < m - 1)
        {
            // Scale the sub-diagonal part of column i by tau(i).
            i1 = m - i - 1;
            Cublas::scal(i1, &tau[i], &a[i + 1 + i * lda], 1);
        }
        // Diagonal entry of Q's i-th column becomes 1 + tau(i) (tau pre-negated).
        add_tau <<< 1, 1>>>(&a[i + i * lda], tau[i]);
        /* Set A(1:i-1,i) to zero */
        cudaMemset(&a[i * lda], 0, sizeof(T) * i);
    }
    cudaCheckError();
}
// sqrt(a*a + b*b), callable from host and device code. Unlike LAPACK's dlapy2
// there is no overflow/underflow guard.
template <typename T>
__device__ __host__
T lapy2_(T *a, T *b)
{
    const T x = *a;
    const T y = *b;
    return sqrt(x * x + y * y);
}
// Magnitude of `a` carrying the sign of `b` — Fortran SIGN(a, b) semantics,
// where b == 0 counts as non-negative.
template <typename T>
__device__ __host__
T d_sign(T a, T b)
{
    const T magnitude = (a >= 0) ? a : -a;
    return (b >= 0) ? magnitude : -magnitude;
}
// Host-side core of larfg: from the diagonal value *alpha and the norm of the
// rest of the column, compute the reflector parameters.
// Outputs: *tau (reflector coefficient, pre-negated — see below), *d1 (the
// factor 1/(alpha - beta) used to scale the reflector vector), and *alpha is
// replaced by beta (the new diagonal value).
template <typename T>
void compute_tau_host(T *alpha, T *norm,
                      T *tau, T *d1)
{
    *d1 = lapy2_(alpha, norm);          // |(alpha, norm)|
    T beta = -d_sign(*d1, *alpha);      // beta has the opposite sign of alpha
    // LAPACK: skipped part about scaling.
    // Negated compared to LAPACK code, avoid negating value on device later.
    *tau = -(beta - *alpha) / beta;
    *d1 = 1. / (*alpha - beta);
    *alpha = beta;
}
// Generate an elementary Householder reflector (LAPACK ?larfg equivalent).
// `alpha` points to the diagonal element in device memory; `x` is the rest of
// the column (device); `tau` is a host pointer. On return x holds the scaled
// reflector vector, *alpha the new (device) diagonal value, *tau the
// coefficient.
template <typename T>
void larfg(int n, T *alpha, T *x,
           int incx, T *tau)
{
    if (n <= 1)
    {
        // Nothing to annihilate: H = I.
        *tau = 0.;
        return;
    }
    int i1 = n - 1;
    T xnorm;
    Cublas::nrm2(i1, x, incx, &xnorm);
    // Bring the diagonal value to the host to do the scalar math there.
    T h_alpha;
    cudaMemcpy(&h_alpha, alpha, sizeof(T), cudaMemcpyDeviceToHost);
    T d1;
    compute_tau_host(&h_alpha, &xnorm, tau, &d1);
    // Scale the reflector vector by 1/(alpha - beta).
    Cublas::scal(i1, d1, x, incx);
    // Update the diagonal value on the device.
    cudaMemcpy(alpha, &h_alpha, sizeof(T), cudaMemcpyHostToDevice);
}
// Unblocked Householder QR factorization of the m-by-n device matrix `a`
// (LAPACK ?geqrf equivalent): per column, build a reflector (larfg) and apply
// it to the trailing submatrix (larf). `tau` is a host array; `work` is device
// scratch used by larf (holds up to n-1 entries).
// NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked here, and
// the one-element `aii` buffer is allocated/freed on every call.
template <typename T>
void gpu_geqrf(int m, int n, T *a, int lda,
               T *tau, T *work)
{
    int k = std::min(m, n);
    // One-element device buffer used to save/restore the diagonal entry while
    // larf() temporarily needs a unit there.
    T *aii;
    cudaMalloc(&aii, sizeof(T));
    for (int i = 0; i < k; ++i)
    {
        /* Generate elementary reflector H(i) to annihilate A(i+1:m,i) */
        int i2 = m - i;
        /* Computing MIN */
        int i3 = i + 1;
        larfg(i2, &a[i + i * lda],
              &a[std::min(i3, m - 1) + i * lda],
              1, &tau[i]);
        if (i < n - 1)
        {
            /* Apply H(i) to A(i:m,i+1:n) from the left */
            cudaMemcpy(aii, &a[i + i * lda], sizeof(T), cudaMemcpyDeviceToDevice);
            set1 <<< 1, 1>>>(&a[i + i * lda]);
            cudaCheckError();
            i2 = m - i;
            i3 = n - i - 1;
            larf(i2, i3, &a[i + i * lda], 1,
                 &tau[i], &a[i + (i + 1) * lda], lda, work);
            // Restore the saved diagonal entry clobbered by set1().
            cudaMemcpy(&a[i + i * lda], aii, sizeof(T), cudaMemcpyDeviceToDevice);
        }
    }
    cudaFree(aii);
}
} // end anonymous namespace
namespace
{
#ifdef AMGX_USE_MAGMA
int magma_geqrf_dispatch(int m, int n, float *A, int lda,
float *tau, float *work, int *info)
{
return magma_sgeqrf_gpu(m, n, A, lda, tau, work, info);
}
int magma_geqrf_dispatch(int m, int n, double *A, int lda,
double *tau, double *work, int *info)
{
return magma_dgeqrf_gpu(m, n, A, lda, tau, work, info);
}
template <typename T>
void magma_geqrf(int m, int n, T *A, int lda,
T *tau, T *work)
{
int info;
magma_geqrf_dispatch(m, n, A, lda, tau, work, &info);
magmaCheckError(info);
}
int magma_orgqr_dispatch(int m, int n, int k, float *A, int lda,
float *tau, float *work, int lwork, int *info)
{
return magma_sorgqr_gpu(m, n, k, A, lda, tau, work, lwork, info);
}
int magma_orgqr_dispatch(int m, int n, int k, double *A, int lda,
double *tau, double *work, int lwork, int *info)
{
return magma_dorgqr_gpu(m, n, k, A, lda, tau, work, lwork, info);
}
template <typename T>
void magma_orgqr(int m, int n, int k, T *A, int lda,
T *tau, T *work, int lwork)
{
int info;
magma_orgqr_dispatch(m, n, k, A, lda, tau, work, lwork, &info);
magmaCheckError(info);
}
#endif
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::geqrf(Vector<TConfig> &A,
Vector<TConfig> &tau,
Vector<TConfig> &work)
{
not_implemented();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::geqrf(Vector<TConfig> &A,
Vector<TConfig_h> &tau,
Vector<TConfig> &work)
{
int rows = A.get_num_rows();
int cols = A.get_num_cols();
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_geqrf(rows, cols, A.raw(), lda, tau.raw(), work.raw());
#else
gpu_geqrf(rows, cols, A.raw(), lda, tau.raw(), work.raw());
#endif
}
// Explicit-Q construction for host-side vectors. Not supported: only the
// device specialization below implements orgqr, so this aborts via
// not_implemented().
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::orgqr(Vector<TConfig> &A,
Vector<TConfig> &tau,
Vector<TConfig> &work)
{
not_implemented();
}
// Overwrites the device matrix A with the explicit Q factor of the
// preceding geqrf call, using all `cols` reflectors (k == n) and the
// scalars in tau.
// NOTE(review): lwork is hard-coded to 1 in both branches — presumably the
// underlying routines ignore it or treat it as a query-free minimal size;
// verify against the MAGMA / gpu_orgqr signatures.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Lapack< TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::orgqr(Vector<TConfig> &A,
Vector<TConfig_h> &tau,
Vector<TConfig> &work)
{
int rows = A.get_num_rows();
int cols = A.get_num_cols();
int lda = A.get_lda();
#ifdef AMGX_USE_MAGMA
magma_orgqr(rows, cols, cols, A.raw(), lda, tau.raw(), work.raw(), 1);
#else
gpu_orgqr(rows, cols, cols, A.raw(), lda, tau.raw(), work.raw(), 1);
#endif
}
#define AMGX_CASE_LINE(CASE) \
template class Lapack<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
|
6574c859128a14dd9b0ada51d61c098b7a608c14.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <paddle/fluid/memory/allocation/allocator.h>
#include <stdio.h>
#include <string>
#include <vector>
#include "cub/hipcub/hipcub.hpp"
#include "gather.cu.h"
#include "math_function.h"
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/for_range.h"
#include "safe_ref.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
#define PI 3.141592654
namespace {
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
int const kThreadsPerBlock = sizeof(uint64_t) * 8;
static const double kBBoxClipDefault = ::log(1000.0 / 16.0);
// Device functor for platform::ForRange: fills out_[i] = start_ + i * delta_,
// i.e. an arithmetic progression. Used below to build the identity index
// buffer [0, 1, ..., n-1] that radix sort permutes.
struct RangeInitFunctor {
int start_;
int delta_;
int *out_;
__device__ void operator()(size_t i) { out_[i] = start_ + i * delta_; }
};
// Sorts `value` (treated as a flat array of value.numel() keys) into
// descending order. Sorted keys are written to *value_out and the original
// position of each sorted key to *index_out, so index_out is the
// descending-order permutation of [0, num).
// NOTE(review): both cub calls run on the default stream (no stream
// argument), while the rest of this file queues work on ctx.stream() —
// confirm the intended stream ordering.
template <typename T>
static void RSortDescending(const platform::CUDADeviceContext &ctx,
const Tensor &value,
Tensor *value_out,
Tensor *index_out) {
int num = static_cast<int>(value.numel());
Tensor index_in_t;
int *idx_in = index_in_t.mutable_data<int>({num}, ctx.GetPlace());
platform::ForRange<platform::CUDADeviceContext> for_range(ctx, num);
// Fill idx_in with 0..num-1 on the device.
for_range(RangeInitFunctor{0, 1, idx_in});
int *idx_out = index_out->mutable_data<int>({num}, ctx.GetPlace());
const T *keys_in = value.data<T>();
T *keys_out = value_out->mutable_data<T>({num}, ctx.GetPlace());
// Determine temporary device storage requirements
size_t temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairsDescending<T, int>(
nullptr, temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num);
// Allocate temporary storage
auto place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
auto d_temp_storage = memory::Alloc(place, temp_storage_bytes);
// Run sorting operation
hipcub::DeviceRadixSort::SortPairsDescending<T, int>(d_temp_storage->ptr(),
temp_storage_bytes,
keys_in,
keys_out,
idx_in,
idx_out,
num);
}
// Device functor that decodes rotated-box regression deltas into absolute
// proposals. For output slot i it reads anchor/delta row index[i]; each row
// holds 5 values (cx, cy, w, h, angle). When `var` is non-null the deltas
// are first divided by the per-element variances. Width/height deltas are
// clamped to kBBoxClipDefault before exp() to avoid overflow. The angle
// delta appears to be converted radians->degrees (de_g / PI * 180) and added
// to the anchor angle — TODO confirm against the anchor generator.
// NOTE(review): despite the "AndClip" in the name, no clipping to the image
// bounds is performed and im_info is never read.
template <typename T>
struct RBoxDecodeAndClipFunctor {
const T *anchor;
const T *deltas;
const T *var;
const int *index;
const T *im_info;
T *proposals;
RBoxDecodeAndClipFunctor(const T *anchor,
const T *deltas,
const T *var,
const int *index,
const T *im_info,
T *proposals)
: anchor(anchor),
deltas(deltas),
var(var),
index(index),
im_info(im_info),
proposals(proposals) {}
T bbox_clip_default{static_cast<T>(kBBoxClipDefault)};
__device__ void operator()(size_t i) {
// k = start of the 5-value row selected for output slot i.
int k = index[i] * 5;
T w = anchor[k + 2];
T h = anchor[k + 3];
T cx = anchor[k];
T cy = anchor[k + 1];
T angle = anchor[k + 4];
T de_cx = deltas[k];
T de_cy = deltas[k + 1];
T de_w = deltas[k + 2];
T de_h = deltas[k + 3];
T de_g = deltas[k + 4];
T d_cx, d_cy, d_w, d_h, d_g;
if (var) {
d_cx = cx + de_cx * w / var[k];
d_cy = cy + de_cy * h / var[k + 1];
d_w = exp(Min(de_w / var[k + 2], bbox_clip_default)) * w;
d_h = exp(Min(de_h / var[k + 3], bbox_clip_default)) * h;
d_g = de_g / var[k + 4] * 1.0 / PI * 180 + angle;
} else {
d_cx = cx + de_cx * w;
d_cy = cy + de_cy * h;
d_w = exp(Min(de_w, bbox_clip_default)) * w;
d_h = exp(Min(de_h, bbox_clip_default)) * h;
d_g = de_g * 1.0 / PI * 180 + angle;
}
proposals[i * 5] = d_cx;
proposals[i * 5 + 1] = d_cy;
proposals[i * 5 + 2] = d_w;
proposals[i * 5 + 3] = d_h;
proposals[i * 5 + 4] = d_g;
}
__device__ __forceinline__ T Min(T a, T b) const { return a > b ? b : a; }
__device__ __forceinline__ T Max(T a, T b) const { return a > b ? a : b; }
};
// Compacts into `keep` the indices of boxes whose width and height are both
// >= min_size; the number kept is written to keep_num[0].
// The caller launches this with a SINGLE block (<<<1, 256>>> below): `cnt`
// is an ordinary per-thread local, and only threadIdx.x == 0 appends to
// `keep` and publishes keep_num, so a multi-block launch would be wrong.
// NOTE(review): im_h/im_w/im_scale are read but never used.
// NOTE(review): the __syncthreads() calls sit inside the grid-stride loop;
// if `num` is not a multiple of blockDim.x the last iteration is divergent
// (some threads have exited the loop), which is undefined behavior for a
// block barrier — confirm callers only pass compatible sizes.
template <typename T, int BlockSize>
static __global__ void RFilterBBoxes(const T *bboxes,
const T *im_info,
const T min_size,
const int num,
int *keep_num,
int *keep) {
T im_h = im_info[0];
T im_w = im_info[1];
T im_scale = im_info[2];
int cnt = 0;
__shared__ int keep_index[BlockSize];
CUDA_1D_KERNEL_LOOP(i, num) {
// Each thread votes -1 (reject) or its own index into shared memory.
keep_index[threadIdx.x] = -1;
__syncthreads();
int k = i * 5;
T cx = bboxes[k];
T cy = bboxes[k + 1];
T w_s = bboxes[k + 2];
T h_s = bboxes[k + 3];
if (w_s >= min_size && h_s >= min_size) {
keep_index[threadIdx.x] = i;
}
__syncthreads();
// Thread 0 serially compacts this tile's survivors into `keep`.
if (threadIdx.x == 0) {
int size = (num - i) < BlockSize ? num - i : BlockSize;
for (int j = 0; j < size; ++j) {
if (keep_index[j] > -1) {
keep[cnt++] = keep_index[j];
}
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
keep_num[0] = cnt;
}
}
// Signed area of the triangle (a, b, c): half the z-component of the cross
// product (a - c) x (b - c). Sign encodes orientation of the three points.
__device__ inline float trangle_area(float *a, float *b, float *c) {
float ux = a[0] - c[0];
float uy = a[1] - c[1];
float vx = b[0] - c[0];
float vy = b[1] - c[1];
return (ux * vy - uy * vx) / 2.0;
}
// Area of the polygon stored as num_of_inter (x, y) pairs in int_pts, by fan
// triangulation from vertex 0: sum of |triangle(p0, p[k-1], p[k])|.
// Assumes the vertices are already ordered (see reorder_pts).
__device__ inline float area(float *int_pts, int num_of_inter) {
float total = 0.0;
for (int k = 2; k < num_of_inter; k++) {
total += fabs(trangle_area(int_pts, int_pts + 2 * (k - 1), int_pts + 2 * k));
}
return total;
}
// Sorts the num_of_inter (x, y) points in int_pts angularly around their
// centroid so they form a simple polygon, using an insertion sort on a
// monotone "pseudo-angle" key derived from the normalized direction vector:
// for y >= 0 the key is cos(theta) in [-1, 1]; for y < 0 it is mapped to
// -2 - cos(theta) so the key stays strictly ordered over the full circle.
// NOTE(review): if a point coincides with the centroid, d == 0 and the
// normalization divides by zero — presumably never happens for proper
// polygon intersections; confirm.
__device__ inline void reorder_pts(float *int_pts, int num_of_inter) {
if (num_of_inter > 0) {
float center[2] = {0.0, 0.0};
// center[0] = 0.0;
// center[1] = 0.0;
for (int i = 0; i < num_of_inter; i++) {
center[0] += int_pts[2 * i];
center[1] += int_pts[2 * i + 1];
}
center[0] /= num_of_inter;
center[1] /= num_of_inter;
float vs[16];
float v[2];
float d;
// Compute the pseudo-angle key for every point.
for (int i = 0; i < num_of_inter; i++) {
v[0] = int_pts[2 * i] - center[0];
v[1] = int_pts[2 * i + 1] - center[1];
d = sqrt(v[0] * v[0] + v[1] * v[1]);
v[0] = v[0] / d;
v[1] = v[1] / d;
if (v[1] < 0) {
v[0] = -2 - v[0];
}
vs[i] = v[0];
}
// Insertion sort of (key, point) pairs by ascending key.
float temp, tx, ty;
int j;
for (int i = 1; i < num_of_inter; ++i) {
if (vs[i - 1] > vs[i]) {
temp = vs[i];
tx = int_pts[2 * i];
ty = int_pts[2 * i + 1];
j = i;
while (j > 0 && vs[j - 1] > temp) {
vs[j] = vs[j - 1];
int_pts[j * 2] = int_pts[j * 2 - 2];
int_pts[j * 2 + 1] = int_pts[j * 2 - 1];
j--;
}
vs[j] = temp;
int_pts[j * 2] = tx;
int_pts[j * 2 + 1] = ty;
}
}
}
}
// Proper-intersection test between edge i of quad pts1 and edge j of quad
// pts2 (edges wrap modulo 4). Uses signed triangle areas: the segments cross
// iff c and d lie on opposite sides of ab AND a and b lie on opposite sides
// of cd; the >= 0 tests mean touching/collinear cases are rejected. On
// success writes the intersection point (found via the parametric ratio t of
// signed areas) to temp_pts[0..1] and returns true.
__device__ inline bool inter2line(
float *pts1, float *pts2, int i, int j, float *temp_pts) {
float a[2] = {pts1[2 * i], pts1[2 * i + 1]};
float b[2] = {pts1[2 * ((i + 1) % 4)], pts1[2 * ((i + 1) % 4) + 1]};
float c[2] = {pts2[2 * j], pts2[2 * j + 1]};
float d[2] = {pts2[2 * ((j + 1) % 4)], pts2[2 * ((j + 1) % 4) + 1]};
// T area_abc, area_abd, area_cda, area_cdb;
// a[0] = pts1[2 * i];
// a[1] = pts1[2 * i + 1];
// b[0] = pts1[2 * ((i + 1) % 4)];
// b[1] = pts1[2 * ((i + 1) % 4) + 1];
// c[0] = pts2[2 * j];
// c[1] = pts2[2 * j + 1];
// d[0] = pts2[2 * ((j + 1) % 4)];
// d[1] = pts2[2 * ((j + 1) % 4) + 1];
float area_abc = trangle_area(a, b, c);
float area_abd = trangle_area(a, b, d);
if (area_abc * area_abd >= 0) {
return false;
}
float area_cda = trangle_area(c, d, a);
// area_cdb follows from the identity cda + abc - abd = cdb.
float area_cdb = area_cda + area_abc - area_abd;
if (area_cda * area_cdb >= 0) {
return false;
}
float t = area_cda / (area_abd - area_abc);
float dx = t * (b[0] - a[0]);
float dy = t * (b[1] - a[1]);
temp_pts[0] = a[0] + dx;
temp_pts[1] = a[1] + dy;
return true;
}
// Returns true iff point (pt_x, pt_y) lies inside (or on the boundary of)
// the quadrilateral whose corners are pts = [x0,y0, x1,y1, x2,y2, x3,y3],
// with corner 0 adjacent to corners 1 and 3. Projects ap = p - corner0 onto
// the edge vectors ab and ad and checks 0 <= ap.ab <= |ab|^2 and
// 0 <= ap.ad <= |ad|^2 (exact for rectangles/parallelograms, which is how
// convert_region builds its corner lists).
__device__ inline bool in_rect(float pt_x, float pt_y, float *pts) {
float ab[2] = {pts[2] - pts[0], pts[3] - pts[1]};
float ad[2] = {pts[6] - pts[0], pts[7] - pts[1]};
float ap[2] = {pt_x - pts[0], pt_y - pts[1]};
float abab = ab[0] * ab[0] + ab[1] * ab[1];
float abap = ab[0] * ap[0] + ab[1] * ap[1];
float adad = ad[0] * ad[0] + ad[1] * ad[1];
float adap = ad[0] * ap[0] + ad[1] * ap[1];
// Use `&&` rather than the iso646 alternative token `and`: alternative
// tokens are rejected by MSVC in its default (non-/permissive-) mode and
// are unidiomatic in CUDA sources. Behavior is identical.
return abab >= abap && abap >= 0 && adad >= adap && adap >= 0;
}
// Collects the vertices of the intersection polygon of two quads (each given
// as 4 (x, y) corner pairs): corners of either quad lying inside the other,
// plus all proper edge-edge intersection points. Returns the number of
// points written to int_pts (x, y interleaved, unordered — see reorder_pts).
// NOTE(review): there is no bounds check on int_pts; callers pass a
// 16-float (8-point) buffer, which holds the expected maximum for two
// convex quads, but degenerate inputs are not guarded against — confirm.
__device__ inline int inter_pts(float *pts1, float *pts2, float *int_pts) {
int num_of_inter = 0;
for (int i = 0; i < 4; i++) {
if (in_rect(pts1[2 * i], pts1[2 * i + 1], pts2)) {
int_pts[num_of_inter * 2] = pts1[2 * i];
int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1];
num_of_inter++;
}
if (in_rect(pts2[2 * i], pts2[2 * i + 1], pts1)) {
int_pts[num_of_inter * 2] = pts2[2 * i];
int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1];
num_of_inter++;
}
}
// All 4x4 edge pairs; each proper crossing contributes one vertex.
float temp_pts[2];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
bool has_pts = inter2line(pts1, pts2, i, j, temp_pts);
if (has_pts) {
int_pts[num_of_inter * 2] = temp_pts[0];
int_pts[num_of_inter * 2 + 1] = temp_pts[1];
num_of_inter++;
}
}
}
return num_of_inter;
}
// Expands a rotated box region = (cx, cy, w, h, angle-in-degrees) into its
// four corner coordinates, written to pts as 4 interleaved (x, y) pairs.
// Corners start axis-aligned around the origin, are rotated by the angle
// (a_sin is negated for anti-clockwise rotation, per the original comment),
// then translated to the box center.
__device__ inline void convert_region(float *pts, const float *region) {
float angle = region[4];
float a_cos = cos(angle / 180.0 * PI);
float a_sin = -sin(angle / 180.0 * PI); // anti clock-wise
float ctr_x = region[0];
float ctr_y = region[1];
float h = region[3];
float w = region[2];
float pts_x[4] = {-w / 2, -w / 2, w / 2, w / 2};
float pts_y[4] = {-h / 2, h / 2, h / 2, -h / 2};
// pts_x[0] = -w / 2;
// pts_x[1] = -w / 2;
// pts_x[2] = w / 2;
// pts_x[3] = w / 2;
//
// pts_y[0] = -h / 2;
// pts_y[1] = h / 2;
// pts_y[2] = h / 2;
// pts_y[3] = -h / 2;
for (int i = 0; i < 4; i++) {
pts[2 * i] = a_cos * pts_x[i] - a_sin * pts_y[i] + ctr_x;
pts[2 * i + 1] = a_sin * pts_x[i] + a_cos * pts_y[i] + ctr_y;
}
}
// Intersection area of two rotated boxes, each given as
// (cx, cy, w, h, angle-degrees): expand both to corner lists, gather the
// intersection polygon's vertices, order them, and integrate the area.
__device__ inline float inter(const float *region1,
const float *region2) {
float corners1[8];
float corners2[8];
float clipped[16];
convert_region(corners1, region1);
convert_region(corners2, region2);
int n_vertices = inter_pts(corners1, corners2, clipped);
reorder_pts(clipped, n_vertices);
return area(clipped, n_vertices);
}
// Intersection-over-union of two rotated boxes (cx, cy, w, h, angle-deg):
// |A ∩ B| / (|A| + |B| - |A ∩ B|), with box areas taken as w * h.
__device__ inline float IoU(const float *region1,
const float *region2) {
float overlap = inter(region1, region2);
float a1 = region1[2] * region1[3];
float a2 = region2[2] * region2[3];
return overlap / (a1 + a2 - overlap);
}
// Rotated-box NMS suppression-mask kernel. Boxes are tiled into chunks of
// kThreadsPerBlock (= 64, one bit per uint64_t); block (col, row) compares
// row-chunk boxes against col-chunk boxes. For box cur_box_idx, bit i of
// dev_mask[cur_box_idx * col_blocks + col_start] is set iff col-chunk box i
// overlaps it with IoU > nms_overlap_thresh. On the diagonal chunk only
// later boxes are compared (start = threadIdx.x + 1) so each pair is tested
// once. Host code (RNMS) then walks the mask greedily in score order.
static __global__ void RNMSKernel(const int n_boxes,
const float nms_overlap_thresh,
const float *dev_boxes,
uint64_t *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int row_size =
min(n_boxes - row_start * kThreadsPerBlock, kThreadsPerBlock);
const int col_size =
min(n_boxes - col_start * kThreadsPerBlock, kThreadsPerBlock);
// Stage this block's column chunk (5 floats per box) in shared memory.
__shared__ float block_boxes[kThreadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = kThreadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (IoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, kThreadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// Rotated NMS driver: computes the pairwise-overlap bitmask on the device
// (RNMSKernel), then performs the greedy suppression pass on the host —
// boxes are assumed already sorted by score, so walking i in order and
// OR-ing each survivor's mask into `remv` keeps exactly the non-suppressed
// boxes. Kept indices (positions into `proposals`) are copied to *keep_out.
// NOTE(review): the kernel is launched on the default stream while the
// result copy uses ctx.stream(); framework::Vector's CUDAMutableData /
// host access presumably synchronizes — confirm.
// NOTE(review): `sorted_indices` is only used for the size check.
template <typename T>
static void RNMS(const platform::CUDADeviceContext &ctx,
const Tensor &proposals,
const Tensor &sorted_indices,
const T nms_threshold,
Tensor *keep_out) {
int boxes_num = proposals.dims()[0];
PADDLE_ENFORCE_EQ(boxes_num, sorted_indices.dims()[0]);
const int col_blocks = DIVUP(boxes_num, kThreadsPerBlock);
dim3 blocks(DIVUP(boxes_num, kThreadsPerBlock),
DIVUP(boxes_num, kThreadsPerBlock));
dim3 threads(kThreadsPerBlock);
const T *boxes = proposals.data<T>();
auto place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
framework::Vector<uint64_t> mask(boxes_num * col_blocks);
hipLaunchKernelGGL(( RNMSKernel), dim3(blocks), dim3(threads), 0, 0,
boxes_num,
nms_threshold,
boxes,
mask.CUDAMutableData(boost::get<platform::CUDAPlace>(ctx.GetPlace())));
// Host-side greedy pass over the bitmask.
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
std::vector<int> keep_vec;
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / kThreadsPerBlock;
int inblock = i % kThreadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
++num_to_keep;
keep_vec.push_back(i);
uint64_t *p = &mask[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
int *keep = keep_out->mutable_data<int>({num_to_keep}, ctx.GetPlace());
memory::Copy(place,
keep,
platform::CPUPlace(),
keep_vec.data(),
sizeof(int) * num_to_keep,
ctx.stream());
ctx.Wait();
}
// Generates rotated RPN proposals for a single image. Pipeline:
//   1. sort anchors by score (descending) and keep the top pre_nms_top_n;
//   2. decode box deltas into absolute rotated boxes;
//   3. drop boxes smaller than min_size;
//   4. rotated NMS, then keep the top post_nms_top_n.
// Returns (proposals [n, 5], scores [n, 1]) as a pair of device tensors.
template <typename T>
static std::pair<Tensor, Tensor> RRPNProposalForOneImage(
const platform::CUDADeviceContext &ctx,
const Tensor &im_info,
const Tensor &anchors,
const Tensor &variances,
const Tensor &bbox_deltas, // [M, 5]
const Tensor &scores, // [N, 1]
int pre_nms_top_n,
int post_nms_top_n,
float nms_thresh,
float min_size) {
// 1. pre nms
Tensor scores_sort, index_sort;
RSortDescending<T>(ctx, scores, &scores_sort, &index_sort);
int num = scores.numel();
// pre_nms_top_n <= 0 means "keep everything".
int pre_nms_num = (pre_nms_top_n <= 0 || pre_nms_top_n > num) ? scores.numel()
: pre_nms_top_n;
scores_sort.Resize({pre_nms_num, 1});
index_sort.Resize({pre_nms_num, 1});
// 2. box decode and clipping
Tensor proposals;
proposals.mutable_data<T>({pre_nms_num, 5}, ctx.GetPlace());
{
platform::ForRange<platform::CUDADeviceContext> for_range(ctx, pre_nms_num);
for_range(RBoxDecodeAndClipFunctor<T>{anchors.data<T>(),
bbox_deltas.data<T>(),
variances.data<T>(),
index_sort.data<int>(),
im_info.data<T>(),
proposals.data<T>()});
}
// 3. filter
// Single-block launch: RFilterBBoxes relies on one block (see its comment).
Tensor keep_index, keep_num_t;
keep_index.mutable_data<int>({pre_nms_num}, ctx.GetPlace());
keep_num_t.mutable_data<int>({1}, ctx.GetPlace());
min_size = ::max(min_size, 0.0f);
auto stream = ctx.stream();
hipLaunchKernelGGL(( RFilterBBoxes<T, 256>), dim3(1), dim3(256), 0, stream, proposals.data<T>(),
im_info.data<T>(),
min_size,
pre_nms_num,
keep_num_t.data<int>(),
keep_index.data<int>());
int keep_num;
const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(platform::CPUPlace(),
&keep_num,
gpu_place,
keep_num_t.data<int>(),
sizeof(int),
ctx.stream());
ctx.Wait();
keep_index.Resize({keep_num});
Tensor scores_filter, proposals_filter;
proposals_filter.mutable_data<T>({keep_num, 5}, ctx.GetPlace());
scores_filter.mutable_data<T>({keep_num, 1}, ctx.GetPlace());
GPUGather<T>(ctx, proposals, keep_index, &proposals_filter);
GPUGather<T>(ctx, scores_sort, keep_index, &scores_filter);
// nms_thresh <= 0 disables NMS entirely.
if (nms_thresh <= 0) {
return std::make_pair(proposals_filter, scores_filter);
}
// 4. nms
Tensor keep_nms;
RNMS<T>(ctx, proposals_filter, keep_index, nms_thresh, &keep_nms);
if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) {
keep_nms.Resize({post_nms_top_n});
}
Tensor scores_nms, proposals_nms;
proposals_nms.mutable_data<T>({keep_nms.numel(), 5}, ctx.GetPlace());
scores_nms.mutable_data<T>({keep_nms.numel(), 1}, ctx.GetPlace());
GPUGather<T>(ctx, proposals_filter, keep_nms, &proposals_nms);
GPUGather<T>(ctx, scores_filter, keep_nms, &scores_nms);
return std::make_pair(proposals_nms, scores_nms);
}
} // namespace
// Op kernel for rrpn_generate_proposals: converts per-anchor scores and
// rotated-box deltas (NCHW) into score-ranked rotated proposals per image.
// Outputs RpnRois [total, 5] and RpnRoiProbs [total, 1] with a LoD marking
// each image's slice.
template <typename DeviceContext, typename T>
class CUDARRPNGenerateProposalsKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
auto *scores = context.Input<Tensor>("Scores");
auto *bbox_deltas = context.Input<Tensor>("BboxDeltas");
auto *im_info = context.Input<Tensor>("ImInfo");
auto anchors = detail::Ref(context.Input<Tensor>("Anchors"),
"Cannot find input Anchors(%s) in scope",
context.Inputs("Anchors")[0]);
auto variances = detail::Ref(context.Input<Tensor>("Variances"),
"Cannot find input Variances(%s) in scope",
context.Inputs("Variances")[0]);
auto *rpn_rois = context.Output<LoDTensor>("RpnRois");
auto *rpn_roi_probs = context.Output<LoDTensor>("RpnRoiProbs");
int pre_nms_top_n = context.Attr<int>("pre_nms_topN");
int post_nms_top_n = context.Attr<int>("post_nms_topN");
float nms_thresh = context.Attr<float>("nms_thresh");
float min_size = context.Attr<float>("min_size");
// float eta = context.Attr<float>("eta");
// PADDLE_ENFORCE_GE(eta, 1., "Not support adaptive NMS.");
auto &dev_ctx = context.template device_context<DeviceContext>();
// Inputs are NCHW; N = batch size.
auto scores_dim = scores->dims();
int64_t num = scores_dim[0];
int64_t c_score = scores_dim[1];
int64_t h_score = scores_dim[2];
int64_t w_score = scores_dim[3];
auto bbox_dim = bbox_deltas->dims();
int64_t c_bbox = bbox_dim[1];
int64_t h_bbox = bbox_dim[2];
int64_t w_bbox = bbox_dim[3];
// Transpose to NHWC so each anchor's 5 delta values are contiguous.
Tensor bbox_deltas_swap, scores_swap;
bbox_deltas_swap.mutable_data<T>({num, h_bbox, w_bbox, c_bbox},
dev_ctx.GetPlace());
scores_swap.mutable_data<T>({num, h_score, w_score, c_score},
dev_ctx.GetPlace());
math::Transpose<DeviceContext, T, 4> trans;
std::vector<int> axis = {0, 2, 3, 1};
trans(dev_ctx, *bbox_deltas, &bbox_deltas_swap, axis);
trans(dev_ctx, *scores, &scores_swap, axis);
anchors.Resize({anchors.numel() / 5, 5});
variances.Resize({variances.numel() / 5, 5});
// Allocate outputs at their maximum possible size; shrunk at the end.
rpn_rois->mutable_data<T>({bbox_deltas->numel() / 5, 5},
context.GetPlace());
rpn_roi_probs->mutable_data<T>({scores->numel(), 1}, context.GetPlace());
T *rpn_rois_data = rpn_rois->data<T>();
T *rpn_roi_probs_data = rpn_roi_probs->data<T>();
auto place = boost::get<platform::CUDAPlace>(dev_ctx.GetPlace());
int64_t num_proposals = 0;
std::vector<size_t> offset(1, 0);
// Process each image independently, appending its proposals.
for (int64_t i = 0; i < num; ++i) {
Tensor im_info_slice = im_info->Slice(i, i + 1);
Tensor bbox_deltas_slice = bbox_deltas_swap.Slice(i, i + 1);
Tensor scores_slice = scores_swap.Slice(i, i + 1);
bbox_deltas_slice.Resize({h_bbox * w_bbox * c_bbox / 5, 5});
scores_slice.Resize({h_score * w_score * c_score, 1});
// auto* scores_data = scores_slice.data<T>();
// for(int k=0; k < 256; k++) {
//  std::cout << scores_data[k] << std::endl;
// }
std::pair<Tensor, Tensor> box_score_pair =
RRPNProposalForOneImage<T>(dev_ctx,
im_info_slice,
anchors,
variances,
bbox_deltas_slice,
scores_slice,
pre_nms_top_n,
post_nms_top_n,
nms_thresh,
min_size);
Tensor &proposals = box_score_pair.first;
Tensor &scores = box_score_pair.second;
memory::Copy(place,
rpn_rois_data + num_proposals * 5,
place,
proposals.data<T>(),
sizeof(T) * proposals.numel(),
dev_ctx.stream());
memory::Copy(place,
rpn_roi_probs_data + num_proposals,
place,
scores.data<T>(),
sizeof(T) * scores.numel(),
dev_ctx.stream());
dev_ctx.Wait();
num_proposals += proposals.dims()[0];
offset.emplace_back(num_proposals);
}
// LoD records the per-image proposal boundaries.
framework::LoD lod;
lod.emplace_back(offset);
rpn_rois->set_lod(lod);
rpn_roi_probs->set_lod(lod);
rpn_rois->Resize({num_proposals, 5});
rpn_roi_probs->Resize({num_proposals, 1});
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
rrpn_generate_proposals,
ops::CUDARRPNGenerateProposalsKernel<paddle::platform::CUDADeviceContext,
float>);
| 6574c859128a14dd9b0ada51d61c098b7a608c14.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <paddle/fluid/memory/allocation/allocator.h>
#include <stdio.h>
#include <string>
#include <vector>
#include "cub/cub/cub.cuh"
#include "gather.cu.h"
#include "math_function.h"
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/for_range.h"
#include "safe_ref.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
#define PI 3.141592654
namespace {
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
int const kThreadsPerBlock = sizeof(uint64_t) * 8;
static const double kBBoxClipDefault = std::log(1000.0 / 16.0);
// Device functor for ForRange: out_[i] = start_ + i * delta_ (arithmetic
// progression); used to build the identity index buffer for radix sort.
struct RangeInitFunctor {
int start_;
int delta_;
int *out_;
__device__ void operator()(size_t i) { out_[i] = start_ + i * delta_; }
};
// Sorts `value` descending; sorted keys go to *value_out, the original
// position of each key to *index_out (the descending-order permutation).
// NOTE(review): both cub calls run on the default stream while other work
// here uses ctx.stream() — confirm the intended ordering.
template <typename T>
static void RSortDescending(const platform::CUDADeviceContext &ctx,
const Tensor &value,
Tensor *value_out,
Tensor *index_out) {
int num = static_cast<int>(value.numel());
Tensor index_in_t;
int *idx_in = index_in_t.mutable_data<int>({num}, ctx.GetPlace());
platform::ForRange<platform::CUDADeviceContext> for_range(ctx, num);
// Fill idx_in with 0..num-1 on the device.
for_range(RangeInitFunctor{0, 1, idx_in});
int *idx_out = index_out->mutable_data<int>({num}, ctx.GetPlace());
const T *keys_in = value.data<T>();
T *keys_out = value_out->mutable_data<T>({num}, ctx.GetPlace());
// Determine temporary device storage requirements
size_t temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairsDescending<T, int>(
nullptr, temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num);
// Allocate temporary storage
auto place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
auto d_temp_storage = memory::Alloc(place, temp_storage_bytes);
// Run sorting operation
cub::DeviceRadixSort::SortPairsDescending<T, int>(d_temp_storage->ptr(),
temp_storage_bytes,
keys_in,
keys_out,
idx_in,
idx_out,
num);
}
// Decodes rotated-box regression deltas into absolute proposals; slot i uses
// anchor/delta row index[i] (5 values: cx, cy, w, h, angle). With `var` set,
// deltas are divided by per-element variances first. Size deltas are clamped
// to kBBoxClipDefault before exp(); the angle delta appears to be converted
// radians->degrees and added to the anchor angle — TODO confirm.
// NOTE(review): despite the name, no image-bounds clipping happens and
// im_info is never read.
template <typename T>
struct RBoxDecodeAndClipFunctor {
const T *anchor;
const T *deltas;
const T *var;
const int *index;
const T *im_info;
T *proposals;
RBoxDecodeAndClipFunctor(const T *anchor,
const T *deltas,
const T *var,
const int *index,
const T *im_info,
T *proposals)
: anchor(anchor),
deltas(deltas),
var(var),
index(index),
im_info(im_info),
proposals(proposals) {}
T bbox_clip_default{static_cast<T>(kBBoxClipDefault)};
__device__ void operator()(size_t i) {
// k = start of the 5-value row selected for output slot i.
int k = index[i] * 5;
T w = anchor[k + 2];
T h = anchor[k + 3];
T cx = anchor[k];
T cy = anchor[k + 1];
T angle = anchor[k + 4];
T de_cx = deltas[k];
T de_cy = deltas[k + 1];
T de_w = deltas[k + 2];
T de_h = deltas[k + 3];
T de_g = deltas[k + 4];
T d_cx, d_cy, d_w, d_h, d_g;
if (var) {
d_cx = cx + de_cx * w / var[k];
d_cy = cy + de_cy * h / var[k + 1];
d_w = exp(Min(de_w / var[k + 2], bbox_clip_default)) * w;
d_h = exp(Min(de_h / var[k + 3], bbox_clip_default)) * h;
d_g = de_g / var[k + 4] * 1.0 / PI * 180 + angle;
} else {
d_cx = cx + de_cx * w;
d_cy = cy + de_cy * h;
d_w = exp(Min(de_w, bbox_clip_default)) * w;
d_h = exp(Min(de_h, bbox_clip_default)) * h;
d_g = de_g * 1.0 / PI * 180 + angle;
}
proposals[i * 5] = d_cx;
proposals[i * 5 + 1] = d_cy;
proposals[i * 5 + 2] = d_w;
proposals[i * 5 + 3] = d_h;
proposals[i * 5 + 4] = d_g;
}
__device__ __forceinline__ T Min(T a, T b) const { return a > b ? b : a; }
__device__ __forceinline__ T Max(T a, T b) const { return a > b ? a : b; }
};
// Compacts into `keep` the indices of boxes with w and h >= min_size; count
// goes to keep_num[0]. `cnt` is per-thread and only threadIdx.x == 0 writes
// `keep`/`keep_num`, so correctness requires a SINGLE-block launch —
// presumably <<<1, BlockSize>>> as in the hip version; confirm at call site.
// NOTE(review): im_h/im_w/im_scale are read but unused, and __syncthreads()
// inside the grid-stride loop is divergent when num is not a multiple of
// blockDim.x — undefined behavior for a block barrier.
template <typename T, int BlockSize>
static __global__ void RFilterBBoxes(const T *bboxes,
const T *im_info,
const T min_size,
const int num,
int *keep_num,
int *keep) {
T im_h = im_info[0];
T im_w = im_info[1];
T im_scale = im_info[2];
int cnt = 0;
__shared__ int keep_index[BlockSize];
CUDA_1D_KERNEL_LOOP(i, num) {
// Each thread votes -1 (reject) or its own index into shared memory.
keep_index[threadIdx.x] = -1;
__syncthreads();
int k = i * 5;
T cx = bboxes[k];
T cy = bboxes[k + 1];
T w_s = bboxes[k + 2];
T h_s = bboxes[k + 3];
if (w_s >= min_size && h_s >= min_size) {
keep_index[threadIdx.x] = i;
}
__syncthreads();
// Thread 0 serially compacts this tile's survivors.
if (threadIdx.x == 0) {
int size = (num - i) < BlockSize ? num - i : BlockSize;
for (int j = 0; j < size; ++j) {
if (keep_index[j] > -1) {
keep[cnt++] = keep_index[j];
}
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
keep_num[0] = cnt;
}
}
// Signed area of triangle (a, b, c): half the z-component of the cross
// product (a - c) x (b - c); sign encodes point orientation.
__device__ inline float trangle_area(float *a, float *b, float *c) {
float ux = a[0] - c[0];
float uy = a[1] - c[1];
float vx = b[0] - c[0];
float vy = b[1] - c[1];
return (ux * vy - uy * vx) / 2.0;
}
// Polygon area by fan triangulation from vertex 0; assumes vertices are
// already angularly ordered (see reorder_pts).
__device__ inline float area(float *int_pts, int num_of_inter) {
float total = 0.0;
for (int k = 2; k < num_of_inter; k++) {
total += fabs(trangle_area(int_pts, int_pts + 2 * (k - 1), int_pts + 2 * k));
}
return total;
}
// Orders the num_of_inter (x, y) points angularly around their centroid via
// insertion sort on a monotone pseudo-angle key (cos(theta) in [-1, 1] for
// y >= 0, mapped to -2 - cos(theta) for y < 0 to stay strictly ordered).
// NOTE(review): a point coinciding with the centroid gives d == 0 and a
// division by zero — presumably impossible for proper intersections; confirm.
__device__ inline void reorder_pts(float *int_pts, int num_of_inter) {
if (num_of_inter > 0) {
float center[2] = {0.0, 0.0};
// center[0] = 0.0;
// center[1] = 0.0;
for (int i = 0; i < num_of_inter; i++) {
center[0] += int_pts[2 * i];
center[1] += int_pts[2 * i + 1];
}
center[0] /= num_of_inter;
center[1] /= num_of_inter;
float vs[16];
float v[2];
float d;
// Compute the pseudo-angle key for every point.
for (int i = 0; i < num_of_inter; i++) {
v[0] = int_pts[2 * i] - center[0];
v[1] = int_pts[2 * i + 1] - center[1];
d = sqrt(v[0] * v[0] + v[1] * v[1]);
v[0] = v[0] / d;
v[1] = v[1] / d;
if (v[1] < 0) {
v[0] = -2 - v[0];
}
vs[i] = v[0];
}
// Insertion sort of (key, point) pairs by ascending key.
float temp, tx, ty;
int j;
for (int i = 1; i < num_of_inter; ++i) {
if (vs[i - 1] > vs[i]) {
temp = vs[i];
tx = int_pts[2 * i];
ty = int_pts[2 * i + 1];
j = i;
while (j > 0 && vs[j - 1] > temp) {
vs[j] = vs[j - 1];
int_pts[j * 2] = int_pts[j * 2 - 2];
int_pts[j * 2 + 1] = int_pts[j * 2 - 1];
j--;
}
vs[j] = temp;
int_pts[j * 2] = tx;
int_pts[j * 2 + 1] = ty;
}
}
}
}
// Proper-intersection test between edge i of quad pts1 and edge j of quad
// pts2 (edges wrap modulo 4), via signed triangle areas; the >= 0 tests
// reject touching/collinear cases. Writes the crossing point to temp_pts
// (parametric ratio t of signed areas) and returns true on success.
__device__ inline bool inter2line(
float *pts1, float *pts2, int i, int j, float *temp_pts) {
float a[2] = {pts1[2 * i], pts1[2 * i + 1]};
float b[2] = {pts1[2 * ((i + 1) % 4)], pts1[2 * ((i + 1) % 4) + 1]};
float c[2] = {pts2[2 * j], pts2[2 * j + 1]};
float d[2] = {pts2[2 * ((j + 1) % 4)], pts2[2 * ((j + 1) % 4) + 1]};
// T area_abc, area_abd, area_cda, area_cdb;
// a[0] = pts1[2 * i];
// a[1] = pts1[2 * i + 1];
// b[0] = pts1[2 * ((i + 1) % 4)];
// b[1] = pts1[2 * ((i + 1) % 4) + 1];
// c[0] = pts2[2 * j];
// c[1] = pts2[2 * j + 1];
// d[0] = pts2[2 * ((j + 1) % 4)];
// d[1] = pts2[2 * ((j + 1) % 4) + 1];
float area_abc = trangle_area(a, b, c);
float area_abd = trangle_area(a, b, d);
if (area_abc * area_abd >= 0) {
return false;
}
float area_cda = trangle_area(c, d, a);
// area_cdb follows from the identity cda + abc - abd = cdb.
float area_cdb = area_cda + area_abc - area_abd;
if (area_cda * area_cdb >= 0) {
return false;
}
float t = area_cda / (area_abd - area_abc);
float dx = t * (b[0] - a[0]);
float dy = t * (b[1] - a[1]);
temp_pts[0] = a[0] + dx;
temp_pts[1] = a[1] + dy;
return true;
}
// Returns true iff (pt_x, pt_y) lies inside or on the boundary of the
// quadrilateral pts = [x0,y0, x1,y1, x2,y2, x3,y3] (corner 0 adjacent to
// corners 1 and 3), by projecting ap = p - corner0 onto edge vectors ab/ad
// and checking 0 <= ap.ab <= |ab|^2 and 0 <= ap.ad <= |ad|^2 (exact for
// rectangles/parallelograms, which is what convert_region emits).
__device__ inline bool in_rect(float pt_x, float pt_y, float *pts) {
float ab[2] = {pts[2] - pts[0], pts[3] - pts[1]};
float ad[2] = {pts[6] - pts[0], pts[7] - pts[1]};
float ap[2] = {pt_x - pts[0], pt_y - pts[1]};
float abab = ab[0] * ab[0] + ab[1] * ab[1];
float abap = ab[0] * ap[0] + ab[1] * ap[1];
float adad = ad[0] * ad[0] + ad[1] * ad[1];
float adap = ad[0] * ap[0] + ad[1] * ap[1];
// Use `&&` rather than the iso646 alternative token `and`: alternative
// tokens are rejected by MSVC in its default mode and are unidiomatic in
// CUDA sources. Behavior is identical.
return abab >= abap && abap >= 0 && adad >= adap && adap >= 0;
}
// Collects the intersection-polygon vertices of two quads: corners of either
// quad inside the other, plus all proper edge-edge crossings. Returns the
// count written to int_pts (x, y interleaved, unordered).
// NOTE(review): no bounds check on int_pts; callers pass a 16-float
// (8-point) buffer — the expected maximum for convex quads, but degenerate
// inputs are not guarded against.
__device__ inline int inter_pts(float *pts1, float *pts2, float *int_pts) {
int num_of_inter = 0;
for (int i = 0; i < 4; i++) {
if (in_rect(pts1[2 * i], pts1[2 * i + 1], pts2)) {
int_pts[num_of_inter * 2] = pts1[2 * i];
int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1];
num_of_inter++;
}
if (in_rect(pts2[2 * i], pts2[2 * i + 1], pts1)) {
int_pts[num_of_inter * 2] = pts2[2 * i];
int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1];
num_of_inter++;
}
}
// All 4x4 edge pairs; each proper crossing adds one vertex.
float temp_pts[2];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
bool has_pts = inter2line(pts1, pts2, i, j, temp_pts);
if (has_pts) {
int_pts[num_of_inter * 2] = temp_pts[0];
int_pts[num_of_inter * 2 + 1] = temp_pts[1];
num_of_inter++;
}
}
}
return num_of_inter;
}
// Expands a rotated box region = (cx, cy, w, h, angle-degrees) into four
// corner coordinates in pts (4 interleaved (x, y) pairs): axis-aligned
// corners around the origin, rotated (a_sin negated for anti-clockwise
// rotation, per the original comment), then translated to the center.
__device__ inline void convert_region(float *pts, const float *region) {
float angle = region[4];
float a_cos = cos(angle / 180.0 * PI);
float a_sin = -sin(angle / 180.0 * PI); // anti clock-wise
float ctr_x = region[0];
float ctr_y = region[1];
float h = region[3];
float w = region[2];
float pts_x[4] = {-w / 2, -w / 2, w / 2, w / 2};
float pts_y[4] = {-h / 2, h / 2, h / 2, -h / 2};
// pts_x[0] = -w / 2;
// pts_x[1] = -w / 2;
// pts_x[2] = w / 2;
// pts_x[3] = w / 2;
//
// pts_y[0] = -h / 2;
// pts_y[1] = h / 2;
// pts_y[2] = h / 2;
// pts_y[3] = -h / 2;
for (int i = 0; i < 4; i++) {
pts[2 * i] = a_cos * pts_x[i] - a_sin * pts_y[i] + ctr_x;
pts[2 * i + 1] = a_sin * pts_x[i] + a_cos * pts_y[i] + ctr_y;
}
}
// Intersection area of two rotated boxes (cx, cy, w, h, angle-deg): expand
// both to corners, gather the clipped polygon's vertices, order them, and
// integrate the area.
__device__ inline float inter(const float *region1,
const float *region2) {
float corners1[8];
float corners2[8];
float clipped[16];
convert_region(corners1, region1);
convert_region(corners2, region2);
int n_vertices = inter_pts(corners1, corners2, clipped);
reorder_pts(clipped, n_vertices);
return area(clipped, n_vertices);
}
// Intersection-over-union of two rotated boxes:
// |A ∩ B| / (|A| + |B| - |A ∩ B|), with box areas taken as w * h.
__device__ inline float IoU(const float *region1,
const float *region2) {
float overlap = inter(region1, region2);
float a1 = region1[2] * region1[3];
float a2 = region2[2] * region2[3];
return overlap / (a1 + a2 - overlap);
}
// Rotated-box NMS suppression-mask kernel: boxes are tiled into chunks of
// kThreadsPerBlock (= 64, one bit per uint64_t); block (col, row) compares
// row-chunk boxes against its col-chunk staged in shared memory. Bit i of
// dev_mask[box * col_blocks + col_start] is set iff col-chunk box i has
// IoU > threshold with `box`. Diagonal chunks only test later boxes so each
// pair is tested once; the host greedy pass in RNMS consumes the mask.
static __global__ void RNMSKernel(const int n_boxes,
const float nms_overlap_thresh,
const float *dev_boxes,
uint64_t *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int row_size =
min(n_boxes - row_start * kThreadsPerBlock, kThreadsPerBlock);
const int col_size =
min(n_boxes - col_start * kThreadsPerBlock, kThreadsPerBlock);
// Stage this block's column chunk (5 floats per box) in shared memory.
__shared__ float block_boxes[kThreadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = kThreadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (IoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, kThreadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// Rotated NMS driver: device bitmask (RNMSKernel) followed by a host greedy
// pass. Boxes are assumed pre-sorted by score; OR-ing each survivor's mask
// into `remv` keeps exactly the non-suppressed boxes, whose indices (into
// `proposals`) are copied to *keep_out.
// NOTE(review): the kernel runs on the default stream while the result copy
// uses ctx.stream(); `sorted_indices` is only used for the size check.
template <typename T>
static void RNMS(const platform::CUDADeviceContext &ctx,
const Tensor &proposals,
const Tensor &sorted_indices,
const T nms_threshold,
Tensor *keep_out) {
int boxes_num = proposals.dims()[0];
PADDLE_ENFORCE_EQ(boxes_num, sorted_indices.dims()[0]);
const int col_blocks = DIVUP(boxes_num, kThreadsPerBlock);
dim3 blocks(DIVUP(boxes_num, kThreadsPerBlock),
DIVUP(boxes_num, kThreadsPerBlock));
dim3 threads(kThreadsPerBlock);
const T *boxes = proposals.data<T>();
auto place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
framework::Vector<uint64_t> mask(boxes_num * col_blocks);
RNMSKernel<<<blocks, threads>>>(
boxes_num,
nms_threshold,
boxes,
mask.CUDAMutableData(boost::get<platform::CUDAPlace>(ctx.GetPlace())));
// Host-side greedy pass over the bitmask.
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
std::vector<int> keep_vec;
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / kThreadsPerBlock;
int inblock = i % kThreadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
++num_to_keep;
keep_vec.push_back(i);
uint64_t *p = &mask[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
int *keep = keep_out->mutable_data<int>({num_to_keep}, ctx.GetPlace());
memory::Copy(place,
keep,
platform::CPUPlace(),
keep_vec.data(),
sizeof(int) * num_to_keep,
ctx.stream());
ctx.Wait();
}
// Generates rotated region proposals for a single image:
//   1. sort scores descending and keep the top pre_nms_top_n;
//   2. decode anchors + deltas into rotated boxes (5 values per box);
//   3. drop boxes smaller than min_size;
//   4. rotated NMS, then keep at most post_nms_top_n.
// Returns {proposals [K, 5], scores [K, 1]} on the device.
template <typename T>
static std::pair<Tensor, Tensor> RRPNProposalForOneImage(
const platform::CUDADeviceContext &ctx,
const Tensor &im_info,
const Tensor &anchors,
const Tensor &variances,
const Tensor &bbox_deltas, // [M, 5]
const Tensor &scores, // [N, 1]
int pre_nms_top_n,
int post_nms_top_n,
float nms_thresh,
float min_size) {
// 1. pre nms
Tensor scores_sort, index_sort;
RSortDescending<T>(ctx, scores, &scores_sort, &index_sort);
int num = scores.numel();
// pre_nms_top_n <= 0 means "keep everything".
int pre_nms_num = (pre_nms_top_n <= 0 || pre_nms_top_n > num) ? scores.numel()
: pre_nms_top_n;
scores_sort.Resize({pre_nms_num, 1});
index_sort.Resize({pre_nms_num, 1});
// 2. box decode and clipping
Tensor proposals;
proposals.mutable_data<T>({pre_nms_num, 5}, ctx.GetPlace());
{
platform::ForRange<platform::CUDADeviceContext> for_range(ctx, pre_nms_num);
for_range(RBoxDecodeAndClipFunctor<T>{anchors.data<T>(),
bbox_deltas.data<T>(),
variances.data<T>(),
index_sort.data<int>(),
im_info.data<T>(),
proposals.data<T>()});
}
// 3. filter
Tensor keep_index, keep_num_t;
keep_index.mutable_data<int>({pre_nms_num}, ctx.GetPlace());
keep_num_t.mutable_data<int>({1}, ctx.GetPlace());
min_size = std::max(min_size, 0.0f);
auto stream = ctx.stream();
// Single-block kernel writes surviving indices and their count.
RFilterBBoxes<T, 256><<<1, 256, 0, stream>>>(proposals.data<T>(),
im_info.data<T>(),
min_size,
pre_nms_num,
keep_num_t.data<int>(),
keep_index.data<int>());
int keep_num;
const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(platform::CPUPlace(),
&keep_num,
gpu_place,
keep_num_t.data<int>(),
sizeof(int),
ctx.stream());
// Block until the count is on the host before it is used for shapes.
ctx.Wait();
keep_index.Resize({keep_num});
Tensor scores_filter, proposals_filter;
proposals_filter.mutable_data<T>({keep_num, 5}, ctx.GetPlace());
scores_filter.mutable_data<T>({keep_num, 1}, ctx.GetPlace());
GPUGather<T>(ctx, proposals, keep_index, &proposals_filter);
GPUGather<T>(ctx, scores_sort, keep_index, &scores_filter);
// Non-positive threshold disables NMS entirely.
if (nms_thresh <= 0) {
return std::make_pair(proposals_filter, scores_filter);
}
// 4. nms
Tensor keep_nms;
RNMS<T>(ctx, proposals_filter, keep_index, nms_thresh, &keep_nms);
if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) {
keep_nms.Resize({post_nms_top_n});
}
Tensor scores_nms, proposals_nms;
proposals_nms.mutable_data<T>({keep_nms.numel(), 5}, ctx.GetPlace());
scores_nms.mutable_data<T>({keep_nms.numel(), 1}, ctx.GetPlace());
GPUGather<T>(ctx, proposals_filter, keep_nms, &proposals_nms);
GPUGather<T>(ctx, scores_filter, keep_nms, &scores_nms);
return std::make_pair(proposals_nms, scores_nms);
}
} // namespace
// CUDA kernel for the rrpn_generate_proposals op: for every image in the
// batch, decodes rotated anchors with the predicted deltas, filters tiny
// boxes, applies rotated NMS, and concatenates the surviving proposals and
// scores into the LoD outputs RpnRois / RpnRoiProbs.
template <typename DeviceContext, typename T>
class CUDARRPNGenerateProposalsKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *scores = context.Input<Tensor>("Scores");
auto *bbox_deltas = context.Input<Tensor>("BboxDeltas");
auto *im_info = context.Input<Tensor>("ImInfo");
auto anchors = detail::Ref(context.Input<Tensor>("Anchors"),
"Cannot find input Anchors(%s) in scope",
context.Inputs("Anchors")[0]);
auto variances = detail::Ref(context.Input<Tensor>("Variances"),
"Cannot find input Variances(%s) in scope",
context.Inputs("Variances")[0]);
auto *rpn_rois = context.Output<LoDTensor>("RpnRois");
auto *rpn_roi_probs = context.Output<LoDTensor>("RpnRoiProbs");
int pre_nms_top_n = context.Attr<int>("pre_nms_topN");
int post_nms_top_n = context.Attr<int>("post_nms_topN");
float nms_thresh = context.Attr<float>("nms_thresh");
float min_size = context.Attr<float>("min_size");
// float eta = context.Attr<float>("eta");
// PADDLE_ENFORCE_GE(eta, 1., "Not support adaptive NMS.");
auto &dev_ctx = context.template device_context<DeviceContext>();
// Inputs are NCHW; N = batch size.
auto scores_dim = scores->dims();
int64_t num = scores_dim[0];
int64_t c_score = scores_dim[1];
int64_t h_score = scores_dim[2];
int64_t w_score = scores_dim[3];
auto bbox_dim = bbox_deltas->dims();
int64_t c_bbox = bbox_dim[1];
int64_t h_bbox = bbox_dim[2];
int64_t w_bbox = bbox_dim[3];
// Transpose to NHWC so each spatial location's channels are contiguous.
Tensor bbox_deltas_swap, scores_swap;
bbox_deltas_swap.mutable_data<T>({num, h_bbox, w_bbox, c_bbox},
dev_ctx.GetPlace());
scores_swap.mutable_data<T>({num, h_score, w_score, c_score},
dev_ctx.GetPlace());
math::Transpose<DeviceContext, T, 4> trans;
std::vector<int> axis = {0, 2, 3, 1};
trans(dev_ctx, *bbox_deltas, &bbox_deltas_swap, axis);
trans(dev_ctx, *scores, &scores_swap, axis);
// Rotated boxes use 5 values each: x, y, w, h, angle.
anchors.Resize({anchors.numel() / 5, 5});
variances.Resize({variances.numel() / 5, 5});
// Allocate worst-case output; trimmed to num_proposals at the end.
rpn_rois->mutable_data<T>({bbox_deltas->numel() / 5, 5},
context.GetPlace());
rpn_roi_probs->mutable_data<T>({scores->numel(), 1}, context.GetPlace());
T *rpn_rois_data = rpn_rois->data<T>();
T *rpn_roi_probs_data = rpn_roi_probs->data<T>();
auto place = boost::get<platform::CUDAPlace>(dev_ctx.GetPlace());
int64_t num_proposals = 0;
// LoD offsets: offset[i] = first proposal index of image i.
std::vector<size_t> offset(1, 0);
for (int64_t i = 0; i < num; ++i) {
Tensor im_info_slice = im_info->Slice(i, i + 1);
Tensor bbox_deltas_slice = bbox_deltas_swap.Slice(i, i + 1);
Tensor scores_slice = scores_swap.Slice(i, i + 1);
bbox_deltas_slice.Resize({h_bbox * w_bbox * c_bbox / 5, 5});
scores_slice.Resize({h_score * w_score * c_score, 1});
// auto* scores_data = scores_slice.data<T>();
// for(int k=0; k < 256; k++) {
// std::cout << scores_data[k] << std::endl;
// }
std::pair<Tensor, Tensor> box_score_pair =
RRPNProposalForOneImage<T>(dev_ctx,
im_info_slice,
anchors,
variances,
bbox_deltas_slice,
scores_slice,
pre_nms_top_n,
post_nms_top_n,
nms_thresh,
min_size);
Tensor &proposals = box_score_pair.first;
// NOTE(review): this local `scores` shadows the input pointer declared
// above; harmless here but easy to misread.
Tensor &scores = box_score_pair.second;
// Append this image's proposals/scores to the packed outputs.
memory::Copy(place,
rpn_rois_data + num_proposals * 5,
place,
proposals.data<T>(),
sizeof(T) * proposals.numel(),
dev_ctx.stream());
memory::Copy(place,
rpn_roi_probs_data + num_proposals,
place,
scores.data<T>(),
sizeof(T) * scores.numel(),
dev_ctx.stream());
dev_ctx.Wait();
num_proposals += proposals.dims()[0];
offset.emplace_back(num_proposals);
}
framework::LoD lod;
lod.emplace_back(offset);
rpn_rois->set_lod(lod);
rpn_roi_probs->set_lod(lod);
// Trim the over-allocated outputs to the actual proposal count.
rpn_rois->Resize({num_proposals, 5});
rpn_roi_probs->Resize({num_proposals, 1});
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
rrpn_generate_proposals,
ops::CUDARRPNGenerateProposalsKernel<paddle::platform::CUDADeviceContext,
float>);
|
78169682ed664306f38faa179d01f9c0350862e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// One thread-block per filter/channel: computes the unbiased (N-1) variance
// of channel `filter` over the whole batch and all spatial positions, given
// the precomputed per-channel means.  Data layout is batch-major with the
// channel's spatial values contiguous (index = j*spatial*filters +
// filter*spatial + s).  Grid: (filters) blocks of BLOCK threads.
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
if(i + id < spatial){
// Square via multiply: pow() promotes to double and is far slower
// than a single multiply for x^2.
float d = x[index] - mean[filter];
local[id] += d * d;
}
}
}
// BUG FIX: barrier so every thread's partial sum is visible before thread 0
// performs the serial reduction; without it the reads below race with the
// writes above and the result is nondeterministic.
__syncthreads();
if(id == 0){
variance[filter] = 0;
for(i = 0; i < threads; ++i){
variance[filter] += local[i];
}
// Bessel's correction: divide by N-1 for an unbiased estimate.
variance[filter] /= (spatial * batch - 1);
}
} | 78169682ed664306f38faa179d01f9c0350862e6.cu | #include "includes.h"
// One thread-block per filter/channel: computes the unbiased (N-1) variance
// of channel `filter` over the whole batch and all spatial positions, given
// the precomputed per-channel means.  Data layout is batch-major with the
// channel's spatial values contiguous (index = j*spatial*filters +
// filter*spatial + s).  Grid: (filters) blocks of BLOCK threads.
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
const int threads = BLOCK;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for(j = 0; j < batch; ++j){
for(i = 0; i < spatial; i += threads){
int index = j*spatial*filters + filter*spatial + i + id;
if(i + id < spatial){
// Square via multiply: pow() promotes to double and is far slower
// than a single multiply for x^2.
float d = x[index] - mean[filter];
local[id] += d * d;
}
}
}
// BUG FIX: barrier so every thread's partial sum is visible before thread 0
// performs the serial reduction; without it the reads below race with the
// writes above and the result is nondeterministic.
__syncthreads();
if(id == 0){
variance[filter] = 0;
for(i = 0; i < threads; ++i){
variance[filter] += local[i];
}
// Bessel's correction: divide by N-1 for an unbiased estimate.
variance[filter] /= (spatial * batch - 1);
}
} |
c43c33ef83a180f97b229529ca31966a0f9e8e17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// For every newly selected element, look up its index in the current working
// set and, if present, record that element's old priority plus one.
__global__ void update_priority(int *new_priority, int n_selected, const int *new_idx, int n_ws, const int *idx, const int *priority) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n_selected) return;
    int target = new_idx[gid];
    // The working set is small (~1024 entries), so a linear scan is cheap.
    for (int ws = 0; ws < n_ws; ++ws) {
        if (idx[ws] == target) {
            new_priority[gid] = priority[ws] + 1;
        }
    }
} | c43c33ef83a180f97b229529ca31966a0f9e8e17.cu | #include "includes.h"
// One thread per newly selected element: scan the working set for that
// element's index and, when found, store its previous priority + 1.
__global__ void update_priority(int *new_priority, int n_selected, const int *new_idx, int n_ws, const int *idx, const int *priority) {
    const int t = blockDim.x * blockIdx.x + threadIdx.x;
    if (t < n_selected) {
        const int wanted = new_idx[t];
        // Working-set size is bounded (~1024), so the linear loop is fine.
        for (int k = 0; k != n_ws; ++k) {
            if (idx[k] == wanted) new_priority[t] = priority[k] + 1;
        }
    }
} |
1ac6b356ccebc6213b62a4ac9a0a65305d1e2b66.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
#define blockSize 128
namespace StreamCompaction {
namespace Efficient {
// TODO: __global__
// Up-sweep (reduce) step of the Blelloch work-efficient scan: at depth
// `offset`, each active thread folds its left child's partial sum into the
// subtree root it owns.
__global__ void kernUpSweep(int n, int offset, int *buf) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int active = n >> offset;  // number of live tree nodes at this depth
    if (tid < active) {
        int base = tid << offset;
        int parent = base + (1 << offset) - 1;
        int child = base + (1 << (offset - 1)) - 1;
        buf[parent] += buf[child];
    }
}
// Down-sweep step of the Blelloch scan: push the parent's value to the left
// child and replace the parent with (old left child + old parent), turning
// the reduction tree into an exclusive prefix sum.
__global__ void kernDownSweep(int n, int offset, int *buf) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= (n >> offset)) return;
    int base = tid << offset;
    int left = base + (1 << (offset - 1)) - 1;
    int right = base + (1 << offset) - 1;
    int saved = buf[right];
    buf[right] += buf[left];
    buf[left] = saved;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
    // Pad to the next power of two so the balanced-tree sweeps cover every
    // element; only the first n results are copied back.
    int *buf;
    int padded = 1 << ilog2ceil(n);
    hipMalloc((void**)&buf, padded * sizeof(int));
    hipMemcpy(buf, idata, n * sizeof(int), hipMemcpyHostToDevice);
    int fullBlocksPerGrid = 0;
    float total = 0;
    float milliseconds = 0;
    // Create one event pair for both phases and destroy it on exit; the
    // original re-created the events before the down-sweep and never
    // destroyed any of them (resource leak).  The unused local `offset`
    // was removed as well.
    hipEvent_t start, end;
    hipEventCreate(&start);
    hipEventCreate(&end);
    // Up-sweep: in-place parallel reduction.
    hipEventRecord(start);
    for (int i = 1; i <= ilog2(padded); i++) {
        fullBlocksPerGrid = ((padded >> i) + blockSize - 1) / blockSize;
        kernUpSweep << <fullBlocksPerGrid, blockSize >> >(padded, i, buf);
    }
    hipEventRecord(end);
    hipEventSynchronize(end);
    hipEventElapsedTime(&milliseconds, start, end);
    total += milliseconds;
    // Zero the root, then down-sweep to produce an exclusive scan.
    hipMemset(buf + padded - 1, 0, sizeof(int));
    hipEventRecord(start);
    for (int i = ilog2(padded); i >= 1; i--) {
        fullBlocksPerGrid = ((padded >> i) + blockSize - 1) / blockSize;
        kernDownSweep << <fullBlocksPerGrid, blockSize >> >(padded, i, buf);
    }
    hipEventRecord(end);
    hipEventSynchronize(end);
    hipEventElapsedTime(&milliseconds, start, end);
    total += milliseconds;
    printf("Work-Efficient scan: %f ms\n", total);
    hipMemcpy(odata, buf, n * sizeof(int), hipMemcpyDeviceToHost);
    hipEventDestroy(start);
    hipEventDestroy(end);
    hipFree(buf);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
    // Empty input: nothing to keep (also avoids odata[n-1] below).
    if (n <= 0) return 0;
    dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
    int *bools, *indices, *in, *out;
    hipMalloc((void**)&bools, n * sizeof(int));
    hipMalloc((void**)&indices, n * sizeof(int));
    hipMalloc((void**)&in, n * sizeof(int));
    hipMalloc((void**)&out, n * sizeof(int));
    hipMemcpy(in, idata, n * sizeof(int), hipMemcpyHostToDevice);
    float total = 0;
    float milliseconds = 0;
    // One event pair reused for both timed kernels and destroyed at the end
    // (the original created two pairs and leaked all of them).
    hipEvent_t start, end;
    hipEventCreate(&start);
    hipEventCreate(&end);
    // Map each element to a 0/1 keep flag.
    hipEventRecord(start);
    StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> >(n, bools, in);
    hipEventRecord(end);
    hipEventSynchronize(end);
    hipEventElapsedTime(&milliseconds, start, end);
    total += milliseconds;
    hipMemcpy(odata, bools, n * sizeof(int), hipMemcpyDeviceToHost);
    // Save the last flag before scan() overwrites odata in place.
    int lastBool = odata[n - 1];
    scan(n, odata, odata);
    // BUG FIX: scan() is EXCLUSIVE, so the kept-element count is the last
    // prefix sum plus the last flag; the original used odata[n-1] alone and
    // silently dropped idata[n-1] whenever it was nonzero.
    int lenCompacted = odata[n - 1] + lastBool;
    hipMemcpy(indices, odata, n * sizeof(int), hipMemcpyHostToDevice);
    // Scatter kept elements to their scanned destinations.
    hipEventRecord(start);
    StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, blockSize >> >(n, out, in, bools, indices);
    hipEventRecord(end);
    hipEventSynchronize(end);
    hipEventElapsedTime(&milliseconds, start, end);
    total += milliseconds;
    printf("Work-Efficient Compact: %f ms\n", total);
    hipMemcpy(odata, out, n * sizeof(int), hipMemcpyDeviceToHost);
    hipEventDestroy(start);
    hipEventDestroy(end);
    hipFree(bools);
    hipFree(indices);
    hipFree(in);
    hipFree(out);
    return lenCompacted;
}
}
}
| 1ac6b356ccebc6213b62a4ac9a0a65305d1e2b66.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
#define blockSize 128
namespace StreamCompaction {
namespace Efficient {
// TODO: __global__
// Up-sweep (reduce) phase of the Blelloch work-efficient scan.  At depth
// `offset`, thread k owns the subtree rooted at (k << offset) + 2^offset - 1
// and folds its left child's partial sum into that root.
__global__ void kernUpSweep(int n, int offset, int *buf) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
// Only n >> offset tree nodes are live at this depth.
if (index >= (n >> offset)) return;
int idx = index << offset;
buf[idx + (1 << offset) - 1] += buf[idx + (1 << (offset - 1)) - 1];
}
// Down-sweep phase of the Blelloch scan: the parent's value moves to the
// left child while the parent becomes (old left child + old parent),
// converting the reduction tree into an exclusive prefix sum.
__global__ void kernDownSweep(int n, int offset, int *buf) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= (n >> offset)) return;
int idx = index << offset;
// Classic swap-and-accumulate on the (left, right) node pair.
int t = buf[idx + (1 << offset) - 1];
buf[idx + (1 << offset) - 1] += buf[idx + (1 << (offset - 1)) - 1];
buf[idx + (1 << (offset - 1)) - 1] = t;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
    // Pad to the next power of two so the balanced-tree sweeps cover every
    // element; only the first n results are copied back.
    int *buf;
    int padded = 1 << ilog2ceil(n);
    cudaMalloc((void**)&buf, padded * sizeof(int));
    cudaMemcpy(buf, idata, n * sizeof(int), cudaMemcpyHostToDevice);
    int fullBlocksPerGrid = 0;
    float total = 0;
    float milliseconds = 0;
    // Create one event pair for both phases and destroy it on exit; the
    // original re-created the events before the down-sweep and never
    // destroyed any of them (resource leak).  The unused local `offset`
    // was removed as well.
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    // Up-sweep: in-place parallel reduction.
    cudaEventRecord(start);
    for (int i = 1; i <= ilog2(padded); i++) {
        fullBlocksPerGrid = ((padded >> i) + blockSize - 1) / blockSize;
        kernUpSweep << <fullBlocksPerGrid, blockSize >> >(padded, i, buf);
    }
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&milliseconds, start, end);
    total += milliseconds;
    // Zero the root, then down-sweep to produce an exclusive scan.
    cudaMemset(buf + padded - 1, 0, sizeof(int));
    cudaEventRecord(start);
    for (int i = ilog2(padded); i >= 1; i--) {
        fullBlocksPerGrid = ((padded >> i) + blockSize - 1) / blockSize;
        kernDownSweep << <fullBlocksPerGrid, blockSize >> >(padded, i, buf);
    }
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&milliseconds, start, end);
    total += milliseconds;
    printf("Work-Efficient scan: %f ms\n", total);
    cudaMemcpy(odata, buf, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(buf);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
    // Empty input: nothing to keep (also avoids odata[n-1] below).
    if (n <= 0) return 0;
    dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
    int *bools, *indices, *in, *out;
    cudaMalloc((void**)&bools, n * sizeof(int));
    cudaMalloc((void**)&indices, n * sizeof(int));
    cudaMalloc((void**)&in, n * sizeof(int));
    cudaMalloc((void**)&out, n * sizeof(int));
    cudaMemcpy(in, idata, n * sizeof(int), cudaMemcpyHostToDevice);
    float total = 0;
    float milliseconds = 0;
    // One event pair reused for both timed kernels and destroyed at the end
    // (the original created two pairs and leaked all of them).
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    // Map each element to a 0/1 keep flag.
    cudaEventRecord(start);
    StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> >(n, bools, in);
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&milliseconds, start, end);
    total += milliseconds;
    cudaMemcpy(odata, bools, n * sizeof(int), cudaMemcpyDeviceToHost);
    // Save the last flag before scan() overwrites odata in place.
    int lastBool = odata[n - 1];
    scan(n, odata, odata);
    // BUG FIX: scan() is EXCLUSIVE, so the kept-element count is the last
    // prefix sum plus the last flag; the original used odata[n-1] alone and
    // silently dropped idata[n-1] whenever it was nonzero.
    int lenCompacted = odata[n - 1] + lastBool;
    cudaMemcpy(indices, odata, n * sizeof(int), cudaMemcpyHostToDevice);
    // Scatter kept elements to their scanned destinations.
    cudaEventRecord(start);
    StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, blockSize >> >(n, out, in, bools, indices);
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&milliseconds, start, end);
    total += milliseconds;
    printf("Work-Efficient Compact: %f ms\n", total);
    cudaMemcpy(odata, out, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(bools);
    cudaFree(indices);
    cudaFree(in);
    cudaFree(out);
    return lenCompacted;
}
}
}
|
ec638e25e9079c9a14f8a256a05c9db630872d45.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#include "sample-common.h"
#define LENGTH 1024
// CPU reference implementation of element-wise vector addition:
// C[i] = A[i] + B[i] for i in [0, size).
void sumArrayOnHost(float* A, float* B, float* C, int size){
    for (int k = 0; k != size; ++k)
        C[k] = A[k] + B[k];
}
// GPU element-wise vector addition: one thread per element, with a bounds
// guard for the ragged final block.
__global__ void sumArrayOnGpu(float* A, float* B, float* C, int size){
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= size) return;
    C[gid] = A[gid] + B[gid];
}
// Runs the CPU reference sum into hostRef and reports the wall-clock time.
// (gpuRef is unused but kept for signature compatibility with callers.)
void testAtHost(float* h_A, float* h_B, float* hostRef, float* gpuRef, int size){
    double t0 = cpuSecond();
    sumArrayOnHost(h_A, h_B, hostRef, size);
    double elapsed = cpuSecond() - t0;
    printf("sumArrayOnHost Time elapsed %f sec\n", elapsed);
}
// Allocates device buffers, runs sumArrayOnGpu over h_A/h_B, copies the
// result back into gpuRef, and reports the elapsed time (which includes the
// device sync and the D2H copy).
void testAtGpu(float* h_A, float* h_B, float* gpuRef, int size){
float *d_A, *d_B, *d_C;
int nBytes = size * sizeof(float);
CHECK(hipMalloc((float**)&d_A, nBytes));
CHECK(hipMalloc((float**)&d_B, nBytes));
CHECK(hipMalloc((float**)&d_C, nBytes));
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
// Pre-filling d_C from gpuRef is not strictly required; the kernel
// overwrites every element it is responsible for.
CHECK(hipMemcpy(d_C, gpuRef, nBytes, hipMemcpyHostToDevice));
double iStart, iElasp;
// LENGTH threads per block; grid rounded up to cover all `size` elements.
dim3 block(LENGTH);
dim3 grid((size + block.x -1) / block.x);
iStart = cpuSecond();
hipLaunchKernelGGL(( sumArrayOnGpu), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, size);
// Synchronize before timing/error-checking: the launch itself is async.
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
iElasp = cpuSecond() - iStart;
printf("<<<grid %d, block %d>>> %f sec\n", grid.x, block.x, iElasp);
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
}
// Compares two float arrays element-by-element and returns the index of the
// first mismatch, or 0 when every element agrees.
// NOTE(review): a mismatch at index 0 also returns 0 and is therefore
// indistinguishable from success — callers should be aware.
int checkResult(float* A, float* B, int size){
    int k = 0;
    while (k < size) {
        if (A[k] != B[k]) return k;
        ++k;
    }
    return 0;
}
// Driver: picks device 0, fills two 2^24-element vectors, sums them on the
// GPU and then on the CPU, and compares the results element-wise.
int main(int argc, char* argv[]){
printf("%s starting ..\n", argv[0]);
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s,\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// 16M elements (64 MiB of floats per buffer).
int nElem = 1 << 24;
printf("Vector size %d\n", nElem);
float *h_A, *h_B, *gpuRef, *hostRef;
// init(h_A, h_B, gpuRef, hostRef, nElem);
size_t nBytes = nElem * sizeof(float);
h_A = (float*)malloc(nBytes);
h_B = (float*)malloc(nBytes);
hostRef = (float*)malloc(nBytes);
gpuRef = (float*)malloc(nBytes);
initialData(h_A, nElem);
initialData(h_B, nElem);
clearData(hostRef, nElem);
clearData(gpuRef, nElem);
// GPU first, then CPU reference, then element-wise comparison.
testAtGpu(h_A, h_B, gpuRef, nElem);
testAtHost(h_A, h_B, hostRef, gpuRef, nElem);
int nRet = checkResult(hostRef, gpuRef, nElem);
if( nRet != 0 ){
printf("check result error %d\n", nRet);
}
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return 0;
}
| ec638e25e9079c9a14f8a256a05c9db630872d45.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#include "sample-common.h"
#define LENGTH 1024
// CPU reference: element-wise vector addition, C[i] = A[i] + B[i].
void sumArrayOnHost(float* A, float* B, float* C, int size){
    int pos = 0;
    while (pos < size) {
        C[pos] = A[pos] + B[pos];
        ++pos;
    }
}
// GPU element-wise vector addition: one thread per element; the bounds
// guard covers the ragged final block.
__global__ void sumArrayOnGpu(float* A, float* B, float* C, int size){
int i = blockIdx.x * blockDim.x + threadIdx.x;
// printf("blockIdx.x = %d, blockDim.x = %d, threadIdx.x = %d, value = %d\n",
// blockIdx.x,
// blockDim.x,
// threadIdx.x,
// i
// );
if(i < size){
C[i] = A[i] + B[i];
}
}
// Runs the CPU reference sum into hostRef and reports wall-clock time.
// (gpuRef is accepted but unused — kept for signature symmetry with
// testAtGpu.)
void testAtHost(float* h_A, float* h_B, float* hostRef, float* gpuRef, int size){
double iStart, iElaps;
iStart = cpuSecond();
sumArrayOnHost(h_A, h_B, hostRef, size);
iElaps = cpuSecond() - iStart;
printf("sumArrayOnHost Time elapsed %f sec\n", iElaps);
}
// Allocates device buffers, runs sumArrayOnGpu over h_A/h_B, copies the
// result back into gpuRef, and reports the elapsed time (which includes the
// device sync and the D2H copy).
void testAtGpu(float* h_A, float* h_B, float* gpuRef, int size){
float *d_A, *d_B, *d_C;
int nBytes = size * sizeof(float);
CHECK(cudaMalloc((float**)&d_A, nBytes));
CHECK(cudaMalloc((float**)&d_B, nBytes));
CHECK(cudaMalloc((float**)&d_C, nBytes));
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
// Pre-filling d_C from gpuRef is not strictly required; the kernel
// overwrites every element it is responsible for.
CHECK(cudaMemcpy(d_C, gpuRef, nBytes, cudaMemcpyHostToDevice));
double iStart, iElasp;
// LENGTH threads per block; grid rounded up to cover all `size` elements.
dim3 block(LENGTH);
dim3 grid((size + block.x -1) / block.x);
iStart = cpuSecond();
sumArrayOnGpu<<<grid, block>>>(d_A, d_B, d_C, size);
// Synchronize before timing/error-checking: the launch itself is async.
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
iElasp = cpuSecond() - iStart;
printf("<<<grid %d, block %d>>> %f sec\n", grid.x, block.x, iElasp);
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
}
// Returns the index of the first element where A and B differ, or 0 when
// all `size` elements agree.
// NOTE(review): a mismatch at index 0 also yields 0, so it cannot be told
// apart from success.
int checkResult(float* A, float* B, int size){
    int mismatch = 0;
    for (int pos = 0; pos < size; ++pos) {
        if (A[pos] != B[pos]) {
            mismatch = pos;
            break;
        }
    }
    return mismatch;
}
// Driver: picks device 0, fills two 2^24-element vectors, sums them on the
// GPU and then on the CPU, and compares the results element-wise.
int main(int argc, char* argv[]){
printf("%s starting ..\n", argv[0]);
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s,\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// 16M elements (64 MiB of floats per buffer).
int nElem = 1 << 24;
printf("Vector size %d\n", nElem);
float *h_A, *h_B, *gpuRef, *hostRef;
// init(h_A, h_B, gpuRef, hostRef, nElem);
size_t nBytes = nElem * sizeof(float);
h_A = (float*)malloc(nBytes);
h_B = (float*)malloc(nBytes);
hostRef = (float*)malloc(nBytes);
gpuRef = (float*)malloc(nBytes);
initialData(h_A, nElem);
initialData(h_B, nElem);
clearData(hostRef, nElem);
clearData(gpuRef, nElem);
// GPU first, then CPU reference, then element-wise comparison.
testAtGpu(h_A, h_B, gpuRef, nElem);
testAtHost(h_A, h_B, hostRef, gpuRef, nElem);
int nRet = checkResult(hostRef, gpuRef, nElem);
if( nRet != 0 ){
printf("check result error %d\n", nRet);
}
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return 0;
}
|
27b6b5d7b6cdee4b4fef2978731d736be8ce13ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
// Convolves one image channel with a filterWidth x filterWidth kernel,
// clamping out-of-bounds neighbor reads to the nearest edge pixel.
// Expects a 2D launch covering numCols x numRows (one thread per pixel).
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
// Guard against threads launched past the image edge.
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
float color = 0.0f;
for (int f_y = 0; f_y < filterWidth; ++f_y) {
for (int f_x = 0; f_x < filterWidth; ++f_x) {
// Neighbor coordinates with the filter centered on this pixel,
// clamped to the image bounds (replicate-edge behavior).
int c_x = thread_2D_pos.x + f_x - filterWidth / 2;
int c_y = thread_2D_pos.y + f_y - filterWidth / 2;
c_x = min(max(0, c_x), numCols - 1);
c_y = min(max(0, c_y), numRows - 1);
float filter_value = filter[f_y*filterWidth + f_x];
color += filter_value*static_cast<float>(inputChannel[c_y*numCols+c_x]);
}
}
// NOTE(review): implicit float -> unsigned char conversion (truncation, no
// clamp); fine when the filter weights sum to ~1 — confirm for other filters.
outputChannel[thread_1D_pos] = color;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
// Splits an interleaved RGBA image into three planar channel buffers;
// the alpha channel is discarded.  Expects a 2D launch covering
// numCols x numRows (one thread per pixel).
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Threads launched past the image edge do nothing.
    if (col >= numCols || row >= numRows)
        return;
    const int pixel = row * numCols + col;
    const uchar4 rgba = inputImageRGBA[pixel];
    redChannel[pixel] = rgba.x;
    greenChannel[pixel] = rgba.y;
    blueChannel[pixel] = rgba.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
// Packs three planar channels back into an interleaved uchar4 image with
// alpha fixed at 255 (fully opaque).  Expects a 2D launch covering
// numCols x numRows.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Threads launched past the image edge do nothing.
    if (col >= numCols || row >= numRows)
        return;
    const int pixel = row * numCols + col;
    outputImageRGBA[pixel] = make_uchar4(redChannel[pixel],
                                         greenChannel[pixel],
                                         blueChannel[pixel],
                                         255);
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// Allocates the global per-channel device buffers (d_red/d_green/d_blue)
// plus the device filter (d_filter), and uploads the host filter weights.
// Must be called before launching the blur kernels; buffers are sized for
// one numRowsImage x numColsImage channel each.
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
// Full blur pipeline: split interleaved RGBA into three planar channels,
// blur each channel independently with the device-resident filter, then
// recombine into the RGBA output. Uses 32x32 thread blocks tiling the image.
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  const int blockWidth = 32;
  const dim3 blockSize(blockWidth, blockWidth, 1);
  // FIX: ceil-divide instead of "/blockWidth + 1" — the old form launched a
  // whole extra block row/column whenever the image dimension was an exact
  // multiple of blockWidth (correct thanks to the kernels' bounds checks,
  // but wasteful).
  const dim3 gridSize((numCols + blockWidth - 1) / blockWidth,
                      (numRows + blockWidth - 1) / blockWidth, 1);
  // Split the interleaved RGBA image into three planar channels.
  hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
  // Synchronize and check after each launch so kernel errors surface here.
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  // Blur each channel independently.
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  // Recombine the blurred planes into the RGBA output (alpha forced to 255).
  hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
                     d_greenBlurred,
                     d_blueBlurred,
                     d_outputImageRGBA,
                     numRows,
                     numCols);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
// Releases every device buffer allocated in allocateMemoryAndCopyToGPU.
void cleanup() {
  checkCudaErrors(hipFree(d_red));
  checkCudaErrors(hipFree(d_green));
  checkCudaErrors(hipFree(d_blue));
  // BUG FIX: d_filter is allocated in allocateMemoryAndCopyToGPU but was
  // previously never freed here — a device memory leak.
  checkCudaErrors(hipFree(d_filter));
}
| 27b6b5d7b6cdee4b4fef2978731d736be8ce13ac.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
// Blurs one planar channel: each thread computes the weighted average of the
// filterWidth x filterWidth neighborhood around its pixel. Neighbor
// coordinates are clamped to the image border (replicate-edge semantics).
// The accumulation is done in float and stored back as unsigned char.
__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Guard against threads mapped outside the image.
  if (col >= numCols || row >= numRows)
    return;
  const int halfWidth = filterWidth / 2;
  float blurred = 0.0f;
  // Same traversal order as the reference (rows outer, columns inner) so the
  // float accumulation is bit-identical.
  for (int r = 0; r < filterWidth; ++r) {
    for (int c = 0; c < filterWidth; ++c) {
      // Clamp so border pixels reuse the nearest in-bounds neighbor.
      int sampleCol = min(max(0, col + c - halfWidth), numCols - 1);
      int sampleRow = min(max(0, row + r - halfWidth), numRows - 1);
      blurred += filter[r * filterWidth + c] *
                 static_cast<float>(inputChannel[sampleRow * numCols + sampleCol]);
    }
  }
  outputChannel[row * numCols + col] = blurred;
}
// Converts the interleaved RGBA image (array-of-structures) into three
// planar channel arrays (structure-of-arrays); the alpha byte is discarded.
// One thread per pixel.
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Threads mapped past the image edge do nothing.
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  const uchar4 pixel = inputImageRGBA[idx];
  redChannel[idx] = pixel.x;
  greenChannel[idx] = pixel.y;
  blueChannel[idx] = pixel.z;
}
// Merges the three planar color channels back into one interleaved RGBA
// image. One thread handles one pixel; alpha is forced to 255 (opaque).
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Out-of-image threads return before touching memory.
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  outputImageRGBA[idx] = make_uchar4(redChannel[idx],
                                     greenChannel[idx],
                                     blueChannel[idx],
                                     255);
}
unsigned char *d_red, *d_green, *d_blue;  // device planar channel buffers, allocated in allocateMemoryAndCopyToGPU
float *d_filter;  // device copy of the blur filter weights (filterWidth x filterWidth floats)
// Allocates the device-side planar channel buffers (one byte per pixel each)
// and the device copy of the blur filter, then uploads the host filter
// weights. Must run before your_gaussian_blur; cleanup() releases the memory.
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  const size_t channelBytes = sizeof(unsigned char) * numRowsImage * numColsImage;
  checkCudaErrors(cudaMalloc(&d_red, channelBytes));
  checkCudaErrors(cudaMalloc(&d_green, channelBytes));
  checkCudaErrors(cudaMalloc(&d_blue, channelBytes));
  // Square filter: filterWidth x filterWidth float weights.
  const size_t filterBytes = sizeof(float) * filterWidth * filterWidth;
  checkCudaErrors(cudaMalloc(&d_filter, filterBytes));
  // Copy the host-side weights into the device buffer just allocated.
  checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterBytes, cudaMemcpyHostToDevice));
}
// Full blur pipeline: split interleaved RGBA into three planar channels,
// blur each channel independently with the device-resident filter, then
// recombine into the RGBA output. Uses 32x32 thread blocks tiling the image.
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
                        uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
                        unsigned char *d_redBlurred,
                        unsigned char *d_greenBlurred,
                        unsigned char *d_blueBlurred,
                        const int filterWidth)
{
  const int blockWidth = 32;
  const dim3 blockSize(blockWidth, blockWidth, 1);
  // FIX: ceil-divide instead of "/blockWidth + 1" — the old form launched a
  // whole extra block row/column whenever the image dimension was an exact
  // multiple of blockWidth (correct thanks to the kernels' bounds checks,
  // but wasteful).
  const dim3 gridSize((numCols + blockWidth - 1) / blockWidth,
                      (numRows + blockWidth - 1) / blockWidth, 1);
  // Split the interleaved RGBA image into three planar channels.
  separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
  // Synchronize and check after each launch so kernel errors surface here.
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  // Blur each channel independently.
  gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  // Recombine the blurred planes into the RGBA output (alpha forced to 255).
  recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
                                             d_greenBlurred,
                                             d_blueBlurred,
                                             d_outputImageRGBA,
                                             numRows,
                                             numCols);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
// Releases every device buffer allocated in allocateMemoryAndCopyToGPU.
void cleanup() {
  checkCudaErrors(cudaFree(d_red));
  checkCudaErrors(cudaFree(d_green));
  checkCudaErrors(cudaFree(d_blue));
  // BUG FIX: d_filter is allocated in allocateMemoryAndCopyToGPU but was
  // previously never freed here — a device memory leak.
  checkCudaErrors(cudaFree(d_filter));
}
|
1ee703c816fa854fedb0f247213a7354e9146d7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaKernels.h"
#define NUM_THREADS 256
static __constant__ CudaInputElem constInput[MAX_ATTR_NUM];
// Device-side string equality: true iff s1 and s2 contain identical
// characters up to and including the terminating '\0'.
static __device__ bool cuda_strcmp(char *s1, char *s2) {
  while (*s1 == *s2) {
    if (*s1 == '\0') return true;
    ++s1;
    ++s2;
  }
  return false;
}
// Device-side prefix test: returns true when s2 is a non-empty prefix of s1
// (the loop succeeds once the character after the current s2 position is the
// terminator while every character so far has matched).
// NOTE(review): if both strings are empty the body reads *(s2+1), one byte
// past s2's terminator — presumably callers never pass an empty pattern;
// confirm before relying on that case.
static __device__ bool cuda_prefix(char *s1, char *s2) {
for ( ; *s1==*s2; ++s1, ++s2) {
if (*(s2+1)=='\0') return true;
}
return false;
}
// Device-side substring test: true iff s2 occurs somewhere inside s1.
// Equal lengths degrade to an exact comparison; a needle longer than the
// haystack can never match.
static __device__ bool cuda_substr(char *s1, char *s2) {
  int size1 = 0;
  int size2 = 0;
  while (s1[size1] != '\0') size1++;
  while (s2[size2] != '\0') size2++;
  if (size1 == size2) return cuda_strcmp(s1, s2);
  if (size1 < size2) return false;
  // Slide s2 over every candidate start position in s1.
  for (int i = 0; i < size1 - size2 + 1; i++) {
    bool failed = false;
    for (int j = 0; j < size2; j++) {
      // BUG FIX: the original compared s1[i+j-1], which read one byte before
      // the window (s1[-1] when i==0) and tested a shifted, incorrect window,
      // so genuine matches at the correct offsets were missed.
      if (s1[i + j] != s2[j]) {
        failed = true;
        break;
      }
    }
    if (!failed) return true;
  }
  return false;
}
// Resets the per-filter match counters with a grid-stride loop; the first
// numInterfaces threads also zero the per-interface match flags.
static __global__ void cleanCounters(unsigned char *filtersCount, unsigned char *interfaces, const int numFilters, const int numInterfaces) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < numInterfaces) interfaces[idx] = 0;
  const int stride = gridDim.x * blockDim.x;
  for ( ; idx < numFilters; idx += stride) {
    filtersCount[idx] = 0;
  }
}
// One thread per constraint registered under the attribute stored at
// constInput[attributeIdx] (constant memory): evaluates the constraint's
// operator against the message's attribute value and, on a match, increments
// the owning filter's satisfied-constraint counter.
static __global__ void evalConstraint(unsigned char *filtersCount, const FilterInfo *filterInfo, unsigned char *interfaces, const int numFilters, const int numInterfaces, int attributeIdx) {
int constraintsIndex = blockIdx.x*blockDim.x+threadIdx.x;
// Guard: the launch rounds the thread count up to whole blocks.
if (constraintsIndex>=constInput[attributeIdx].numConstraints) return;
CudaInputElem inputElem = constInput[attributeIdx];
CudaValue val = inputElem.value;
Op constrOp = inputElem.constrOp[constraintsIndex];
if (val.type==INT) {
// Integer constraint: return early unless the operator is satisfied.
IntCudaConstraint constrVal = ((IntCudaConstraint *)inputElem.constrVal)[constraintsIndex];
if ((constrOp==EQ && val.intVal!=constrVal.value) ||
(constrOp==LT && val.intVal>=constrVal.value) ||
(constrOp==GT && val.intVal<=constrVal.value) ||
(constrOp==DF && val.intVal==constrVal.value)) return;
} else {
// String constraint: equality, difference, prefix, or substring match.
StringCudaConstraint constrVal = ((StringCudaConstraint *)inputElem.constrVal)[constraintsIndex];
if ((constrOp==EQ && !cuda_strcmp(val.stringVal, constrVal.value)) ||
(constrOp==DF && cuda_strcmp(val.stringVal, constrVal.value)) ||
(constrOp==PF && !cuda_prefix(val.stringVal, constrVal.value)) ||
(constrOp==IN && !cuda_substr(val.stringVal, constrVal.value))) return;
}
int filterIndex = inputElem.filterIdx[constraintsIndex];
// NOTE(review): this increment is not atomic; presumably at most one
// constraint per (filter, attribute) pair can match a given message —
// confirm, otherwise concurrent increments could be lost.
filtersCount[filterIndex]++;
}
// Grid-stride pass over all filters: a filter whose satisfied-constraint
// count equals its total constraint count has matched the message, so its
// interface is flagged for delivery.
static __global__ void summarize(unsigned char *filtersCount, const FilterInfo *filterInfo, unsigned char *interfaces, const int numFilters, const int numInterfaces) {
  const int stride = gridDim.x * blockDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < numFilters; idx += stride) {
    if (filtersCount[idx] == filterInfo[idx].numConstraints) {
      interfaces[filterInfo[idx].interface] = 1;
    }
  }
}
// Starts from an empty, unconsolidated engine with zeroed timing statistics.
CudaKernels::CudaKernels() :
    numInterfaces(0),
    numFilters(0),
    consolidated(false),
    hostToDeviceCopyTime(0),
    execTime(0),
    deviceToHostCopyTime(0) {
}
// Releases all device/pinned-host buffers (which exist only after
// consolidate() has run) and deletes the CudaFilter objects handed over via
// ifConfig, which this engine owns.
// NOTE(review): the hipFree/hipHostFree return codes are ignored here.
CudaKernels::~CudaKernels() {
if (consolidated) {
// Per-attribute-name device arrays: constraint values, operators, and
// owning-filter indices.
for (map<string_t, void *>::iterator it=nameDeviceConstrVal.begin(); it!=nameDeviceConstrVal.end(); ++it) {
void *constrPtr = it->second;
hipFree(constrPtr);
}
for (map<string_t, Op *>::iterator it=nameDeviceConstrOp.begin(); it!=nameDeviceConstrOp.end(); ++it) {
Op *constrPtr = it->second;
hipFree(constrPtr);
}
for (map<string_t, int *>::iterator it=nameDeviceFilterIdx.begin(); it!=nameDeviceFilterIdx.end(); ++it) {
int *filterIdxPtr = it->second;
hipFree(filterIdxPtr);
}
// Pinned host staging buffers and per-filter/per-interface device state.
hipHostFree(hostInput);
hipFree(currentFiltersCount);
hipFree(filtersInfo);
hipFree(interfacesDevice);
hipHostFree(interfacesHost);
}
// The filters registered through ifConfig are owned by this object.
for (map<int, set<CudaFilter *> >::iterator it=hostFilters.begin(); it!=hostFilters.end(); ++it) {
for (set<CudaFilter *>::iterator it2=it->second.begin(); it2!=it->second.end(); ++it2) {
CudaFilter *filter = *it2;
delete filter;
}
}
}
void CudaKernels::ifConfig(int interfaceId, set<CudaFilter *> &filters) {
// record the set of filters associated to this interface
hostFilters.insert(make_pair(interfaceId, filters));
// update the numConstraints and nameType data structures (to be used at consolidate time)
for (set<CudaFilter *>::iterator it=filters.begin(); it!=filters.end(); ++it) {
CudaFilter *filter = *it;
for (int i=0; i<filter->numConstraints; i++) {
string_t nameStr = filter->constraints[i].name;
map<string_t, int>::iterator it=numConstraints.find(nameStr);
if (it==numConstraints.end()) {
numConstraints.insert(make_pair(nameStr, 1));
} else {
it->second++;
}
map<string_t, Type>::iterator it1=nameType.find(nameStr);
if (it1==nameType.end()) {
nameType.insert(make_pair(nameStr, filter->constraints[i].value.type));
}
}
numFilters++;
}
}
void CudaKernels::consolidate() {
// allocate memory on device and host
int e = 0;
int allocSize = 0;
numInterfaces = hostFilters.size();
allocSize += sizeof(CudaInputElem)*MAX_ATTR_NUM; // allocated into constant memory (see static variable at the beginning of file)
e += hipHostMalloc((void**) &hostInput, (size_t) sizeof(CudaInputElem)*MAX_ATTR_NUM);
e += hipMalloc((void**) &interfacesDevice, (size_t) sizeof(unsigned char)*numInterfaces);
allocSize += sizeof(unsigned char)*numInterfaces;
e += hipHostMalloc((void**) &interfacesHost, (size_t) sizeof(unsigned char)*numInterfaces);
map<string_t, int> currentNumConstraints;
map<string_t, void *> nameHostConstrVal;
map<string_t, Op *> nameHostConstrOp;
map<string_t, int *> nameHostFilterIdx;
for (map<string_t, int>::iterator it=numConstraints.begin(); it!=numConstraints.end(); ++it) {
string_t name = it->first;
int num = it->second;
void *constrValPtr, *hostConstrValPtr;
if(nameType[name]==INT) {
e += hipMalloc((void**) &constrValPtr, (size_t) sizeof(IntCudaConstraint)*num);
hostConstrValPtr = malloc(sizeof(IntCudaConstraint)*num);
allocSize += sizeof(IntCudaConstraint)*num;
} else {
e += hipMalloc((void**) &constrValPtr, (size_t) sizeof(StringCudaConstraint)*num);
hostConstrValPtr = malloc(sizeof(StringCudaConstraint)*num);
allocSize += sizeof(StringCudaConstraint)*num;
}
nameDeviceConstrVal.insert(make_pair(name, constrValPtr));
nameHostConstrVal.insert(make_pair(name, hostConstrValPtr));
Op *constrOpPtr, *hostConstrOpPtr;
e+= hipMalloc((void**) &constrOpPtr, (size_t) sizeof(Op)*num);
hostConstrOpPtr = (Op *)malloc(sizeof(Op)*num);
allocSize += sizeof(Op)*num;
nameDeviceConstrOp.insert(make_pair(name, constrOpPtr));
nameHostConstrOp.insert(make_pair(name, hostConstrOpPtr));
currentNumConstraints.insert(make_pair(name, 0));
int *filterIdxPtr, *hostFilterIdxPtr;
e+= hipMalloc((void**) &filterIdxPtr, (size_t) sizeof(int)*num);
hostFilterIdxPtr = (int *)malloc(sizeof(int)*num);
allocSize += sizeof(int)*num;
nameDeviceFilterIdx.insert(make_pair(name, filterIdxPtr));
nameHostFilterIdx.insert(make_pair(name, hostFilterIdxPtr));
}
e += hipMalloc((void**) ¤tFiltersCount, (size_t) sizeof(unsigned char)*numFilters);
allocSize += sizeof(unsigned char)*numFilters;
e += hipMalloc((void**) &filtersInfo, (size_t) sizeof(FilterInfo)*numFilters);
allocSize += sizeof(FilterInfo)*numFilters;
if (e>0) {
cerr << " Allocation error " << e << endl;
exit(1);
}
// initialize the nameHostConstrVal, nameHostConstrOp, nameHostFilterIdx, and hostFiltersInfo structures (to be copied into the corresponding structures in device later)
int filterId = 0;
FilterInfo *hostFiltersInfo = (FilterInfo *) malloc(sizeof(FilterInfo)*numFilters);
for (map<int, set<CudaFilter *> >::iterator it=hostFilters.begin(); it!=hostFilters.end(); ++it) {
int interfaceId = it->first;
for (set<CudaFilter *>::iterator it2=it->second.begin(); it2!=it->second.end(); ++it2) {
CudaFilter *filter = *it2;
for (int i=0; i<filter->numConstraints; i++) {
string_t name = filter->constraints[i].name;
int writingIndex = currentNumConstraints[name];
currentNumConstraints[name] = writingIndex+1;
Op *hostConstrOpPtr = nameHostConstrOp[name];
hostConstrOpPtr[writingIndex] = filter->constraints[i].op;
if(nameType[name]==INT) {
IntCudaConstraint *hostConstrValPtr = (IntCudaConstraint *)nameHostConstrVal[name];
hostConstrValPtr[writingIndex].value = filter->constraints[i].value.intVal;
} else {
StringCudaConstraint *hostConstrValPtr = (StringCudaConstraint *)nameHostConstrVal[name];
memcpy(hostConstrValPtr[writingIndex].value, filter->constraints[i].value.stringVal, STRING_VAL_LEN);
}
int *hostFilterIdxPtr = nameHostFilterIdx[name];
hostFilterIdxPtr[writingIndex] = filterId;
}
hostFiltersInfo[filterId].numConstraints = filter->numConstraints;
hostFiltersInfo[filterId].interface = interfaceId;
filterId++;
}
}
// initialize the device memory
for (map<string_t, void *>::iterator it=nameHostConstrVal.begin(); it!=nameHostConstrVal.end(); ++it) {
string_t name = it->first;
void *host = it->second;
void *device = nameDeviceConstrVal[name];
int size = numConstraints[name];
if(nameType[name]==INT) {
e += hipMemcpy(device, host, sizeof(IntCudaConstraint)*size, hipMemcpyHostToDevice);
} else {
e += hipMemcpy(device, host, sizeof(StringCudaConstraint)*size, hipMemcpyHostToDevice);
}
hipDeviceSynchronize();
free(host);
}
for (map<string_t, Op *>::iterator it=nameHostConstrOp.begin(); it!=nameHostConstrOp.end(); ++it) {
string_t name = it->first;
Op *host = it->second;
Op *device = nameDeviceConstrOp[name];
int size = numConstraints[name];
e += hipMemcpy(device, host, sizeof(Op)*size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
free(host);
}
for (map<string_t, int *>::iterator it=nameHostFilterIdx.begin(); it!=nameHostFilterIdx.end(); ++it) {
string_t name = it->first;
int *host = it->second;
int *device = nameDeviceFilterIdx[name];
int size = numConstraints[name];
e += hipMemcpy(device, host, sizeof(int)*size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
free(host);
}
e += hipMemcpy(filtersInfo, hostFiltersInfo, (size_t) sizeof(FilterInfo)*numFilters, hipMemcpyHostToDevice);
hipMemset(currentFiltersCount, 0, (size_t) sizeof(unsigned char)*numFilters);
hipMemset(interfacesDevice, 0, (size_t) sizeof(unsigned char)*numInterfaces);
hipDeviceSynchronize();
consolidated = true;
if (e>0) {
cerr << " Memcpy error " << e << " during consolidation " << endl;
exit(1);
}
free(hostFiltersInfo);
// set up the runtime to optimize performance
//hipFuncSetCacheConfig(evalConstraint, hipFuncCachePreferL1);
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
int totConstr=0;
for(map<string_t,int>::iterator it=numConstraints.begin(); it!=numConstraints.end(); ++it) {
totConstr+=it->second;
}
cout << endl << " ### " << totConstr << " constraints allocated ### " << endl;
cout << endl << " ### " << allocSize << " bytes allocated on device ### " << endl;
cout << endl << "#####################" << endl;
}
// Reports the accumulated timing statistics: time spent copying messages to
// the device, executing the matching kernels, and copying results back.
void CudaKernels::getStats(double &hToD, double &exec, double &dToH) {
  hToD = hostToDeviceCopyTime;
  exec = execTime;
  dToH = deviceToHostCopyTime;
}
#if STATS==1
// Matching pipeline for one message, instrumented: copy the message's
// attributes into constant memory, run the evaluation kernels, then read the
// matching interfaces back into outbox->outgoingInterfaces. Each phase's
// wall time is accumulated into the corresponding statistic.
void CudaKernels::processMessage(CudaOutbox *outbox) {
Timer t;
t.start();
int maxConstr = copyMsgToDevice(outbox->message);
//hipDeviceSynchronize(); // TODO: remove
hostToDeviceCopyTime += t.stop();
// maxConstr==0 means no attribute carried constraints to evaluate.
if (maxConstr>0) {
t.start();
computeResults(maxConstr);
//hipDeviceSynchronize(); // TODO: remove
execTime += t.stop();
t.start();
getMatchingInterfaces(outbox->outgoingInterfaces);
//hipDeviceSynchronize(); // TODO: remove
deviceToHostCopyTime += t.stop();
}
}
#elif STATS==0
// Same pipeline without the timing instrumentation.
void CudaKernels::processMessage(CudaOutbox *outbox) {
int maxConstr = copyMsgToDevice(outbox->message);
if (maxConstr>0) {
computeResults(maxConstr);
getMatchingInterfaces(outbox->outgoingInterfaces);
}
}
#endif
int CudaKernels::copyMsgToDevice(CudaMessage *message) {
int dest = 0;
int maxConstr = 0;
for (int i=0; i<message->numAttributes; i++) {
string_t name = message->attributes[i].name;
map<string_t, void *>::iterator it = nameDeviceConstrVal.find(name);
if(it==nameDeviceConstrVal.end()) {
cerr << "Name: ";
for(int i=0; i<name.length(); i++) cerr << name[i];
cerr << " not found during message processing" << endl;
exit(1);
}
hostInput[dest].constrVal = it->second;
map<string_t, Op *>::iterator it1 = nameDeviceConstrOp.find(name);
if(it1==nameDeviceConstrOp.end()) {
cerr << "Name: ";
for(int i=0; i<name.length(); i++) cerr << name[i];
cerr << " not found during message processing" << endl;
exit(1);
}
hostInput[dest].constrOp = it1->second;
map<string_t, int *>::iterator it2 = nameDeviceFilterIdx.find(name);
if(it2==nameDeviceFilterIdx.end()) {
cerr << "Name: ";
for(int i=0; i<name.length(); i++) cerr << name[i];
cerr << " not found during message processing" << endl;
exit(1);
}
hostInput[dest].filterIdx = it2->second;
hostInput[dest].numConstraints = numConstraints[name];
if (hostInput[dest].numConstraints>maxConstr) maxConstr = hostInput[dest].numConstraints;
hostInput[dest].value = message->attributes[i].value;
dest++;
}
numValues = dest;
if (dest>0) {
int e = 0;
e += hipMemcpyToSymbolAsync(constInput, hostInput, (size_t) sizeof(CudaInputElem)*numValues);
if (e>0) {
cerr << " Memcpy error " << e << " during message processing " << endl;
exit(1);
}
}
return maxConstr;
}
// Launches one evalConstraint kernel per message attribute (one thread per
// constraint, grid rounded up to whole blocks), then a summarize pass that
// flags the interfaces of fully satisfied filters.
void CudaKernels::computeResults(int maxConstr) {
  for(int i=0; i<numValues; i++) {
    hipLaunchKernelGGL(( evalConstraint), dim3(hostInput[i].numConstraints/NUM_THREADS+1), dim3(NUM_THREADS), 0, 0, currentFiltersCount, filtersInfo, interfacesDevice, numFilters, numInterfaces, i);
  }
  // BUG FIX: the original launched summarize with numFilters/2048 blocks,
  // which is 0 blocks — an invalid execution configuration — whenever fewer
  // than 2048 filters are installed. Clamp to at least one block; the
  // kernel's grid-stride loop still covers every filter.
  int summarizeBlocks = numFilters / 2048;
  if (summarizeBlocks < 1) summarizeBlocks = 1;
  hipLaunchKernelGGL(( summarize), dim3(summarizeBlocks), dim3(NUM_THREADS), 0, 0, currentFiltersCount, filtersInfo, interfacesDevice, numFilters, numInterfaces);
}
// Copies the per-interface match flags back to the host, resets the device
// counters/flags for the next message, and inserts the indices of matching
// interfaces into `results`.
void CudaKernels::getMatchingInterfaces(set<int> &results) {
int e = hipMemcpyAsync(interfacesHost, interfacesDevice, (size_t) sizeof(unsigned char)*numInterfaces, hipMemcpyDeviceToHost);
// Wait for the copy (and the preceding kernels) to complete before reading
// interfacesHost or clearing device state.
hipDeviceSynchronize();
// Zero the device-side state asynchronously: it only has to be clean by the
// time the next message's kernels run.
hipMemsetAsync(currentFiltersCount, 0, (size_t) sizeof(unsigned char)*numFilters);
hipMemsetAsync(interfacesDevice, 0, (size_t) sizeof(unsigned char)*numInterfaces);
//cleanCounters<<<numFilters/2048, NUM_THREADS>>>(currentFiltersCount, interfacesDevice, numFilters, numInterfaces);
if (e>0) {
cerr << " Memcpy error " << e << " while copying matching interfaces " << endl;
exit(1);
}
// Collect the flagged interfaces into the caller's set.
for (int i=0; i<numInterfaces; i++) {
if (interfacesHost[i]!=0) {
results.insert(i);
}
}
}
| 1ee703c816fa854fedb0f247213a7354e9146d7a.cu | #include "CudaKernels.h"
#define NUM_THREADS 256
static __constant__ CudaInputElem constInput[MAX_ATTR_NUM];
// Device-side string equality: true iff s1 and s2 contain identical
// characters up to and including the terminating '\0'.
static __device__ bool cuda_strcmp(char *s1, char *s2) {
  while (*s1 == *s2) {
    if (*s1 == '\0') return true;
    ++s1;
    ++s2;
  }
  return false;
}
// Device-side prefix test: returns true when s2 is a non-empty prefix of s1
// (the loop succeeds once the character after the current s2 position is the
// terminator while every character so far has matched).
// NOTE(review): if both strings are empty the body reads *(s2+1), one byte
// past s2's terminator — presumably callers never pass an empty pattern;
// confirm before relying on that case.
static __device__ bool cuda_prefix(char *s1, char *s2) {
for ( ; *s1==*s2; ++s1, ++s2) {
if (*(s2+1)=='\0') return true;
}
return false;
}
// Device-side substring test: true iff s2 occurs somewhere inside s1.
// Equal lengths degrade to an exact comparison; a needle longer than the
// haystack can never match.
static __device__ bool cuda_substr(char *s1, char *s2) {
  int size1 = 0;
  int size2 = 0;
  while (s1[size1] != '\0') size1++;
  while (s2[size2] != '\0') size2++;
  if (size1 == size2) return cuda_strcmp(s1, s2);
  if (size1 < size2) return false;
  // Slide s2 over every candidate start position in s1.
  for (int i = 0; i < size1 - size2 + 1; i++) {
    bool failed = false;
    for (int j = 0; j < size2; j++) {
      // BUG FIX: the original compared s1[i+j-1], which read one byte before
      // the window (s1[-1] when i==0) and tested a shifted, incorrect window,
      // so genuine matches at the correct offsets were missed.
      if (s1[i + j] != s2[j]) {
        failed = true;
        break;
      }
    }
    if (!failed) return true;
  }
  return false;
}
// Resets the per-filter match counters with a grid-stride loop; the first
// numInterfaces threads also zero the per-interface match flags.
static __global__ void cleanCounters(unsigned char *filtersCount, unsigned char *interfaces, const int numFilters, const int numInterfaces) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < numInterfaces) interfaces[idx] = 0;
  const int stride = gridDim.x * blockDim.x;
  for ( ; idx < numFilters; idx += stride) {
    filtersCount[idx] = 0;
  }
}
// One thread per constraint registered under the attribute stored at
// constInput[attributeIdx] (constant memory): evaluates the constraint's
// operator against the message's attribute value and, on a match, increments
// the owning filter's satisfied-constraint counter.
static __global__ void evalConstraint(unsigned char *filtersCount, const FilterInfo *filterInfo, unsigned char *interfaces, const int numFilters, const int numInterfaces, int attributeIdx) {
int constraintsIndex = blockIdx.x*blockDim.x+threadIdx.x;
// Guard: the launch rounds the thread count up to whole blocks.
if (constraintsIndex>=constInput[attributeIdx].numConstraints) return;
CudaInputElem inputElem = constInput[attributeIdx];
CudaValue val = inputElem.value;
Op constrOp = inputElem.constrOp[constraintsIndex];
if (val.type==INT) {
// Integer constraint: return early unless the operator is satisfied.
IntCudaConstraint constrVal = ((IntCudaConstraint *)inputElem.constrVal)[constraintsIndex];
if ((constrOp==EQ && val.intVal!=constrVal.value) ||
(constrOp==LT && val.intVal>=constrVal.value) ||
(constrOp==GT && val.intVal<=constrVal.value) ||
(constrOp==DF && val.intVal==constrVal.value)) return;
} else {
// String constraint: equality, difference, prefix, or substring match.
StringCudaConstraint constrVal = ((StringCudaConstraint *)inputElem.constrVal)[constraintsIndex];
if ((constrOp==EQ && !cuda_strcmp(val.stringVal, constrVal.value)) ||
(constrOp==DF && cuda_strcmp(val.stringVal, constrVal.value)) ||
(constrOp==PF && !cuda_prefix(val.stringVal, constrVal.value)) ||
(constrOp==IN && !cuda_substr(val.stringVal, constrVal.value))) return;
}
int filterIndex = inputElem.filterIdx[constraintsIndex];
// NOTE(review): this increment is not atomic; presumably at most one
// constraint per (filter, attribute) pair can match a given message —
// confirm, otherwise concurrent increments could be lost.
filtersCount[filterIndex]++;
}
// Grid-stride pass over all filters: a filter whose satisfied-constraint
// count equals its total constraint count has matched the message, so its
// interface is flagged for delivery.
static __global__ void summarize(unsigned char *filtersCount, const FilterInfo *filterInfo, unsigned char *interfaces, const int numFilters, const int numInterfaces) {
  const int stride = gridDim.x * blockDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < numFilters; idx += stride) {
    if (filtersCount[idx] == filterInfo[idx].numConstraints) {
      interfaces[filterInfo[idx].interface] = 1;
    }
  }
}
// Constructs an empty kernel manager: no interfaces or filters registered,
// not yet consolidated, all timing statistics zeroed.
CudaKernels::CudaKernels() :
    numInterfaces(0),
    numFilters(0),
    consolidated(false),
    hostToDeviceCopyTime(0),
    execTime(0),
    deviceToHostCopyTime(0) {
}
// Releases all resources owned by this object. Device and pinned-host buffers
// exist only after consolidate() has run, hence the guard. The CudaFilter
// objects handed over via ifConfig() are owned (and deleted) here.
CudaKernels::~CudaKernels() {
if (consolidated) {
// per-attribute constraint-value arrays on the device
for (map<string_t, void *>::iterator it=nameDeviceConstrVal.begin(); it!=nameDeviceConstrVal.end(); ++it) {
void *constrPtr = it->second;
cudaFree(constrPtr);
}
// per-attribute operator arrays on the device
for (map<string_t, Op *>::iterator it=nameDeviceConstrOp.begin(); it!=nameDeviceConstrOp.end(); ++it) {
Op *constrPtr = it->second;
cudaFree(constrPtr);
}
// per-attribute constraint->filter index arrays on the device
for (map<string_t, int *>::iterator it=nameDeviceFilterIdx.begin(); it!=nameDeviceFilterIdx.end(); ++it) {
int *filterIdxPtr = it->second;
cudaFree(filterIdxPtr);
}
// pinned host staging buffer and the remaining device state
cudaFreeHost(hostInput);
cudaFree(currentFiltersCount);
cudaFree(filtersInfo);
cudaFree(interfacesDevice);
cudaFreeHost(interfacesHost);
}
// delete the filters registered through ifConfig()
for (map<int, set<CudaFilter *> >::iterator it=hostFilters.begin(); it!=hostFilters.end(); ++it) {
for (set<CudaFilter *>::iterator it2=it->second.begin(); it2!=it->second.end(); ++it2) {
CudaFilter *filter = *it2;
delete filter;
}
}
}
// Registers the set of filters exposed by one interface and updates the
// per-attribute constraint counters (numConstraints) and the attribute-name ->
// type map (nameType); both are consumed later by consolidate().
// Fix: the inner map iterator used to be named 'it', shadowing the outer set
// iterator 'it' — legal but error-prone; iterators now have distinct names.
void CudaKernels::ifConfig(int interfaceId, set<CudaFilter *> &filters) {
    // record the set of filters associated to this interface
    hostFilters.insert(make_pair(interfaceId, filters));
    // update the numConstraints and nameType data structures (used at consolidate time)
    for (set<CudaFilter *>::iterator fIt=filters.begin(); fIt!=filters.end(); ++fIt) {
        CudaFilter *filter = *fIt;
        for (int i=0; i<filter->numConstraints; i++) {
            string_t nameStr = filter->constraints[i].name;
            map<string_t, int>::iterator cntIt=numConstraints.find(nameStr);
            if (cntIt==numConstraints.end()) {
                numConstraints.insert(make_pair(nameStr, 1));
            } else {
                cntIt->second++;
            }
            // remember the value type of this attribute the first time we see it
            map<string_t, Type>::iterator typeIt=nameType.find(nameStr);
            if (typeIt==nameType.end()) {
                nameType.insert(make_pair(nameStr, filter->constraints[i].value.type));
            }
        }
        numFilters++;
    }
}
// Freezes the registered filters into flat device-side arrays so that messages
// can be matched on the GPU. Builds, per attribute name: the constraint-value
// array, the operator array, and the constraint->filter index array; plus one
// FilterInfo record per filter. Must be called once, after all ifConfig() calls.
// Fix: the cudaMalloc of currentFiltersCount had its '&curren' eaten by an
// HTML-entity mojibake ('¤tFiltersCount'), which does not compile; restored
// to '&currentFiltersCount'.
void CudaKernels::consolidate() {
// allocate memory on device and host; 'e' accumulates cuda* error codes
int e = 0;
int allocSize = 0;
numInterfaces = hostFilters.size();
allocSize += sizeof(CudaInputElem)*MAX_ATTR_NUM; // allocated into constant memory (see static variable at the beginning of file)
e += cudaMallocHost((void**) &hostInput, (size_t) sizeof(CudaInputElem)*MAX_ATTR_NUM);
e += cudaMalloc((void**) &interfacesDevice, (size_t) sizeof(unsigned char)*numInterfaces);
allocSize += sizeof(unsigned char)*numInterfaces;
e += cudaMallocHost((void**) &interfacesHost, (size_t) sizeof(unsigned char)*numInterfaces);
// host-side mirrors, filled below and then copied to the device in one pass
map<string_t, int> currentNumConstraints;
map<string_t, void *> nameHostConstrVal;
map<string_t, Op *> nameHostConstrOp;
map<string_t, int *> nameHostFilterIdx;
for (map<string_t, int>::iterator it=numConstraints.begin(); it!=numConstraints.end(); ++it) {
string_t name = it->first;
int num = it->second;
void *constrValPtr, *hostConstrValPtr;
if(nameType[name]==INT) {
e += cudaMalloc((void**) &constrValPtr, (size_t) sizeof(IntCudaConstraint)*num);
hostConstrValPtr = malloc(sizeof(IntCudaConstraint)*num);
allocSize += sizeof(IntCudaConstraint)*num;
} else {
e += cudaMalloc((void**) &constrValPtr, (size_t) sizeof(StringCudaConstraint)*num);
hostConstrValPtr = malloc(sizeof(StringCudaConstraint)*num);
allocSize += sizeof(StringCudaConstraint)*num;
}
nameDeviceConstrVal.insert(make_pair(name, constrValPtr));
nameHostConstrVal.insert(make_pair(name, hostConstrValPtr));
Op *constrOpPtr, *hostConstrOpPtr;
e+= cudaMalloc((void**) &constrOpPtr, (size_t) sizeof(Op)*num);
hostConstrOpPtr = (Op *)malloc(sizeof(Op)*num);
allocSize += sizeof(Op)*num;
nameDeviceConstrOp.insert(make_pair(name, constrOpPtr));
nameHostConstrOp.insert(make_pair(name, hostConstrOpPtr));
currentNumConstraints.insert(make_pair(name, 0));
int *filterIdxPtr, *hostFilterIdxPtr;
e+= cudaMalloc((void**) &filterIdxPtr, (size_t) sizeof(int)*num);
hostFilterIdxPtr = (int *)malloc(sizeof(int)*num);
allocSize += sizeof(int)*num;
nameDeviceFilterIdx.insert(make_pair(name, filterIdxPtr));
nameHostFilterIdx.insert(make_pair(name, hostFilterIdxPtr));
}
e += cudaMalloc((void**) &currentFiltersCount, (size_t) sizeof(unsigned char)*numFilters);
allocSize += sizeof(unsigned char)*numFilters;
e += cudaMalloc((void**) &filtersInfo, (size_t) sizeof(FilterInfo)*numFilters);
allocSize += sizeof(FilterInfo)*numFilters;
if (e>0) {
cerr << " Allocation error " << e << endl;
exit(1);
}
// initialize the nameHostConstrVal, nameHostConstrOp, nameHostFilterIdx, and hostFiltersInfo structures (to be copied into the corresponding structures in device later)
int filterId = 0;
FilterInfo *hostFiltersInfo = (FilterInfo *) malloc(sizeof(FilterInfo)*numFilters);
for (map<int, set<CudaFilter *> >::iterator it=hostFilters.begin(); it!=hostFilters.end(); ++it) {
int interfaceId = it->first;
for (set<CudaFilter *>::iterator it2=it->second.begin(); it2!=it->second.end(); ++it2) {
CudaFilter *filter = *it2;
for (int i=0; i<filter->numConstraints; i++) {
string_t name = filter->constraints[i].name;
// append this constraint at the next free slot of its attribute's arrays
int writingIndex = currentNumConstraints[name];
currentNumConstraints[name] = writingIndex+1;
Op *hostConstrOpPtr = nameHostConstrOp[name];
hostConstrOpPtr[writingIndex] = filter->constraints[i].op;
if(nameType[name]==INT) {
IntCudaConstraint *hostConstrValPtr = (IntCudaConstraint *)nameHostConstrVal[name];
hostConstrValPtr[writingIndex].value = filter->constraints[i].value.intVal;
} else {
StringCudaConstraint *hostConstrValPtr = (StringCudaConstraint *)nameHostConstrVal[name];
memcpy(hostConstrValPtr[writingIndex].value, filter->constraints[i].value.stringVal, STRING_VAL_LEN);
}
int *hostFilterIdxPtr = nameHostFilterIdx[name];
hostFilterIdxPtr[writingIndex] = filterId;
}
hostFiltersInfo[filterId].numConstraints = filter->numConstraints;
hostFiltersInfo[filterId].interface = interfaceId;
filterId++;
}
}
// copy the host-side mirrors to the device and free them
for (map<string_t, void *>::iterator it=nameHostConstrVal.begin(); it!=nameHostConstrVal.end(); ++it) {
string_t name = it->first;
void *host = it->second;
void *device = nameDeviceConstrVal[name];
int size = numConstraints[name];
if(nameType[name]==INT) {
e += cudaMemcpy(device, host, sizeof(IntCudaConstraint)*size, cudaMemcpyHostToDevice);
} else {
e += cudaMemcpy(device, host, sizeof(StringCudaConstraint)*size, cudaMemcpyHostToDevice);
}
cudaDeviceSynchronize();
free(host);
}
for (map<string_t, Op *>::iterator it=nameHostConstrOp.begin(); it!=nameHostConstrOp.end(); ++it) {
string_t name = it->first;
Op *host = it->second;
Op *device = nameDeviceConstrOp[name];
int size = numConstraints[name];
e += cudaMemcpy(device, host, sizeof(Op)*size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
free(host);
}
for (map<string_t, int *>::iterator it=nameHostFilterIdx.begin(); it!=nameHostFilterIdx.end(); ++it) {
string_t name = it->first;
int *host = it->second;
int *device = nameDeviceFilterIdx[name];
int size = numConstraints[name];
e += cudaMemcpy(device, host, sizeof(int)*size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
free(host);
}
e += cudaMemcpy(filtersInfo, hostFiltersInfo, (size_t) sizeof(FilterInfo)*numFilters, cudaMemcpyHostToDevice);
cudaMemset(currentFiltersCount, 0, (size_t) sizeof(unsigned char)*numFilters);
cudaMemset(interfacesDevice, 0, (size_t) sizeof(unsigned char)*numInterfaces);
cudaDeviceSynchronize();
consolidated = true;
if (e>0) {
cerr << " Memcpy error " << e << " during consolidation " << endl;
exit(1);
}
free(hostFiltersInfo);
// set up the runtime to optimize performance
//cudaFuncSetCacheConfig(evalConstraint, cudaFuncCachePreferL1);
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
int totConstr=0;
for(map<string_t,int>::iterator it=numConstraints.begin(); it!=numConstraints.end(); ++it) {
totConstr+=it->second;
}
cout << endl << " ### " << totConstr << " constraints allocated ### " << endl;
cout << endl << " ### " << allocSize << " bytes allocated on device ### " << endl;
cout << endl << "#####################" << endl;
}
// Reports the accumulated timing statistics through the output parameters:
// host-to-device copy time, kernel execution time, device-to-host copy time.
void CudaKernels::getStats(double &hToD, double &exec, double &dToH) {
    dToH = deviceToHostCopyTime;
    exec = execTime;
    hToD = hostToDeviceCopyTime;
}
// Matches one message against all registered filters and fills
// outbox->outgoingInterfaces with the interfaces whose filters matched.
// Two compile-time variants: STATS==1 additionally accumulates copy/exec
// timings (see getStats); STATS==0 is the bare pipeline.
#if STATS==1
void CudaKernels::processMessage(CudaOutbox *outbox) {
Timer t;
t.start();
int maxConstr = copyMsgToDevice(outbox->message);
//cudaDeviceSynchronize(); // TODO: remove
hostToDeviceCopyTime += t.stop();
// maxConstr==0 means no attribute of the message is known to any filter
if (maxConstr>0) {
t.start();
computeResults(maxConstr);
//cudaDeviceSynchronize(); // TODO: remove
execTime += t.stop();
t.start();
getMatchingInterfaces(outbox->outgoingInterfaces);
//cudaDeviceSynchronize(); // TODO: remove
deviceToHostCopyTime += t.stop();
}
}
#elif STATS==0
void CudaKernels::processMessage(CudaOutbox *outbox) {
int maxConstr = copyMsgToDevice(outbox->message);
if (maxConstr>0) {
computeResults(maxConstr);
getMatchingInterfaces(outbox->outgoingInterfaces);
}
}
#endif
// Stages one message into the pinned hostInput buffer (one CudaInputElem per
// attribute, pointing at the device-resident constraint arrays built by
// consolidate()) and uploads it to constant memory.
// Returns the largest per-attribute constraint count (0 => nothing to match).
// Exits the process if the message references an unknown attribute name.
// Fix: the error-print loops reused 'i', shadowing the outer loop index;
// they now use a distinct variable.
int CudaKernels::copyMsgToDevice(CudaMessage *message) {
    int dest = 0;
    int maxConstr = 0;
    for (int i=0; i<message->numAttributes; i++) {
        string_t name = message->attributes[i].name;
        map<string_t, void *>::iterator it = nameDeviceConstrVal.find(name);
        if(it==nameDeviceConstrVal.end()) {
            cerr << "Name: ";
            for(int c=0; c<name.length(); c++) cerr << name[c];
            cerr << " not found during message processing" << endl;
            exit(1);
        }
        hostInput[dest].constrVal = it->second;
        map<string_t, Op *>::iterator it1 = nameDeviceConstrOp.find(name);
        if(it1==nameDeviceConstrOp.end()) {
            cerr << "Name: ";
            for(int c=0; c<name.length(); c++) cerr << name[c];
            cerr << " not found during message processing" << endl;
            exit(1);
        }
        hostInput[dest].constrOp = it1->second;
        map<string_t, int *>::iterator it2 = nameDeviceFilterIdx.find(name);
        if(it2==nameDeviceFilterIdx.end()) {
            cerr << "Name: ";
            for(int c=0; c<name.length(); c++) cerr << name[c];
            cerr << " not found during message processing" << endl;
            exit(1);
        }
        hostInput[dest].filterIdx = it2->second;
        hostInput[dest].numConstraints = numConstraints[name];
        if (hostInput[dest].numConstraints>maxConstr) maxConstr = hostInput[dest].numConstraints;
        hostInput[dest].value = message->attributes[i].value;
        dest++;
    }
    numValues = dest;
    if (dest>0) {
        // upload only the attributes actually present in this message
        int e = 0;
        e += cudaMemcpyToSymbolAsync(constInput, hostInput, (size_t) sizeof(CudaInputElem)*numValues);
        if (e>0) {
            cerr << " Memcpy error " << e << " during message processing " << endl;
            exit(1);
        }
    }
    return maxConstr;
}
// Evaluates all constraints of the staged message (one evalConstraint launch
// per attribute, one thread per constraint), then runs summarize to turn
// per-filter counters into per-interface match flags.
// Fix: the summarize grid used to be numFilters/2048, which truncates to
// 0 blocks whenever numFilters < 2048 — an invalid launch configuration.
// summarize uses a grid-stride loop, so we keep the reduced block count but
// clamp it to at least one block.
void CudaKernels::computeResults(int maxConstr) {
    for(int i=0; i<numValues; i++) {
        evalConstraint<<<hostInput[i].numConstraints/NUM_THREADS+1, NUM_THREADS>>>(currentFiltersCount, filtersInfo, interfacesDevice, numFilters, numInterfaces, i);
    }
    int numBlocks = numFilters/2048;
    if (numBlocks < 1) numBlocks = 1;
    summarize<<<numBlocks, NUM_THREADS>>>(currentFiltersCount, filtersInfo, interfacesDevice, numFilters, numInterfaces);
}
// Copies the interface match flags back to (pinned) host memory, kicks off the
// asynchronous reset of the device-side counters/flags for the next message,
// and inserts the index of every matched interface into 'results'.
void CudaKernels::getMatchingInterfaces(set<int> &results) {
    const size_t ifaceBytes = (size_t) sizeof(unsigned char)*numInterfaces;
    int e = cudaMemcpyAsync(interfacesHost, interfacesDevice, ifaceBytes, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // reset device state while the host scans the flags below
    cudaMemsetAsync(currentFiltersCount, 0, (size_t) sizeof(unsigned char)*numFilters);
    cudaMemsetAsync(interfacesDevice, 0, ifaceBytes);
    //cleanCounters<<<numFilters/2048, NUM_THREADS>>>(currentFiltersCount, interfacesDevice, numFilters, numInterfaces);
    if (e>0) {
        cerr << " Memcpy error " << e << " while copying matching interfaces " << endl;
        exit(1);
    }
    for (int i=0; i<numInterfaces; i++) {
        if (interfacesHost[i]!=0) results.insert(i);
    }
}
|
ec5dad281482d3eacbeae034decb6e49b64f4b91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This assignment requires two program variants for the GPU: 1) the simplest and shortest possible; and 2) a fast one that uses shared memory.
Generate random input data for the algorithm and check the program's correctness automatically.
Give a theoretical performance estimate for both variants of the algorithm; report how much the theoretical estimate differs from the measured one. */
/* Implement multiplication of a long matrix, stored by columns, by a long vector */
#include <iostream>
#define N 16 //shortest dimension of A: 32
#define M 2*(102400*8) // 1
// Theoretical performance estimate of the parallel variant: 4*819200*32/18000,000,000 (1,8e10 = DeviceToDevice*2 to GB)
using namespace std;
// Error-checking wrapper for HIP runtime calls: prints the error string with
// file/line and terminates the process on any failure.
#define CHECK(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
cout<< "Error:" << hipGetErrorString(_m_cudaStat) \
<< " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
exit(1); \
} }
// Naive matrix-vector product: one thread per output row.
// A is N x M stored by columns (element (row, col) at A[row + col*N]),
// B holds M entries, C receives the N-entry result.
__global__ void Multiply(int *A, int *B, int *C){
    int row = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= N)
        return;
    int acc = 0;
    // dot product of row 'row' of A with B
    for (int col = 0; col < M; ++col)
        acc += A[row + col*N] * B[col];
    C[row] = acc;
}
// Shared-memory variant: one block per output row (blockIdx.x is the row).
// Each thread accumulates a private partial sum over a strided slice of the
// dot product, then the block reduces via atomicAdd into one shared counter.
// NOTE(review): the loop bound M/blockDim.x assumes M is a multiple of
// blockDim.x, and the launch must use exactly N blocks — confirm at call site.
__global__ void Multiply_smart_string(int *A, int *B, int *C){
int col = blockIdx.x*blockDim.x + threadIdx.x;
if (col >= M)
return;
int dev_private = 0;
__shared__ int dev_shared;
for (int j = 0; j < M/blockDim.x; ++j)
{
// A is column-major: element (row=blockIdx.x, col=j*blockDim.x+threadIdx.x)
int addition = A[(j*blockDim.x+threadIdx.x)*N+blockIdx.x] * B[j*blockDim.x+threadIdx.x];
dev_private += addition;
}
// zero the shared accumulator, then reduce all private sums into it
if (threadIdx.x == 0)
dev_shared = 0;
__syncthreads();
atomicAdd(&dev_shared, dev_private);
__syncthreads();
if (threadIdx.x == 0)
C[blockIdx.x] = dev_shared;
}
// Column-walking variant: consecutive threads cover consecutive elements of a
// column-major A, so global loads are coalesced. Each block keeps N partial
// row sums in shared memory; block results are merged into C with atomicAdd,
// so C must be zeroed before the launch (the host does cudaMemset/hipMemset).
// NOTE(review): assumes blockDim.x is a multiple of N and M is a multiple of
// (total threads / N) — confirm against the launch configuration.
__global__ void Multiply_smart_column(int *A, int *B, int *C){
int global_id = blockIdx.x*blockDim.x + threadIdx.x;
int global_trd_cnt = blockDim.x*gridDim.x;
__shared__ int dev_shared_res[N];
int addition = 0;
// first N threads of the block zero the shared row accumulators
if (threadIdx.x < N)
dev_shared_res[threadIdx.x] = 0;
for (int j = 0; j < M/(global_trd_cnt/N); ++j)
{
// walk the flattened column-major array with a grid-wide stride
int super_global_id = global_id + j*global_trd_cnt;
int row = super_global_id % N;
int col = super_global_id / N;
addition += A[col*N + row] * B[col];
}
__syncthreads();
// threads with the same (threadIdx.x % N) contribute to the same row sum
atomicAdd(&dev_shared_res[threadIdx.x % N], addition);
__syncthreads();
if (threadIdx.x < N)
atomicAdd(&C[threadIdx.x], dev_shared_res[threadIdx.x]);
}
// Driver: fills an N x M column-major matrix and an M-vector with random
// digits, computes the product on the CPU (reference) and on the GPU, times
// both, and compares the results element-wise.
// NOTE(review): the host arrays are never delete[]'d and the HIP events are
// never destroyed — harmless at process exit, but worth cleaning up.
int main(int argc, char **argv)
{
srand(time(NULL));
int *A = new int [N*M];
int *b = new int [M];
int *res_CPU = new int[N];
int *res_GPU = new int[N];
int i, j;
for(i = 0; i < N; ++i)
res_CPU[i] = 0;
// random column-major matrix: element (i, j) lives at A[i + j*N]
for(i = 0; i < N; ++i)
{
for(j = 0; j < M; ++j)
{
A[i + j*N] = rand() % 10; // % 3 - 1; //1;
//cout << A[i*N + j] << " ";
}
//cout << endl;
}
//cout << endl;
for(i = 0; i < M; ++i)
{
b[i] = rand() % 10; // % 3 - 1; //1;
//cout << b[i] << " ";
}
//cout << endl;
// shared memory: t = 0..32 - warp
// CPU reference product, timed with clock()
clock_t startCPU = clock();
for(i = 0; i < N; ++i)
{
for(j = 0; j < M; ++j)
res_CPU[i] += A[i + j*N]*b[j];
//cout << "Res_CPU[" << i << "] = " << res_CPU[i] << " " << endl;
}
double elapsedTimeCPU = (double)(clock()-startCPU)/CLOCKS_PER_SEC;
cout << "CPU product time = " << elapsedTimeCPU*1000 << " ms\n";
int (*aA), (*aB), (*aRes);
hipEvent_t startCUDA, stopCUDA;
float elapsedTimeCUDA;
hipEventCreate(&startCUDA);
hipEventCreate(&stopCUDA);
CHECK(hipMalloc((void**)&aA, (N*M)*sizeof(int)));
CHECK(hipMalloc((void**)&aB, (M)*sizeof(int)));
CHECK(hipMalloc((void**)&aRes, (N)*sizeof(int)));
CHECK(hipMemcpy(aA, A, (N*M)*sizeof(int), hipMemcpyHostToDevice));
CHECK(hipMemcpy(aB, b, (M)*sizeof(int), hipMemcpyHostToDevice));
// aRes must start zeroed: Multiply_smart_column accumulates with atomicAdd
CHECK(hipMemset(aRes, 0, (N)*sizeof(int)));
//int numBlocks = 1;
//dim3 threadsPerBlock(N,N);
hipEventRecord(startCUDA,0);
//Multiply<<<(N+511)/512, 512>>>(aA,aB,aRes);
//Multiply_smart_string<<<N, 512>>>(aA,aB,aRes);
hipLaunchKernelGGL(( Multiply_smart_column), dim3(8), dim3(1024), 0, 0, aA,aB,aRes); //N*M/1024
hipEventRecord(stopCUDA,0);
hipEventSynchronize(stopCUDA);
CHECK(hipGetLastError());
CHECK(hipMemcpy(res_GPU, aRes, N*sizeof(int), hipMemcpyDeviceToHost));
hipEventElapsedTime(&elapsedTimeCUDA, startCUDA, stopCUDA);
cout << "CUDA product time = " << elapsedTimeCUDA << " ms\n";
// NOTE(review): this formula counts only 3*N floats; the kernel actually
// streams ~N*M matrix elements, so the reported Gb/s looks far too low — verify.
cout << "CUDA memory throughput = " << 3*N*sizeof(float)/elapsedTimeCUDA/1024/1024/1.024 << " Gb/s\n";
for (i = 0; i < N; i++) {
//cout << "Res_GPU[" << i << "] = " << res_GPU[i] << " " << endl;
}
// element-wise comparison of GPU result against the CPU reference
for (i = 0; i < N; i++) {
if (res_CPU[i] != res_GPU[i])
{
cout << "Not equal. Try again, again." << endl;
break;
}
}
CHECK(hipFree(aA));
CHECK(hipFree(aB));
CHECK(hipFree(aRes));
return 0;
}
| ec5dad281482d3eacbeae034decb6e49b64f4b91.cu | /*В данном задании требуется представить 2 варианта программы для видеокарты: 1) максимально простой и короткий; и 2) быстрый, использующий разделяемую память.
Запрограммируйте генерацию случайных входных данных для алгоритма и автоматическую проверку корректности работы программы.
Выполните теоретическую оценку производительности обоих вариантов алгоритма. Укажите в отчете, насколько теоретическая оценка отличается от практической. */
/*Реализуйте умножение длинной матрицы, хранящейся по столбцам, на длинный вектор*/
#include <iostream>
#define N 16 //shortest dimension of A: 32
#define M 2*(102400*8) // 1
//Теоретическая оценка производительности параллельного варианта: 4*819200*32/18000,000,000 (1,8e10 = DeviceToDevice*2 to GB)
using namespace std;
// Error-checking wrapper for CUDA runtime calls: prints the error string with
// file/line and terminates the process on any failure.
#define CHECK(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
cout<< "Error:" << cudaGetErrorString(_m_cudaStat) \
<< " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
exit(1); \
} }
// Naive matrix-vector product: one thread per output row.
// A is N x M stored by columns (element (row, col) at A[row + col*N]),
// B holds M entries, C receives the N-entry result.
__global__ void Multiply(int *A, int *B, int *C){
    int row = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= N)
        return;
    int acc = 0;
    // dot product of row 'row' of A with B
    for (int col = 0; col < M; ++col)
        acc += A[row + col*N] * B[col];
    C[row] = acc;
}
// Shared-memory variant: one block per output row (blockIdx.x is the row).
// Each thread accumulates a private partial sum over a strided slice of the
// dot product, then the block reduces via atomicAdd into one shared counter.
// NOTE(review): the loop bound M/blockDim.x assumes M is a multiple of
// blockDim.x, and the launch must use exactly N blocks — confirm at call site.
__global__ void Multiply_smart_string(int *A, int *B, int *C){
int col = blockIdx.x*blockDim.x + threadIdx.x;
if (col >= M)
return;
int dev_private = 0;
__shared__ int dev_shared;
for (int j = 0; j < M/blockDim.x; ++j)
{
// A is column-major: element (row=blockIdx.x, col=j*blockDim.x+threadIdx.x)
int addition = A[(j*blockDim.x+threadIdx.x)*N+blockIdx.x] * B[j*blockDim.x+threadIdx.x];
dev_private += addition;
}
// zero the shared accumulator, then reduce all private sums into it
if (threadIdx.x == 0)
dev_shared = 0;
__syncthreads();
atomicAdd(&dev_shared, dev_private);
__syncthreads();
if (threadIdx.x == 0)
C[blockIdx.x] = dev_shared;
}
// Column-walking variant: consecutive threads cover consecutive elements of a
// column-major A, so global loads are coalesced. Each block keeps N partial
// row sums in shared memory; block results are merged into C with atomicAdd,
// so C must be zeroed before the launch (the host does cudaMemset).
// NOTE(review): assumes blockDim.x is a multiple of N and M is a multiple of
// (total threads / N) — confirm against the launch configuration.
__global__ void Multiply_smart_column(int *A, int *B, int *C){
int global_id = blockIdx.x*blockDim.x + threadIdx.x;
int global_trd_cnt = blockDim.x*gridDim.x;
__shared__ int dev_shared_res[N];
int addition = 0;
// first N threads of the block zero the shared row accumulators
if (threadIdx.x < N)
dev_shared_res[threadIdx.x] = 0;
for (int j = 0; j < M/(global_trd_cnt/N); ++j)
{
// walk the flattened column-major array with a grid-wide stride
int super_global_id = global_id + j*global_trd_cnt;
int row = super_global_id % N;
int col = super_global_id / N;
addition += A[col*N + row] * B[col];
}
__syncthreads();
// threads with the same (threadIdx.x % N) contribute to the same row sum
atomicAdd(&dev_shared_res[threadIdx.x % N], addition);
__syncthreads();
if (threadIdx.x < N)
atomicAdd(&C[threadIdx.x], dev_shared_res[threadIdx.x]);
}
// Driver: fills an N x M column-major matrix and an M-vector with random
// digits, computes the product on the CPU (reference) and on the GPU, times
// both, and compares the results element-wise.
// NOTE(review): the host arrays are never delete[]'d and the CUDA events are
// never destroyed — harmless at process exit, but worth cleaning up.
int main(int argc, char **argv)
{
srand(time(NULL));
int *A = new int [N*M];
int *b = new int [M];
int *res_CPU = new int[N];
int *res_GPU = new int[N];
int i, j;
for(i = 0; i < N; ++i)
res_CPU[i] = 0;
// random column-major matrix: element (i, j) lives at A[i + j*N]
for(i = 0; i < N; ++i)
{
for(j = 0; j < M; ++j)
{
A[i + j*N] = rand() % 10; // % 3 - 1; //1;
//cout << A[i*N + j] << " ";
}
//cout << endl;
}
//cout << endl;
for(i = 0; i < M; ++i)
{
b[i] = rand() % 10; // % 3 - 1; //1;
//cout << b[i] << " ";
}
//cout << endl;
// shared memory: t = 0..32 - warp
// CPU reference product, timed with clock()
clock_t startCPU = clock();
for(i = 0; i < N; ++i)
{
for(j = 0; j < M; ++j)
res_CPU[i] += A[i + j*N]*b[j];
//cout << "Res_CPU[" << i << "] = " << res_CPU[i] << " " << endl;
}
double elapsedTimeCPU = (double)(clock()-startCPU)/CLOCKS_PER_SEC;
cout << "CPU product time = " << elapsedTimeCPU*1000 << " ms\n";
int (*aA), (*aB), (*aRes);
cudaEvent_t startCUDA, stopCUDA;
float elapsedTimeCUDA;
cudaEventCreate(&startCUDA);
cudaEventCreate(&stopCUDA);
CHECK(cudaMalloc((void**)&aA, (N*M)*sizeof(int)));
CHECK(cudaMalloc((void**)&aB, (M)*sizeof(int)));
CHECK(cudaMalloc((void**)&aRes, (N)*sizeof(int)));
CHECK(cudaMemcpy(aA, A, (N*M)*sizeof(int), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(aB, b, (M)*sizeof(int), cudaMemcpyHostToDevice));
// aRes must start zeroed: Multiply_smart_column accumulates with atomicAdd
CHECK(cudaMemset(aRes, 0, (N)*sizeof(int)));
//int numBlocks = 1;
//dim3 threadsPerBlock(N,N);
cudaEventRecord(startCUDA,0);
//Multiply<<<(N+511)/512, 512>>>(aA,aB,aRes);
//Multiply_smart_string<<<N, 512>>>(aA,aB,aRes);
Multiply_smart_column<<<8, 1024>>>(aA,aB,aRes); //N*M/1024
cudaEventRecord(stopCUDA,0);
cudaEventSynchronize(stopCUDA);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(res_GPU, aRes, N*sizeof(int), cudaMemcpyDeviceToHost));
cudaEventElapsedTime(&elapsedTimeCUDA, startCUDA, stopCUDA);
cout << "CUDA product time = " << elapsedTimeCUDA << " ms\n";
// NOTE(review): this formula counts only 3*N floats; the kernel actually
// streams ~N*M matrix elements, so the reported Gb/s looks far too low — verify.
cout << "CUDA memory throughput = " << 3*N*sizeof(float)/elapsedTimeCUDA/1024/1024/1.024 << " Gb/s\n";
for (i = 0; i < N; i++) {
//cout << "Res_GPU[" << i << "] = " << res_GPU[i] << " " << endl;
}
// element-wise comparison of GPU result against the CPU reference
for (i = 0; i < N; i++) {
if (res_CPU[i] != res_GPU[i])
{
cout << "Not equal. Try again, again." << endl;
break;
}
}
CHECK(cudaFree(aA));
CHECK(cudaFree(aB));
CHECK(cudaFree(aRes));
return 0;
}
|
cc81367300da240ce8cd7b415344057d8ec2dcb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2011
@author Azzam Haidar
@author Tingxing Dong
@generated from zgetf2_kernels.cu normal z -> c, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#include "magmablas.h"
#include "batched_kernel_param.h"
#include "magma_templates.h"
#define PRECISION_c
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ magmaFloatComplex shared_data[];
extern __shared__ float sdata[];
extern __shared__ int int_sdata[];
/*
routines in this file are used by cgetf2_batched.cu
*/
//////////////////////////////////////////////////////////////////////////////////////////
// Block-wide ICAMAX: finds the index of the element maximizing |Re|+|Im| among
// 'length' entries of x (stride incx). shared_x/shared_idx must provide zamax
// slots each. Phase 1: each of the first zamax threads scans its strided chunk;
// phase 2: a shared-memory argmax reduction. All threads return the winning
// index (shared_idx[0]).
__device__ int
icamax_devfunc(int length, const magmaFloatComplex *x, int incx, float *shared_x, int *shared_idx)
{
int tx = threadIdx.x;
magmaFloatComplex res;
float res1;
int nchunk = (length-1)/zamax + 1;
if( tx < zamax ){
shared_x[tx] = 0.0;
shared_idx[tx] = tx;//-1;// -1 will crash the code in case matrix is singular, better is to put =tx and make check info at output
}
__syncthreads();
// each participating thread keeps the best candidate of its chunk
for(int s =0 ; s < nchunk; s++)
{
if( (tx + s * zamax < length) && (tx < zamax) )
{
res = x[(tx + s * zamax) * incx];
res1 = fabs(MAGMA_C_REAL(res)) + fabs(MAGMA_C_IMAG(res));
if( res1 > shared_x[tx] )
{
shared_x[tx] = res1;
shared_idx[tx] = tx + s * zamax;
}
}
__syncthreads();
}
if(length >= zamax) // there are more than 128 threads working ==> all shared_x shared_idx are initialized here so I can call the fixed getidmax
magma_getidmax<zamax>(tx, shared_x, shared_idx);
else
magma_getidmax_n(min(zamax,length), tx, shared_x, shared_idx);
return shared_idx[0];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// One block per matrix in the batch (blockIdx.z). Runs icamax_devfunc on the
// pivot column starting at A(step, step) and records the 1-based (Fortran)
// pivot row in ipiv[step]; a zero pivot marks the matrix singular in
// info_array. Requires dynamic shared memory: zamax floats + zamax ints.
// NOTE(review): parameter 'chunk' is unused here — presumably kept for
// signature compatibility; confirm.
__global__ void
icamax_kernel_batched(int length, int chunk, magmaFloatComplex **x_array, int incx,
int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep)
{
magmaFloatComplex *x_start = x_array[blockIdx.z];
const magmaFloatComplex *x = &(x_start[step + step * lda]);
magma_int_t *ipiv = ipiv_array[blockIdx.z];
int tx = threadIdx.x;
// carve the dynamic shared memory into the float and index halves
float *shared_x = sdata;
int *shared_idx = (int*)(shared_x + zamax);
icamax_devfunc(length, x, incx, shared_x, shared_idx);
if(tx == 0){
ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing
if(shared_x[0] == MAGMA_D_ZERO){
// pivot magnitude is exactly zero => singular at this column
info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// First level of the two-level (tree) ICAMAX: gridDim.x blocks each reduce a
// zamax-sized slice of the pivot column. With a single block the final pivot
// is written directly; otherwise each block deposits its local max/index into
// the per-matrix pools for the second-level kernel.
__global__ void
tree_icamax_kernel_batched(int length, magmaFloatComplex **x_array, int incx,
int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep,
float** data_pool_array, magma_int_t** id_pool_array)
{
magmaFloatComplex *x_start = x_array[blockIdx.z];
const magmaFloatComplex *x = &(x_start[step + step * lda]);
float *data_pool = data_pool_array[blockIdx.z];
magma_int_t *id_pool = id_pool_array[blockIdx.z];
magma_int_t *ipiv = ipiv_array[blockIdx.z];
int tx = threadIdx.x;
int local_max_id;
__shared__ float shared_x[zamax];
__shared__ int shared_idx[zamax];
// advance to this block's slice of the column
x += zamax * blockIdx.x * incx;
icamax_devfunc(min(zamax, length-blockIdx.x * zamax), x, incx, shared_x, shared_idx);
if(tx ==0)
{
local_max_id = shared_idx[0] + zamax * blockIdx.x; // add the offset
if(gridDim.x == 1)
{
ipiv[step] = local_max_id + step + 1; // Fortran Indexing
if(shared_x[0] == MAGMA_D_ZERO)
info_array[blockIdx.z] = local_max_id + step + gbstep + 1;
}
else
{
// put each thread block local max and its index in workspace
data_pool[blockIdx.x] = shared_x[0];
id_pool[blockIdx.x] = local_max_id;
}
}
}
// Second level of the tree ICAMAX: one block per matrix reduces the n
// first-level candidates (one per first-level block) to the final pivot,
// writes the 1-based pivot into ipiv[step], and flags singularity in
// info_array when the winning magnitude is zero. Requires n <= zamax.
__global__ void
tree_icamax_kernel2_batched(int n, int step, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep, float** data_pool_array, magma_int_t** id_pool_array)
{
__shared__ float shared_x[zamax];
__shared__ int shared_idx[zamax];
magma_int_t *ipiv = ipiv_array[blockIdx.z];
float *data_pool = data_pool_array[blockIdx.z];
magma_int_t *id_pool = id_pool_array[blockIdx.z];
int tx = threadIdx.x;
//read data
if( tx < n)
{
shared_x[tx] = data_pool[tx];
shared_idx[tx] = id_pool[tx];
}
else
{
// pad unused slots so the fixed-size reduction is safe
shared_x[tx] = 0.0;
shared_idx[tx] = -2;
}
__syncthreads();
// compute local result inside each thread block
magma_getidmax<zamax>(tx, shared_x, shared_idx);
if(tx == 0 )
{
ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing
if(shared_x[0] == MAGMA_D_ZERO)
info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1;
}
}
// Host driver for the two-level ICAMAX over long pivot columns: allocates
// per-matrix workspace pools, launches the first-level reduction (one block
// per zamax-sized slice) and, when needed, the second-level reduction.
// Returns 0 on success, 1 for a negative incx.
// NOTE(review): the magma_*malloc calls are unchecked, and when
// num_blocks > zamax the function only prints a message, launches nothing,
// and still returns 0 — callers cannot detect that failure; confirm intended.
magma_int_t magma_icamax_lg_batched(magma_int_t length, magmaFloatComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t lda,
magma_int_t** ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
if(length == 1) return 0;
if(incx < 0) return 1;
float* data_pool;
magma_int_t* id_pool;
float** data_pool_array = NULL;
magma_int_t** id_pool_array = NULL;
magma_int_t num_blocks = (length-1)/(zamax) + 1;
// creat pools(data and index) to store the result of each thread blocks
magma_smalloc(&data_pool, num_blocks * batchCount);
magma_imalloc(&id_pool, num_blocks * batchCount);
magma_malloc((void**)&data_pool_array, batchCount * sizeof(*data_pool_array));
magma_malloc((void**)&id_pool_array, batchCount * sizeof(*id_pool_array));
#if defined(PRECISION_z) || defined(PRECISION_d)
dset_pointer(data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue);
#else
sset_pointer(data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue);
#endif
set_ipointer(id_pool_array, id_pool, 1, 0, 0, num_blocks, batchCount, queue);
if( num_blocks > zamax)
{
printf("length(=%d), num_blocks(=%d) is too big > zamax(=%d), the second layer reduction can not be launched, Plz incread zamax \n", length, num_blocks, zamax);
}
else
{
// first level tree reduction
dim3 grid(num_blocks, 1, batchCount);
hipLaunchKernelGGL(( tree_icamax_kernel_batched), dim3(grid), dim3(zamax), 0, queue, length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array);
if( num_blocks > 1)
{
// second level tree reduction
dim3 grid2(1, 1, batchCount);
hipLaunchKernelGGL(( tree_icamax_kernel2_batched), dim3(grid2), dim3(zamax), 0, queue, num_blocks, step, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array);
}
}
magma_free(data_pool);
magma_free(id_pool);
magma_free(data_pool_array);
magma_free(id_pool_array);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Public entry point for the batched pivot search: picks the pivot row of
// column 'step' for every matrix in the batch and records it in ipiv_array
// (Fortran 1-based) / info_array. Currently always uses the single-kernel
// chunked variant (the '#if 1'); the tree variant is compiled out.
// NOTE(review): the dead #else branch calls magma_icamax_lg_batched without
// the trailing 'queue' argument — it would not compile if re-enabled.
extern "C"
magma_int_t magma_icamax_batched(magma_int_t length,
magmaFloatComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t lda,
magma_int_t** ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
if(length == 0 ) return 0;
#if 1
dim3 grid(1, 1, batchCount);
int chunk = (length-1)/zamax + 1;
// dynamic shared memory: zamax floats + zamax ints for the argmax reduction
hipLaunchKernelGGL(( icamax_kernel_batched), dim3(grid), dim3(zamax), zamax * (sizeof(float) + sizeof(int)), queue ,
length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep);
#else
// the magma_icamax_lg_batched is faster but when cuda launch it as 2 kernels the white space time between these 2 kernels and the next kernel is larger than using the icamax_kernel for that today we are using only icamax_kernel
if( length <= 10 * zamax )
{
dim3 grid(1, 1, batchCount);
int chunk = (length-1)/zamax + 1;
hipLaunchKernelGGL(( icamax_kernel_batched), dim3(grid), dim3(zamax), zamax * (sizeof(float) + sizeof(magma_int_t)), queue ,
length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep);
}
else
{
magma_icamax_lg_batched(length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, batchCount);
}
#endif
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Row interchange for one matrix per block (blockIdx.z): swaps row 'step' with
// the pivot row ipiv[step]-1 (ipiv is Fortran 1-based) across n columns,
// one thread per column (column stride incx). No-op when the pivot is the
// diagonal row itself.
__global__
void cswap_kernel_batched(magma_int_t n, magmaFloatComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t** ipiv_array)
{
magmaFloatComplex *x = x_array[blockIdx.z];
magma_int_t *ipiv = ipiv_array[blockIdx.z];
// one thread reads the pivot index, the barrier broadcasts it to the block
__shared__ int jp;
if(threadIdx.x == 0)
{
jp = ipiv[step] - 1;
//if(blockIdx.z == 1) printf("jp=%d", jp);
}
__syncthreads();
if(jp == step) return; // no pivot
int id = threadIdx.x;
if (id < n) {
magmaFloatComplex tmp = x[jp + incx*id];
x[jp + incx*id] = x[step + incx*id];
x[step + incx*id] = tmp;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Swaps row (ipiv[step]-1) with row 'step' in every matrix of the batch.
// n is the number of columns touched; the launch uses one thread per column
// and one block per matrix, so n is limited to MAX_NTHREADS.
// Returns 0 on success, -15 when n exceeds the supported block size.
extern "C"
magma_int_t magma_cswap_batched(magma_int_t n, magmaFloatComplex **x_array, magma_int_t incx, magma_int_t step,
magma_int_t** ipiv_array, magma_int_t batchCount, magma_queue_t queue)
{
    if (n > MAX_NTHREADS) {
        printf("magma_cswap_batched nb=%d, > %d, not supported \n",n, MAX_NTHREADS);
        return -15;
    }
    dim3 grid(1,1, batchCount);
    hipLaunchKernelGGL(( cswap_kernel_batched), dim3(grid), dim3(n), 0, queue , n, x_array, incx, step, ipiv_array);
    return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
// Fused CSCAL + CGERU panel update for one matrix per blockIdx.z, operating on
// the trailing submatrix anchored at A(step, step):
//   column 0 (below the diagonal) is scaled by 1/A(0,0), then each remaining
//   column i gets A(:,i) -= A(0,i) * scaled_column0.
// The pivot row A(0, 0:n-1) is staged in dynamic shared memory (n elements).
// A zero pivot marks the matrix singular in info_array and aborts the update.
__global__
void cscal_cgeru_kernel_batched(int m, int n, int step, magmaFloatComplex **dA_array, int lda, magma_int_t *info_array, int gbstep)
{
// checkinfo to avoid computation of the singular matrix
if(info_array[blockIdx.z] != 0 ) return;
magmaFloatComplex *A_start = dA_array[blockIdx.z];
magmaFloatComplex *A = &(A_start[step + step * lda]);
magmaFloatComplex *shared_y = shared_data;
int tx = threadIdx.x;
int gbidx = blockIdx.x*MAX_NTHREADS + threadIdx.x;
// stage the pivot row (first row of the panel) into shared memory
if (tx < n) {
shared_y[tx] = A[lda * tx];
}
__syncthreads();
if(shared_y[0] == MAGMA_C_ZERO) {
info_array[blockIdx.z] = step + gbstep + 1;
return;
}
// rows 1..m-1: scale the pivot-column entry, then rank-1 update the row
if (gbidx < m && gbidx > 0) {
magmaFloatComplex reg = MAGMA_C_ZERO;
reg = A[gbidx];
reg *= MAGMA_C_DIV(MAGMA_C_ONE, shared_y[0]);
A[gbidx] = reg;
#pragma unroll
for(int i=1; i < n; i++) {
//A[gbidx + i*lda] = A[gbidx + i*lda] - shared_y[i] * reg;//cuda give wrong results with this one
//A[gbidx + i*lda] -= shared_y[i] * reg; //cuda give wrong results with this one
A[gbidx + i*lda] += (MAGMA_C_NEG_ONE) * shared_y[i] * reg;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Host driver for the fused CSCAL+CGERU panel update (see kernel above).
// Grid: ceil(m/MAX_NTHREADS) blocks per matrix, batchCount matrices along z;
// dynamic shared memory holds the n-element pivot row.
// Returns 0 on success, -15 when n exceeds the supported block width.
extern "C"
magma_int_t magma_cscal_cgeru_batched(magma_int_t m, magma_int_t n, magma_int_t step,
magmaFloatComplex **dA_array, magma_int_t lda,
magma_int_t *info_array, magma_int_t gbstep,
magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized kernel that merges the two kernels cscal and cgeru:
1) cscal the first column vector A(1:M-1,0) with 1/A(0,0);
2) perform a cgeru operation on the trailing matrix: A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y := A(0,1:N-1);
*/
if( n == 0) return 0;
if( n > MAX_NTHREADS)
{
printf("magma_cscal_cgeru_batched nb=%d, > %d, not supported \n",n, MAX_NTHREADS);
return -15;
}
int nchunk = (m-1)/MAX_NTHREADS + 1;
size_t shared_size = sizeof(magmaFloatComplex)*(n);
dim3 grid(nchunk, 1, batchCount);
hipLaunchKernelGGL(( cscal_cgeru_kernel_batched), dim3(grid), dim3(min(m, MAX_NTHREADS)), shared_size, queue, m, n, step, dA_array, lda, info_array, gbstep);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Non-blocked unit-lower-triangular solve for one matrix per blockIdx.z:
// B := A^-1 * B where A is the ib x ib unit-lower factor at (step, step) and
// B is the ib x n block to its right. Both A and B are staged in dynamic
// shared memory (ib*ib + ib*n elements) and B is written back afterwards.
// Launch with blockDim.x >= max(n, ib).
__global__
void cgetf2trsm_kernel_batched(int ib, int n, magmaFloatComplex **dA_array, int step, int lda)
{
/*
this kernel does the safe nonblocked TRSM operation
B = A^-1 * B
*/
magmaFloatComplex *A_start = dA_array[blockIdx.z];
magmaFloatComplex *A = &(A_start[step + step * lda]);
magmaFloatComplex *B = &(A_start[step + (step+ib) * lda]);
magmaFloatComplex *shared_a = shared_data;
magmaFloatComplex *shared_b = shared_data+ib*ib;
int tid = threadIdx.x;
int i,d;
// Read A and B at the same time to the shared memory (shared_a shared_b)
// note that shared_b = shared_a+ib*ib so its contiguous
// I can make it in one loop reading
if ( tid < ib) {
#pragma unroll
for( i=0; i < n+ib; i++) {
shared_a[tid + i*ib] = A[tid + i*lda];
}
}
__syncthreads();
// forward substitution: one thread per column of B, unit diagonal assumed
if (tid < n) {
#pragma unroll
for( d=0; d<ib-1; d++) {
for( i=d+1; i<ib; i++) {
shared_b[i+tid*ib] += (MAGMA_C_NEG_ONE) * shared_a[i+d*ib] * shared_b[d+tid*ib];
}
}
}
__syncthreads();
// write back B
if ( tid < ib) {
#pragma unroll
for( i=0; i < n; i++) {
B[tid + i*lda] = shared_b[tid + i*ib];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C" void
magma_cgetf2trsm_batched(magma_int_t ib, magma_int_t n, magmaFloatComplex **dA_array, magma_int_t step, magma_int_t lda,
magma_int_t batchCount, magma_queue_t queue)
{
/*
*/
if( n == 0 || ib == 0 ) return;
size_t shared_size = sizeof(magmaFloatComplex)*(ib*(ib+n));
// TODO TODO TODO
if( shared_size > (MAX_SHARED_ALLOWED*1024) ) // limit the shared memory to 46K leaving 2K for extra
{
printf("kernel_cgetf2trsm error out of shared memory \n");
return;
}
dim3 grid(1, 1, batchCount);
hipLaunchKernelGGL(( cgetf2trsm_kernel_batched), dim3(grid), dim3(max(n,ib)), shared_size, queue, ib, n, dA_array, step, lda);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
zupdate_device(int m, int step, magmaFloatComplex* x, int ldx, magmaFloatComplex *A, int lda)
{
int tid = threadIdx.x;
int nchunk = (m-1)/MAX_NTHREADS + 1;
int indx;
//magmaFloatComplex reg = MAGMA_C_ZERO;
// update the current column by all the previous one
#pragma unroll
for(int i=0; i < step; i++) {
for(int s=0 ; s < nchunk; s++)
{
indx = tid + s * MAX_NTHREADS;
if ( indx > i && indx < m ) {
A[indx] -= A[i] * x[indx + i*ldx];
//printf(" @ step %d tid %d updating x[tid]*y[i]=A %5.3f %5.3f = %5.3f at i %d \n", step, tid, x[tid + i*ldx], A[i], A[tid],i);
}
}
__syncthreads();
}
//printf(" @ step %d tid %d adding %5.3f to A %5.3f make it %5.3f\n",step,tid,-reg,A[tid],A[tid]-reg);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
cscal5_device(int m, magmaFloatComplex* x, magmaFloatComplex alpha)
{
int tid = threadIdx.x;
int nchunk = (m-1)/MAX_NTHREADS + 1;
for(int s=0 ; s < nchunk; s++)
{
if( (tid + s * MAX_NTHREADS) < m ) {
#if 0
x[tid + s * MAX_NTHREADS] *= MAGMA_C_DIV(MAGMA_C_ONE, alpha);
#else
x[tid + s * MAX_NTHREADS] = x[tid + s * MAX_NTHREADS]/alpha;
#endif
}
}
__syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
zcomputecolumn_kernel_shared_batched(int m, int paneloffset, int step, magmaFloatComplex **dA_array, int lda, magma_int_t **ipiv_array, magma_int_t *info_array, int gbstep)
{
int gboff = paneloffset+step;
magma_int_t *ipiv = ipiv_array[blockIdx.z];
magmaFloatComplex *A_start = dA_array[blockIdx.z];
magmaFloatComplex *A0j = &(A_start[paneloffset + (paneloffset+step) * lda]);
magmaFloatComplex *A00 = &(A_start[paneloffset + paneloffset * lda]);
magmaFloatComplex *shared_A = shared_data;
__shared__ float shared_x[zamax];
__shared__ int shared_idx[zamax];
__shared__ magmaFloatComplex alpha;
int tid = threadIdx.x;
// checkinfo to avoid computation of the singular matrix
if(info_array[blockIdx.z] != 0 ) return;
int nchunk = (m-1)/MAX_NTHREADS + 1;
// read the current column from dev to shared memory
for(int s=0 ; s < nchunk; s++)
{
if( (tid + s * MAX_NTHREADS) < m ) shared_A[tid + s * MAX_NTHREADS] = A0j[tid + s * MAX_NTHREADS];
}
__syncthreads();
// update this column
if( step > 0 ){
zupdate_device( m, step, A00, lda, shared_A, 1);
__syncthreads();
}
// if( tid < (m-step) ) // DO NO TPUT THE IF CONDITION HERE SINCE icamax_devfunc HAS __syncthreads INSIDE.
// So let all htreads call this routine it will handle correctly based on the size
// note that icamax need only 128 threads, s
icamax_devfunc(m-step, shared_A+step, 1, shared_x, shared_idx);
if(tid == 0){
ipiv[gboff] = shared_idx[0] + gboff + 1; // Fortran Indexing
alpha = shared_A[shared_idx[0]+step];
//printf("@ step %d ipiv=%d where gboff=%d shared_idx %d alpha %5.3f \n",step,ipiv[gboff],gboff,shared_idx[0],alpha);
if(shared_x[0] == MAGMA_D_ZERO){
info_array[blockIdx.z] = shared_idx[0] + gboff + gbstep + 1;
}
}
__syncthreads();
if(shared_x[0] == MAGMA_D_ZERO) return;
__syncthreads();
// DO NO PUT THE IF CONDITION HERE SINCE icamax_devfunc HAS __syncthreads INSIDE.
cscal5_device( m-step, shared_A+step, alpha);
// put back the pivot that has been scaled with itself menaing =1
if(tid == 0) shared_A[shared_idx[0] + step] = alpha;
__syncthreads();
// write back from shared to dev memory
for(int s=0 ; s < nchunk; s++)
{
if( (tid + s * MAX_NTHREADS) < m )
{
A0j[tid + s * MAX_NTHREADS] = shared_A[tid + s * MAX_NTHREADS];
//printf("@ step %d tid %d updating A=x*alpha after A= %5.3f\n",step,tid,shared_A[tid]);
}
}
__syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
magma_int_t magma_ccomputecolumn_batched(magma_int_t m, magma_int_t paneloffset, magma_int_t step,
magmaFloatComplex **dA_array, magma_int_t lda,
magma_int_t **ipiv_array,
magma_int_t *info_array, magma_int_t gbstep,
magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized kernel which merged cscal and cgeru the two kernels
1) cscale the first column vector A(1:M-1,0) with 1/A(0,0);
2) Performe a cgeru Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);
*/
if( m == 0) return 0;
size_t all_shmem_size = zamax*(sizeof(float)+sizeof(int)) + (m+2)*sizeof(magmaFloatComplex);
if( all_shmem_size > (MAX_SHARED_ALLOWED*1024) ) // limit the shared memory to 44K leaving 4K for extra
{
printf("magma_ccomputecolumn_batched error out of shared memory \n");
return -20;
}
size_t shared_size = sizeof(magmaFloatComplex)*m;
dim3 grid(1, 1, batchCount);
hipLaunchKernelGGL(( zcomputecolumn_kernel_shared_batched), dim3(grid), dim3(min(m, MAX_NTHREADS)), shared_size, queue, m, paneloffset, step, dA_array, lda, ipiv_array, info_array, gbstep);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
| cc81367300da240ce8cd7b415344057d8ec2dcb7.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2011
@author Azzam Haidar
@author Tingxing Dong
@generated from zgetf2_kernels.cu normal z -> c, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#include "magmablas.h"
#include "batched_kernel_param.h"
#include "magma_templates.h"
#define PRECISION_c
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ magmaFloatComplex shared_data[];
extern __shared__ float sdata[];
extern __shared__ int int_sdata[];
/*
routines in this file are used by cgetf2_batched.cu
*/
//////////////////////////////////////////////////////////////////////////////////////////
__device__ int
icamax_devfunc(int length, const magmaFloatComplex *x, int incx, float *shared_x, int *shared_idx)
{
int tx = threadIdx.x;
magmaFloatComplex res;
float res1;
int nchunk = (length-1)/zamax + 1;
if( tx < zamax ){
shared_x[tx] = 0.0;
shared_idx[tx] = tx;//-1;// -1 will crash the code in case matrix is singular, better is to put =tx and make check info at output
}
__syncthreads();
for(int s =0 ; s < nchunk; s++)
{
if( (tx + s * zamax < length) && (tx < zamax) )
{
res = x[(tx + s * zamax) * incx];
res1 = fabs(MAGMA_C_REAL(res)) + fabs(MAGMA_C_IMAG(res));
if( res1 > shared_x[tx] )
{
shared_x[tx] = res1;
shared_idx[tx] = tx + s * zamax;
}
}
__syncthreads();
}
if(length >= zamax) // there are more than 128 threads working ==> all shared_x shared_idx are initialized here so I can call the fixed getidmax
magma_getidmax<zamax>(tx, shared_x, shared_idx);
else
magma_getidmax_n(min(zamax,length), tx, shared_x, shared_idx);
return shared_idx[0];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
icamax_kernel_batched(int length, int chunk, magmaFloatComplex **x_array, int incx,
int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep)
{
magmaFloatComplex *x_start = x_array[blockIdx.z];
const magmaFloatComplex *x = &(x_start[step + step * lda]);
magma_int_t *ipiv = ipiv_array[blockIdx.z];
int tx = threadIdx.x;
float *shared_x = sdata;
int *shared_idx = (int*)(shared_x + zamax);
icamax_devfunc(length, x, incx, shared_x, shared_idx);
if(tx == 0){
ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing
if(shared_x[0] == MAGMA_D_ZERO){
info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
tree_icamax_kernel_batched(int length, magmaFloatComplex **x_array, int incx,
int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep,
float** data_pool_array, magma_int_t** id_pool_array)
{
magmaFloatComplex *x_start = x_array[blockIdx.z];
const magmaFloatComplex *x = &(x_start[step + step * lda]);
float *data_pool = data_pool_array[blockIdx.z];
magma_int_t *id_pool = id_pool_array[blockIdx.z];
magma_int_t *ipiv = ipiv_array[blockIdx.z];
int tx = threadIdx.x;
int local_max_id;
__shared__ float shared_x[zamax];
__shared__ int shared_idx[zamax];
x += zamax * blockIdx.x * incx;
icamax_devfunc(min(zamax, length-blockIdx.x * zamax), x, incx, shared_x, shared_idx);
if(tx ==0)
{
local_max_id = shared_idx[0] + zamax * blockIdx.x; // add the offset
if(gridDim.x == 1)
{
ipiv[step] = local_max_id + step + 1; // Fortran Indexing
if(shared_x[0] == MAGMA_D_ZERO)
info_array[blockIdx.z] = local_max_id + step + gbstep + 1;
}
else
{
// put each thread block local max and its index in workspace
data_pool[blockIdx.x] = shared_x[0];
id_pool[blockIdx.x] = local_max_id;
}
}
}
__global__ void
tree_icamax_kernel2_batched(int n, int step, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep, float** data_pool_array, magma_int_t** id_pool_array)
{
__shared__ float shared_x[zamax];
__shared__ int shared_idx[zamax];
magma_int_t *ipiv = ipiv_array[blockIdx.z];
float *data_pool = data_pool_array[blockIdx.z];
magma_int_t *id_pool = id_pool_array[blockIdx.z];
int tx = threadIdx.x;
//read data
if( tx < n)
{
shared_x[tx] = data_pool[tx];
shared_idx[tx] = id_pool[tx];
}
else
{
shared_x[tx] = 0.0;
shared_idx[tx] = -2;
}
__syncthreads();
// compute local result inside each thread block
magma_getidmax<zamax>(tx, shared_x, shared_idx);
if(tx == 0 )
{
ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing
if(shared_x[0] == MAGMA_D_ZERO)
info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1;
}
}
magma_int_t magma_icamax_lg_batched(magma_int_t length, magmaFloatComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t lda,
magma_int_t** ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
if(length == 1) return 0;
if(incx < 0) return 1;
float* data_pool;
magma_int_t* id_pool;
float** data_pool_array = NULL;
magma_int_t** id_pool_array = NULL;
magma_int_t num_blocks = (length-1)/(zamax) + 1;
// creat pools(data and index) to store the result of each thread blocks
magma_smalloc(&data_pool, num_blocks * batchCount);
magma_imalloc(&id_pool, num_blocks * batchCount);
magma_malloc((void**)&data_pool_array, batchCount * sizeof(*data_pool_array));
magma_malloc((void**)&id_pool_array, batchCount * sizeof(*id_pool_array));
#if defined(PRECISION_z) || defined(PRECISION_d)
dset_pointer(data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue);
#else
sset_pointer(data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue);
#endif
set_ipointer(id_pool_array, id_pool, 1, 0, 0, num_blocks, batchCount, queue);
if( num_blocks > zamax)
{
printf("length(=%d), num_blocks(=%d) is too big > zamax(=%d), the second layer reduction can not be launched, Plz incread zamax \n", length, num_blocks, zamax);
}
else
{
// first level tree reduction
dim3 grid(num_blocks, 1, batchCount);
tree_icamax_kernel_batched<<<grid, zamax, 0, queue>>>(length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array);
if( num_blocks > 1)
{
// second level tree reduction
dim3 grid2(1, 1, batchCount);
tree_icamax_kernel2_batched<<<grid2, zamax, 0, queue>>>(num_blocks, step, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array);
}
}
magma_free(data_pool);
magma_free(id_pool);
magma_free(data_pool_array);
magma_free(id_pool_array);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
magma_int_t magma_icamax_batched(magma_int_t length,
magmaFloatComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t lda,
magma_int_t** ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
if(length == 0 ) return 0;
#if 1
dim3 grid(1, 1, batchCount);
int chunk = (length-1)/zamax + 1;
icamax_kernel_batched<<< grid, zamax, zamax * (sizeof(float) + sizeof(int)), queue >>>
(length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep);
#else
// the magma_icamax_lg_batched is faster but when cuda launch it as 2 kernels the white space time between these 2 kernels and the next kernel is larger than using the icamax_kernel for that today we are using only icamax_kernel
if( length <= 10 * zamax )
{
dim3 grid(1, 1, batchCount);
int chunk = (length-1)/zamax + 1;
icamax_kernel_batched<<< grid, zamax, zamax * (sizeof(float) + sizeof(magma_int_t)), queue >>>
(length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep);
}
else
{
magma_icamax_lg_batched(length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, batchCount);
}
#endif
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void cswap_kernel_batched(magma_int_t n, magmaFloatComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t** ipiv_array)
{
magmaFloatComplex *x = x_array[blockIdx.z];
magma_int_t *ipiv = ipiv_array[blockIdx.z];
__shared__ int jp;
if(threadIdx.x == 0)
{
jp = ipiv[step] - 1;
//if(blockIdx.z == 1) printf("jp=%d", jp);
}
__syncthreads();
if(jp == step) return; // no pivot
int id = threadIdx.x;
if (id < n) {
magmaFloatComplex tmp = x[jp + incx*id];
x[jp + incx*id] = x[step + incx*id];
x[step + incx*id] = tmp;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
magma_int_t magma_cswap_batched(magma_int_t n, magmaFloatComplex **x_array, magma_int_t incx, magma_int_t step,
magma_int_t** ipiv_array, magma_int_t batchCount, magma_queue_t queue)
{
/*
cswap two row: (ipiv[step]-1)th and jth
*/
if( n > MAX_NTHREADS)
{
printf("magma_cswap_batched nb=%d, > %d, not supported \n",n, MAX_NTHREADS);
return -15;
}
dim3 grid(1,1, batchCount);
cswap_kernel_batched<<< grid, n, 0, queue >>>(n, x_array, incx, step, ipiv_array);
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void cscal_cgeru_kernel_batched(int m, int n, int step, magmaFloatComplex **dA_array, int lda, magma_int_t *info_array, int gbstep)
{
// checkinfo to avoid computation of the singular matrix
if(info_array[blockIdx.z] != 0 ) return;
magmaFloatComplex *A_start = dA_array[blockIdx.z];
magmaFloatComplex *A = &(A_start[step + step * lda]);
magmaFloatComplex *shared_y = shared_data;
int tx = threadIdx.x;
int gbidx = blockIdx.x*MAX_NTHREADS + threadIdx.x;
if (tx < n) {
shared_y[tx] = A[lda * tx];
}
__syncthreads();
if(shared_y[0] == MAGMA_C_ZERO) {
info_array[blockIdx.z] = step + gbstep + 1;
return;
}
if (gbidx < m && gbidx > 0) {
magmaFloatComplex reg = MAGMA_C_ZERO;
reg = A[gbidx];
reg *= MAGMA_C_DIV(MAGMA_C_ONE, shared_y[0]);
A[gbidx] = reg;
#pragma unroll
for(int i=1; i < n; i++) {
//A[gbidx + i*lda] = A[gbidx + i*lda] - shared_y[i] * reg;//cuda give wrong results with this one
//A[gbidx + i*lda] -= shared_y[i] * reg; //cuda give wrong results with this one
A[gbidx + i*lda] += (MAGMA_C_NEG_ONE) * shared_y[i] * reg;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
magma_int_t magma_cscal_cgeru_batched(magma_int_t m, magma_int_t n, magma_int_t step,
magmaFloatComplex **dA_array, magma_int_t lda,
magma_int_t *info_array, magma_int_t gbstep,
magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized kernel which merged cscal and cgeru the two kernels
1) cscale the first column vector A(1:M-1,0) with 1/A(0,0);
2) Performe a cgeru Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);
*/
if( n == 0) return 0;
if( n > MAX_NTHREADS)
{
printf("magma_cscal_cgeru_batched nb=%d, > %d, not supported \n",n, MAX_NTHREADS);
return -15;
}
int nchunk = (m-1)/MAX_NTHREADS + 1;
size_t shared_size = sizeof(magmaFloatComplex)*(n);
dim3 grid(nchunk, 1, batchCount);
cscal_cgeru_kernel_batched<<< grid, min(m, MAX_NTHREADS), shared_size, queue>>>(m, n, step, dA_array, lda, info_array, gbstep);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void cgetf2trsm_kernel_batched(int ib, int n, magmaFloatComplex **dA_array, int step, int lda)
{
/*
this kernel does the safe nonblocked TRSM operation
B = A^-1 * B
*/
magmaFloatComplex *A_start = dA_array[blockIdx.z];
magmaFloatComplex *A = &(A_start[step + step * lda]);
magmaFloatComplex *B = &(A_start[step + (step+ib) * lda]);
magmaFloatComplex *shared_a = shared_data;
magmaFloatComplex *shared_b = shared_data+ib*ib;
int tid = threadIdx.x;
int i,d;
// Read A and B at the same time to the shared memory (shared_a shared_b)
// note that shared_b = shared_a+ib*ib so its contiguous
// I can make it in one loop reading
if ( tid < ib) {
#pragma unroll
for( i=0; i < n+ib; i++) {
shared_a[tid + i*ib] = A[tid + i*lda];
}
}
__syncthreads();
if (tid < n) {
#pragma unroll
for( d=0; d<ib-1; d++) {
for( i=d+1; i<ib; i++) {
shared_b[i+tid*ib] += (MAGMA_C_NEG_ONE) * shared_a[i+d*ib] * shared_b[d+tid*ib];
}
}
}
__syncthreads();
// write back B
if ( tid < ib) {
#pragma unroll
for( i=0; i < n; i++) {
B[tid + i*lda] = shared_b[tid + i*ib];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C" void
magma_cgetf2trsm_batched(magma_int_t ib, magma_int_t n, magmaFloatComplex **dA_array, magma_int_t step, magma_int_t lda,
magma_int_t batchCount, magma_queue_t queue)
{
/*
*/
if( n == 0 || ib == 0 ) return;
size_t shared_size = sizeof(magmaFloatComplex)*(ib*(ib+n));
// TODO TODO TODO
if( shared_size > (MAX_SHARED_ALLOWED*1024) ) // limit the shared memory to 46K leaving 2K for extra
{
printf("kernel_cgetf2trsm error out of shared memory \n");
return;
}
dim3 grid(1, 1, batchCount);
cgetf2trsm_kernel_batched<<< grid, max(n,ib), shared_size, queue>>>(ib, n, dA_array, step, lda);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
zupdate_device(int m, int step, magmaFloatComplex* x, int ldx, magmaFloatComplex *A, int lda)
{
int tid = threadIdx.x;
int nchunk = (m-1)/MAX_NTHREADS + 1;
int indx;
//magmaFloatComplex reg = MAGMA_C_ZERO;
// update the current column by all the previous one
#pragma unroll
for(int i=0; i < step; i++) {
for(int s=0 ; s < nchunk; s++)
{
indx = tid + s * MAX_NTHREADS;
if ( indx > i && indx < m ) {
A[indx] -= A[i] * x[indx + i*ldx];
//printf(" @ step %d tid %d updating x[tid]*y[i]=A %5.3f %5.3f = %5.3f at i %d \n", step, tid, x[tid + i*ldx], A[i], A[tid],i);
}
}
__syncthreads();
}
//printf(" @ step %d tid %d adding %5.3f to A %5.3f make it %5.3f\n",step,tid,-reg,A[tid],A[tid]-reg);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
cscal5_device(int m, magmaFloatComplex* x, magmaFloatComplex alpha)
{
int tid = threadIdx.x;
int nchunk = (m-1)/MAX_NTHREADS + 1;
for(int s=0 ; s < nchunk; s++)
{
if( (tid + s * MAX_NTHREADS) < m ) {
#if 0
x[tid + s * MAX_NTHREADS] *= MAGMA_C_DIV(MAGMA_C_ONE, alpha);
#else
x[tid + s * MAX_NTHREADS] = x[tid + s * MAX_NTHREADS]/alpha;
#endif
}
}
__syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
zcomputecolumn_kernel_shared_batched(int m, int paneloffset, int step, magmaFloatComplex **dA_array, int lda, magma_int_t **ipiv_array, magma_int_t *info_array, int gbstep)
{
int gboff = paneloffset+step;
magma_int_t *ipiv = ipiv_array[blockIdx.z];
magmaFloatComplex *A_start = dA_array[blockIdx.z];
magmaFloatComplex *A0j = &(A_start[paneloffset + (paneloffset+step) * lda]);
magmaFloatComplex *A00 = &(A_start[paneloffset + paneloffset * lda]);
magmaFloatComplex *shared_A = shared_data;
__shared__ float shared_x[zamax];
__shared__ int shared_idx[zamax];
__shared__ magmaFloatComplex alpha;
int tid = threadIdx.x;
// checkinfo to avoid computation of the singular matrix
if(info_array[blockIdx.z] != 0 ) return;
int nchunk = (m-1)/MAX_NTHREADS + 1;
// read the current column from dev to shared memory
for(int s=0 ; s < nchunk; s++)
{
if( (tid + s * MAX_NTHREADS) < m ) shared_A[tid + s * MAX_NTHREADS] = A0j[tid + s * MAX_NTHREADS];
}
__syncthreads();
// update this column
if( step > 0 ){
zupdate_device( m, step, A00, lda, shared_A, 1);
__syncthreads();
}
// if( tid < (m-step) ) // DO NO TPUT THE IF CONDITION HERE SINCE icamax_devfunc HAS __syncthreads INSIDE.
// So let all htreads call this routine it will handle correctly based on the size
// note that icamax need only 128 threads, s
icamax_devfunc(m-step, shared_A+step, 1, shared_x, shared_idx);
if(tid == 0){
ipiv[gboff] = shared_idx[0] + gboff + 1; // Fortran Indexing
alpha = shared_A[shared_idx[0]+step];
//printf("@ step %d ipiv=%d where gboff=%d shared_idx %d alpha %5.3f \n",step,ipiv[gboff],gboff,shared_idx[0],alpha);
if(shared_x[0] == MAGMA_D_ZERO){
info_array[blockIdx.z] = shared_idx[0] + gboff + gbstep + 1;
}
}
__syncthreads();
if(shared_x[0] == MAGMA_D_ZERO) return;
__syncthreads();
// DO NO PUT THE IF CONDITION HERE SINCE icamax_devfunc HAS __syncthreads INSIDE.
cscal5_device( m-step, shared_A+step, alpha);
// put back the pivot that has been scaled with itself menaing =1
if(tid == 0) shared_A[shared_idx[0] + step] = alpha;
__syncthreads();
// write back from shared to dev memory
for(int s=0 ; s < nchunk; s++)
{
if( (tid + s * MAX_NTHREADS) < m )
{
A0j[tid + s * MAX_NTHREADS] = shared_A[tid + s * MAX_NTHREADS];
//printf("@ step %d tid %d updating A=x*alpha after A= %5.3f\n",step,tid,shared_A[tid]);
}
}
__syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
magma_int_t magma_ccomputecolumn_batched(magma_int_t m, magma_int_t paneloffset, magma_int_t step,
magmaFloatComplex **dA_array, magma_int_t lda,
magma_int_t **ipiv_array,
magma_int_t *info_array, magma_int_t gbstep,
magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized kernel which merged cscal and cgeru the two kernels
1) cscale the first column vector A(1:M-1,0) with 1/A(0,0);
2) Performe a cgeru Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where
alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1);
*/
if( m == 0) return 0;
size_t all_shmem_size = zamax*(sizeof(float)+sizeof(int)) + (m+2)*sizeof(magmaFloatComplex);
if( all_shmem_size > (MAX_SHARED_ALLOWED*1024) ) // limit the shared memory to 44K leaving 4K for extra
{
printf("magma_ccomputecolumn_batched error out of shared memory \n");
return -20;
}
size_t shared_size = sizeof(magmaFloatComplex)*m;
dim3 grid(1, 1, batchCount);
zcomputecolumn_kernel_shared_batched<<< grid, min(m, MAX_NTHREADS), shared_size, queue>>>(m, paneloffset, step, dA_array, lda, ipiv_array, info_array, gbstep);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
|
e1ca1068378165695e51d4a4152cc1d44aee61bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "random/RngEngine.hh"
#include "random/distributions/UniformRealDistribution.hh"
#include "base/KernelParamCalculator.device.hh"
using namespace celeritas;
__device__ __forceinline__ double sample(RngEngine& r, double lower, double
upper)
{
return UniformRealDistribution<double>(lower, upper)(r);
}
__global__ void sample(RngEngine::StateRef const states, double* result)
{
auto thread_id = KernelParamCalculator::thread_id();
RngEngine rng(states, thread_id);
result[3 * thread_id.get()] = sample(rng, 0, 1);
result[3 * thread_id.get() + 1] = sample(rng, 0, 5);
result[3 * thread_id.get() + 2] = sample(rng, 2.5, 7.75);
}
| e1ca1068378165695e51d4a4152cc1d44aee61bf.cu | #include "random/RngEngine.hh"
#include "random/distributions/UniformRealDistribution.hh"
#include "base/KernelParamCalculator.device.hh"
using namespace celeritas;
__device__ __forceinline__ double sample(RngEngine& r, double lower, double
upper)
{
return UniformRealDistribution<double>(lower, upper)(r);
}
__global__ void sample(RngEngine::StateRef const states, double* result)
{
auto thread_id = KernelParamCalculator::thread_id();
RngEngine rng(states, thread_id);
result[3 * thread_id.get()] = sample(rng, 0, 1);
result[3 * thread_id.get() + 1] = sample(rng, 0, 5);
result[3 * thread_id.get() + 2] = sample(rng, 2.5, 7.75);
}
|
45001b8878b93533840b112d4c2b9497610ef5a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#include "cuda_dt_improved.h"
void THNN_CudaDomainTransform_updateOutput(THCState *state, THCudaTensor *edge,
THCudaTensor *output, THCudaTensor *weight, THCudaTensor *inter,
int num_iter, float sigma_range, int sigma_spatial) {
long batchSize = output->size[0];
long plane = output->size[1];
long height = output->size[2];
long width = output->size[3];
// Resize output
THCudaTensor_resize4d(state, weight, batchSize, num_iter, height, width);
THCudaTensor_resize4d(state, inter, batchSize, num_iter * plane * 4, height,
width);
THCudaTensor_fill(state, weight, -1);
THCudaTensor_fill(state, inter, 0);
THCudaTensor *edge_n = THCudaTensor_new(state);
THCudaTensor *weight_n = THCudaTensor_new(state);
THCudaTensor *output_n = THCudaTensor_new(state);
THCudaTensor *inter_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt++) {
// Matrix mulitply per output:
THCudaTensor_select(state, edge_n, edge, 0, elt);
THCudaTensor_select(state, weight_n, weight, 0, elt);
THCudaTensor_select(state, output_n, output, 0, elt);
THCudaTensor_select(state, inter_n, inter, 0, elt);
domainTransform_impro(THCState_getCurrentStream(state),
THCudaTensor_data(state, edge_n),
THCudaTensor_data(state, weight_n),
THCudaTensor_data(state, output_n),
THCudaTensor_data(state, inter_n), plane, height, width,
num_iter, sigma_range, sigma_spatial);
}
// Free
THCudaTensor_free(state, edge_n);
THCudaTensor_free(state, weight_n);
THCudaTensor_free(state, output_n);
THCudaTensor_free(state, inter_n);
}
void THNN_CudaDomainTransform_updateGradInput(THCState *state,
THCudaTensor *edge, THCudaTensor *gradData, THCudaTensor *gradEdge,
THCudaTensor *weight, THCudaTensor *inter, THCudaTensor *gradWeight,
int num_iter, float sigma_range, int sigma_spatial) {
long batchSize = gradData->size[0];
long plane = gradData->size[1];
long height = gradData->size[2];
long width = gradData->size[3];
THCudaTensor *edge_n = THCudaTensor_new(state);
THCudaTensor *gradEdge_n = THCudaTensor_new(state);
THCudaTensor *gradData_n = THCudaTensor_new(state);
THCudaTensor *weight_n = THCudaTensor_new(state);
THCudaTensor *inter_n = THCudaTensor_new(state);
THCudaTensor *gradWeight_n = THCudaTensor_new(state);
for (int elt = 0; elt < batchSize; elt++) {
THCudaTensor_select(state, edge_n, edge, 0, elt);
THCudaTensor_select(state, gradEdge_n, gradEdge, 0, elt);
THCudaTensor_select(state, gradData_n, gradData, 0, elt);
THCudaTensor_select(state, weight_n, weight, 0, elt);
THCudaTensor_select(state, inter_n, inter, 0, elt);
THCudaTensor_select(state, gradWeight_n, gradWeight, 0, elt);
domainTransform_grad_impro(THCState_getCurrentStream(state),
THCudaTensor_data(state, edge_n),
THCudaTensor_data(state, gradEdge_n),
THCudaTensor_data(state, gradData_n),
THCudaTensor_data(state, weight_n),
THCudaTensor_data(state, inter_n),
THCudaTensor_data(state, gradWeight_n), plane, height, width,
num_iter, sigma_range, sigma_spatial);
}
THCudaTensor_free(state, edge_n);
THCudaTensor_free(state, gradEdge_n);
THCudaTensor_free(state, gradData_n);
THCudaTensor_free(state, weight_n);
THCudaTensor_free(state, inter_n);
THCudaTensor_free(state, gradWeight_n);
}
| 45001b8878b93533840b112d4c2b9497610ef5a7.cu | #include "THCUNN.h"
#include "common.h"
#include "cuda_dt_improved.h"
void THNN_CudaDomainTransform_updateOutput(THCState *state, THCudaTensor *edge,
THCudaTensor *output, THCudaTensor *weight, THCudaTensor *inter,
int num_iter, float sigma_range, int sigma_spatial) {
long batchSize = output->size[0];
long plane = output->size[1];
long height = output->size[2];
long width = output->size[3];
// Resize output
THCudaTensor_resize4d(state, weight, batchSize, num_iter, height, width);
THCudaTensor_resize4d(state, inter, batchSize, num_iter * plane * 4, height,
width);
THCudaTensor_fill(state, weight, -1);
THCudaTensor_fill(state, inter, 0);
THCudaTensor *edge_n = THCudaTensor_new(state);
THCudaTensor *weight_n = THCudaTensor_new(state);
THCudaTensor *output_n = THCudaTensor_new(state);
THCudaTensor *inter_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt++) {
// Matrix mulitply per output:
THCudaTensor_select(state, edge_n, edge, 0, elt);
THCudaTensor_select(state, weight_n, weight, 0, elt);
THCudaTensor_select(state, output_n, output, 0, elt);
THCudaTensor_select(state, inter_n, inter, 0, elt);
domainTransform_impro(THCState_getCurrentStream(state),
THCudaTensor_data(state, edge_n),
THCudaTensor_data(state, weight_n),
THCudaTensor_data(state, output_n),
THCudaTensor_data(state, inter_n), plane, height, width,
num_iter, sigma_range, sigma_spatial);
}
// Free
THCudaTensor_free(state, edge_n);
THCudaTensor_free(state, weight_n);
THCudaTensor_free(state, output_n);
THCudaTensor_free(state, inter_n);
}
/* Backward pass of the (improved) domain transform filter.
 *
 * gradData   : (batch, plane, height, width) gradient w.r.t. the layer output,
 *              consumed by the kernel to produce the input gradients.
 * gradEdge   : receives the gradient w.r.t. the edge map.
 * gradWeight : receives the gradient w.r.t. the per-iteration weights.
 * weight / inter : forward-pass buffers produced by updateOutput, read here.
 * num_iter / sigma_range / sigma_spatial : must match the forward call; forwarded
 *              unchanged to domainTransform_grad_impro().
 */
void THNN_CudaDomainTransform_updateGradInput(THCState *state,
    THCudaTensor *edge, THCudaTensor *gradData, THCudaTensor *gradEdge,
    THCudaTensor *weight, THCudaTensor *inter, THCudaTensor *gradWeight,
    int num_iter, float sigma_range, int sigma_spatial) {
  const long batchSize = gradData->size[0];
  const long plane     = gradData->size[1];
  const long height    = gradData->size[2];
  const long width     = gradData->size[3];

  /* Scratch tensors reused as per-sample slices on every loop iteration. */
  THCudaTensor *edgeSlice       = THCudaTensor_new(state);
  THCudaTensor *gradEdgeSlice   = THCudaTensor_new(state);
  THCudaTensor *gradDataSlice   = THCudaTensor_new(state);
  THCudaTensor *weightSlice     = THCudaTensor_new(state);
  THCudaTensor *interSlice      = THCudaTensor_new(state);
  THCudaTensor *gradWeightSlice = THCudaTensor_new(state);

  /* Back-propagate one batch element at a time on the current stream. */
  for (int b = 0; b < batchSize; ++b) {
    /* Select the b-th sample along the batch dimension of each tensor. */
    THCudaTensor_select(state, edgeSlice, edge, 0, b);
    THCudaTensor_select(state, gradEdgeSlice, gradEdge, 0, b);
    THCudaTensor_select(state, gradDataSlice, gradData, 0, b);
    THCudaTensor_select(state, weightSlice, weight, 0, b);
    THCudaTensor_select(state, interSlice, inter, 0, b);
    THCudaTensor_select(state, gradWeightSlice, gradWeight, 0, b);
    domainTransform_grad_impro(THCState_getCurrentStream(state),
        THCudaTensor_data(state, edgeSlice),
        THCudaTensor_data(state, gradEdgeSlice),
        THCudaTensor_data(state, gradDataSlice),
        THCudaTensor_data(state, weightSlice),
        THCudaTensor_data(state, interSlice),
        THCudaTensor_data(state, gradWeightSlice), plane, height, width,
        num_iter, sigma_range, sigma_spatial);
  }

  /* Release the slice wrappers (the underlying storages stay owned by the caller). */
  THCudaTensor_free(state, edgeSlice);
  THCudaTensor_free(state, gradEdgeSlice);
  THCudaTensor_free(state, gradDataSlice);
  THCudaTensor_free(state, weightSlice);
  THCudaTensor_free(state, interSlice);
  THCudaTensor_free(state, gradWeightSlice);
}
|
cb3f4066147289c8f91e83daf1a73078278fbda0.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <oman/visualizer/plot.h> /* Must include before other - looks like a clash of namespaces between OMAN::Utility and Magick::_ImageInfo? */
#include <oman/general/omandirs.h> /* non c++11 */
#include <oman/general/iterationfunction.h> /* non c++11 */
#include <oman/general/templatedefines.h> /* non c++11 */
#include <sinr/network.h> /* Must include before other - looks like a clash of namespaces between OMAN::Utility and Thrust::Utility? */
#include <sinr/coordinates.cuh>
#include <sinr/visualizer.cuh>
#include <sinr/networkmetrics.cuh>
#include <sinr/util.h>
#include <sinr/types.h>
typedef thrust::tuple<double,double> Point2d_dbl;
typedef thrust::tuple<float,float> Point2d_flt;
/** This program computes averaged metrics versus the size of the network, parameterized over the number of
* data points used to sample each metric over a 2d arena. Each resulting data point is averaged over sampleCount
* independent runs.
*
* A main comparison is made between using floats or doubles as the underlying data type in the averaged metric
* calculations, as GPUs seem to be faster at evaluating single-precision floating point operations.
*/
/** Experiment driver: sweeps the number of network nodes (independent variable)
 *  against the image sampling resolution (parameter), computing coverage,
 *  average max SINR and average max capacity with BOTH float and double
 *  arithmetic, on BOTH grid-sampled and randomly-sampled coordinates.
 *  Each data point is averaged over sampleCount independent node placements;
 *  means, standard deviations, running times, and the float-vs-double absolute
 *  error of every metric are plotted at the end.
 *
 *  Fix applied: the double-precision SINR visualization now uses
 *  net_dbl_grid's SINR threshold (it previously reused net_flt_grid's — a
 *  copy-paste from the float branch).
 */
int main(int argc __attribute__((unused)), char **argv __attribute__((unused))) {
    /** @note if you only have one GPU, free memory depends on the number of windows you have open */
    /** @todo generalize this code so that other programs can use it */
    hipSetDevice(0);
    hipDeviceReset();
    size_t free, total;
    hipMemGetInfo(&free, &total);
    std::cout<<"free: "<<free<<"\t total: "<<total<<std::endl;

    /* User-controlled setup for experiment */
    Util::deleteDirContents(OmanDirs::temp());
    Util::deleteDirContents(OmanDirs::images());
    Util::deleteDirContents(OmanDirs::videos());
    Util::deleteDirContents(OmanDirs::logs());
    Util::seedRandomGenerator(0);

    vector<double> nodeIter = IterationFunction(10, 50, 5).getVector();       /* number of nodes to iterate over */
    vector<double> sidePixelIter = IterationFunction(500, 500, 1).getVector(); /* image sizes to iterate over */
    unsigned int sampleCount = 100;     /* samples to average over for each param and indVar value */
    double arenaSideLength = 1000.0;    /* (m) */
    double widthdB = 5.0;               /* visual display width of SINR cells (dB) */
    Arena2d<double> arena_dbl(arenaSideLength);
    Arena2d<float> arena_flt(arenaSideLength);
    bool paramStatus = true;
    bool indVarStatus = true;
    bool sampleStatus = true;
    bool saveImage = true;
    bool witherrorbars = true;

    /* Map iterators to parameter and independent variables
     * Note: remember this mapping to make the right variable assignments inside the nested for-loops */
    vector<double> paramIter(sidePixelIter);
    string paramName = "pixels";
    string paramLegend = "p=";
    vector<double> indVarIter(nodeIter);
    string indVarName = "nodes";
    string xlabel = "Number of Nodes";
    string imagePrename = "vs" + indVarName + "-samples" + Util::to_string(sampleCount) + "-";

    /* Automatic setup for experiment: sample grids indexed [param][indVar][sample] */
    vd3 cov_flt_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Coverage */
    vd3 snr_flt_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* SINR Max */
    vd3 cap_flt_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Capacity */
    vd3 tme_flt_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Time */
    vd3 cov_dbl_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Coverage */
    vd3 snr_dbl_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* SINR Max */
    vd3 cap_dbl_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Capacity */
    vd3 tme_dbl_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Time */
    vd3 cov_flt_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Coverage */
    vd3 snr_flt_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* SINR Max */
    vd3 cap_flt_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Capacity */
    vd3 tme_flt_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Time */
    vd3 cov_dbl_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Coverage */
    vd3 snr_dbl_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* SINR Max */
    vd3 cap_dbl_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Capacity */
    vd3 tme_dbl_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Time */

    uint64_t start, stop;
    /// now you don't have to flush the buffer: http://stackoverflow.com/a/1716621/627517
    setbuf(stdout, NULL);

    for(unsigned int i_param=0; i_param<paramIter.size(); i_param++) {
        /* NOTE: make sure we are assigning elements of paramIter to the right environment variable */
        unsigned int sidePixelCount = paramIter.at(i_param);
        if(paramStatus) {std::cout<<paramName<<i_param<<" "<<std::endl;}
        for(unsigned int i_indVar=0; i_indVar<indVarIter.size(); i_indVar++) {
            /* NOTE: make sure we are assigning elements of indVarIter to the right environment variable */
            unsigned int N = indVarIter.at(i_indVar);
            if(indVarStatus) {std::cout<<" "<<indVarName<<i_indVar<<" ";}
            Network<float> net_flt_grid(N);
            Network<float> net_flt_rand = net_flt_grid;
            Network<double> net_dbl_grid(N);
            Network<double> net_dbl_rand = net_dbl_grid;
            for(unsigned int sample=0; sample<sampleCount; sample++) {
                if(sampleStatus) {std::cout<<"s"<<sample<<" ";}
                /* Set up network for CUDA: identical node positions for all four variants
                 * so that float/double and grid/random results are directly comparable. */
                for (unsigned int n = 0; n < N; n++) {
                    Point2d_dbl p(Util::uniform_double(0,arenaSideLength), Util::uniform_double(0,arenaSideLength));
                    net_dbl_grid.setPosition(n,p);
                    net_dbl_rand.setPosition(n,p);
                    net_flt_grid.setPosition(n,p);
                    net_flt_rand.setPosition(n,p);
                }

                /* float-based grid */
                start = Util::getTimeNS();
                thrust::device_vector<Point2d_flt> coords_flt_grid(sidePixelCount*sidePixelCount);
                sinr::coordinates::generateGrid(coords_flt_grid,
                                                arena_flt,
                                                sidePixelCount,
                                                sidePixelCount);
                NetworkMetricsDev<float> nm_flt_grid(&net_flt_grid, &arena_flt, &coords_flt_grid);
                cov_flt_grid.at(i_param).at(i_indVar).at(sample) = nm_flt_grid.computeAvgMaxCoverage();
                snr_flt_grid.at(i_param).at(i_indVar).at(sample) = nm_flt_grid.computeAvgMaxSINR();
                cap_flt_grid.at(i_param).at(i_indVar).at(sample) = nm_flt_grid.computeAvgMaxCapacity();
                stop = Util::getTimeNS();
                tme_flt_grid.at(i_param).at(i_indVar).at(sample) = (stop-start)/double(Util::nanoPerSec);

                /* double-based grid */
                start = Util::getTimeNS();
                thrust::device_vector<Point2d_dbl> coords_dbl_grid(sidePixelCount*sidePixelCount);
                sinr::coordinates::generateGrid(coords_dbl_grid,
                                                arena_dbl,
                                                sidePixelCount,
                                                sidePixelCount);
                NetworkMetricsDev<double> nm_dbl_grid(&net_dbl_grid, &arena_dbl, &coords_dbl_grid);
                cov_dbl_grid.at(i_param).at(i_indVar).at(sample) = nm_dbl_grid.computeAvgMaxCoverage();
                snr_dbl_grid.at(i_param).at(i_indVar).at(sample) = nm_dbl_grid.computeAvgMaxSINR();
                cap_dbl_grid.at(i_param).at(i_indVar).at(sample) = nm_dbl_grid.computeAvgMaxCapacity();
                stop = Util::getTimeNS();
                tme_dbl_grid.at(i_param).at(i_indVar).at(sample) = (stop-start)/double(Util::nanoPerSec);

                /* float-based random */
                start = Util::getTimeNS();
                thrust::device_vector<Point2d_flt> coords_flt_rand(sidePixelCount*sidePixelCount);
                sinr::coordinates::generateRandom(coords_flt_rand,
                                                  arena_flt,
                                                  sidePixelCount*sidePixelCount,
                                                  sample);
                NetworkMetricsDev<float> nm_flt_rand(&net_flt_rand, &arena_flt, &coords_flt_rand);
                cov_flt_rand.at(i_param).at(i_indVar).at(sample) = nm_flt_rand.computeAvgMaxCoverage();
                snr_flt_rand.at(i_param).at(i_indVar).at(sample) = nm_flt_rand.computeAvgMaxSINR();
                cap_flt_rand.at(i_param).at(i_indVar).at(sample) = nm_flt_rand.computeAvgMaxCapacity();
                stop = Util::getTimeNS();
                tme_flt_rand.at(i_param).at(i_indVar).at(sample) = (stop-start)/double(Util::nanoPerSec);

                /* double-based random */
                start = Util::getTimeNS();
                thrust::device_vector<Point2d_dbl> coords_dbl_rand(sidePixelCount*sidePixelCount);
                sinr::coordinates::generateRandom(coords_dbl_rand,
                                                  arena_dbl,
                                                  sidePixelCount*sidePixelCount,
                                                  sample);
                NetworkMetricsDev<double> nm_dbl_rand(&net_dbl_rand, &arena_dbl, &coords_dbl_rand);
                cov_dbl_rand.at(i_param).at(i_indVar).at(sample) = nm_dbl_rand.computeAvgMaxCoverage();
                snr_dbl_rand.at(i_param).at(i_indVar).at(sample) = nm_dbl_rand.computeAvgMaxSINR();
                cap_dbl_rand.at(i_param).at(i_indVar).at(sample) = nm_dbl_rand.computeAvgMaxCapacity();
                stop = Util::getTimeNS();
                tme_dbl_rand.at(i_param).at(i_indVar).at(sample) = (stop-start)/double(Util::nanoPerSec);

                /* Save one representative SINR image per (param, indVar) combination. */
                if (saveImage && (sample == 0)) {
                    string imagePostname = paramName + Util::to_string((int)paramIter.at(i_param)) + "-" + indVarName + Util::to_string((int)indVarIter.at(i_indVar)) + "-" + "s" + Util::to_string((int)sample);
                    thrust::device_vector<uchar4> rgba_flt(sidePixelCount*sidePixelCount);
                    const thrust::device_vector<float> *maxsinr_flt = nm_flt_grid.computeMapMaxSINR();
                    sinr::visualize::grayscaledB(*maxsinr_flt, rgba_flt, net_flt_grid.getSINRThresholddB()-widthdB, net_flt_grid.getSINRThresholddB());
                    string imagePath_flt = OmanDirs::images() + "/" + imagePrename + imagePostname + "-flt.bmp";
                    sinr::visualize::outputBMP(rgba_flt, sidePixelCount, sidePixelCount, imagePath_flt);
                    thrust::device_vector<uchar4> rgba_dbl(sidePixelCount*sidePixelCount);
                    const thrust::device_vector<double> *maxsinr_dbl = nm_dbl_grid.computeMapMaxSINR();
                    /* BUGFIX: use the double network's threshold here (was net_flt_grid,
                     * copy-pasted from the float branch above). */
                    sinr::visualize::grayscaledB(*maxsinr_dbl, rgba_dbl, net_dbl_grid.getSINRThresholddB()-widthdB, net_dbl_grid.getSINRThresholddB());
                    string imagePath_dbl = OmanDirs::images() + "/" + imagePrename + imagePostname + "-dbl.bmp";
                    sinr::visualize::outputBMP(rgba_dbl, sidePixelCount, sidePixelCount, imagePath_dbl);
                }
            }
            if (indVarStatus) {std::cout<<std::endl;}
        }
    }
    if(paramStatus || indVarStatus || sampleStatus) {
        std::cout << std::endl;
    }

    /// Here we use the computed coverage, sinrmax, and capacity to determine the "error" in the lower resolution images.
    /* float-based grid samples */
    vd2 cov_flt_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 snr_flt_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cap_flt_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 tme_flt_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cov_flt_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 snr_flt_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cap_flt_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 tme_flt_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
    /* double-based grid samples */
    vd2 cov_dbl_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 snr_dbl_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cap_dbl_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 tme_dbl_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cov_dbl_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 snr_dbl_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cap_dbl_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 tme_dbl_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
    /* float-based random samples */
    vd2 cov_flt_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 snr_flt_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cap_flt_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 tme_flt_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cov_flt_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 snr_flt_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cap_flt_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 tme_flt_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
    /* double-based random samples */
    vd2 cov_dbl_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 snr_dbl_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cap_dbl_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 tme_dbl_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cov_dbl_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 snr_dbl_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cap_dbl_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 tme_dbl_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
    /* Error Measurements - Grid */
    vd2 cov_grid_error_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 snr_grid_error_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cap_grid_error_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cov_grid_error_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 snr_grid_error_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cap_grid_error_std(paramIter.size(), vd1(indVarIter.size(), 0));
    /* Error Measurements - Random */
    vd2 cov_rand_error_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 snr_rand_error_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cap_rand_error_mu(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cov_rand_error_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 snr_rand_error_std(paramIter.size(), vd1(indVarIter.size(), 0));
    vd2 cap_rand_error_std(paramIter.size(), vd1(indVarIter.size(), 0));

    /* Collapse the per-sample dimension into mean / stddev, and compute the
     * float-vs-double absolute error statistics for both sampling schemes. */
    for(unsigned int i=0; i<paramIter.size(); i++) {
        for(unsigned int j=0; j<indVarIter.size(); j++) {
            /* float-based grid samples */
            cov_flt_grid_mu.at(i).at(j) = Util::mean(cov_flt_grid.at(i).at(j));
            snr_flt_grid_mu.at(i).at(j) = Util::mean(snr_flt_grid.at(i).at(j));
            cap_flt_grid_mu.at(i).at(j) = Util::mean(cap_flt_grid.at(i).at(j));
            tme_flt_grid_mu.at(i).at(j) = Util::mean(tme_flt_grid.at(i).at(j));
            cov_flt_grid_std.at(i).at(j) = Util::stddev(cov_flt_grid.at(i).at(j));
            snr_flt_grid_std.at(i).at(j) = Util::stddev(snr_flt_grid.at(i).at(j));
            cap_flt_grid_std.at(i).at(j) = Util::stddev(cap_flt_grid.at(i).at(j));
            tme_flt_grid_std.at(i).at(j) = Util::stddev(tme_flt_grid.at(i).at(j));
            /* double-based grid samples */
            cov_dbl_grid_mu.at(i).at(j) = Util::mean(cov_dbl_grid.at(i).at(j));
            snr_dbl_grid_mu.at(i).at(j) = Util::mean(snr_dbl_grid.at(i).at(j));
            cap_dbl_grid_mu.at(i).at(j) = Util::mean(cap_dbl_grid.at(i).at(j));
            tme_dbl_grid_mu.at(i).at(j) = Util::mean(tme_dbl_grid.at(i).at(j));
            cov_dbl_grid_std.at(i).at(j) = Util::stddev(cov_dbl_grid.at(i).at(j));
            snr_dbl_grid_std.at(i).at(j) = Util::stddev(snr_dbl_grid.at(i).at(j));
            cap_dbl_grid_std.at(i).at(j) = Util::stddev(cap_dbl_grid.at(i).at(j));
            tme_dbl_grid_std.at(i).at(j) = Util::stddev(tme_dbl_grid.at(i).at(j));
            /* grid-based absolute error */
            cov_grid_error_mu.at(i).at(j) = Util::mean(Util::absErr(cov_flt_grid.at(i).at(j), cov_dbl_grid.at(i).at(j)));
            snr_grid_error_mu.at(i).at(j) = Util::mean(Util::absErr(snr_flt_grid.at(i).at(j), snr_dbl_grid.at(i).at(j)));
            cap_grid_error_mu.at(i).at(j) = Util::mean(Util::absErr(cap_flt_grid.at(i).at(j), cap_dbl_grid.at(i).at(j)));
            cov_grid_error_std.at(i).at(j) = Util::stddev(Util::absErr(cov_flt_grid.at(i).at(j), cov_dbl_grid.at(i).at(j)));
            snr_grid_error_std.at(i).at(j) = Util::stddev(Util::absErr(snr_flt_grid.at(i).at(j), snr_dbl_grid.at(i).at(j)));
            cap_grid_error_std.at(i).at(j) = Util::stddev(Util::absErr(cap_flt_grid.at(i).at(j), cap_dbl_grid.at(i).at(j)));
            /* float-based random samples */
            cov_flt_rand_mu.at(i).at(j) = Util::mean(cov_flt_rand.at(i).at(j));
            snr_flt_rand_mu.at(i).at(j) = Util::mean(snr_flt_rand.at(i).at(j));
            cap_flt_rand_mu.at(i).at(j) = Util::mean(cap_flt_rand.at(i).at(j));
            tme_flt_rand_mu.at(i).at(j) = Util::mean(tme_flt_rand.at(i).at(j));
            cov_flt_rand_std.at(i).at(j) = Util::stddev(cov_flt_rand.at(i).at(j));
            snr_flt_rand_std.at(i).at(j) = Util::stddev(snr_flt_rand.at(i).at(j));
            cap_flt_rand_std.at(i).at(j) = Util::stddev(cap_flt_rand.at(i).at(j));
            tme_flt_rand_std.at(i).at(j) = Util::stddev(tme_flt_rand.at(i).at(j));
            /* double-based random samples */
            cov_dbl_rand_mu.at(i).at(j) = Util::mean(cov_dbl_rand.at(i).at(j));
            snr_dbl_rand_mu.at(i).at(j) = Util::mean(snr_dbl_rand.at(i).at(j));
            cap_dbl_rand_mu.at(i).at(j) = Util::mean(cap_dbl_rand.at(i).at(j));
            tme_dbl_rand_mu.at(i).at(j) = Util::mean(tme_dbl_rand.at(i).at(j));
            cov_dbl_rand_std.at(i).at(j) = Util::stddev(cov_dbl_rand.at(i).at(j));
            snr_dbl_rand_std.at(i).at(j) = Util::stddev(snr_dbl_rand.at(i).at(j));
            cap_dbl_rand_std.at(i).at(j) = Util::stddev(cap_dbl_rand.at(i).at(j));
            tme_dbl_rand_std.at(i).at(j) = Util::stddev(tme_dbl_rand.at(i).at(j));
            /* random-based absolute error */
            cov_rand_error_mu.at(i).at(j) = Util::mean(Util::absErr(cov_flt_rand.at(i).at(j), cov_dbl_rand.at(i).at(j)));
            snr_rand_error_mu.at(i).at(j) = Util::mean(Util::absErr(snr_flt_rand.at(i).at(j), snr_dbl_rand.at(i).at(j)));
            cap_rand_error_mu.at(i).at(j) = Util::mean(Util::absErr(cap_flt_rand.at(i).at(j), cap_dbl_rand.at(i).at(j)));
            cov_rand_error_std.at(i).at(j) = Util::stddev(Util::absErr(cov_flt_rand.at(i).at(j), cov_dbl_rand.at(i).at(j)));
            snr_rand_error_std.at(i).at(j) = Util::stddev(Util::absErr(snr_flt_rand.at(i).at(j), snr_dbl_rand.at(i).at(j)));
            cap_rand_error_std.at(i).at(j) = Util::stddev(Util::absErr(cap_flt_rand.at(i).at(j), cap_dbl_rand.at(i).at(j)));
        }
    }

    Plot plot;
    //plot.constants.logscale_x = true;
    //plot.constants.logscale_y = false;

    /* Coverage */
    plot.create(PT_LINE_POINT, "", xlabel, "Coverage");
    for (unsigned int i=0; i<paramIter.size(); i++) {
        if (witherrorbars) {
            plot.addData_withErrorBars(indVarIter, cov_flt_grid_mu.at(i), cov_flt_grid_std.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, cov_dbl_grid_mu.at(i), cov_dbl_grid_std.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, cov_flt_rand_mu.at(i), cov_flt_rand_std.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, cov_dbl_rand_mu.at(i), cov_dbl_rand_std.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        } else {
            plot.addData(indVarIter, cov_flt_grid_mu.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, cov_dbl_grid_mu.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, cov_flt_rand_mu.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, cov_dbl_rand_mu.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        }
    }
    plot.save(imagePrename + "coverage");
    plot.create(PT_LINE_POINT, "", xlabel, "Absolute Error of Coverage");
    for (unsigned int i=0; i<paramIter.size(); i++) {
        if (witherrorbars) {
            plot.addData_withErrorBars(indVarIter, cov_grid_error_mu.at(i), cov_grid_error_std.at(i), "g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, cov_rand_error_mu.at(i), cov_rand_error_std.at(i), "r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        } else {
            plot.addData(indVarIter, cov_grid_error_mu.at(i), "g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, cov_rand_error_mu.at(i), "r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        }
    }
    plot.save(imagePrename + "coverage-error");

    /* SINR Max */
    plot.create(PT_LINE_POINT, "", xlabel, "Average Max SINR (W/W)");
    for (unsigned int i=0; i<paramIter.size(); i++) {
        if (witherrorbars) {
            plot.addData_withErrorBars(indVarIter, snr_flt_grid_mu.at(i), snr_flt_grid_std.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, snr_dbl_grid_mu.at(i), snr_dbl_grid_std.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, snr_flt_rand_mu.at(i), snr_flt_rand_std.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, snr_dbl_rand_mu.at(i), snr_dbl_rand_std.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        } else {
            plot.addData(indVarIter, snr_flt_grid_mu.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, snr_dbl_grid_mu.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, snr_flt_rand_mu.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, snr_dbl_rand_mu.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        }
    }
    plot.save(imagePrename + "sinrmax");
    plot.create(PT_LINE_POINT, "", xlabel, "Absolute Error of Average Max SINR");
    for (unsigned int i=0; i<paramIter.size(); i++) {
        if (witherrorbars) {
            plot.addData_withErrorBars(indVarIter, snr_grid_error_mu.at(i), snr_grid_error_std.at(i), "g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, snr_rand_error_mu.at(i), snr_rand_error_std.at(i), "r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        } else {
            plot.addData(indVarIter, snr_grid_error_mu.at(i), "g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, snr_rand_error_mu.at(i), "r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        }
    }
    plot.save(imagePrename + "sinrmax-error");

    /* Capacity */
    plot.create(PT_LINE_POINT, "", xlabel, "Average Max Capacity (bps)");
    for (unsigned int i=0; i<paramIter.size(); i++) {
        if (witherrorbars) {
            plot.addData_withErrorBars(indVarIter, cap_flt_grid_mu.at(i), cap_flt_grid_std.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, cap_dbl_grid_mu.at(i), cap_dbl_grid_std.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, cap_flt_rand_mu.at(i), cap_flt_rand_std.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, cap_dbl_rand_mu.at(i), cap_dbl_rand_std.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        } else {
            plot.addData(indVarIter, cap_flt_grid_mu.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, cap_dbl_grid_mu.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, cap_flt_rand_mu.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, cap_dbl_rand_mu.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        }
    }
    plot.save(imagePrename + "capacity");
    plot.create(PT_LINE_POINT, "", xlabel, "Absolute Error of Average Max Capacity");
    for (unsigned int i=0; i<paramIter.size(); i++) {
        if (witherrorbars) {
            plot.addData_withErrorBars(indVarIter, cap_grid_error_mu.at(i), cap_grid_error_std.at(i), "g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, cap_rand_error_mu.at(i), cap_rand_error_std.at(i), "r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        } else {
            plot.addData(indVarIter, cap_grid_error_mu.at(i), "g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, cap_rand_error_mu.at(i), "r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        }
    }
    plot.save(imagePrename + "capacity-error");

    /* Running Time */
    plot.create(PT_LINE_POINT, "", xlabel, "Running Time (seconds)");
    for (unsigned int i=0; i<paramIter.size(); i++) {
        if (witherrorbars) {
            plot.addData_withErrorBars(indVarIter, tme_flt_grid_mu.at(i), tme_flt_grid_std.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, tme_dbl_grid_mu.at(i), tme_dbl_grid_std.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, tme_flt_rand_mu.at(i), tme_flt_rand_std.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData_withErrorBars(indVarIter, tme_dbl_rand_mu.at(i), tme_dbl_rand_std.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        } else {
            plot.addData(indVarIter, tme_flt_grid_mu.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, tme_dbl_grid_mu.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, tme_flt_rand_mu.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
            plot.addData(indVarIter, tme_dbl_rand_mu.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
        }
    }
    plot.save(imagePrename + "running-time");

    return 0;
}
| cb3f4066147289c8f91e83daf1a73078278fbda0.cu | #include <iostream>
#include <oman/visualizer/plot.h> /* Must include before other - looks like a clash of namespaces between OMAN::Utility and Magick::_ImageInfo? */
#include <oman/general/omandirs.h> /* non c++11 */
#include <oman/general/iterationfunction.h> /* non c++11 */
#include <oman/general/templatedefines.h> /* non c++11 */
#include <sinr/network.h> /* Must include before other - looks like a clash of namespaces between OMAN::Utility and Thrust::Utility? */
#include <sinr/coordinates.cuh>
#include <sinr/visualizer.cuh>
#include <sinr/networkmetrics.cuh>
#include <sinr/util.h>
#include <sinr/types.h>
typedef thrust::tuple<double,double> Point2d_dbl;
typedef thrust::tuple<float,float> Point2d_flt;
/** This program computes averaged metrics versus the size of the network, parameterized over the number of
* data points used to sample each metric over a 2d arena. Each resulting data point is averaged over sampleCount
* independent runs.
*
* A main comparison is made between using floats or doubles as the underlying data type in the averaged metric
* calculations, as GPUs seem to be faster at evaluating single-precision floating point operations.
*/
int main(int argc __attribute__((unused)), char **argv __attribute__((unused))) {
/** @note if you only have one GPU, free memory depends on the number of windows you have open */
/** @todo generalize this code so that other programs can use it */
cudaSetDevice(0);
cudaDeviceReset();
size_t free, total;
cudaMemGetInfo(&free, &total);
std::cout<<"free: "<<free<<"\t total: "<<total<<std::endl;
/* User-controlled setup for experiment */
Util::deleteDirContents(OmanDirs::temp());
Util::deleteDirContents(OmanDirs::images());
Util::deleteDirContents(OmanDirs::videos());
Util::deleteDirContents(OmanDirs::logs());
Util::seedRandomGenerator(0);
vector<double> nodeIter = IterationFunction(10, 50, 5).getVector(); /* number of nodes to iterate over */
vector<double> sidePixelIter = IterationFunction(500, 500, 1).getVector(); /* image sizes to iterate over */
unsigned int sampleCount = 100; /* samples to average over for each param and indVar value*/
double arenaSideLength = 1000.0; /* (m) */
double widthdB = 5.0; /* visual display width of SINR cells (dB) */
Arena2d<double> arena_dbl(arenaSideLength);
Arena2d<float> arena_flt(arenaSideLength);
bool paramStatus = true;
bool indVarStatus = true;
bool sampleStatus = true;
bool saveImage = true;
bool witherrorbars = true;
/* Map iterators to parameter and independent variables
* Note: remember this mapping to make the right variable assignments inside the nested for-loops */
vector<double> paramIter(sidePixelIter);
string paramName = "pixels";
string paramLegend = "p=";
vector<double> indVarIter(nodeIter);
string indVarName = "nodes";
string xlabel = "Number of Nodes";
string imagePrename = "vs" + indVarName + "-samples" + Util::to_string(sampleCount) + "-";
/* Automatic setup for experiment */
vd3 cov_flt_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Coverage */
vd3 snr_flt_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* SINR Max */
vd3 cap_flt_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Capacity */
vd3 tme_flt_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Time */
vd3 cov_dbl_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Coverage */
vd3 snr_dbl_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* SINR Max */
vd3 cap_dbl_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Capacity */
vd3 tme_dbl_grid(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Time */
vd3 cov_flt_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Coverage */
vd3 snr_flt_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* SINR Max */
vd3 cap_flt_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Capacity */
vd3 tme_flt_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Time */
vd3 cov_dbl_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Coverage */
vd3 snr_dbl_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* SINR Max */
vd3 cap_dbl_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Capacity */
vd3 tme_dbl_rand(paramIter.size(), vd2(indVarIter.size(), vd1(sampleCount, 0.0))); /* Time */
uint64_t start, stop;
/// now you don't have to flush the buffer: http://stackoverflow.com/a/1716621/627517
setbuf(stdout, NULL);
for(unsigned int i_param=0; i_param<paramIter.size(); i_param++) {
/* NOTE: make sure we are assigning elements of paramIter to the right environment variable */
unsigned int sidePixelCount = paramIter.at(i_param);
if(paramStatus) {std::cout<<paramName<<i_param<<" "<<std::endl;}
for(unsigned int i_indVar=0; i_indVar<indVarIter.size(); i_indVar++) {
/* NOTE: make sure we are assigning elements of indVarIter to the right environment variable */
unsigned int N = indVarIter.at(i_indVar);
if(indVarStatus) {std::cout<<" "<<indVarName<<i_indVar<<" ";}
Network<float> net_flt_grid(N);
Network<float> net_flt_rand = net_flt_grid;
Network<double> net_dbl_grid(N);
Network<double> net_dbl_rand = net_dbl_grid;
for(unsigned int sample=0; sample<sampleCount; sample++) {
if(sampleStatus) {std::cout<<"s"<<sample<<" ";}
/* Set up network for CUDA */
for (unsigned int n = 0; n < N; n++) {
Point2d_dbl p(Util::uniform_double(0,arenaSideLength), Util::uniform_double(0,arenaSideLength));
net_dbl_grid.setPosition(n,p);
net_dbl_rand.setPosition(n,p);
net_flt_grid.setPosition(n,p);
net_flt_rand.setPosition(n,p);
}
/* float-based grid */
start = Util::getTimeNS();
thrust::device_vector<Point2d_flt> coords_flt_grid(sidePixelCount*sidePixelCount);
sinr::coordinates::generateGrid(coords_flt_grid,
arena_flt,
sidePixelCount,
sidePixelCount);
NetworkMetricsDev<float> nm_flt_grid(&net_flt_grid, &arena_flt, &coords_flt_grid);
cov_flt_grid.at(i_param).at(i_indVar).at(sample) = nm_flt_grid.computeAvgMaxCoverage();
snr_flt_grid.at(i_param).at(i_indVar).at(sample) = nm_flt_grid.computeAvgMaxSINR();
cap_flt_grid.at(i_param).at(i_indVar).at(sample) = nm_flt_grid.computeAvgMaxCapacity();
stop = Util::getTimeNS();
tme_flt_grid.at(i_param).at(i_indVar).at(sample) = (stop-start)/double(Util::nanoPerSec);
/* double-based grid */
start = Util::getTimeNS();
thrust::device_vector<Point2d_dbl> coords_dbl_grid(sidePixelCount*sidePixelCount);
sinr::coordinates::generateGrid(coords_dbl_grid,
arena_dbl,
sidePixelCount,
sidePixelCount);
NetworkMetricsDev<double> nm_dbl_grid(&net_dbl_grid, &arena_dbl, &coords_dbl_grid);
cov_dbl_grid.at(i_param).at(i_indVar).at(sample) = nm_dbl_grid.computeAvgMaxCoverage();
snr_dbl_grid.at(i_param).at(i_indVar).at(sample) = nm_dbl_grid.computeAvgMaxSINR();
cap_dbl_grid.at(i_param).at(i_indVar).at(sample) = nm_dbl_grid.computeAvgMaxCapacity();
stop = Util::getTimeNS();
tme_dbl_grid.at(i_param).at(i_indVar).at(sample) = (stop-start)/double(Util::nanoPerSec);
/* float-based random */
start = Util::getTimeNS();
thrust::device_vector<Point2d_flt> coords_flt_rand(sidePixelCount*sidePixelCount);
sinr::coordinates::generateRandom(coords_flt_rand,
arena_flt,
sidePixelCount*sidePixelCount,
sample);
NetworkMetricsDev<float> nm_flt_rand(&net_flt_rand, &arena_flt, &coords_flt_rand);
cov_flt_rand.at(i_param).at(i_indVar).at(sample) = nm_flt_rand.computeAvgMaxCoverage();
snr_flt_rand.at(i_param).at(i_indVar).at(sample) = nm_flt_rand.computeAvgMaxSINR();
cap_flt_rand.at(i_param).at(i_indVar).at(sample) = nm_flt_rand.computeAvgMaxCapacity();
stop = Util::getTimeNS();
tme_flt_rand.at(i_param).at(i_indVar).at(sample) = (stop-start)/double(Util::nanoPerSec);
/* double-based random */
start = Util::getTimeNS();
thrust::device_vector<Point2d_dbl> coords_dbl_rand(sidePixelCount*sidePixelCount);
sinr::coordinates::generateRandom(coords_dbl_rand,
arena_dbl,
sidePixelCount*sidePixelCount,
sample);
NetworkMetricsDev<double> nm_dbl_rand(&net_dbl_rand, &arena_dbl, &coords_dbl_rand);
cov_dbl_rand.at(i_param).at(i_indVar).at(sample) = nm_dbl_rand.computeAvgMaxCoverage();
snr_dbl_rand.at(i_param).at(i_indVar).at(sample) = nm_dbl_rand.computeAvgMaxSINR();
cap_dbl_rand.at(i_param).at(i_indVar).at(sample) = nm_dbl_rand.computeAvgMaxCapacity();
stop = Util::getTimeNS();
tme_dbl_rand.at(i_param).at(i_indVar).at(sample) = (stop-start)/double(Util::nanoPerSec);
if (saveImage && (sample == 0)) {
string imagePostname = paramName + Util::to_string((int)paramIter.at(i_param)) + "-" + indVarName + Util::to_string((int)indVarIter.at(i_indVar)) + "-" + "s" + Util::to_string((int)sample);
thrust::device_vector<uchar4> rgba_flt(sidePixelCount*sidePixelCount);
const thrust::device_vector<float> *maxsinr_flt = nm_flt_grid.computeMapMaxSINR();
sinr::visualize::grayscaledB(*maxsinr_flt, rgba_flt, net_flt_grid.getSINRThresholddB()-widthdB, net_flt_grid.getSINRThresholddB());
string imagePath_flt = OmanDirs::images() + "/" + imagePrename + imagePostname + "-flt.bmp";
sinr::visualize::outputBMP(rgba_flt, sidePixelCount, sidePixelCount, imagePath_flt);
thrust::device_vector<uchar4> rgba_dbl(sidePixelCount*sidePixelCount);
const thrust::device_vector<double> *maxsinr_dbl = nm_dbl_grid.computeMapMaxSINR();
sinr::visualize::grayscaledB(*maxsinr_dbl, rgba_dbl, net_flt_grid.getSINRThresholddB()-widthdB, net_flt_grid.getSINRThresholddB());
string imagePath_dbl = OmanDirs::images() + "/" + imagePrename + imagePostname + "-dbl.bmp";
sinr::visualize::outputBMP(rgba_dbl, sidePixelCount, sidePixelCount, imagePath_dbl);
}
}
if (indVarStatus) {std::cout<<std::endl;}
}
}
if(paramStatus || indVarStatus || sampleStatus) {
std::cout << std::endl;
}
/// Here we use the computed coverage, sinrmax, and capacity to determine the "error" in the lower resolution images.
/* float-based grid samples */
vd2 cov_flt_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 snr_flt_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cap_flt_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 tme_flt_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cov_flt_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 snr_flt_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cap_flt_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 tme_flt_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
/* double-based grid samples */
vd2 cov_dbl_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 snr_dbl_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cap_dbl_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 tme_dbl_grid_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cov_dbl_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 snr_dbl_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cap_dbl_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 tme_dbl_grid_std(paramIter.size(), vd1(indVarIter.size(), 0));
/* float-based random samples */
vd2 cov_flt_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 snr_flt_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cap_flt_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 tme_flt_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cov_flt_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 snr_flt_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cap_flt_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 tme_flt_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
/* double-based random samples */
vd2 cov_dbl_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 snr_dbl_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cap_dbl_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 tme_dbl_rand_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cov_dbl_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 snr_dbl_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cap_dbl_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 tme_dbl_rand_std(paramIter.size(), vd1(indVarIter.size(), 0));
/* Error Measurements - Grid */
vd2 cov_grid_error_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 snr_grid_error_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cap_grid_error_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cov_grid_error_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 snr_grid_error_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cap_grid_error_std(paramIter.size(), vd1(indVarIter.size(), 0));
/* Error Measurements - Random */
vd2 cov_rand_error_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 snr_rand_error_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cap_rand_error_mu(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cov_rand_error_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 snr_rand_error_std(paramIter.size(), vd1(indVarIter.size(), 0));
vd2 cap_rand_error_std(paramIter.size(), vd1(indVarIter.size(), 0));
for(unsigned int i=0; i<paramIter.size(); i++) {
for(unsigned int j=0; j<indVarIter.size(); j++) {
/* float-based grid samples */
cov_flt_grid_mu.at(i).at(j) = Util::mean(cov_flt_grid.at(i).at(j));
snr_flt_grid_mu.at(i).at(j) = Util::mean(snr_flt_grid.at(i).at(j));
cap_flt_grid_mu.at(i).at(j) = Util::mean(cap_flt_grid.at(i).at(j));
tme_flt_grid_mu.at(i).at(j) = Util::mean(tme_flt_grid.at(i).at(j));
cov_flt_grid_std.at(i).at(j) = Util::stddev(cov_flt_grid.at(i).at(j));
snr_flt_grid_std.at(i).at(j) = Util::stddev(snr_flt_grid.at(i).at(j));
cap_flt_grid_std.at(i).at(j) = Util::stddev(cap_flt_grid.at(i).at(j));
tme_flt_grid_std.at(i).at(j) = Util::stddev(tme_flt_grid.at(i).at(j));
/* double-based grid samples */
cov_dbl_grid_mu.at(i).at(j) = Util::mean(cov_dbl_grid.at(i).at(j));
snr_dbl_grid_mu.at(i).at(j) = Util::mean(snr_dbl_grid.at(i).at(j));
cap_dbl_grid_mu.at(i).at(j) = Util::mean(cap_dbl_grid.at(i).at(j));
tme_dbl_grid_mu.at(i).at(j) = Util::mean(tme_dbl_grid.at(i).at(j));
cov_dbl_grid_std.at(i).at(j) = Util::stddev(cov_dbl_grid.at(i).at(j));
snr_dbl_grid_std.at(i).at(j) = Util::stddev(snr_dbl_grid.at(i).at(j));
cap_dbl_grid_std.at(i).at(j) = Util::stddev(cap_dbl_grid.at(i).at(j));
tme_dbl_grid_std.at(i).at(j) = Util::stddev(tme_dbl_grid.at(i).at(j));
/* grid-based absolute error */
cov_grid_error_mu.at(i).at(j) = Util::mean(Util::absErr(cov_flt_grid.at(i).at(j), cov_dbl_grid.at(i).at(j)));
snr_grid_error_mu.at(i).at(j) = Util::mean(Util::absErr(snr_flt_grid.at(i).at(j), snr_dbl_grid.at(i).at(j)));
cap_grid_error_mu.at(i).at(j) = Util::mean(Util::absErr(cap_flt_grid.at(i).at(j), cap_dbl_grid.at(i).at(j)));
cov_grid_error_std.at(i).at(j) = Util::stddev(Util::absErr(cov_flt_grid.at(i).at(j), cov_dbl_grid.at(i).at(j)));
snr_grid_error_std.at(i).at(j) = Util::stddev(Util::absErr(snr_flt_grid.at(i).at(j), snr_dbl_grid.at(i).at(j)));
cap_grid_error_std.at(i).at(j) = Util::stddev(Util::absErr(cap_flt_grid.at(i).at(j), cap_dbl_grid.at(i).at(j)));
/* float-based random samples */
cov_flt_rand_mu.at(i).at(j) = Util::mean(cov_flt_rand.at(i).at(j));
snr_flt_rand_mu.at(i).at(j) = Util::mean(snr_flt_rand.at(i).at(j));
cap_flt_rand_mu.at(i).at(j) = Util::mean(cap_flt_rand.at(i).at(j));
tme_flt_rand_mu.at(i).at(j) = Util::mean(tme_flt_rand.at(i).at(j));
cov_flt_rand_std.at(i).at(j) = Util::stddev(cov_flt_rand.at(i).at(j));
snr_flt_rand_std.at(i).at(j) = Util::stddev(snr_flt_rand.at(i).at(j));
cap_flt_rand_std.at(i).at(j) = Util::stddev(cap_flt_rand.at(i).at(j));
tme_flt_rand_std.at(i).at(j) = Util::stddev(tme_flt_rand.at(i).at(j));
/* double-based random samples */
cov_dbl_rand_mu.at(i).at(j) = Util::mean(cov_dbl_rand.at(i).at(j));
snr_dbl_rand_mu.at(i).at(j) = Util::mean(snr_dbl_rand.at(i).at(j));
cap_dbl_rand_mu.at(i).at(j) = Util::mean(cap_dbl_rand.at(i).at(j));
tme_dbl_rand_mu.at(i).at(j) = Util::mean(tme_dbl_rand.at(i).at(j));
cov_dbl_rand_std.at(i).at(j) = Util::stddev(cov_dbl_rand.at(i).at(j));
snr_dbl_rand_std.at(i).at(j) = Util::stddev(snr_dbl_rand.at(i).at(j));
cap_dbl_rand_std.at(i).at(j) = Util::stddev(cap_dbl_rand.at(i).at(j));
tme_dbl_rand_std.at(i).at(j) = Util::stddev(tme_dbl_rand.at(i).at(j));
/* random-based absolute error */
cov_rand_error_mu.at(i).at(j) = Util::mean(Util::absErr(cov_flt_rand.at(i).at(j), cov_dbl_rand.at(i).at(j)));
snr_rand_error_mu.at(i).at(j) = Util::mean(Util::absErr(snr_flt_rand.at(i).at(j), snr_dbl_rand.at(i).at(j)));
cap_rand_error_mu.at(i).at(j) = Util::mean(Util::absErr(cap_flt_rand.at(i).at(j), cap_dbl_rand.at(i).at(j)));
cov_rand_error_std.at(i).at(j) = Util::stddev(Util::absErr(cov_flt_rand.at(i).at(j), cov_dbl_rand.at(i).at(j)));
snr_rand_error_std.at(i).at(j) = Util::stddev(Util::absErr(snr_flt_rand.at(i).at(j), snr_dbl_rand.at(i).at(j)));
cap_rand_error_std.at(i).at(j) = Util::stddev(Util::absErr(cap_flt_rand.at(i).at(j), cap_dbl_rand.at(i).at(j)));
}
}
Plot plot;
//plot.constants.logscale_x = true;
//plot.constants.logscale_y = false;
/* Coverage */
plot.create(PT_LINE_POINT, "", xlabel, "Coverage");
for (unsigned int i=0; i<paramIter.size(); i++) {
if (witherrorbars) {
plot.addData_withErrorBars(indVarIter, cov_flt_grid_mu.at(i), cov_flt_grid_std.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, cov_dbl_grid_mu.at(i), cov_dbl_grid_std.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, cov_flt_rand_mu.at(i), cov_flt_rand_std.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, cov_dbl_rand_mu.at(i), cov_dbl_rand_std.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
} else {
plot.addData(indVarIter, cov_flt_grid_mu.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, cov_dbl_grid_mu.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, cov_flt_rand_mu.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, cov_dbl_rand_mu.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
}
}
plot.save(imagePrename + "coverage");
plot.create(PT_LINE_POINT, "", xlabel, "Absolute Error of Coverage");
for (unsigned int i=0; i<paramIter.size(); i++) {
if (witherrorbars) {
plot.addData_withErrorBars(indVarIter, cov_grid_error_mu.at(i), cov_grid_error_std.at(i), "g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, cov_rand_error_mu.at(i), cov_rand_error_std.at(i), "r," + paramLegend + Util::to_string((int)paramIter.at(i)));
} else {
plot.addData(indVarIter, cov_grid_error_mu.at(i), "g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, cov_rand_error_mu.at(i), "r," + paramLegend + Util::to_string((int)paramIter.at(i)));
}
}
plot.save(imagePrename + "coverage-error");
/* SINR Max */
plot.create(PT_LINE_POINT, "", xlabel, "Average Max SINR (W/W)");
for (unsigned int i=0; i<paramIter.size(); i++) {
if (witherrorbars) {
plot.addData_withErrorBars(indVarIter, snr_flt_grid_mu.at(i), snr_flt_grid_std.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, snr_dbl_grid_mu.at(i), snr_dbl_grid_std.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, snr_flt_rand_mu.at(i), snr_flt_rand_std.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, snr_dbl_rand_mu.at(i), snr_dbl_rand_std.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
} else {
plot.addData(indVarIter, snr_flt_grid_mu.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, snr_dbl_grid_mu.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, snr_flt_rand_mu.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, snr_dbl_rand_mu.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
}
}
plot.save(imagePrename + "sinrmax");
plot.create(PT_LINE_POINT, "", xlabel, "Absolute Error of Average Max SINR");
for (unsigned int i=0; i<paramIter.size(); i++) {
if (witherrorbars) {
plot.addData_withErrorBars(indVarIter, snr_grid_error_mu.at(i), snr_grid_error_std.at(i), "g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, snr_rand_error_mu.at(i), snr_rand_error_std.at(i), "r," + paramLegend + Util::to_string((int)paramIter.at(i)));
} else {
plot.addData(indVarIter, snr_grid_error_mu.at(i), "g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, snr_rand_error_mu.at(i), "r," + paramLegend + Util::to_string((int)paramIter.at(i)));
}
}
plot.save(imagePrename + "sinrmax-error");
/* Capacity */
plot.create(PT_LINE_POINT, "", xlabel, "Average Max Capacity (bps)");
for (unsigned int i=0; i<paramIter.size(); i++) {
if (witherrorbars) {
plot.addData_withErrorBars(indVarIter, cap_flt_grid_mu.at(i), cap_flt_grid_std.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, cap_dbl_grid_mu.at(i), cap_dbl_grid_std.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, cap_flt_rand_mu.at(i), cap_flt_rand_std.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, cap_dbl_rand_mu.at(i), cap_dbl_rand_std.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
} else {
plot.addData(indVarIter, cap_flt_grid_mu.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, cap_dbl_grid_mu.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, cap_flt_rand_mu.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, cap_dbl_rand_mu.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
}
}
plot.save(imagePrename + "capacity");
plot.create(PT_LINE_POINT, "", xlabel, "Absolute Error of Average Max Capacity");
for (unsigned int i=0; i<paramIter.size(); i++) {
if (witherrorbars) {
plot.addData_withErrorBars(indVarIter, cap_grid_error_mu.at(i), cap_grid_error_std.at(i), "g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, cap_rand_error_mu.at(i), cap_rand_error_std.at(i), "r," + paramLegend + Util::to_string((int)paramIter.at(i)));
} else {
plot.addData(indVarIter, cap_grid_error_mu.at(i), "g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, cap_rand_error_mu.at(i), "r," + paramLegend + Util::to_string((int)paramIter.at(i)));
}
}
plot.save(imagePrename + "capacity-error");
/* Running Time */
plot.create(PT_LINE_POINT, "", xlabel, "Running Time (seconds)");
for (unsigned int i=0; i<paramIter.size(); i++) {
if (witherrorbars) {
plot.addData_withErrorBars(indVarIter, tme_flt_grid_mu.at(i), tme_flt_grid_std.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, tme_dbl_grid_mu.at(i), tme_dbl_grid_std.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, tme_flt_rand_mu.at(i), tme_flt_rand_std.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData_withErrorBars(indVarIter, tme_dbl_rand_mu.at(i), tme_dbl_rand_std.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
} else {
plot.addData(indVarIter, tme_flt_grid_mu.at(i), "f,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, tme_dbl_grid_mu.at(i), "d,g," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, tme_flt_rand_mu.at(i), "f,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
plot.addData(indVarIter, tme_dbl_rand_mu.at(i), "d,r," + paramLegend + Util::to_string((int)paramIter.at(i)));
}
}
plot.save(imagePrename + "running-time");
return 0;
}
|
02208c67a10c7bf9edfb19e3f6bf127f4d790e35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <assert.h>
#include <cstdlib>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <math.h>
#include <string.h>
#include <iostream>
using namespace cv;
using namespace std;
#define BLOCK_SIZE 256
#define PRIVATE 256
// Print `dim` integers from `vect` to stdout, space-separated, no trailing newline.
void print_array(int* vect, int dim)
{
    for (int idx = 0; idx < dim; ++idx) {
        printf("%d ", vect[idx]);
    }
}
// Print `dim` floats from `vect` to stdout, space-separated, no trailing newline.
void print_array(float* vect, int dim)
{
    for (int idx = 0; idx < dim; ++idx) {
        printf("%f ", vect[idx]);
    }
}
// Render a 256-bin histogram as a vertical-bar chart and show it in an
// OpenCV window titled `name`. The caller's histogram is not modified.
void display_histogram(int histogram[], const char* name) {
    int histogramWidth = 512;
    int histogramHeight = 400;
    int newHistogram[256];
    int binWidth;
    int maximumIntensity;
    // Work on a local copy so normalization does not clobber the input.
    for (int i = 0; i < 256; i++) newHistogram[i] = histogram[i];
    // Creating "bins" for the range of 256 intensity values.
    binWidth = cvRound((double)histogramWidth / 256);
    Mat histogramImage(histogramHeight, histogramWidth, CV_8UC1, Scalar(255, 255, 255));
    // Finding the maximum intensity level in the histogram.
    maximumIntensity = newHistogram[0];
    for (int i = 1; i < 256; i++) {
        if (maximumIntensity < newHistogram[i]) maximumIntensity = newHistogram[i];
    }
    // FIX: an all-zero histogram previously divided by zero below.
    if (maximumIntensity == 0) maximumIntensity = 1;
    // Normalizing histogram in terms of rows (y).
    for (int i = 0; i < 256; i++) newHistogram[i] = ((double)newHistogram[i] / maximumIntensity) * histogramImage.rows;
    // Drawing the intensity level - one vertical line per bin.
    for (int i = 0; i < 256; i++) line(histogramImage, Point(binWidth * (i), histogramHeight), Point(binWidth * (i), histogramHeight - newHistogram[i]), Scalar(0, 0, 0), 1, 8, 0);
    namedWindow(name, WINDOW_AUTOSIZE);
    imshow(name, histogramImage);
}
// Block-privatized histogram: each block accumulates counts into a private
// shared-memory copy, then merges it into the global bins with atomics.
// Expects input values in [0, PRIVATE) and a 1-D launch of BLOCK_SIZE threads.
__global__ void histogramKernel(int* bins, long* input, long numElems) {
    const int tx = threadIdx.x;
    // Global index of the element this thread handles.
    const int gid = blockIdx.x * blockDim.x + tx;

    // Per-block private histogram.
    __shared__ unsigned int hist[PRIVATE];

    // Zero the private copy. The strided loop covers both cases the original
    // branched on: PRIVATE > BLOCK_SIZE (each thread clears several slots) and
    // PRIVATE <= BLOCK_SIZE (only threads tx < PRIVATE run a single iteration).
    for (int j = tx; j < PRIVATE; j += BLOCK_SIZE) {
        hist[j] = 0;
    }
    // All slots must be zeroed before anyone increments.
    __syncthreads();

    // Count this thread's element in the private histogram.
    if (gid < numElems) {
        atomicAdd(&(hist[input[gid]]), 1);
    }
    // All increments must land before the merge.
    __syncthreads();

    // Flush the private histogram into the global one, same strided coverage.
    for (int j = tx; j < PRIVATE; j += BLOCK_SIZE) {
        atomicAdd(&(bins[j]), hist[j]);
    }
}
// Histogram via per-thread counter columns: a 256 x 32 shared array gives
// every thread of a 32-thread block its own private column of bin counters,
// so the per-element increment needs no atomics; the 32 columns are then
// folded with an unrolled tree reduction and flushed with one atomicAdd per bin.
// NOTE(review): assumes blockDim.x == 32 (column index is threadIdx.x and the
// reduction folds exactly 32 columns) and nbin <= 256 -- confirm at the call site.
// NOTE(review): the volatile, implicitly warp-synchronous reduction relies on
// lockstep lane execution; under Volta+ independent thread scheduling this
// pattern needs __syncwarp() to be safe -- verify the target architecture.
__global__ void histogramKernel2(int* ohist, long* idata, long dataSize, int nbin)
{
// https://sett.com/gpgpu/cuda-leveraging-implicit-intra-warp-synchronization-in-reduction-algorithms
__shared__ volatile int sh_data_temp[256][32];
unsigned int tidxx = threadIdx.x;
unsigned int gTidx = blockDim.x * blockIdx.x + threadIdx.x;
// Zero this thread's private column of bin counters.
for (int i = 0; i < nbin; i++)
{
sh_data_temp[i][tidxx] = 0;
}
__syncthreads();
if (gTidx < dataSize)
{
// long -> float -> int round-trip; lossy above 2^24 but harmless for 8-bit
// pixel data. The % nbin keeps the bin index in range.
float item = idata[gTidx];
int bin = ((int)item) % nbin;
// No atomic needed: this thread owns column tidxx exclusively.
sh_data_temp[bin][tidxx] += 1;
}
__syncthreads();
if (tidxx < 16)
{
for (int k = 0; k < nbin; k++)
{
// Unrolled tree reduction of the 32 columns into column 0.
sh_data_temp[k][tidxx] += sh_data_temp[k][tidxx + 16];
sh_data_temp[k][tidxx] += sh_data_temp[k][tidxx + 8];
sh_data_temp[k][tidxx] += sh_data_temp[k][tidxx + 4];
sh_data_temp[k][tidxx] += sh_data_temp[k][tidxx + 2];
sh_data_temp[k][tidxx] += sh_data_temp[k][tidxx + 1];
if (tidxx == 0)
{
// One atomic per bin merges this block's partial histogram globally.
atomicAdd(&ohist[k], sh_data_temp[k][0]);
}
}
}
}
#define WARP_SIZE 32
// Histogram with R replicated sub-histograms per block in shared memory:
// threads are spread across the replicas to cut atomic contention, and each
// replica is padded to BINS + 1 ints to stagger bank mapping. Work is
// distributed warp-wise over contiguous input slices.
// Dynamic shared memory required: (BINS + 1) * R * sizeof(int).
// NOTE(review): every data value must lie in [0, BINS) -- values are narrowed
// from long to int with no range check; confirm the input domain at the caller.
__global__ void histogramKernel3(int* histo, long* data, long size, const int BINS, const int R)
{
extern __shared__ int Hs[];
// Warp id and lane id of this thread within its block.
const int warpid = (int)(threadIdx.x / WARP_SIZE);
const int lane = threadIdx.x % WARP_SIZE;
const int warps_block = blockDim.x / WARP_SIZE;
// Base offset of the replica this thread updates.
const int off_rep = (BINS + 1) * (threadIdx.x % R);
// Slice [begin, end) of the input assigned to this warp, walked with a
// stride of WARP_SIZE * gridDim.x so multiple blocks share each slice.
const int begin = (size / warps_block) * warpid + WARP_SIZE * blockIdx.x + lane;
const int end = (size / warps_block) * (warpid + 1);
const int step = WARP_SIZE * gridDim.x;
// Cooperatively zero all R replicas.
for (int pos = threadIdx.x; pos < (BINS + 1) * R; pos += blockDim.x)
Hs[pos] = 0;
__syncthreads();
for (int i = begin; i < end; i += step)
{
int d = data[i];
atomicAdd(&Hs[off_rep + d], 1);
}
__syncthreads();
// Sum the R replicas for each bin and merge into the global histogram,
// one atomic per (block, bin).
for (int pos = threadIdx.x; pos < BINS; pos += blockDim.x)
{
int sum = 0;
for (int base = 0; base < (BINS + 1) * R; base += BINS + 1)
sum += Hs[base + pos];
atomicAdd(histo + pos, sum);
}
}
// Work-efficient (Blelloch) EXCLUSIVE prefix sum over n elements using a
// balanced tree in shared memory (up-sweep, clear root, down-sweep); used
// here to build the cumulative histogram.
// Launch contract: a single block of n/2 threads (each thread loads and
// stores two elements), n a power of two, n * sizeof(int) dynamic shared mem.
// NOTE(review): plain GPU Gems ch. 39 version -- no bank-conflict padding and
// no bounds guard, so the launch configuration must match exactly.
__global__ void cumHistKernelBT(int* g_odata, int* g_idata, int n)
{
extern __shared__ int temp[]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[2 * thid] = g_idata[2 * thid]; // load input into shared memory
temp[2 * thid + 1] = g_idata[2 * thid + 1];
for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element (exclusive scan root)
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[2 * thid] = temp[2 * thid]; // write results to device memory
g_odata[2 * thid + 1] = temp[2 * thid + 1];
}
// Probability of each intensity level: PR(k) = count(k) / size, where size
// is the total pixel count. One thread per bin; the launch must cover the
// bin count exactly (no bounds guard).
__global__ void prkKernel(float* d_out, int* d_in, long size)
{
    const int bin = blockDim.x * blockIdx.x + threadIdx.x;
    const float count = (float)d_in[bin];
    d_out[bin] = count / size;
}
// Scale the cumulative histogram into the output intensity range:
// S(k) = round(cumHist(k) * alpha), with alpha = 255 / pixelCount.
// One thread per bin; the launch must cover the bin count exactly.
__global__ void skKernel(int* d_out, int* d_in, float alpha)
{
    const int bin = blockDim.x * blockIdx.x + threadIdx.x;
    const float scaled = (float)d_in[bin] * alpha;
    d_out[bin] = round(scaled);
}
// Fold input-level probabilities into their mapped output levels:
// PS(s) += PR(k) for every k with S(k) == s. Several input bins can map to
// the same output bin, hence the atomic accumulation.
// One thread per bin; the launch must cover the bin count exactly.
__global__ void pskKernel(float* d_out, int* d_in_a, float* d_in_b)
{
    const int srcBin = blockDim.x * blockIdx.x + threadIdx.x;
    const int dstBin = d_in_a[srcBin];
    atomicAdd(&d_out[dstBin], d_in_b[srcBin]);
}
// Convert equalized probabilities back to 8-bit intensities:
// value(k) = round(PS(k) * 255). One thread per bin; launch must cover the
// bin count exactly.
__global__ void finalValuesKernel(int* d_out, float* d_in)
{
    const int bin = blockDim.x * blockIdx.x + threadIdx.x;
    const float scaled = d_in[bin] * 255;
    d_out[bin] = round(scaled);
}
// Remap every pixel through the equalization lookup table d_in, overwriting
// the image buffer in place. One thread per pixel; the launch must cover the
// pixel count exactly (no bounds guard) and pixel values must index d_in.
__global__ void finalImageKernel(long* d_out, int* d_in)
{
    const int pixel = blockDim.x * blockIdx.x + threadIdx.x;
    const long oldLevel = d_out[pixel];
    d_out[pixel] = (uchar)(d_in[oldLevel]);
}
// Histogram equalization of a grayscale image on the GPU (HIP).
// Pipeline: histogram -> exclusive scan -> scale -> probability remap ->
// rounded lookup -> per-pixel remap. All buffers are managed memory.
int main()
{
    char img_path[1024];
    printf("Starting application\n");
    printf("Insert image path: ");
    scanf_s("%1023[^\n]", img_path, (unsigned)_countof(img_path));
    printf("Showing results\n");
    // FIX: `image` was declared twice (a redefinition error); the hard-coded
    // debug path is removed and the user-supplied path kept.
    Mat image = imread(img_path, IMREAD_GRAYSCALE);
    int h = image.rows, w = image.cols; // image dimensions
    int* h_hist;         // histogram (dim_hist bins)
    long* h_image;       // flattened grayscale pixels
    float* h_PRk;        // P(r_k): input level probabilities
    int* h_cumHist;      // exclusive cumulative histogram
    int* h_Sk;           // scaled intensity mapping S(k)
    float* h_PSk;        // P(s_k): mapped level probabilities
    int* h_finalValues;  // rounded equalized values per level
    int dim_hist = 256;
    long dim_image = h * w; // image size in pixels
    float alpha = 255.0 / dim_image;
    hipError_t cudaStatus;
    int numThreadsPerBlock = 256; // define block size
    // NOTE(review): floor division drops up to 255 tail pixels when dim_image
    // is not a multiple of the block size; a ceil-div fix would need a size
    // parameter (bounds guard) added to finalImageKernel -- confirm intent.
    int numBlocks = dim_image / numThreadsPerBlock;
    // FIX: BINS/R hoisted above the first `goto Error` -- jumping over an
    // initialized declaration is ill-formed C++.
    int BINS = dim_hist;
    int R = 32;
    hipEvent_t start, stop;
    float elapsedTime;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0); // Start global timer
    hipMallocManaged(&h_hist, dim_hist * sizeof(int));
    hipMallocManaged(&h_image, dim_image * sizeof(long));
    hipMallocManaged(&h_PRk, dim_hist * sizeof(float));
    hipMallocManaged(&h_cumHist, dim_hist * sizeof(int));
    hipMallocManaged(&h_Sk, dim_hist * sizeof(int));
    hipMallocManaged(&h_PSk, dim_hist * sizeof(float));
    hipMallocManaged(&h_finalValues, dim_hist * sizeof(int));
    for (int i = 0; i < dim_hist; ++i) h_hist[i] = 0;
    // Copy pixels into the managed buffer, row-major.
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            h_image[i * w + j] = image.at<uchar>(i, j);
        }
    }
    // Check CUDA device
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // ******************************************************************************************
    // Compute image histogram (R-replicated shared-memory kernel; the
    // histogramKernel/histogramKernel2 variants above are alternatives).
    // FIX: arguments now match the signature (histo, data, size, BINS, R);
    // the original passed (..., BINS, dim_hist, R) so `size` was 256 and only
    // the first 256 pixels were counted.
    histogramKernel3 << <numBlocks, numThreadsPerBlock, (BINS + 1) * R * sizeof(int) >> > (h_hist, h_image, dim_image, BINS, R);
    // block until the device has completed
    hipDeviceSynchronize();
    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addKernel histo launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }
    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    display_histogram(h_hist, "CUDA Histogram");
    // ******************************************************************************************
    // Compute Cumulative Histogram (Blelloch exclusive scan).
    // FIX: the scan loads two elements per thread, so it must be launched
    // with dim_hist/2 threads and dim_hist ints of shared memory; the
    // original dim_hist-thread launch read/wrote past the 256-entry buffers.
    cumHistKernelBT << < 1, dim_hist / 2, dim_hist * sizeof(int) >> > (h_cumHist, h_hist, dim_hist);
    hipDeviceSynchronize();
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "[cumhist] addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // ******************************************************************************************
    // Probability distribution for intensity levels
    prkKernel << < 1, dim_hist >> > (h_PRk, h_hist, dim_image);
    // Scaling operation
    skKernel << < 1, dim_hist >> > (h_Sk, h_cumHist, alpha);
    // Mapping operation
    pskKernel << < 1, dim_hist >> > (h_PSk, h_Sk, h_PRk);
    hipDeviceSynchronize();
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // ******************************************************************************************
    // Rounding to get final values
    finalValuesKernel << < 1, dim_hist >> > (h_finalValues, h_PSk);
    // Creating equalized image (in-place remap through the S(k) table)
    finalImageKernel << < numBlocks, numThreadsPerBlock >> > (h_image, h_Sk);
    hipDeviceSynchronize();
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // ******************************************************************************************
    display_histogram(h_finalValues, "CUDA Equalized histogram");
    // Copy the equalized pixels back into the OpenCV image.
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            image.at<uchar>(i, j) = h_image[i * w + j];
        }
    }
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop); // hipEventElapsedTime returns value in milliseconds. Resolution ~0.5ms
    printf("Execution time GPU: %f\n", elapsedTime);
Error:
    // Free device memory
    hipFree(h_hist);
    hipFree(h_image);
    hipFree(h_PRk);
    hipFree(h_cumHist);
    hipFree(h_Sk);
    hipFree(h_PSk);
    hipFree(h_finalValues);
    // Destroy CUDA Event API Events
    hipEventDestroy(start);
    hipEventDestroy(stop);
    // Display equalized image
    namedWindow("CUDA Equilized Image", WINDOW_NORMAL);
    imshow("CUDA Equilized Image", image);
    waitKey();
    return 0;
} | 02208c67a10c7bf9edfb19e3f6bf127f4d790e35.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <assert.h>
#include <cstdlib>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <math.h>
#include <string.h>
#include <iostream>
using namespace cv;
using namespace std;
#define BLOCK_SIZE 256
#define PRIVATE 256
// Write `dim` integers to stdout separated by single spaces (no newline).
void print_array(int* vect, int dim)
{
    for (int k = 0; k < dim; ++k) printf("%d ", vect[k]);
}
// Write `dim` floats to stdout separated by single spaces (no newline).
void print_array(float* vect, int dim)
{
    for (int k = 0; k < dim; ++k) printf("%f ", vect[k]);
}
// Render a 256-bin histogram as a vertical-bar chart and show it in an
// OpenCV window titled `name`. The caller's histogram is not modified.
void display_histogram(int histogram[], const char* name) {
    int histogramWidth = 512;
    int histogramHeight = 400;
    int newHistogram[256];
    int binWidth;
    int maximumIntensity;
    // Work on a local copy so normalization does not clobber the input.
    for (int i = 0; i < 256; i++) newHistogram[i] = histogram[i];
    // Creating "bins" for the range of 256 intensity values.
    binWidth = cvRound((double)histogramWidth / 256);
    Mat histogramImage(histogramHeight, histogramWidth, CV_8UC1, Scalar(255, 255, 255));
    // Finding the maximum intensity level in the histogram.
    maximumIntensity = newHistogram[0];
    for (int i = 1; i < 256; i++) {
        if (maximumIntensity < newHistogram[i]) maximumIntensity = newHistogram[i];
    }
    // FIX: an all-zero histogram previously divided by zero below.
    if (maximumIntensity == 0) maximumIntensity = 1;
    // Normalizing histogram in terms of rows (y).
    for (int i = 0; i < 256; i++) newHistogram[i] = ((double)newHistogram[i] / maximumIntensity) * histogramImage.rows;
    // Drawing the intensity level - one vertical line per bin.
    for (int i = 0; i < 256; i++) line(histogramImage, Point(binWidth * (i), histogramHeight), Point(binWidth * (i), histogramHeight - newHistogram[i]), Scalar(0, 0, 0), 1, 8, 0);
    namedWindow(name, WINDOW_AUTOSIZE);
    imshow(name, histogramImage);
}
// Privatized histogram: each block accumulates into a shared-memory copy
// `hist`, then merges it into the global `bins` with one atomic per bin.
// Values in `input` are used directly as bin indices, so they are assumed to
// lie in [0, PRIVATE) — TODO confirm against callers.
__global__ void histogramKernel(int* bins, long* input, long numElems) {
    int tx = threadIdx.x; int bx = blockIdx.x;
    // compute global thread coordinates
    int i = (bx * blockDim.x) + tx;
    // create a private histogram copy for each thread block
    __shared__ unsigned int hist[PRIVATE];
    // each thread must initialize more than 1 location
    if (PRIVATE > BLOCK_SIZE) {
        for (int j = tx; j < PRIVATE; j += BLOCK_SIZE) {
            if (j < PRIVATE) {
                hist[j] = 0;
            }
        }
    }
    // use the first `PRIVATE` threads of each block to init
    else {
        if (tx < PRIVATE) {
            hist[tx] = 0;
        }
    }
    // wait until the shared histogram is fully zeroed
    __syncthreads();
    // update private histogram (bounds-guarded for the tail block)
    if (i < numElems) {
        atomicAdd(&(hist[input[i]]), 1);
    }
    // wait until all block threads finished accumulating
    __syncthreads();
    // each thread must update more than 1 location
    if (PRIVATE > BLOCK_SIZE) {
        for (int j = tx; j < PRIVATE; j += BLOCK_SIZE) {
            if (j < PRIVATE) {
                atomicAdd(&(bins[j]), hist[j]);
            }
        }
    }
    // use the first `PRIVATE` threads to update final histogram
    else {
        if (tx < PRIVATE) {
            atomicAdd(&(bins[tx]), hist[tx]);
        }
    }
}
// Alternative histogram kernel: one private shared-memory column per thread,
// laid out [bin][lane], reduced across the 32 columns with an unrolled
// warp-style reduction; lane 0 publishes each bin with an atomicAdd.
// NOTE(review): the [256][32] layout and the tidxx+16.. reduction steps
// require blockDim.x == 32 (matching the commented-out <<<n/32, 32>>> call
// site in main) — confirm before reusing with other launch configs.
// NOTE(review): the reduction relies on `volatile` plus implicit warp-lockstep
// execution; on architectures with independent thread scheduling (Volta+)
// it would need __syncwarp()/ *_sync intrinsics — see the linked article.
__global__ void histogramKernel2(int* ohist, long* idata, long dataSize, int nbin)
{
    // https://sett.com/gpgpu/cuda-leveraging-implicit-intra-warp-synchronization-in-reduction-algorithms
    __shared__ volatile int sh_data_temp[256][32];
    unsigned int tidxx = threadIdx.x;
    unsigned int gTidx = blockDim.x * blockIdx.x + threadIdx.x;
    // Zero this lane's private column of every bin.
    for (int i = 0; i < nbin; i++)
    {
        sh_data_temp[i][tidxx] = 0;
    }
    __syncthreads();
    if (gTidx < dataSize)
    {
        float item = idata[gTidx];
        int bin = ((int)item) % nbin;
        // Each lane owns its own column, so no atomic is needed here.
        sh_data_temp[bin][tidxx] += 1;
    }
    __syncthreads();
    // Tree-reduce the 32 per-lane columns of each bin down to column 0.
    if (tidxx < 16)
    {
        for (int k = 0; k < nbin; k++)
        {
            sh_data_temp[k][tidxx] += sh_data_temp[k][tidxx + 16];
            sh_data_temp[k][tidxx] += sh_data_temp[k][tidxx + 8];
            sh_data_temp[k][tidxx] += sh_data_temp[k][tidxx + 4];
            sh_data_temp[k][tidxx] += sh_data_temp[k][tidxx + 2];
            sh_data_temp[k][tidxx] += sh_data_temp[k][tidxx + 1];
            if (tidxx == 0)
            {
                atomicAdd(&ohist[k], sh_data_temp[k][0]);
            }
        }
    }
}
#define WARP_SIZE 32
// Histogram with R replicated sub-histograms in shared memory; each replica
// is padded by one entry ((BINS + 1) stride) to spread shared-memory bank
// conflicts. A thread accumulates into replica threadIdx.x % R, then all
// replicas are summed and merged into the global histogram.
// Requires (BINS + 1) * R * sizeof(int) bytes of dynamic shared memory, and
// values in `data` are used as bin offsets, so they must lie in [0, BINS).
// NOTE(review): the begin/end walk partitions `size` per warp and strides by
// WARP_SIZE * gridDim.x; it appears to assume a particular relation between
// size, blockDim and gridDim for full coverage — confirm against the caller.
__global__ void histogramKernel3(int* histo, long* data, long size, const int BINS, const int R)
{
    extern __shared__ int Hs[];
    const int warpid = (int)(threadIdx.x / WARP_SIZE);
    const int lane = threadIdx.x % WARP_SIZE;
    const int warps_block = blockDim.x / WARP_SIZE;
    const int off_rep = (BINS + 1) * (threadIdx.x % R); // this thread's replica base
    const int begin = (size / warps_block) * warpid + WARP_SIZE * blockIdx.x + lane;
    const int end = (size / warps_block) * (warpid + 1);
    const int step = WARP_SIZE * gridDim.x;
    // Cooperatively zero all replicas.
    for (int pos = threadIdx.x; pos < (BINS + 1) * R; pos += blockDim.x)
        Hs[pos] = 0;
    __syncthreads();
    for (int i = begin; i < end; i += step)
    {
        int d = data[i];
        atomicAdd(&Hs[off_rep + d], 1);
    }
    __syncthreads();
    // Sum the R replicas of each bin, then merge into the global histogram.
    for (int pos = threadIdx.x; pos < BINS; pos += blockDim.x)
    {
        int sum = 0;
        for (int base = 0; base < (BINS + 1) * R; base += BINS + 1)
            sum += Hs[base + pos];
        atomicAdd(histo + pos, sum);
    }
}
// Shared memory using balanced trees (optimization)
__global__ void cumHistKernelBT(int* g_odata, int* g_idata, int n)
{
extern __shared__ int temp[]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[2 * thid] = g_idata[2 * thid]; // load input into shared memory
temp[2 * thid + 1] = g_idata[2 * thid + 1];
for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[2 * thid] = temp[2 * thid]; // write results to device memory
g_odata[2 * thid + 1] = temp[2 * thid + 1];
}
// Relative frequency of each intensity level: d_out[i] = d_in[i] / size,
// where `size` is the total pixel count. One thread per bin; the launch
// must cover exactly the bin count (no bounds guard).
__global__ void prkKernel(float* d_out, int* d_in, long size)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    d_out[gid] = static_cast<float>(d_in[gid]) / size;
}
// Scale each cumulative-histogram entry by `alpha` and round to the nearest
// integer intensity level. One thread per bin; the launch must cover exactly
// the bin count (no bounds guard).
__global__ void skKernel(int* d_out, int* d_in, float alpha)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    const float scaled = (float)d_in[gid] * alpha;
    d_out[gid] = round(scaled);
}
// Scatter-accumulate: for each source bin `in`, add its probability mass
// d_in_b[in] into the output bin d_in_a[in]. atomicAdd is required because
// several source bins can map onto the same output bin. One thread per
// source bin; the launch must cover exactly the bin count (no bounds guard).
__global__ void pskKernel(float* d_out, int* d_in_a, float* d_in_b)
{
    int in = blockIdx.x * blockDim.x + threadIdx.x;
    int out = (int)d_in_a[in];
    atomicAdd(&d_out[out], d_in_b[in]);
}
// Convert each mapped probability into a final 8-bit intensity level:
// d_out[i] = round(d_in[i] * 255). One thread per bin; the launch must
// cover exactly the bin count (no bounds guard).
__global__ void finalValuesKernel(int* d_out, float* d_in)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    d_out[gid] = round(255 * d_in[gid]);
}
// In-place pixel remap: each pixel value stored in d_out is replaced by its
// equalized level d_in[old_value], truncated to uchar range.
// No bounds guard, so the grid must cover at most the pixel count — TODO
// confirm the launch configuration at the call site.
__global__ void finalImageKernel(long* d_out, int* d_in)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    d_out[i] = (uchar)(d_in[d_out[i]]);
}
// GPU histogram equalization of a grayscale image.
// Pipeline: histogram -> exclusive scan of the histogram -> per-level
// probability, scaling and mapping -> rounding -> pixel remap. Intermediate
// and final histograms plus the equalized image are shown via OpenCV.
int main()
{
    char img_path[1024];
    printf("Starting application\n");
    printf("Insert image path: ");
    scanf_s("%1023[^\n]", img_path, (unsigned)_countof(img_path));
    printf("Showing results\n");
    // BUG FIX: the original declared `image` a second time with a hard-coded
    // debug path (a redefinition error); only the user-supplied path is kept.
    Mat image = imread(img_path, IMREAD_GRAYSCALE);
    int h = image.rows, w = image.cols; // image dimensions
    int* h_hist;        // 256-bin histogram of the input
    long* h_image;      // pixel values, one long per pixel
    float* h_PRk;       // probability of each intensity level
    int* h_cumHist;     // cumulative histogram (exclusive scan)
    int* h_Sk;          // scaled intensity mapping
    float* h_PSk;       // probability mass of each mapped level
    int* h_finalValues; // rounded equalized levels
    int dim_hist = 256;
    long dim_image = h * w; // image size
    float alpha = 255.0f / dim_image; // float literal: avoid a double round-trip
    cudaError_t cudaStatus;
    int numThreadsPerBlock = 256; // define block size
    // NOTE(review): integer division drops up to 255 tail pixels when the
    // image size is not a multiple of the block size; kept as-is because
    // finalImageKernel has no bounds guard that would make a rounded-up
    // grid safe.
    int numBlocks = dim_image / numThreadsPerBlock;
    int BINS = dim_hist; // declared up front so `goto Error` skips no initializers
    int R = 32;          // histogramKernel3 replication factor
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0); // Start global timers
    // Managed memory is reachable from both host and device.
    cudaMallocManaged(&h_hist, dim_hist * sizeof(int));
    cudaMallocManaged(&h_image, dim_image * sizeof(long));
    cudaMallocManaged(&h_PRk, dim_hist * sizeof(float));
    cudaMallocManaged(&h_cumHist, dim_hist * sizeof(int));
    cudaMallocManaged(&h_Sk, dim_hist * sizeof(int));
    cudaMallocManaged(&h_PSk, dim_hist * sizeof(float));
    cudaMallocManaged(&h_finalValues, dim_hist * sizeof(int));
    for (int i = 0; i < dim_hist; ++i) h_hist[i] = 0;
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            h_image[i * w + j] = image.at<uchar>(i, j);
        }
    }
    // Check CUDA device
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // ******************************************************************************************
    // Compute image histogram
    // Alternative implementations kept for reference:
    /*
    dim3 threadPerBlock(BLOCK_SIZE, 1, 1);
    dim3 blockPerGrid(ceil(dim_image / (float)BLOCK_SIZE), 1, 1);
    histogramKernel << <blockPerGrid, threadPerBlock >> > (h_hist, h_image, dim_image);
    */
    //histogramKernel2 << <dim_image/32, 32 >> > (h_hist, h_image, dim_image, dim_hist);
    // BUG FIX: arguments were passed as (..., BINS, dim_hist, R), putting the
    // bin count where the data size belongs; the kernel signature is
    // (histo, data, size, BINS, R).
    histogramKernel3 << <numBlocks, numThreadsPerBlock, (BINS + 1) * R * sizeof(int) >> > (h_hist, h_image, dim_image, BINS, R);
    // cudaThreadSynchronize is deprecated; cudaGetLastError catches launch
    // failures and cudaDeviceSynchronize below catches execution failures.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel histo launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    display_histogram(h_hist, "CUDA Histogram");
    // ******************************************************************************************
    // Compute Cumulative Histogram
    // BUG FIX: the work-efficient scan loads two elements per thread
    // (temp[2*tid] and temp[2*tid+1]), so it needs dim_hist/2 threads;
    // launching dim_hist threads read past the end of h_hist.
    cumHistKernelBT << < 1, dim_hist / 2, 2 * dim_hist * sizeof(int) >> > (h_cumHist, h_hist, dim_hist);
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "[cumhist] addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // ******************************************************************************************
    // Probability distribution for intensity levels
    prkKernel << < 1, dim_hist >> > (h_PRk, h_hist, dim_image);
    // Scaling operation
    skKernel << < 1, dim_hist >> > (h_Sk, h_cumHist, alpha);
    // Mapping operation
    pskKernel << < 1, dim_hist >> > (h_PSk, h_Sk, h_PRk);
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // ******************************************************************************************
    // Rounding to get final values
    finalValuesKernel << < 1, dim_hist >> > (h_finalValues, h_PSk);
    // Creating equalized image (in-place remap of h_image through h_Sk)
    finalImageKernel << < numBlocks, numThreadsPerBlock >> > (h_image, h_Sk);
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // ******************************************************************************************
    display_histogram(h_finalValues, "CUDA Equalized histogram");
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            image.at<uchar>(i, j) = h_image[i * w + j];
        }
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop); // cudaEventElapsedTime returns value in milliseconds.Resolution ~0.5ms
    printf("Execution time GPU: %f\n", elapsedTime);
Error:
    // Free device memory
    cudaFree(h_hist);
    cudaFree(h_image);
    cudaFree(h_PRk);
    cudaFree(h_cumHist);
    cudaFree(h_Sk);
    cudaFree(h_PSk);
    cudaFree(h_finalValues);
    // Destroy CUDA Event API Events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Display equalized image
    namedWindow("CUDA Equilized Image", WINDOW_NORMAL);
    imshow("CUDA Equilized Image", image);
    waitKey();
    return 0;
}
95773ef7905a9b2a9e27df52e99020900d6b1687.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <random>
#include <hip/hip_runtime.h>
#include "reference.h"
// Backward pass of the margin ranking criterion (HIP variant).
// For element i: dist = margin - Y[i] * (X1[i] - X2[i]); when the margin is
// already satisfied (dist < 0) both gradients are zeroed, otherwise
// dX1[i] = -Y[i] * dOutput[i] and dX2[i] = +Y[i] * dOutput[i].
// One thread per element; threads beyond N return without touching memory.
__global__
void MRCGradient (
  const int N, const int* Y, const float* X1, const float* X2, const float* dOutput,
  const float margin, float*__restrict__ dX1, float*__restrict__ dX2)
{
  const int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= N) return;
  const float dist = margin - Y[idx] * (X1[idx] - X2[idx]);
  if (dist < 0.f) {
    dX1[idx] = 0.f;
    dX2[idx] = 0.f;
  } else {
    const float grad = Y[idx] * dOutput[idx];
    dX1[idx] = -grad;
    dX2[idx] = grad;
  }
}
// Branchless variant of MRCGradient: identical math, but both outputs are
// written unconditionally via ternary selects (the sign/zero choice becomes
// a predicated select instead of a divergent branch).
__global__
void MRCGradient2(
  const int N, const int* Y, const float* X1, const float* X2, const float* dOutput,
  const float margin, float*__restrict__ dX1, float*__restrict__ dX2)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < N) {
    float y = Y[i];
    float o = dOutput[i];
    float dist = -y * (X1[i] - X2[i]) + margin;
    dX1[i] = dist < 0.f ? 0.f : -y * o;
    dX2[i] = dist < 0.f ? 0.f : y * o;
  }
}
// Benchmark driver for the two margin-ranking-criterion gradient kernels
// (HIP). Usage: <prog> <number of elements> <repeat>
// Builds reproducible random inputs, times each kernel over `repeat`
// launches, and validates device output against the CPU reference (1e-3).
int main(int argc, char* argv[])
{
  if (argc != 3) {
    printf("Usage: %s <number of elements> <repeat>\n", argv[0]);
    return 1;
  }
  const int length = atoi(argv[1]);
  const int repeat = atoi(argv[2]);
  // sizeof(int) == sizeof(float), so one byte count serves all buffers below.
  size_t size_bytes = length * sizeof(float);
  float *h_X1 = (float*) malloc (size_bytes);
  float *h_X2 = (float*) malloc (size_bytes);
  float *h_O = (float*) malloc (size_bytes);
  int *h_Y = ( int*) malloc (size_bytes);
  float *h_dX1 = (float*) malloc (size_bytes);
  float *h_dX2 = (float*) malloc (size_bytes);
  float *r_dX1 = (float*) malloc (size_bytes);  // CPU reference output
  float *r_dX2 = (float*) malloc (size_bytes);  // CPU reference output
  const float m = 0.01f; // margin; float literal avoids a double->float round-trip
  // Fixed seed keeps benchmark runs reproducible.
  std::default_random_engine g (123);
  std::uniform_real_distribution<float> distr (-2.f, 2.f);
  for (int i = 0; i < length; i++) {
    h_X1[i] = distr(g);
    h_X2[i] = distr(g);
    h_O[i] = distr(g);
    h_Y[i] = (distr(g) < 0) ? -1 : 1;
  }
  float *d_X1, *d_X2, *d_O, *d_dX1, *d_dX2;
  int *d_Y;
  hipMalloc((void**)&d_X1, size_bytes);
  hipMemcpy(d_X1, h_X1, size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_X2, size_bytes);
  hipMemcpy(d_X2, h_X2, size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_O, size_bytes);
  hipMemcpy(d_O, h_O, size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_Y, size_bytes);
  hipMemcpy(d_Y, h_Y, size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_dX1, size_bytes);
  hipMalloc((void**)&d_dX2, size_bytes);
  dim3 grid ((length + 255) / 256); // ceil-div so the tail is covered
  dim3 block (256);
  // warmup; also leaves d_dX1/d_dX2 populated for the later verification
  for (int i = 0; i < repeat; i++) {
    hipLaunchKernelGGL(( MRCGradient) , dim3(grid), dim3(block), 0, 0, length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
    hipLaunchKernelGGL(( MRCGradient2) , dim3(grid), dim3(block), 0, 0, length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
  }
  hipDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < repeat; i++)
    hipLaunchKernelGGL(( MRCGradient) , dim3(grid), dim3(block), 0, 0, length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
  hipDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average execution time of MRC kernel: %f (us)\n", (time * 1e-3f) / repeat);
  start = std::chrono::steady_clock::now();
  for (int i = 0; i < repeat; i++)
    hipLaunchKernelGGL(( MRCGradient2) , dim3(grid), dim3(block), 0, 0, length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
  hipDeviceSynchronize();
  end = std::chrono::steady_clock::now();
  time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average execution time of MRC2 kernel: %f (us)\n", (time * 1e-3f) / repeat);
  // verify device results against the CPU reference
  hipMemcpy(h_dX1, d_dX1, size_bytes, hipMemcpyDeviceToHost);
  hipMemcpy(h_dX2, d_dX2, size_bytes, hipMemcpyDeviceToHost);
  reference (length, h_Y, h_X1, h_X2, h_O, m, r_dX1, r_dX2);
  bool ok = true;
  for (int i = 0; i < length; i++) {
    if (fabs(h_dX1[i] - r_dX1[i]) > 1e-3 || fabs(h_dX2[i] - r_dX2[i]) > 1e-3) {
      ok = false;
      break;
    }
  }
  printf("%s\n", ok ? "PASS" : "FAIL");
  hipFree(d_X1);
  hipFree(d_X2);
  hipFree(d_O);
  hipFree(d_Y);
  hipFree(d_dX1);
  hipFree(d_dX2);
  free(h_X1);
  free(h_X2);
  free(h_O);
  free(h_Y);
  free(h_dX1);
  free(h_dX2);
  // BUG FIX: the reference buffers were leaked in the original.
  free(r_dX1);
  free(r_dX2);
  return 0;
}
| 95773ef7905a9b2a9e27df52e99020900d6b1687.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <random>
#include <cuda.h>
#include "reference.h"
// Backward pass of the margin ranking criterion.
// For element i: dist = -Y[i]*(X1[i]-X2[i]) + margin; when the margin is
// already satisfied (dist < 0) both gradients are zeroed, otherwise
// dX1[i] = -Y[i]*dOutput[i] and dX2[i] = +Y[i]*dOutput[i].
// One thread per element; threads beyond N do not touch memory.
__global__
void MRCGradient (
  const int N, const int* Y, const float* X1, const float* X2, const float* dOutput,
  const float margin, float*__restrict__ dX1, float*__restrict__ dX2)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < N) {
    float dist = -Y[i] * (X1[i] - X2[i]) + margin;
    if (dist < 0.f) {
      dX1[i] = dX2[i] = 0.f;
    } else {
      dX1[i] = -Y[i] * dOutput[i];
      dX2[i] = Y[i] * dOutput[i];
    }
  }
}
// Branchless variant of MRCGradient: identical math, but both outputs are
// written unconditionally via ternary selects, trading the divergent branch
// for predicated writes.
__global__
void MRCGradient2(
  const int N, const int* Y, const float* X1, const float* X2, const float* dOutput,
  const float margin, float*__restrict__ dX1, float*__restrict__ dX2)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < N) {
    float y = Y[i];
    float o = dOutput[i];
    float dist = -y * (X1[i] - X2[i]) + margin;
    dX1[i] = dist < 0.f ? 0.f : -y * o;
    dX2[i] = dist < 0.f ? 0.f : y * o;
  }
}
// Benchmark driver for the two margin-ranking-criterion gradient kernels.
// Usage: <prog> <number of elements> <repeat>
// Builds reproducible random inputs, times each kernel over `repeat`
// launches, and validates device output against the CPU reference (1e-3).
int main(int argc, char* argv[])
{
  if (argc != 3) {
    printf("Usage: %s <number of elements> <repeat>\n", argv[0]);
    return 1;
  }
  const int length = atoi(argv[1]);
  const int repeat = atoi(argv[2]);
  // sizeof(int) == sizeof(float), so one byte count serves all buffers below.
  size_t size_bytes = length * sizeof(float);
  float *h_X1 = (float*) malloc (size_bytes);
  float *h_X2 = (float*) malloc (size_bytes);
  float *h_O = (float*) malloc (size_bytes);
  int *h_Y = ( int*) malloc (size_bytes);
  float *h_dX1 = (float*) malloc (size_bytes);
  float *h_dX2 = (float*) malloc (size_bytes);
  float *r_dX1 = (float*) malloc (size_bytes);  // CPU reference output
  float *r_dX2 = (float*) malloc (size_bytes);  // CPU reference output
  const float m = 0.01f; // margin; float literal avoids a double->float round-trip
  // Fixed seed keeps benchmark runs reproducible.
  std::default_random_engine g (123);
  std::uniform_real_distribution<float> distr (-2.f, 2.f);
  for (int i = 0; i < length; i++) {
    h_X1[i] = distr(g);
    h_X2[i] = distr(g);
    h_O[i] = distr(g);
    h_Y[i] = (distr(g) < 0) ? -1 : 1;
  }
  float *d_X1, *d_X2, *d_O, *d_dX1, *d_dX2;
  int *d_Y;
  cudaMalloc((void**)&d_X1, size_bytes);
  cudaMemcpy(d_X1, h_X1, size_bytes, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&d_X2, size_bytes);
  cudaMemcpy(d_X2, h_X2, size_bytes, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&d_O, size_bytes);
  cudaMemcpy(d_O, h_O, size_bytes, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&d_Y, size_bytes);
  cudaMemcpy(d_Y, h_Y, size_bytes, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&d_dX1, size_bytes);
  cudaMalloc((void**)&d_dX2, size_bytes);
  dim3 grid ((length + 255) / 256); // ceil-div so the tail is covered
  dim3 block (256);
  // warmup; also leaves d_dX1/d_dX2 populated for the later verification
  for (int i = 0; i < repeat; i++) {
    MRCGradient <<<grid, block>>> (length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
    MRCGradient2 <<<grid, block>>> (length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
  }
  cudaDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < repeat; i++)
    MRCGradient <<<grid, block>>> (length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
  cudaDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average execution time of MRC kernel: %f (us)\n", (time * 1e-3f) / repeat);
  start = std::chrono::steady_clock::now();
  for (int i = 0; i < repeat; i++)
    MRCGradient2 <<<grid, block>>> (length, d_Y, d_X1, d_X2, d_O, m, d_dX1, d_dX2);
  cudaDeviceSynchronize();
  end = std::chrono::steady_clock::now();
  time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average execution time of MRC2 kernel: %f (us)\n", (time * 1e-3f) / repeat);
  // verify device results against the CPU reference
  cudaMemcpy(h_dX1, d_dX1, size_bytes, cudaMemcpyDeviceToHost);
  cudaMemcpy(h_dX2, d_dX2, size_bytes, cudaMemcpyDeviceToHost);
  reference (length, h_Y, h_X1, h_X2, h_O, m, r_dX1, r_dX2);
  bool ok = true;
  for (int i = 0; i < length; i++) {
    if (fabs(h_dX1[i] - r_dX1[i]) > 1e-3 || fabs(h_dX2[i] - r_dX2[i]) > 1e-3) {
      ok = false;
      break;
    }
  }
  printf("%s\n", ok ? "PASS" : "FAIL");
  cudaFree(d_X1);
  cudaFree(d_X2);
  cudaFree(d_O);
  cudaFree(d_Y);
  cudaFree(d_dX1);
  cudaFree(d_dX2);
  free(h_X1);
  free(h_X2);
  free(h_O);
  free(h_Y);
  free(h_dX1);
  free(h_dX2);
  // BUG FIX: the reference buffers were leaked in the original.
  free(r_dX1);
  free(r_dX2);
  return 0;
}
|
7b01e01b897ec9315e88b02fac1838b1f645b761.hip | // !!! This is a file automatically generated by hipify!!!
//fail
//--blockDim=64 --gridDim=64 --no-inline
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#define N 2//64
// Device helper that always trips its assertion; this translation unit is a
// GPUVerify regression case marked //fail above, so the failure is intentional.
__device__ void bar(float x) {
  assert(0);
}
// Kernel under test: forwards A[0] (implicitly converted to float) to bar,
// which asserts unconditionally — the expected verifier outcome is failure.
__global__ void foo(int* A) {
  bar(A[0]);
}
| 7b01e01b897ec9315e88b02fac1838b1f645b761.cu | //fail
//--blockDim=64 --gridDim=64 --no-inline
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
#define N 2//64
// Device helper that always trips its assertion; this translation unit is a
// GPUVerify regression case marked //fail above, so the failure is intentional.
__device__ void bar(float x) {
  assert(0);
}
// Kernel under test: forwards A[0] (implicitly converted to float) to bar,
// which asserts unconditionally — the expected verifier outcome is failure.
__global__ void foo(int* A) {
  bar(A[0]);
}
|
b169fa8a9aa71a9f49aedb1d50173b8b987e18fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include"cutil.h" // Comment this if cutil.h is not available
#include"cuda_runtime.h"
#include"stdio.h"
// Number of operations
//#define NUM_ITEMS 50000
// Number operations per block
#define FACTOR 1
// Number of integer keys assumed in the range [10, 9+KEYS]
//#define KEYS 100
// Number of threads per block
#define THREADS_NUM 32
// Number of hash table buckets
#define BUCKETS_NUM 36419
// Supported operations
#define ADD (0)
#define DELETE (1)
#define SEARCH (2)
#if __WORDSIZE == 64
typedef unsigned long long int LL;
#else
typedef unsigned int LL;
#endif
// Definition of generic slot
typedef LL Slot;
#if __WORDSIZE == 64
// Size of the neighborhood, every bucket has (1 + NEIGHBORHOOD_SIZE) slots
#define NEIGHBORHOOD_SIZE 31
// Because of the flag MASK, the key value in the Slot need to be restricted
#define MAX_KEY ((LL)0x000000000fffffff)
// Use MASK to get the flag value in Slot
#define EMP_FLAG_MASK ((LL)0x8000000000000000)
#define CHECK_1_FLAG_MASK ((LL)0x4000000000000000)
#define CHECK_2_FLAG_MASK ((LL)0x2000000000000000)
#define SWAP_FLAG_MASK ((LL)0x1000000000000000)
#define BITMAP_MASK ((LL)0x0ffffffff0000000)
#define BITMAP_SHIFT 28
#define WRONG_POS ((LL)0xffffffffffffffff)
#else
#define NEIGHBORHOOD_SIZE 15
#define MAX_KEY ((LL)0x00000fff)
#define EMP_FLAG_MASK ((LL)0x80000000)
#define CHECK_1_FLAG_MASK ((LL)0x40000000)
#define CHECK_2_FLAG_MASK ((LL)0x20000000)
#define SWAP_FLAG_MASK ((LL)0x10000000)
#define BITMAP_MASK ((LL)0x0ffff000)
#define BITMAP_SHIFT 12
#define WRONG_POS ((LL)0xffffffff)
#endif
#define BIT ((LL)0x1)
#define BUCKET_RANGE (NEIGHBORHOOD_SIZE+1)
// Actuall hash table pysical size
#define TABLE_SIZE (BUCKETS_NUM + NEIGHBORHOOD_SIZE)
#define MAX_PROBES_FOR_EMPTY_BUCKET (12*BUCKET_RANGE)
__device__ Slot * m_slots; // Array of hash table slots
// Kernel publishing the slot array into the device-global pointer m_slots so
// later kernels can reach the table.
// NOTE(review): the original comment claimed every slot is initialized as an
// empty node, but the visible body only stores the pointer; slot contents
// must be initialized elsewhere — confirm against the host setup code.
__global__ void init(Slot * slots)
{
	m_slots = slots;
}
// Map a key onto its home bucket index in [0, BUCKETS_NUM).
__device__ int Hash(LL x)
{
	return (int)(x % BUCKETS_NUM);
}
// Atomically replace m_slots[pos] with new_value iff the slot still holds
// old_value. Returns true when the CAS succeeded (this thread won the race).
__device__ bool CompareAndSet(int pos, LL old_value, LL new_value)
{
	Slot old_value_out = atomicCAS(&(m_slots[pos]), old_value, new_value);
	if (old_value_out == old_value) return true;
	return false;
}
// Warp-cooperative lookup of `key` in its home bucket's neighborhood: each
// lane inspects one slot starting at Hash(key). On return *result holds the
// matching slot index (or WRONG_POS when absent) and *location holds the
// slot value this lane read.
// NOTE(review): uses the legacy mask-less __shfl/__ballot intrinsics — fine
// for HIP / pre-Volta CUDA, but the *_sync forms are required on Volta+.
__device__ void Find(LL key, LL * result, Slot * location)
{
	int tid = threadIdx.x;
	int pos = Hash(key); // step 0
	LL bitmap;
	Slot location_pos;
	// step 2a: re-read the neighborhood while the bucket header carries the
	// CHECK_1 lock set by a concurrent insert/delete.
	do{
		*location = m_slots[pos + tid];
		location_pos = __shfl(*location, 0);
	} while( (location_pos & CHECK_1_FLAG_MASK) != 0 );
	// step 2b: derive the occupancy bitmap from the bucket header.
	if( (location_pos & EMP_FLAG_MASK) != 0 ){ // step 2b1: empty header -> consider every slot
		bitmap = (BITMAP_MASK >> BITMAP_SHIFT);
	} else { // step 2b2: use the header's recorded bitmap
		bitmap = ( (location_pos & BITMAP_MASK) >> BITMAP_SHIFT);
	}
	int predict = 0;
	int tmp_pos = Hash((*location) & MAX_KEY);
	if( (((bitmap >> tid) & BIT) != 0) // is valid
		&& ( ( (*location) & EMP_FLAG_MASK) == 0) // no emp flag
		&& ( ( (*location) & MAX_KEY) == key ) // is the key
		&& (tmp_pos == pos) // just for safe
		){
		predict = 1;
	}
	// First lane that predicted a match (if any) determines the result slot.
	int ans = __ffs(__ballot(predict));
	if(ans==0){
		*result = WRONG_POS;
	} else {
		*result = pos + (ans - 1);
	}
}
// Warp-cooperative removal of `key`. On return *result is 1 when the key was
// found and deleted, 0 when it was absent.
// Protocol: locate the key with Find; if it sits in the bucket header slot,
// flip it to empty with a single CAS. Otherwise lock the header with the
// CHECK_1 flag, empty the remote slot, then clear the lock plus the slot's
// bitmap bit. The outer loop retries whenever a CAS race is lost.
__device__ void Delete(LL key, LL * result)
{
	int tid = threadIdx.x;
	int pos = Hash(key); // step 0
	LL target;
	Slot location;
	Slot location_pos;
	Slot new_location_pos;
	LL ans;
	bool success;
	while (true) {
		ans = WRONG_POS;
		target = WRONG_POS;
		success = false;
		Find(key, &target, &location); // step 1
		if(target == WRONG_POS){
			*result = 0; //return false: key not present
			return; //step 2b
		}
		location_pos = __shfl(location, 0); // lane 0 holds the bucket header value
		if( ((location_pos & CHECK_1_FLAG_MASK) != 0)
			|| ((location_pos & CHECK_2_FLAG_MASK) != 0)
			|| ((location_pos & SWAP_FLAG_MASK) != 0) ){
			; // header busy (held by a concurrent operation): retry
		} else if( ((location_pos & EMP_FLAG_MASK) == 0)
			&& ( ( ( (location_pos & BITMAP_MASK) >> BITMAP_SHIFT) & BIT ) != 0 )
			&& ( ( location_pos & MAX_KEY) == key ) ) {
			// Key lives in the header slot itself: one CAS flips it to empty.
			if(tid == 0){
				new_location_pos = (location_pos | EMP_FLAG_MASK);
				success = CompareAndSet(pos, location_pos, new_location_pos);
				if (success) {
					ans = 1; // return true;
				}
			}
			ans = __shfl(ans, 0);
			if(ans == 1){
				*result = 1;
				return;
			}
		} else {
			// Key is in a neighborhood slot: lock the header with CHECK_1 first.
			new_location_pos = (location_pos | CHECK_1_FLAG_MASK);
			if(tid == 0){
				/*
				if(pos == 7468){
					printf("Delete key: %lu, step 3c add CHECK_1_FLAG_MASK\n", key);
				}*/
				success = CompareAndSet(pos, location_pos, new_location_pos);
			}
			success = __shfl(success, 0);
			if(success){
				location_pos = new_location_pos;
				int lane_id = (int)target - pos; // lane owning the target slot
				if(tid == lane_id){
					Slot new_location = (location | EMP_FLAG_MASK);
					success = CompareAndSet(target, location, new_location);
					if(success){ // step 4a: slot emptied; unlock header and drop its bitmap bit
						new_location_pos = (location_pos & (~CHECK_1_FLAG_MASK));
						//remove bitmap bit
						new_location_pos &= (~(BIT<<(BITMAP_SHIFT+lane_id)));
						success = CompareAndSet(pos, location_pos, new_location_pos);
						if(success){
							ans = 1;
						} else {
							// TODO: design fail
							printf("Delete key: %lu, step4a2 design fail\n", key);
						}
					} else { //step 4b: lost the slot race; just unlock the header
						new_location_pos = (location_pos & (~CHECK_1_FLAG_MASK));
						success = CompareAndSet(pos, location_pos, new_location_pos);
						if (!success) {
							// TODO: design fail
							printf("Delete key: %lu, step4b2 design fail\n", key);
						}
					}
				}
				ans = __shfl(ans, lane_id);
				if(ans == 1){
					*result = 1;
					return;
				}
			} // else step 3c2: failed to take the header lock, retry
		}
	}
}
__device__ void Insert(LL key, LL * result)
{
int tid = threadIdx.x;
int pos = Hash(key); // step 0
LL target;
Slot location;
Slot location_pos;
Slot new_location_pos;
LL ans;
bool success;
//Slot location_swap_empty;
Slot location_swap;
Slot location_check2;
int search_pos = pos;
while (true) {
ans = WRONG_POS;
target = WRONG_POS;
success = false;
Find(key, &target, &location); // step 1
if(target != WRONG_POS){
*result = 0; // return false
return; // step 2b
}
location_pos = __shfl(location, 0);
// step 3
if( ((location_pos & CHECK_1_FLAG_MASK) != 0)
|| ((location_pos & CHECK_2_FLAG_MASK) != 0)
|| ((location_pos & SWAP_FLAG_MASK) != 0) ){ // step 3a
continue;
} else if( (location_pos & EMP_FLAG_MASK) != 0 ){ // step 3b
if(tid == 0){
new_location_pos = (key & MAX_KEY);
// add bitmap bit;
new_location_pos |= (BIT<<(BITMAP_SHIFT));
success = CompareAndSet(pos, location_pos, new_location_pos);
if(success){
ans = 1;
}
}
ans = __shfl(ans, 0);
if(ans == 1){ // step 3b1
*result = 1;
return;
} else { // step 3b2
continue;
}
} else { // step 3c
bool continue_3c = false;
if(tid == 0){
new_location_pos = (location_pos | CHECK_1_FLAG_MASK);
/*
if(pos == 7468){
printf("Insert key: %lu, step 3c add CHECK_1_FLAG_MASK\n", key);
printf("location_pos: %x%x, new_location_pos: %x%x\n", location_pos, new_location_pos);
}*/
success = CompareAndSet(pos, location_pos, new_location_pos);
if(!success){ // step 3c2
continue_3c = true;
} else {
location = new_location_pos;
}
}
continue_3c = __shfl(continue_3c, 0);
if(continue_3c) continue;
location_pos = __shfl(location, 0);
}
search_pos = pos;
step_4:
//__syncthreads();
bool condition_4b = (((location & CHECK_1_FLAG_MASK) != 0) && ((location & EMP_FLAG_MASK) != 0));
LL target_4b = __ffs(__ballot(condition_4b));
bool condition_4a = (((location & CHECK_1_FLAG_MASK) == 0) && ((location & EMP_FLAG_MASK) != 0));
LL target_4a_list = __ballot(condition_4a);
for(int target_4a_offset = __ffs(target_4a_list);
target_4a_offset != 0;
target_4a_offset = __ffs(target_4a_list) ){
LL lanid_4a = target_4a_offset-1;
target = search_pos + lanid_4a;
bool goto_4a_step7 = false;
if(tid == lanid_4a){
Slot new_location = (location | CHECK_1_FLAG_MASK);
/*
if(target == 7468){
printf(" Delete key: %lu, step 4a add CHECK_1_FLAG_MASK\n", key);
}*/
success = CompareAndSet(target, location, new_location);
if(success){
location = new_location;
//location_swap_empty = location;
goto_4a_step7 = true;
}
}
goto_4a_step7 = __shfl(goto_4a_step7, lanid_4a);
if(goto_4a_step7){
//target = __shfl(target, lanid_4a);
//location_swap_empty = __shfl(location_swap_empty, lanid_4a); // use for swap
goto step_7;
}
// bug fixed: should be target_4a_list &= (~(BIT<<lanid_4a));
// target_4a_list &= (~(BIT<<target_4a_list));
target_4a_list &= (~(BIT<<lanid_4a));
}
if(target_4b != 0){
search_pos = pos;
goto step_6;
}
// step 5
search_pos += BUCKET_RANGE;
if( search_pos >= pos + MAX_PROBES_FOR_EMPTY_BUCKET || search_pos >= BUCKETS_NUM ){
bool goto_5a_full = false;
if(tid == 0){
new_location_pos = (location_pos & (~CHECK_1_FLAG_MASK));
success = CompareAndSet(pos, location_pos, new_location_pos);
if(success){ // step 5a1
goto_5a_full = true;
} else { // step 5a2
// TODO: design fail
printf("Insert key: %lu, step5a2 design fail\n", key);
}
}
goto_5a_full = __shfl(goto_5a_full, 0);
if(goto_5a_full){
// TODO: full
return;
} else {
// TODO: design fail 5a2
printf("Insert key: %lu, step5a2 full design fail\n", key);
}
}
step_6:
location = m_slots[search_pos + tid];
goto step_4;
step_7:
if( ((int)target > pos) && (((int)target - NEIGHBORHOOD_SIZE) <= pos) ){ // step 7a
location = m_slots[pos + tid];
location_pos = __shfl(location,0);
int lanid_7a = (int)target - pos;
if(tid == lanid_7a){
Slot new_location_target = (key & MAX_KEY);
success = CompareAndSet(target, location, new_location_target);
if(success){
new_location_pos = (location_pos & (~CHECK_1_FLAG_MASK));
//add bitmap bit
new_location_pos |= (BIT<<(BITMAP_SHIFT+lanid_7a));
/*
if(pos == 7468){
printf("Insert key: %lu, step 7a remove CHECK_1_FLAG_MASK\n", key);
printf("location_pos: %x%x, new_location_pos: %x%x\n", location_pos, new_location_pos);
}*/
success = CompareAndSet(pos, location_pos, new_location_pos);
if(success){
ans = 1;
} else {
// TODO: design fail
printf("Insert key: %lu, step7a1b design fail\n", key);
}
} else {
new_location_pos = (location_pos & (~CHECK_1_FLAG_MASK));
success = CompareAndSet(pos, location_pos, new_location_pos);
if( !success ){
// TODO: design fail
printf("Insert key: %lu, step7a2b design fail\n", key);
}
}
}
ans = __shfl(ans, lanid_7a);
if(ans == 1){ // step 3a1a
*result = 1;
return;
} else {
continue;
}
}
//step 8
int to_check_2 = target - NEIGHBORHOOD_SIZE;
location = m_slots[to_check_2 + tid];
location_check2 = __shfl(location, 0);
step_9:
if( ((location_check2 & CHECK_1_FLAG_MASK) == 0)
&& ((location_check2 & CHECK_2_FLAG_MASK) == 0)
&& ((location_check2 & SWAP_FLAG_MASK) == 0)
&& ((location_check2 & EMP_FLAG_MASK) == 0) ){ // step 9a
bool goto_9a1_step12 = false;
if(tid == 0){
Slot new_location_check2 = (location_check2 | CHECK_2_FLAG_MASK);
new_location_check2 |= (BIT<<(BITMAP_SHIFT+(target-to_check_2)));
success = CompareAndSet(to_check_2, location_check2, new_location_check2);
if(success){
location = new_location_check2;
location_check2 = location;
goto_9a1_step12 = true;
}
}
goto_9a1_step12 = __shfl(goto_9a1_step12, 0);
if(goto_9a1_step12){
location_check2 = __shfl(location_check2, 0);
goto step_12;
}
} else if( ((location_check2 & CHECK_1_FLAG_MASK) == 0)
&& ((location_check2 & EMP_FLAG_MASK) != 0) ) { // step 9b
// bug fixed: add CHECK_1_FLAG_MASK to location_check2 and remove targets CHECK_1_FLAG_MASK
int lanid_9b = (int)target - to_check_2;
bool goto_9b_step7 = false;
if(tid == lanid_9b){
Slot new_location_check2 = (location_check2 | CHECK_1_FLAG_MASK);
success = CompareAndSet(to_check_2, location_check2, new_location_check2);
if(success){
location_check2 = new_location_check2;
Slot new_location_target = (location & (~CHECK_1_FLAG_MASK));
success = CompareAndSet(target, location, new_location_target);
if(success){
goto_9b_step7 = true;
} else {
// TODO : design fail
}
} else {
// TODO : design fail
}
}
location_check2 = __shfl(location_check2,lanid_9b);
goto_9b_step7 = __shfl(goto_9b_step7,lanid_9b);
if(goto_9b_step7){
target = to_check_2;
goto step_7;
}
} else if( ((location_check2 & CHECK_1_FLAG_MASK) != 0)
&& ((location_check2 & EMP_FLAG_MASK) != 0) ) { // step 9c
// bug fixed: remove targets CHECK_1_FLAG_MASK and change search_pos = to_check_2
int lanid_9c = (int)target - to_check_2;
bool goto_9c_step6 = false;
if(tid == lanid_9c){
Slot new_location_target = (location & (~CHECK_1_FLAG_MASK));
success = CompareAndSet(target, location, new_location_target);
if(success){
goto_9c_step6 = true;
} else {
// TODO : design fail
}
}
goto_9c_step6 = __shfl(goto_9c_step6,lanid_9c);
if(goto_9c_step6){
search_pos = to_check_2;
goto step_6;
}
}
step_10:
to_check_2++;
location = m_slots[to_check_2 + tid];
//step 11
if(to_check_2 < (int)target){
location_check2 = __shfl(location, 0);
goto step_9;
} else { // to_check_2 == target
bool goto_11b1_full = false;
if(tid == 0){
Slot new_location = (location & (~CHECK_1_FLAG_MASK));
success = CompareAndSet(target, location, new_location);
if(success){
location = new_location;
new_location_pos = (location_pos & (~CHECK_1_FLAG_MASK));
success = CompareAndSet(pos, location_pos, new_location_pos);
if(success){ // step 11b1a
goto_11b1_full = true;
} else { // step 11b1b
// TODO: design fail
printf("Insert key: %lu, step11b1b design fail\n", key);
}
} else { // step 11b2
// TODO: design fail
printf("Insert key: %lu, step11b2 design fail\n", key);
}
}
goto_11b1_full = __shfl(goto_11b1_full, 0);
if(goto_11b1_full){
// TODO: full
return;
} else {
// TODO: design fail 11b2
printf("Insert key: %lu, step11b2 full design fail\n", key);
}
}
step_12:
if ( (location_check2 & (BIT<<BITMAP_SHIFT)) != 0 ){ // step 12a;
int lanid_12a = target - to_check_2;
bool goto_12a1a_step7 = false;
if(tid == lanid_12a){
Slot new_location = (location & (~MAX_KEY)) | (location_check2 & MAX_KEY);
new_location &= (~EMP_FLAG_MASK);
new_location &= (~CHECK_1_FLAG_MASK);
new_location &= (~BITMAP_MASK);
success = CompareAndSet(target, location, new_location);
if(success){ // step 12a1
location = new_location;
Slot new_location_check2 = (location_check2 & (~CHECK_2_FLAG_MASK)) | EMP_FLAG_MASK;
success = CompareAndSet(to_check_2, location_check2, new_location_check2);
if(success){ // step 12a1a
location_check2 = new_location_check2;
goto_12a1a_step7 = true;
} else { // step 12a1b
// TODO: design fail
printf("Insert key: %lu, step12a1b design fail\n", key);
}
} else { // step 12a2
// TODO: design fail
printf("Insert key: %lu, step12a2 design fail\n", key);
}
}
goto_12a1a_step7 = __shfl(goto_12a1a_step7, lanid_12a);
if(goto_12a1a_step7){
location_check2 = __shfl(location_check2, lanid_12a);
// bug fixed: target = to_check_2
target = to_check_2;
goto step_7;
}
}
// step 12b
LL bitmap = ( (location_check2 & BITMAP_MASK) >> BITMAP_SHIFT);
int predict = 0;
if( (((bitmap >> tid) & BIT) != 0) // is valid
&& ((location & CHECK_1_FLAG_MASK) == 0)
&& ((location & CHECK_2_FLAG_MASK) == 0)
&& ((location & SWAP_FLAG_MASK) == 0) ){
predict = 1;
}
LL swap_list = __ballot(predict);
// step 13
for(int to_swap_offset = __ffs(swap_list);
to_swap_offset != 0 && (to_swap_offset-1) < (int)target-to_check_2 ;
to_swap_offset = __ffs(swap_list) ){
to_swap_offset--;
// step 14
int to_swap = to_check_2 + to_swap_offset;
location_swap = __shfl(location, to_swap_offset);
int lanid_target = target-to_check_2;
// TODO: lanid == to_swap_offset 's location need to change?
if( (location_swap & EMP_FLAG_MASK) != 0 ){ // step 14a
bool goto_14a1a1_step7 = false;
bool goto_14a1a2_step6 = false;
// bug fixed: first put CHECK_1_FLAG_MASK on location_swap
if(tid == lanid_target){
Slot new_location_swap = location_swap | CHECK_1_FLAG_MASK;
success = CompareAndSet(to_swap, location_swap, new_location_swap);
if(success){ // step 14a1
//location_swap = new_location_swap;
Slot new_location_check2 = (location_check2 & (~CHECK_2_FLAG_MASK));
new_location_check2 &= (~(BIT<<(BITMAP_SHIFT+(lanid_target))));
success = CompareAndSet(to_check_2, location_check2, new_location_check2);
if(success){ // step 14a1a
location_check2 = new_location_check2;
Slot new_location_target = (location & (~CHECK_1_FLAG_MASK));
success = CompareAndSet(target, location, new_location_target);
if(success){ // step 14a1a1
location = new_location_target;
target = to_swap;
goto_14a1a1_step7 = true;
} else { // step 14a1a2
// bug fixed: change search_pos = to_check_2;
//search_pos = pos;
search_pos = to_check_2;
goto_14a1a2_step6 = true;
}
} else { // step 14a1b
// TODO: design fail
printf("Insert key: %lu, step14a1b design fail\n", key);
}
} else { // step 14a2
// TODO: design fail
printf("Insert key: %lu, step14a2 design fail\n", key);
}
}
location_check2 = __shfl(location_check2, lanid_target);
goto_14a1a1_step7 = __shfl(goto_14a1a1_step7, lanid_target);
goto_14a1a2_step6 = __shfl(goto_14a1a2_step6, lanid_target);
if(goto_14a1a1_step7){
target = __shfl(target, lanid_target);
goto step_7;
}
if(goto_14a1a2_step6){
search_pos = __shfl(search_pos, lanid_target);
goto step_6;
}
} else { // step 14b
bool goto_14b1a1a_step7 = false;
if(tid == lanid_target){
Slot new_location_swap = location_swap | SWAP_FLAG_MASK;
success = CompareAndSet(to_swap, location_swap, new_location_swap);
if(success){
location_swap = new_location_swap;
Slot new_location_target = (location & (~MAX_KEY)) | (location_swap & MAX_KEY);
new_location_target &= (~EMP_FLAG_MASK);
new_location_target &= (~CHECK_1_FLAG_MASK);
new_location_target &= (~BITMAP_MASK);
success = CompareAndSet(target, location, new_location_target);
if(success){ // step 14b1a
location = new_location_target;
new_location_swap = (location_swap & (~SWAP_FLAG_MASK)) | EMP_FLAG_MASK | CHECK_1_FLAG_MASK ;
/*
if(to_swap == 7468){
printf("Insert key: %lu, step 14b1a add CHECK_1_FLAG_MASK\n", key);
}*/
success = CompareAndSet(to_swap, location_swap, new_location_swap);
if(success){ // step 14b1a1
Slot new_location_check2 = (location_check2 & (~CHECK_2_FLAG_MASK));
new_location_check2 &= (~(BIT<<(BITMAP_SHIFT+(to_swap_offset))));
success = CompareAndSet(to_check_2, location_check2, new_location_check2);
if(success){ // step 14b1a1a
location_check2 = new_location_check2;
target = to_swap;
goto_14b1a1a_step7 = true;
} else { // step 14b1a1b
// TODO: design fail
printf("Insert key: %lu, step14b1a1b design fail\n", key);
}
} else { // step 14b1a2
// TODO: design fail
printf("Insert key: %lu, step14b1a2 design fail\n", key);
}
} else { // step 14b1b
// TODO: design fail
printf("Insert key: %lu, step14b1b design fail\n", key);
}
}
}
goto_14b1a1a_step7 = __shfl(goto_14b1a1a_step7, lanid_target);
if(goto_14b1a1a_step7){
location_check2 = __shfl(location_check2, lanid_target);
target = __shfl(target, lanid_target);
goto step_7;
}
}
swap_list &= (~(BIT<<to_swap_offset));
}
// step 13b
bool goto_13b1_step10;
if(tid == 0){
Slot new_location_check2 = (location_check2 & (~CHECK_2_FLAG_MASK));
new_location_check2 &= (~(BIT<<(BITMAP_SHIFT+(target-to_check_2))));
success = CompareAndSet(to_check_2, location_check2, new_location_check2);
if(success){
location = new_location_check2;
location_check2 = location;
goto_13b1_step10 = true;
} else { // step 13b2
// TODO: design fail
printf("Insert key: %lu, step13b2 design fail\n", key);
}
}
location_check2 = __shfl(location_check2, 0);
goto_13b1_step10 = __shfl(goto_13b1_step10, 0);
if(goto_13b1_step10){
goto step_10;
}
}
}
// Main driver kernel.
// Each thread block cooperatively executes FACTOR operations: every thread
// in the block works on the SAME op_id, because Insert/Delete/Find are
// warp-cooperative (they use __shfl/__ballot internally and expect all
// THREADS_NUM lanes to participate on one key at a time).
__global__ void kernel(LL* items, LL* op, LL* result)
{
	for (int i = 0; i < FACTOR; i++) {
		const int op_id = FACTOR * blockIdx.x + i;
		if (op_id >= NUM_ITEMS) return;   // tail block: fewer than FACTOR ops left
		// Grab the operation and the associated key and execute.
		const LL key = items[op_id];
		result[op_id] = WRONG_POS;        // default outcome until the op writes one
		Slot location;                    // scratch slot returned by Find
		switch (op[op_id]) {
		case ADD:
			Insert(key, &(result[op_id]));          // 1 or 0, or WRONG_POS (needs rehash)
			break;
		case DELETE:
			Delete(key, &(result[op_id]));          // 1 or 0
			break;
		case SEARCH:
			Find(key, &(result[op_id]), &location); // slot index or WRONG_POS
			break;
		default:
			break;                                  // unknown op code: leave WRONG_POS
		}
	}
}
/*
 * Host driver.
 * Builds a random operation stream (first `adds`% inserts, next `deletes`%
 * deletes, remainder searches over keys in [10, 9+KEYS]), initializes the
 * device hash table, launches the main kernel once, and prints the kernel
 * execution time in milliseconds.
 *
 * Fixes vs. the original: device allocations are now freed before exit
 * (they previously leaked), and the large host arrays are `static` so they
 * no longer live in main's stack frame (TABLE_SIZE/NUM_ITEMS can be large).
 */
int main(int argc, char** argv)
{
	if (argc != 3) {
		printf("Need two arguments: percent add ops and percent delete ops (e.g., 30 50 for 30%% add and 50%% delete).\nAborting...\n");
		exit(1);
	}
	int adds=atoi(argv[1]);
	int deletes=atoi(argv[2]);
	if (adds+deletes > 100) {
		printf("Sum of add and delete percentages exceeds 100.\nAborting...\n");
		exit(1);
	}
	// Allocate hash table: host image with every slot marked empty.
	static Slot slots[TABLE_SIZE];
	Slot * Cslots;
	int i;
	for(i=0;i<TABLE_SIZE;i++){
		slots[i] = EMP_FLAG_MASK;
	}
#ifdef _CUTIL_H_
	CUDA_SAFE_CALL(hipMalloc((void**)&(Cslots), sizeof(Slot)*TABLE_SIZE ));
	CUDA_SAFE_CALL(hipMemcpy(Cslots, slots, sizeof(Slot)*TABLE_SIZE, hipMemcpyHostToDevice));
#else
	hipMalloc((void**)&(Cslots), sizeof(Slot)*TABLE_SIZE );
	hipMemcpy(Cslots, slots, sizeof(Slot)*TABLE_SIZE, hipMemcpyHostToDevice);
#endif
	// Publish the table pointer to the device-global m_slots.
	hipLaunchKernelGGL(( init), dim3(1), dim3(THREADS_NUM), 0, 0, Cslots);
	static LL op[NUM_ITEMS];     // Array of operations (ADD/DELETE/SEARCH)
	static LL items[NUM_ITEMS];  // Array of keys associated with operations
	static LL result[NUM_ITEMS]; // Array of outcomes
	srand(0); // fixed seed => reproducible workload
	// NUM_ITEMS is the total number of operations to execute.
	for(i=0;i<NUM_ITEMS;i++){
		items[i]=10+rand()%KEYS; // Keys
	}
	// Populate the op sequence: adds%, then deletes%, then searches.
	for(i=0;i<(NUM_ITEMS*adds)/100;i++){
		op[i]=ADD;
	}
	for(;i<(NUM_ITEMS*(adds+deletes))/100;i++){
		op[i]=DELETE;
	}
	for(;i<NUM_ITEMS;i++){
		op[i]=SEARCH;
	}
	// Allocate device memory for the workload and outcomes.
	LL* Citems;
	LL* Cop;
	LL* Cresult;
#ifdef _CUTIL_H_
	CUDA_SAFE_CALL(hipMalloc((void**)&Cresult, sizeof(LL)*NUM_ITEMS));
	CUDA_SAFE_CALL(hipMalloc((void**)&Citems, sizeof(LL)*NUM_ITEMS));
	CUDA_SAFE_CALL(hipMalloc((void**)&Cop, sizeof(LL)*NUM_ITEMS));
	CUDA_SAFE_CALL(hipMemcpy(Citems,items, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice));
	CUDA_SAFE_CALL(hipMemcpy(Cop, op, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice));
#else
	hipMalloc((void**)&Cresult, sizeof(LL)*NUM_ITEMS);
	hipMalloc((void**)&Citems, sizeof(LL)*NUM_ITEMS);
	hipMalloc((void**)&Cop, sizeof(LL)*NUM_ITEMS);
	hipMemcpy(Citems,items, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice);
	hipMemcpy(Cop, op, sizeof(LL)*NUM_ITEMS, hipMemcpyHostToDevice);
#endif
	// Calculate the number of thread blocks:
	//   NUM_ITEMS  = total number of operations to execute
	//   THREADS_NUM = threads per block (all cooperate on one op)
	//   FACTOR     = operations per block
	int blocks=(NUM_ITEMS%(THREADS_NUM*FACTOR)==0)?NUM_ITEMS/(THREADS_NUM*FACTOR):(NUM_ITEMS/(THREADS_NUM*FACTOR))+1;
	// Launch main kernel, timed with events.
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start, 0);
	hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(THREADS_NUM), 0, 0, Citems, Cop, Cresult);
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	float time;
	hipEventElapsedTime(&time, start, stop);
	hipEventDestroy(start);
	hipEventDestroy(stop);
	// Print kernel execution time in milliseconds.
	printf("%lf\n",time);
	// Check for errors (kernel launches fail asynchronously).
	hipError_t error= hipGetLastError();
	if(hipSuccess!=error){
		printf("error:CUDA ERROR (%d) {%s}\n",error,hipGetErrorString(error));
		exit(-1);
	}
	// Move results back to host memory.
#ifdef _CUTIL_H_
	CUDA_SAFE_CALL(hipMemcpy(result, Cresult, sizeof(LL)*NUM_ITEMS, hipMemcpyDeviceToHost));
#else
	hipMemcpy(result, Cresult, sizeof(LL)*NUM_ITEMS, hipMemcpyDeviceToHost);
#endif
	// NOTE(review): a commented-out verification harness (comparing results
	// against expected outcomes from a trace file) was removed here; restore
	// from history if correctness checking is needed again.
	// Release device memory (previously leaked).
	hipFree(Cslots);
	hipFree(Citems);
	hipFree(Cop);
	hipFree(Cresult);
	return 0;
}
| b169fa8a9aa71a9f49aedb1d50173b8b987e18fe.cu | //#include"cutil.h" // Comment this if cutil.h is not available
#include"cuda_runtime.h"
#include"stdio.h"
// Number of operations (expected to be supplied via -DNUM_ITEMS=...)
//#define NUM_ITEMS 50000
// Number operations per block
#define FACTOR 1
// Number of integer keys assumed in the range [10, 9+KEYS]
// (expected to be supplied via -DKEYS=...)
//#define KEYS 100
// Number of threads per block. Must equal the warp size (32): the hash
// routines cooperate with __shfl/__ballot across exactly these lanes.
#define THREADS_NUM 32
// Number of hash table buckets
#define BUCKETS_NUM 36419
// Supported operations
#define ADD (0)
#define DELETE (1)
#define SEARCH (2)
#if __WORDSIZE == 64
typedef unsigned long long int LL;
#else
typedef unsigned int LL;
#endif
// Definition of generic slot.
// Bit layout (64-bit): [EMP][CHECK_1][CHECK_2][SWAP][32-bit BITMAP][28-bit KEY]
// The flag bits implement the lock/publication protocol; BITMAP marks which
// neighborhood slots hold keys homed at this bucket.
typedef LL Slot;
#if __WORDSIZE == 64
// Size of the neighborhood, every bucket has (1 + NEIGHBORHOOD_SIZE) slots
#define NEIGHBORHOOD_SIZE 31
// Because of the flag MASK, the key value in the Slot need to be restricted
#define MAX_KEY ((LL)0x000000000fffffff)
// Use MASK to get the flag value in Slot
#define EMP_FLAG_MASK ((LL)0x8000000000000000)
#define CHECK_1_FLAG_MASK ((LL)0x4000000000000000)
#define CHECK_2_FLAG_MASK ((LL)0x2000000000000000)
#define SWAP_FLAG_MASK ((LL)0x1000000000000000)
#define BITMAP_MASK ((LL)0x0ffffffff0000000)
#define BITMAP_SHIFT 28
#define WRONG_POS ((LL)0xffffffffffffffff)
#else
#define NEIGHBORHOOD_SIZE 15
#define MAX_KEY ((LL)0x00000fff)
#define EMP_FLAG_MASK ((LL)0x80000000)
#define CHECK_1_FLAG_MASK ((LL)0x40000000)
#define CHECK_2_FLAG_MASK ((LL)0x20000000)
#define SWAP_FLAG_MASK ((LL)0x10000000)
#define BITMAP_MASK ((LL)0x0ffff000)
#define BITMAP_SHIFT 12
#define WRONG_POS ((LL)0xffffffff)
#endif
#define BIT ((LL)0x1)
#define BUCKET_RANGE (NEIGHBORHOOD_SIZE+1)
// Actual hash table physical size (extra slots so the last bucket's
// neighborhood does not run off the end of the array).
#define TABLE_SIZE (BUCKETS_NUM + NEIGHBORHOOD_SIZE)
// Max linear-probe distance when hunting for an empty bucket in Insert.
#define MAX_PROBES_FOR_EMPTY_BUCKET (12*BUCKET_RANGE)
__device__ Slot * m_slots; // Array of hash table slots (set once by init<<<>>>)
// Publishes the slot-array pointer to the device-global m_slots.
// Note: this kernel does NOT clear the slots; the host pre-fills every slot
// with EMP_FLAG_MASK before copying the table to the device.
__global__ void init(Slot * slots)
{
	m_slots = slots;
}
// Hash function: maps a key to its home bucket index in [0, BUCKETS_NUM).
__device__ int Hash(LL x)
{
	const LL bucket = x % BUCKETS_NUM;
	return (int)bucket;
}
// Atomically replaces m_slots[pos] with new_value iff the slot still holds
// old_value. Returns true on success — the CAS publish step used by every
// state transition in the hash-table protocol.
__device__ bool CompareAndSet(int pos, LL old_value, LL new_value)
{
	return atomicCAS(&(m_slots[pos]), old_value, new_value) == old_value;
}
// Warp-cooperative lookup. Lane `tid` inspects slot pos+tid of the key's
// neighborhood; on success *result receives the absolute slot index,
// otherwise WRONG_POS. *location returns the raw slot this lane read
// (callers use lane 0's copy as the bucket-head snapshot).
// NOTE(review): uses legacy mask-less __shfl/__ballot — valid pre-Volta
// (or under HIP); SM70+ would need the *_sync variants. Confirm target arch.
__device__ void Find(LL key, LL * result, Slot * location)
{
	int tid = threadIdx.x;
	int pos = Hash(key); // step 0
	LL bitmap;
	Slot location_pos;
	// Spin while the bucket head carries CHECK_1 (another warp holds it).
	do{
		*location = m_slots[pos + tid];
		location_pos = __shfl(*location, 0);
	} while( (location_pos & CHECK_1_FLAG_MASK) != 0 ); // step 2a
	// step 2b: choose which neighborhood slots to consider
	if( (location_pos & EMP_FLAG_MASK) != 0 ){ // step 2b1: head empty => scan all lanes
		bitmap = (BITMAP_MASK >> BITMAP_SHIFT);
	} else { // step 2b2: scan only slots the head's bitmap marks as homed here
		bitmap = ( (location_pos & BITMAP_MASK) >> BITMAP_SHIFT);
	}
	int predict = 0;
	int tmp_pos = Hash((*location) & MAX_KEY);
	if( (((bitmap >> tid) & BIT) != 0) // is valid
		&& ( ( (*location) & EMP_FLAG_MASK) == 0) // no emp flag
		&& ( ( (*location) & MAX_KEY) == key ) // is the key
		&& (tmp_pos == pos) // just for safe
	){
		predict = 1;
	}
	// Lowest matching lane wins; __ffs is 1-based, 0 means no lane matched.
	int ans = __ffs(__ballot(predict));
	if(ans==0){
		*result = WRONG_POS;
	} else {
		*result = pos + (ans - 1);
	}
}
// Warp-cooperative delete. *result is set to 1 if `key` was removed,
// 0 if it was not present. Retries until the bucket head can be claimed.
// Protocol: fast path marks the head slot empty directly when the key lives
// in the head; otherwise CHECK_1 is taken on the head, the target slot is
// marked empty, and the head's bitmap bit for that slot is cleared.
// NOTE(review): the "design fail" printf branches are protocol invariants
// believed unreachable — confirm before removing.
__device__ void Delete(LL key, LL * result)
{
	int tid = threadIdx.x;
	int pos = Hash(key); // step 0
	LL target;
	Slot location;
	Slot location_pos;
	Slot new_location_pos;
	LL ans;
	bool success;
	while (true) {
		ans = WRONG_POS;
		target = WRONG_POS;
		success = false;
		Find(key, &target, &location); // step 1
		if(target == WRONG_POS){
			*result = 0; //return false
			return; //step 2b
		}
		location_pos = __shfl(location, 0); // lane 0's bucket-head snapshot
		if( ((location_pos & CHECK_1_FLAG_MASK) != 0)
			|| ((location_pos & CHECK_2_FLAG_MASK) != 0)
			|| ((location_pos & SWAP_FLAG_MASK) != 0) ){
			; // head is busy elsewhere — retry the whole operation
		} else if( ((location_pos & EMP_FLAG_MASK) == 0)
			&& ( ( ( (location_pos & BITMAP_MASK) >> BITMAP_SHIFT) & BIT ) != 0 )
			&& ( ( location_pos & MAX_KEY) == key ) ) {
			// Fast path: the key sits in the bucket head itself.
			if(tid == 0){
				new_location_pos = (location_pos | EMP_FLAG_MASK);
				success = CompareAndSet(pos, location_pos, new_location_pos);
				if (success) {
					ans = 1; // return true;
				}
			}
			ans = __shfl(ans, 0);
			if(ans == 1){
				*result = 1;
				return;
			}
		} else {
			// Slow path: take CHECK_1 on the head, then clear the target slot.
			new_location_pos = (location_pos | CHECK_1_FLAG_MASK);
			if(tid == 0){
				/*
				if(pos == 7468){
					printf("Delete key: %lu, step 3c add CHECK_1_FLAG_MASK\n", key);
				}*/
				success = CompareAndSet(pos, location_pos, new_location_pos);
			}
			success = __shfl(success, 0);
			if(success){
				location_pos = new_location_pos;
				int lane_id = (int)target - pos; // lane owning the target slot
				if(tid == lane_id){
					Slot new_location = (location | EMP_FLAG_MASK);
					success = CompareAndSet(target, location, new_location);
					if(success){ // step 4a: release CHECK_1 and drop the bitmap bit
						new_location_pos = (location_pos & (~CHECK_1_FLAG_MASK));
						//remove bitmap bit
						new_location_pos &= (~(BIT<<(BITMAP_SHIFT+lane_id)));
						success = CompareAndSet(pos, location_pos, new_location_pos);
						if(success){
							ans = 1;
						} else {
							// TODO: design fail
							printf("Delete key: %lu, step4a2 design fail\n", key);
						}
					} else { //step 4b: target changed under us — just release CHECK_1
						new_location_pos = (location_pos & (~CHECK_1_FLAG_MASK));
						success = CompareAndSet(pos, location_pos, new_location_pos);
						if (!success) {
							// TODO: design fail
							printf("Delete key: %lu, step4b2 design fail\n", key);
						}
					}
				}
				ans = __shfl(ans, lane_id);
				if(ans == 1){
					*result = 1;
					return;
				}
			} // else step 3c2: lost the CHECK_1 race — retry
		}
	}
}
// Warp-cooperative hopscotch insert. *result is set to 1 on success, 0 if
// the key already exists, and may be left as WRONG_POS by the caller when
// the table is full (the "TODO: full" paths return without writing 1).
// Outline: claim the home bucket (CHECK_1), linearly probe for an empty slot
// (steps 4-6), then hopscotch the empty slot back toward the home bucket by
// swapping with displaceable keys (steps 8-14) until it lands inside the
// neighborhood (step 7a) where the key is finally written.
// NOTE(review): uses legacy mask-less __shfl/__ballot — pre-Volta only.
// Fix vs. original: goto_13b1_step10 is now initialized to false; lane 0
// left it uninitialized when the step-13b CAS failed (and the other lanes
// never wrote it), so the __shfl broadcast read an indeterminate value.
__device__ void Insert(LL key, LL * result)
{
	int tid = threadIdx.x;
	int pos = Hash(key); // step 0
	LL target;
	Slot location;
	Slot location_pos;
	Slot new_location_pos;
	LL ans;
	bool success;
	//Slot location_swap_empty;
	Slot location_swap;
	Slot location_check2;
	int search_pos = pos;
	while (true) {
		ans = WRONG_POS;
		target = WRONG_POS;
		success = false;
		Find(key, &target, &location); // step 1
		if(target != WRONG_POS){
			*result = 0; // return false: duplicate key
			return; // step 2b
		}
		location_pos = __shfl(location, 0);
		// step 3: examine the home bucket head
		if( ((location_pos & CHECK_1_FLAG_MASK) != 0)
			|| ((location_pos & CHECK_2_FLAG_MASK) != 0)
			|| ((location_pos & SWAP_FLAG_MASK) != 0) ){ // step 3a: busy — retry
			continue;
		} else if( (location_pos & EMP_FLAG_MASK) != 0 ){ // step 3b: head empty — claim it
			if(tid == 0){
				new_location_pos = (key & MAX_KEY);
				// add bitmap bit;
				new_location_pos |= (BIT<<(BITMAP_SHIFT));
				success = CompareAndSet(pos, location_pos, new_location_pos);
				if(success){
					ans = 1;
				}
			}
			ans = __shfl(ans, 0);
			if(ans == 1){ // step 3b1
				*result = 1;
				return;
			} else { // step 3b2
				continue;
			}
		} else { // step 3c: take CHECK_1 on the home bucket
			bool continue_3c = false;
			if(tid == 0){
				new_location_pos = (location_pos | CHECK_1_FLAG_MASK);
				/*
				if(pos == 7468){
					printf("Insert key: %lu, step 3c add CHECK_1_FLAG_MASK\n", key);
					printf("location_pos: %x%x, new_location_pos: %x%x\n", location_pos, new_location_pos);
				}*/
				success = CompareAndSet(pos, location_pos, new_location_pos);
				if(!success){ // step 3c2
					continue_3c = true;
				} else {
					location = new_location_pos;
				}
			}
			continue_3c = __shfl(continue_3c, 0);
			if(continue_3c) continue;
			location_pos = __shfl(location, 0);
		}
		search_pos = pos;
step_4:
		// Probe the current window for an empty slot to claim.
		//__syncthreads();
		bool condition_4b = (((location & CHECK_1_FLAG_MASK) != 0) && ((location & EMP_FLAG_MASK) != 0));
		LL target_4b = __ffs(__ballot(condition_4b));
		bool condition_4a = (((location & CHECK_1_FLAG_MASK) == 0) && ((location & EMP_FLAG_MASK) != 0));
		LL target_4a_list = __ballot(condition_4a);
		for(int target_4a_offset = __ffs(target_4a_list);
			target_4a_offset != 0;
			target_4a_offset = __ffs(target_4a_list) ){
			LL lanid_4a = target_4a_offset-1;
			target = search_pos + lanid_4a;
			bool goto_4a_step7 = false;
			if(tid == lanid_4a){
				Slot new_location = (location | CHECK_1_FLAG_MASK);
				/*
				if(target == 7468){
					printf(" Delete key: %lu, step 4a add CHECK_1_FLAG_MASK\n", key);
				}*/
				success = CompareAndSet(target, location, new_location);
				if(success){
					location = new_location;
					//location_swap_empty = location;
					goto_4a_step7 = true;
				}
			}
			goto_4a_step7 = __shfl(goto_4a_step7, lanid_4a);
			if(goto_4a_step7){
				//target = __shfl(target, lanid_4a);
				//location_swap_empty = __shfl(location_swap_empty, lanid_4a); // use for swap
				goto step_7;
			}
			// bug fixed: should be target_4a_list &= (~(BIT<<lanid_4a));
			// target_4a_list &= (~(BIT<<target_4a_list));
			target_4a_list &= (~(BIT<<lanid_4a));
		}
		if(target_4b != 0){
			// Someone else holds an empty slot here — restart the probe.
			search_pos = pos;
			goto step_6;
		}
		// step 5: advance the probe window; give up (table full) past the limit
		search_pos += BUCKET_RANGE;
		if( search_pos >= pos + MAX_PROBES_FOR_EMPTY_BUCKET || search_pos >= BUCKETS_NUM ){
			bool goto_5a_full = false;
			if(tid == 0){
				new_location_pos = (location_pos & (~CHECK_1_FLAG_MASK));
				success = CompareAndSet(pos, location_pos, new_location_pos);
				if(success){ // step 5a1
					goto_5a_full = true;
				} else { // step 5a2
					// TODO: design fail
					printf("Insert key: %lu, step5a2 design fail\n", key);
				}
			}
			goto_5a_full = __shfl(goto_5a_full, 0);
			if(goto_5a_full){
				// TODO: full
				return;
			} else {
				// TODO: design fail 5a2
				printf("Insert key: %lu, step5a2 full design fail\n", key);
			}
		}
step_6:
		location = m_slots[search_pos + tid];
		goto step_4;
step_7:
		// The claimed empty slot (`target`) is within the home neighborhood:
		// write the key there, set the bitmap bit, and release CHECK_1.
		if( ((int)target > pos) && (((int)target - NEIGHBORHOOD_SIZE) <= pos) ){ // step 7a
			location = m_slots[pos + tid];
			location_pos = __shfl(location,0);
			int lanid_7a = (int)target - pos;
			if(tid == lanid_7a){
				Slot new_location_target = (key & MAX_KEY);
				success = CompareAndSet(target, location, new_location_target);
				if(success){
					new_location_pos = (location_pos & (~CHECK_1_FLAG_MASK));
					//add bitmap bit
					new_location_pos |= (BIT<<(BITMAP_SHIFT+lanid_7a));
					/*
					if(pos == 7468){
						printf("Insert key: %lu, step 7a remove CHECK_1_FLAG_MASK\n", key);
						printf("location_pos: %x%x, new_location_pos: %x%x\n", location_pos, new_location_pos);
					}*/
					success = CompareAndSet(pos, location_pos, new_location_pos);
					if(success){
						ans = 1;
					} else {
						// TODO: design fail
						printf("Insert key: %lu, step7a1b design fail\n", key);
					}
				} else {
					new_location_pos = (location_pos & (~CHECK_1_FLAG_MASK));
					success = CompareAndSet(pos, location_pos, new_location_pos);
					if( !success ){
						// TODO: design fail
						printf("Insert key: %lu, step7a2b design fail\n", key);
					}
				}
			}
			ans = __shfl(ans, lanid_7a);
			if(ans == 1){ // step 3a1a
				*result = 1;
				return;
			} else {
				continue;
			}
		}
		//step 8: empty slot is too far — hopscotch it closer, starting from
		// the farthest bucket whose neighborhood still covers `target`.
		int to_check_2 = target - NEIGHBORHOOD_SIZE;
		location = m_slots[to_check_2 + tid];
		location_check2 = __shfl(location, 0);
step_9:
		if( ((location_check2 & CHECK_1_FLAG_MASK) == 0)
			&& ((location_check2 & CHECK_2_FLAG_MASK) == 0)
			&& ((location_check2 & SWAP_FLAG_MASK) == 0)
			&& ((location_check2 & EMP_FLAG_MASK) == 0) ){ // step 9a
			bool goto_9a1_step12 = false;
			if(tid == 0){
				Slot new_location_check2 = (location_check2 | CHECK_2_FLAG_MASK);
				new_location_check2 |= (BIT<<(BITMAP_SHIFT+(target-to_check_2)));
				success = CompareAndSet(to_check_2, location_check2, new_location_check2);
				if(success){
					location = new_location_check2;
					location_check2 = location;
					goto_9a1_step12 = true;
				}
			}
			goto_9a1_step12 = __shfl(goto_9a1_step12, 0);
			if(goto_9a1_step12){
				location_check2 = __shfl(location_check2, 0);
				goto step_12;
			}
		} else if( ((location_check2 & CHECK_1_FLAG_MASK) == 0)
			&& ((location_check2 & EMP_FLAG_MASK) != 0) ) { // step 9b
			// bug fixed: add CHECK_1_FLAG_MASK to location_check2 and remove target's CHECK_1_FLAG_MASK
			int lanid_9b = (int)target - to_check_2;
			bool goto_9b_step7 = false;
			if(tid == lanid_9b){
				Slot new_location_check2 = (location_check2 | CHECK_1_FLAG_MASK);
				success = CompareAndSet(to_check_2, location_check2, new_location_check2);
				if(success){
					location_check2 = new_location_check2;
					Slot new_location_target = (location & (~CHECK_1_FLAG_MASK));
					success = CompareAndSet(target, location, new_location_target);
					if(success){
						goto_9b_step7 = true;
					} else {
						// TODO : design fail
					}
				} else {
					// TODO : design fail
				}
			}
			location_check2 = __shfl(location_check2,lanid_9b);
			goto_9b_step7 = __shfl(goto_9b_step7,lanid_9b);
			if(goto_9b_step7){
				target = to_check_2;
				goto step_7;
			}
		} else if( ((location_check2 & CHECK_1_FLAG_MASK) != 0)
			&& ((location_check2 & EMP_FLAG_MASK) != 0) ) { // step 9c
			// bug fixed: remove target's CHECK_1_FLAG_MASK and change search_pos = to_check_2
			int lanid_9c = (int)target - to_check_2;
			bool goto_9c_step6 = false;
			if(tid == lanid_9c){
				Slot new_location_target = (location & (~CHECK_1_FLAG_MASK));
				success = CompareAndSet(target, location, new_location_target);
				if(success){
					goto_9c_step6 = true;
				} else {
					// TODO : design fail
				}
			}
			goto_9c_step6 = __shfl(goto_9c_step6,lanid_9c);
			if(goto_9c_step6){
				search_pos = to_check_2;
				goto step_6;
			}
		}
step_10:
		to_check_2++;
		location = m_slots[to_check_2 + tid];
		//step 11
		if(to_check_2 < (int)target){
			location_check2 = __shfl(location, 0);
			goto step_9;
		} else { // to_check_2 == target: no bucket could host the key — full
			bool goto_11b1_full = false;
			if(tid == 0){
				Slot new_location = (location & (~CHECK_1_FLAG_MASK));
				success = CompareAndSet(target, location, new_location);
				if(success){
					location = new_location;
					new_location_pos = (location_pos & (~CHECK_1_FLAG_MASK));
					success = CompareAndSet(pos, location_pos, new_location_pos);
					if(success){ // step 11b1a
						goto_11b1_full = true;
					} else { // step 11b1b
						// TODO: design fail
						printf("Insert key: %lu, step11b1b design fail\n", key);
					}
				} else { // step 11b2
					// TODO: design fail
					printf("Insert key: %lu, step11b2 design fail\n", key);
				}
			}
			goto_11b1_full = __shfl(goto_11b1_full, 0);
			if(goto_11b1_full){
				// TODO: full
				return;
			} else {
				// TODO: design fail 11b2
				printf("Insert key: %lu, step11b2 full design fail\n", key);
			}
		}
step_12:
		if ( (location_check2 & (BIT<<BITMAP_SHIFT)) != 0 ){ // step 12a: move the
			// bucket-head key of to_check_2 into the empty target slot.
			int lanid_12a = target - to_check_2;
			bool goto_12a1a_step7 = false;
			if(tid == lanid_12a){
				Slot new_location = (location & (~MAX_KEY)) | (location_check2 & MAX_KEY);
				new_location &= (~EMP_FLAG_MASK);
				new_location &= (~CHECK_1_FLAG_MASK);
				new_location &= (~BITMAP_MASK);
				success = CompareAndSet(target, location, new_location);
				if(success){ // step 12a1
					location = new_location;
					Slot new_location_check2 = (location_check2 & (~CHECK_2_FLAG_MASK)) | EMP_FLAG_MASK;
					success = CompareAndSet(to_check_2, location_check2, new_location_check2);
					if(success){ // step 12a1a
						location_check2 = new_location_check2;
						goto_12a1a_step7 = true;
					} else { // step 12a1b
						// TODO: design fail
						printf("Insert key: %lu, step12a1b design fail\n", key);
					}
				} else { // step 12a2
					// TODO: design fail
					printf("Insert key: %lu, step12a2 design fail\n", key);
				}
			}
			goto_12a1a_step7 = __shfl(goto_12a1a_step7, lanid_12a);
			if(goto_12a1a_step7){
				location_check2 = __shfl(location_check2, lanid_12a);
				// bug fixed: target = to_check_2
				target = to_check_2;
				goto step_7;
			}
		}
		// step 12b: pick any displaceable key homed at to_check_2
		LL bitmap = ( (location_check2 & BITMAP_MASK) >> BITMAP_SHIFT);
		int predict = 0;
		if( (((bitmap >> tid) & BIT) != 0) // is valid
			&& ((location & CHECK_1_FLAG_MASK) == 0)
			&& ((location & CHECK_2_FLAG_MASK) == 0)
			&& ((location & SWAP_FLAG_MASK) == 0) ){
			predict = 1;
		}
		LL swap_list = __ballot(predict);
		// step 13: try each candidate in ascending slot order, but only those
		// strictly before the empty target slot.
		for(int to_swap_offset = __ffs(swap_list);
			to_swap_offset != 0 && (to_swap_offset-1) < (int)target-to_check_2 ;
			to_swap_offset = __ffs(swap_list) ){
			to_swap_offset--;
			// step 14
			int to_swap = to_check_2 + to_swap_offset;
			location_swap = __shfl(location, to_swap_offset);
			int lanid_target = target-to_check_2;
			// TODO: lanid == to_swap_offset 's location need to change?
			if( (location_swap & EMP_FLAG_MASK) != 0 ){ // step 14a: candidate went
				// empty in the meantime — claim it instead of swapping.
				bool goto_14a1a1_step7 = false;
				bool goto_14a1a2_step6 = false;
				// bug fixed: first put CHECK_1_FLAG_MASK on location_swap
				if(tid == lanid_target){
					Slot new_location_swap = location_swap | CHECK_1_FLAG_MASK;
					success = CompareAndSet(to_swap, location_swap, new_location_swap);
					if(success){ // step 14a1
						//location_swap = new_location_swap;
						Slot new_location_check2 = (location_check2 & (~CHECK_2_FLAG_MASK));
						new_location_check2 &= (~(BIT<<(BITMAP_SHIFT+(lanid_target))));
						success = CompareAndSet(to_check_2, location_check2, new_location_check2);
						if(success){ // step 14a1a
							location_check2 = new_location_check2;
							Slot new_location_target = (location & (~CHECK_1_FLAG_MASK));
							success = CompareAndSet(target, location, new_location_target);
							if(success){ // step 14a1a1
								location = new_location_target;
								target = to_swap;
								goto_14a1a1_step7 = true;
							} else { // step 14a1a2
								// bug fixed: change search_pos = to_check_2;
								//search_pos = pos;
								search_pos = to_check_2;
								goto_14a1a2_step6 = true;
							}
						} else { // step 14a1b
							// TODO: design fail
							printf("Insert key: %lu, step14a1b design fail\n", key);
						}
					} else { // step 14a2
						// TODO: design fail
						printf("Insert key: %lu, step14a2 design fail\n", key);
					}
				}
				location_check2 = __shfl(location_check2, lanid_target);
				goto_14a1a1_step7 = __shfl(goto_14a1a1_step7, lanid_target);
				goto_14a1a2_step6 = __shfl(goto_14a1a2_step6, lanid_target);
				if(goto_14a1a1_step7){
					target = __shfl(target, lanid_target);
					goto step_7;
				}
				if(goto_14a1a2_step6){
					search_pos = __shfl(search_pos, lanid_target);
					goto step_6;
				}
			} else { // step 14b: swap the candidate's key into the empty target
				bool goto_14b1a1a_step7 = false;
				if(tid == lanid_target){
					Slot new_location_swap = location_swap | SWAP_FLAG_MASK;
					success = CompareAndSet(to_swap, location_swap, new_location_swap);
					if(success){
						location_swap = new_location_swap;
						Slot new_location_target = (location & (~MAX_KEY)) | (location_swap & MAX_KEY);
						new_location_target &= (~EMP_FLAG_MASK);
						new_location_target &= (~CHECK_1_FLAG_MASK);
						new_location_target &= (~BITMAP_MASK);
						success = CompareAndSet(target, location, new_location_target);
						if(success){ // step 14b1a
							location = new_location_target;
							new_location_swap = (location_swap & (~SWAP_FLAG_MASK)) | EMP_FLAG_MASK | CHECK_1_FLAG_MASK ;
							/*
							if(to_swap == 7468){
								printf("Insert key: %lu, step 14b1a add CHECK_1_FLAG_MASK\n", key);
							}*/
							success = CompareAndSet(to_swap, location_swap, new_location_swap);
							if(success){ // step 14b1a1
								Slot new_location_check2 = (location_check2 & (~CHECK_2_FLAG_MASK));
								new_location_check2 &= (~(BIT<<(BITMAP_SHIFT+(to_swap_offset))));
								success = CompareAndSet(to_check_2, location_check2, new_location_check2);
								if(success){ // step 14b1a1a
									location_check2 = new_location_check2;
									target = to_swap;
									goto_14b1a1a_step7 = true;
								} else { // step 14b1a1b
									// TODO: design fail
									printf("Insert key: %lu, step14b1a1b design fail\n", key);
								}
							} else { // step 14b1a2
								// TODO: design fail
								printf("Insert key: %lu, step14b1a2 design fail\n", key);
							}
						} else { // step 14b1b
							// TODO: design fail
							printf("Insert key: %lu, step14b1b design fail\n", key);
						}
					}
				}
				goto_14b1a1a_step7 = __shfl(goto_14b1a1a_step7, lanid_target);
				if(goto_14b1a1a_step7){
					location_check2 = __shfl(location_check2, lanid_target);
					target = __shfl(target, lanid_target);
					goto step_7;
				}
			}
			swap_list &= (~(BIT<<to_swap_offset));
		}
		// step 13b: no candidate worked — release CHECK_2 and move on.
		bool goto_13b1_step10 = false; // bug fix: was uninitialized; read via __shfl below
		if(tid == 0){
			Slot new_location_check2 = (location_check2 & (~CHECK_2_FLAG_MASK));
			new_location_check2 &= (~(BIT<<(BITMAP_SHIFT+(target-to_check_2))));
			success = CompareAndSet(to_check_2, location_check2, new_location_check2);
			if(success){
				location = new_location_check2;
				location_check2 = location;
				goto_13b1_step10 = true;
			} else { // step 13b2
				// TODO: design fail
				printf("Insert key: %lu, step13b2 design fail\n", key);
			}
		}
		location_check2 = __shfl(location_check2, 0);
		goto_13b1_step10 = __shfl(goto_13b1_step10, 0);
		if(goto_13b1_step10){
			goto step_10;
		}
	}
}
// Main driver kernel.
// Each thread block cooperatively executes FACTOR operations: every thread
// in the block works on the SAME op_id, because Insert/Delete/Find are
// warp-cooperative (they use __shfl/__ballot internally and expect all
// THREADS_NUM lanes to participate on one key at a time).
__global__ void kernel(LL* items, LL* op, LL* result)
{
	for (int i = 0; i < FACTOR; i++) {
		const int op_id = FACTOR * blockIdx.x + i;
		if (op_id >= NUM_ITEMS) return;   // tail block: fewer than FACTOR ops left
		// Grab the operation and the associated key and execute.
		const LL key = items[op_id];
		result[op_id] = WRONG_POS;        // default outcome until the op writes one
		Slot location;                    // scratch slot returned by Find
		switch (op[op_id]) {
		case ADD:
			Insert(key, &(result[op_id]));          // 1 or 0, or WRONG_POS (needs rehash)
			break;
		case DELETE:
			Delete(key, &(result[op_id]));          // 1 or 0
			break;
		case SEARCH:
			Find(key, &(result[op_id]), &location); // slot index or WRONG_POS
			break;
		default:
			break;                                  // unknown op code: leave WRONG_POS
		}
	}
}
/*
 * Driver: builds a random stream of NUM_ITEMS hash-table operations
 * (argv[1]% inserts, argv[2]% deletes, the remainder searches), uploads the
 * empty table and the operation streams to the GPU, times the main kernel
 * with CUDA events, and prints the elapsed time in milliseconds.
 *
 * Fixes over the previous revision: the init launch is now error-checked,
 * all device allocations are released before exit, and a typo in the
 * usage message was corrected.
 */
int main(int argc, char** argv)
{
    if (argc != 3) {
        printf("Need two arguments: percent add ops and percent delete ops (e.g., 30 50 for 30%% add and 50%% delete).\nAborting...\n");
        exit(1);
    }
    int adds=atoi(argv[1]);
    int deletes=atoi(argv[2]);
    if (adds+deletes > 100) {
        printf("Sum of add and delete percentages exceeds 100.\nAborting...\n");
        exit(1);
    }
    // Allocate the hash table; every slot starts out empty.
    Slot slots[TABLE_SIZE];
    Slot * Cslots;
    int i;
    for(i=0;i<TABLE_SIZE;i++){
        slots[i] = EMP_FLAG_MASK;
    }
#ifdef _CUTIL_H_
    CUDA_SAFE_CALL(cudaMalloc((void**)&(Cslots), sizeof(Slot)*TABLE_SIZE ));
#else
    cudaMalloc((void**)&(Cslots), sizeof(Slot)*TABLE_SIZE );
#endif
#ifdef _CUTIL_H_
    CUDA_SAFE_CALL(cudaMemcpy(Cslots, slots, sizeof(Slot)*TABLE_SIZE, cudaMemcpyHostToDevice));
#else
    cudaMemcpy(Cslots, slots, sizeof(Slot)*TABLE_SIZE, cudaMemcpyHostToDevice);
#endif
    // Initialize the device memory (publish the table pointer to the GPU).
    init<<<1, THREADS_NUM>>>(Cslots);
    // Catch launch/allocation errors early instead of letting them surface
    // as a confusing failure of the main kernel further below.
    cudaError_t init_error = cudaGetLastError();
    if (cudaSuccess != init_error) {
        printf("error:CUDA ERROR (%d) {%s}\n", init_error, cudaGetErrorString(init_error));
        exit(-1);
    }
    LL op[NUM_ITEMS];      // Array of operations
    LL items[NUM_ITEMS];   // Array of keys associated with operations
    LL result[NUM_ITEMS];  // Array of outcomes
    //LL expect_result[NUM_ITEMS]; // Array of expected result
    /*
    Trace-driven input kept for reference: reads "I|R|D <expected> <key>"
    lines from a file instead of generating a random workload.
    FILE * fp;
    fp = fopen("/home/udms/Fanny/test/myfile_4.txt","r");
    if(fp == NULL) exit(EXIT_FAILURE);
    char line[100];
    i=0;
    while (fgets(line, 100, fp) != NULL) {
        char * p = strtok (line," ");
        if(*p == 'I'){
            op[i]=ADD;
        } else if(*p == 'R'){
            op[i]=SEARCH;
        } else {
            op[i]=DELETE;
        }
        p = strtok(NULL," ");
        if(*p == '0'){
            expect_result[i] = 0;
        } else {
            expect_result[i] = 1;
        }
        p = strtok(NULL," ");
        unsigned long ul = strtoul (p, NULL, 0);
        items[i] = ul;
        i++;
    }
    fclose(fp);
    */
    // Fixed seed so runs are reproducible.
    srand(0);
    // NUM_ITEMS is the total number of operations to execute
    for(i=0;i<NUM_ITEMS;i++){
        items[i]=10+rand()%KEYS; // Keys
    }
    // Populate the op sequence: first the ADDs, then the DELETEs, and the
    // remainder are SEARCHes.
    for(i=0;i<(NUM_ITEMS*adds)/100;i++){
        op[i]=ADD;
    }
    for(;i<(NUM_ITEMS*(adds+deletes))/100;i++){
        op[i]=DELETE;
    }
    for(;i<NUM_ITEMS;i++){
        op[i]=SEARCH;
    }
    // Allocate device memory for the key, op and result streams.
    LL* Citems;
    LL* Cop;
    LL* Cresult;
#ifdef _CUTIL_H_
    CUDA_SAFE_CALL(cudaMalloc((void**)&Cresult, sizeof(LL)*NUM_ITEMS));
    CUDA_SAFE_CALL(cudaMalloc((void**)&Citems, sizeof(LL)*NUM_ITEMS));
    CUDA_SAFE_CALL(cudaMalloc((void**)&Cop, sizeof(LL)*NUM_ITEMS));
    CUDA_SAFE_CALL(cudaMemcpy(Citems,items, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice));
    CUDA_SAFE_CALL(cudaMemcpy(Cop, op, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice));
#else
    cudaMalloc((void**)&Cresult, sizeof(LL)*NUM_ITEMS);
    cudaMalloc((void**)&Citems, sizeof(LL)*NUM_ITEMS);
    cudaMalloc((void**)&Cop, sizeof(LL)*NUM_ITEMS);
    cudaMemcpy(Citems,items, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice);
    cudaMemcpy(Cop, op, sizeof(LL)*NUM_ITEMS, cudaMemcpyHostToDevice);
#endif
    // Number of thread blocks, rounded up: one block covers
    // THREADS_NUM * FACTOR operations (THREADS_NUM threads per block,
    // FACTOR operations per thread).
    int blocks=(NUM_ITEMS%(THREADS_NUM*FACTOR)==0)?NUM_ITEMS/(THREADS_NUM*FACTOR):(NUM_ITEMS/(THREADS_NUM*FACTOR))+1;
    // Launch the main kernel, bracketed by CUDA events for timing.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    kernel<<<blocks, THREADS_NUM>>>(Citems, Cop, Cresult);
    cudaEventRecord(stop, 0);
    // Blocks until the kernel (and the stop event) has completed.
    cudaEventSynchronize(stop);
    float time;
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Print kernel execution time in milliseconds
    printf("%lf\n",time);
    // Check for errors from the kernel launch / execution.
    cudaError_t error= cudaGetLastError();
    if(cudaSuccess!=error){
        printf("error:CUDA ERROR (%d) {%s}\n",error,cudaGetErrorString(error));
        exit(-1);
    }
    // Move results back to host memory
#ifdef _CUTIL_H_
    CUDA_SAFE_CALL(cudaMemcpy(result, Cresult, sizeof(LL)*NUM_ITEMS, cudaMemcpyDeviceToHost));
#else
    cudaMemcpy(result, Cresult, sizeof(LL)*NUM_ITEMS, cudaMemcpyDeviceToHost);
#endif
    /*
    Result verification against the trace's expected outcomes, kept for
    reference (requires the trace-driven input block above):
    int insert_full = 0;
    int insert_fail = 0;
    int delete_fail = 0;
    int find_fail = 0;
    for(i=0;i<NUM_ITEMS;i++){
        if(op[i]==ADD){
            if(result[i]==WRONG_POS){
                insert_full++;
            } else if(result[i] != expect_result[i]){
                printf("ADD fail, line: %d, item: %lu, result: %d, expect_result: %d\n", i, (unsigned long)items[i], (int)result[i], (int)expect_result[i]);
                insert_fail++;
            }
        } else if(op[i]==DELETE){
            if(result[i] != expect_result[i]){
                delete_fail++;
            }
        } else if(op[i]==SEARCH){
            if(result[i]==WRONG_POS && expect_result[i]==0){
                ;
            } else if(result[i]!=WRONG_POS && expect_result[i]==1){
                ;
            } else {
                printf("SEARCH fail, line: %d, item: %lu, result: %d, expect_result: %d\n", i, (unsigned long)items[i], (int)result[i], (int)expect_result[i]);
                find_fail++;
            }
        }
    }
    printf("insert_full: %d insert_fail: %d delete_fail: %d find_fail: %d\n", insert_full, insert_fail, delete_fail, find_fail); */
    // Release device memory (previously leaked).
    cudaFree(Cslots);
    cudaFree(Citems);
    cudaFree(Cop);
    cudaFree(Cresult);
    return 0;
}
|
2bd27977ebec47c8a0d8eb24741505c3dc35c77a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#ifndef __HIPCC__
#include <stdlib.h>
#include <math.h>
#include <hmpprt/Grouplet.h>
#include <hmpprt/HostTypes.h>
#include <hmpprt/Context.h>
#include <hmpprt/CUDAGrid.h>
#include <hmpprt/CUDAModule.h>
#include <hmpprt/DeviceManager.h>
#include <hmpperr/hmpperr.h>
#include <openacci/openacci_c.h>
#ifdef _WIN32
# define CDLT_API __declspec(dllexport)
#else /* ! _WIN32 */
# define CDLT_API
#endif /* _WIN32 */
#else // ! __HIPCC__
#include <hmpprt/HostTypes.h>
#include <hmpprt/CUDAIntrinsics.h>
extern __shared__ int64_t hmpp_sharedmem[];
#endif // __HIPCC__
#ifndef __HIPCC__
#else
#endif
#define HMPPCG_SIMD_LENGTH 32
# 12 "mt.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_44(hmpprt::s32 n_2, float* A_2, float* B_2)
;
#endif // __HIPCC__
# 12 "mt.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_44_internal_1(hmpprt::s32 n, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,float> A, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,float> B)
;
#endif // __HIPCC__
# 12 "mt.cpp"
#ifndef __HIPCC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_44_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_44_parallel_region_1(float* A_1, float* B_1, hmpprt::s32 n_1);
#endif // __HIPCC__
# 12 "mt.cpp"
#ifdef __HIPCC__
// HMPP/OpenACC-generated GPU kernel for the mt.cpp region at line 12:
// out-of-place transpose of an n_1 x n_1 row-major matrix (B = A^T).
// Each gang (block along x) owns a chunk of iter_per_gang_1 linear indices
// of the flattened n*n space; threads of a gang stride over that chunk by
// gr_btnumy() (the block's y extent). The `# NN "mt.cpp"` lines are
// compiler-emitted line markers mapping back to the original source; keep
// them with the code.
extern "C" __global__ void hmpp_acc_region_main_44_parallel_region_1(float* A_1, float* B_1, hmpprt::s32 n_1)
{
# 14 "mt.cpp"
{
# 45 "mt.cpp"
hmpprt::s32 iter_per_gang_1;
# 45 "mt.cpp"
hmpprt::s32 first_gang_iter_1;
# 45 "mt.cpp"
hmpprt::s32 last_gang_iter_1;
# 45 "mt.cpp"
iter_per_gang_1 = ((1 + (n_1 * n_1 - 1) / 192) > 256 ? (1 + (n_1 * n_1 - 1) / 192) : 256);
# 45 "mt.cpp"
first_gang_iter_1 = (hmpprt::gr_gbidx()) * iter_per_gang_1;
# 45 "mt.cpp"
last_gang_iter_1 = ((first_gang_iter_1 + iter_per_gang_1 - 1) < (n_1 * n_1 - 1) ? (first_gang_iter_1 + iter_per_gang_1 - 1) : (n_1 * n_1 - 1));
# 45 "mt.cpp"
hmpprt::s32 i_2;
# 45 "mt.cpp"
# 49 "mt.cpp"
for (i_2 = first_gang_iter_1 + (hmpprt::gr_btidy()) ; i_2 <= last_gang_iter_1 ; i_2 = i_2 + (hmpprt::gr_btnumy()))
{
# 45 "mt.cpp"
hmpprt::s32 i_1;
# 48 "mt.cpp"
hmpprt::s32 j_1;
# 50 "mt.cpp"
j_1 = i_2 % n_1;
# 50 "mt.cpp"
i_1 = i_2 / n_1;
# 50 "mt.cpp"
*(B_1 + (j_1 * n_1 + i_1)) = *(A_1 + (i_1 * n_1 + j_1));
}
# 12 "mt.cpp"
}
}
#endif // __HIPCC__
# 12 "mt.cpp"
#ifndef __HIPCC__
// HMPP-generated host-side launcher: configures a 192x1 grid of 1x256
// thread blocks, binds the kernel arguments by name (pointers are passed
// as 8-byte values), and launches the transpose kernel on the current
// CUDA device. A and B are device pointers in global memory (MS_CUDA_GLOB).
void hmpp_acc_region_main_44_internal_1(hmpprt::s32 n, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,float> A, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,float> B)
{
# 12 "mt.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&A, 8, "A_1");
__hmppcg_call.addLocalParameter(&B, 8, "B_1");
__hmppcg_call.addLocalParameter((hmpprt::s32) (n), "n_1");
__hmppcg_call.launch(hmpp_acc_region_main_44_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __HIPCC__
# 12 "mt.cpp"
#ifndef __HIPCC__
// Exported C entry point for the accelerated region: wraps the raw device
// pointers into typed DevicePtr handles and forwards to the internal
// launcher. Invoked by the HMPP runtime on behalf of the host program.
extern "C" CDLT_API void hmpp_acc_region_main_44(hmpprt::s32 n_2, float* A_2, float* B_2)
{
# 1 "<preprocessor>"
(hmpp_acc_region_main_44_internal_1(n_2, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,float> (A_2), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,float> (B_2)));
}
#endif // __HIPCC__
#ifndef __HIPCC__
extern "C" const char * hmpprt_cuda_get_gpu_code();
static hmpprt::CUDAModule * hmpprt_module = 0;
static int hmpprt_uses = 0;
// Grouplet initialisation hook called by the HMPP runtime. Reference
// counted: the CUDA module and the kernel grid handle are created only on
// the first call. Returns 0 on success, or a heap-allocated clone of a
// caught hmpperr::Error (owned by the runtime); aborts on any other
// exception since the grouplet would be left in an undefined state.
extern "C" CDLT_API void * hmpprt_init()
{
try
{
if (hmpprt_uses++ == 0)
{
hmpprt_module = new hmpprt::CUDAModule(hmpprt_cuda_get_gpu_code());
hmpp_acc_region_main_44_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_44_parallel_region_1");
}
hmpprt::Context::getInstance()->getGrouplet()->setTarget(hmpprt::CUDA);
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_44", "prototype hmpp_acc_region_main_44(n: s32, A: ^cudaglob float, B: ^cudaglob float)");
}
catch (hmpperr::Error & e)
{
return e.clone();
}
catch(...)
{
fprintf(stderr,"Unexpected error in hmpprt_init()\n");
abort();
}
return 0;
}
#endif // __HIPCC__
#ifndef __HIPCC__
// Grouplet finalisation hook: mirror of hmpprt_init(). Destroys the kernel
// grid and CUDA module when the last user releases the grouplet. Returns 0
// on success or a cloned hmpperr::Error; aborts on unknown exceptions.
extern "C" CDLT_API void * hmpprt_fini()
{
try
{
if (--hmpprt_uses == 0)
{
delete hmpp_acc_region_main_44_parallel_region_1;
delete hmpprt_module;
hmpprt_module = 0;
}
}
catch (hmpperr::Error & e)
{
return e.clone();
}
catch(...)
{
fprintf(stderr,"Unexpected error in hmpprt_fini()\n");
abort();
}
return 0;
}
#endif // __HIPCC__
// footer
| 2bd27977ebec47c8a0d8eb24741505c3dc35c77a.cu |
#include <stdio.h>
#ifndef __CUDACC__
#include <stdlib.h>
#include <math.h>
#include <hmpprt/Grouplet.h>
#include <hmpprt/HostTypes.h>
#include <hmpprt/Context.h>
#include <hmpprt/CUDAGrid.h>
#include <hmpprt/CUDAModule.h>
#include <hmpprt/DeviceManager.h>
#include <hmpperr/hmpperr.h>
#include <openacci/openacci_c.h>
#ifdef _WIN32
# define CDLT_API __declspec(dllexport)
#else /* ! _WIN32 */
# define CDLT_API
#endif /* _WIN32 */
#else // ! __CUDACC__
#include <hmpprt/HostTypes.h>
#include <hmpprt/CUDAIntrinsics.h>
extern __shared__ int64_t hmpp_sharedmem[];
#endif // __CUDACC__
#ifndef __CUDACC__
#else
#endif
#define HMPPCG_SIMD_LENGTH 32
# 12 "mt.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_44(hmpprt::s32 n_2, float* A_2, float* B_2)
;
#endif // __CUDACC__
# 12 "mt.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_44_internal_1(hmpprt::s32 n, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,float> A, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,float> B)
;
#endif // __CUDACC__
# 12 "mt.cpp"
#ifndef __CUDACC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_44_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_44_parallel_region_1(float* A_1, float* B_1, hmpprt::s32 n_1);
#endif // __CUDACC__
# 12 "mt.cpp"
#ifdef __CUDACC__
// HMPP/OpenACC-generated CUDA kernel for the mt.cpp region at line 12:
// out-of-place transpose of an n_1 x n_1 row-major matrix (B = A^T).
// Each gang (block along x) owns a chunk of iter_per_gang_1 linear indices
// of the flattened n*n space; threads of a gang stride over that chunk by
// gr_btnumy() (the block's y extent). The `# NN "mt.cpp"` lines are
// compiler-emitted line markers mapping back to the original source; keep
// them with the code.
extern "C" __global__ void hmpp_acc_region_main_44_parallel_region_1(float* A_1, float* B_1, hmpprt::s32 n_1)
{
# 14 "mt.cpp"
{
# 45 "mt.cpp"
hmpprt::s32 iter_per_gang_1;
# 45 "mt.cpp"
hmpprt::s32 first_gang_iter_1;
# 45 "mt.cpp"
hmpprt::s32 last_gang_iter_1;
# 45 "mt.cpp"
iter_per_gang_1 = ((1 + (n_1 * n_1 - 1) / 192) > 256 ? (1 + (n_1 * n_1 - 1) / 192) : 256);
# 45 "mt.cpp"
first_gang_iter_1 = (hmpprt::gr_gbidx()) * iter_per_gang_1;
# 45 "mt.cpp"
last_gang_iter_1 = ((first_gang_iter_1 + iter_per_gang_1 - 1) < (n_1 * n_1 - 1) ? (first_gang_iter_1 + iter_per_gang_1 - 1) : (n_1 * n_1 - 1));
# 45 "mt.cpp"
hmpprt::s32 i_2;
# 45 "mt.cpp"
# 49 "mt.cpp"
for (i_2 = first_gang_iter_1 + (hmpprt::gr_btidy()) ; i_2 <= last_gang_iter_1 ; i_2 = i_2 + (hmpprt::gr_btnumy()))
{
# 45 "mt.cpp"
hmpprt::s32 i_1;
# 48 "mt.cpp"
hmpprt::s32 j_1;
# 50 "mt.cpp"
j_1 = i_2 % n_1;
# 50 "mt.cpp"
i_1 = i_2 / n_1;
# 50 "mt.cpp"
*(B_1 + (j_1 * n_1 + i_1)) = *(A_1 + (i_1 * n_1 + j_1));
}
# 12 "mt.cpp"
}
}
#endif // __CUDACC__
# 12 "mt.cpp"
#ifndef __CUDACC__
// HMPP-generated host-side launcher: configures a 192x1 grid of 1x256
// thread blocks, binds the kernel arguments by name (pointers are passed
// as 8-byte values), and launches the transpose kernel on the current
// CUDA device. A and B are device pointers in global memory (MS_CUDA_GLOB).
void hmpp_acc_region_main_44_internal_1(hmpprt::s32 n, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,float> A, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,float> B)
{
# 12 "mt.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&A, 8, "A_1");
__hmppcg_call.addLocalParameter(&B, 8, "B_1");
__hmppcg_call.addLocalParameter((hmpprt::s32) (n), "n_1");
__hmppcg_call.launch(hmpp_acc_region_main_44_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __CUDACC__
# 12 "mt.cpp"
#ifndef __CUDACC__
// Exported C entry point for the accelerated region: wraps the raw device
// pointers into typed DevicePtr handles and forwards to the internal
// launcher. Invoked by the HMPP runtime on behalf of the host program.
extern "C" CDLT_API void hmpp_acc_region_main_44(hmpprt::s32 n_2, float* A_2, float* B_2)
{
# 1 "<preprocessor>"
(hmpp_acc_region_main_44_internal_1(n_2, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,float> (A_2), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,float> (B_2)));
}
#endif // __CUDACC__
#ifndef __CUDACC__
extern "C" const char * hmpprt_cuda_get_gpu_code();
static hmpprt::CUDAModule * hmpprt_module = 0;
static int hmpprt_uses = 0;
// Grouplet initialisation hook called by the HMPP runtime. Reference
// counted: the CUDA module and the kernel grid handle are created only on
// the first call. Returns 0 on success, or a heap-allocated clone of a
// caught hmpperr::Error (owned by the runtime); aborts on any other
// exception since the grouplet would be left in an undefined state.
extern "C" CDLT_API void * hmpprt_init()
{
try
{
if (hmpprt_uses++ == 0)
{
hmpprt_module = new hmpprt::CUDAModule(hmpprt_cuda_get_gpu_code());
hmpp_acc_region_main_44_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_44_parallel_region_1");
}
hmpprt::Context::getInstance()->getGrouplet()->setTarget(hmpprt::CUDA);
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_44", "prototype hmpp_acc_region_main_44(n: s32, A: ^cudaglob float, B: ^cudaglob float)");
}
catch (hmpperr::Error & e)
{
return e.clone();
}
catch(...)
{
fprintf(stderr,"Unexpected error in hmpprt_init()\n");
abort();
}
return 0;
}
#endif // __CUDACC__
#ifndef __CUDACC__
// Grouplet finalisation hook: mirror of hmpprt_init(). Destroys the kernel
// grid and CUDA module when the last user releases the grouplet. Returns 0
// on success or a cloned hmpperr::Error; aborts on unknown exceptions.
extern "C" CDLT_API void * hmpprt_fini()
{
try
{
if (--hmpprt_uses == 0)
{
delete hmpp_acc_region_main_44_parallel_region_1;
delete hmpprt_module;
hmpprt_module = 0;
}
}
catch (hmpperr::Error & e)
{
return e.clone();
}
catch(...)
{
fprintf(stderr,"Unexpected error in hmpprt_fini()\n");
abort();
}
return 0;
}
#endif // __CUDACC__
// footer
|
3244e1f61e1e0ad67867fbd248bccee6aab46c90.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "paddle/fluid/operators/sequence_ops/sequence_erase_op.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using phi::PADDLE_CUDA_NUM_THREADS;
// Marks the positions of X whose value matches any erase token.
// One thread per input element: when in_dat[i] equals one of `tokens`,
// num_erased[i + 1] is set to 1 (the array is later prefix-summed into a
// running count of erased elements).
template <typename T>
__global__ void LabelErasedIdx(const T* in_dat,
                               const int64_t in_len,
                               const int* tokens,
                               const size_t tokens_len,
                               size_t* num_erased) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= in_len) {
    return;  // tail threads of the last block have no element
  }
  for (size_t t = 0; t < tokens_len; ++t) {
    if (in_dat[tid] == tokens[t]) {
      num_erased[tid + 1] = 1;
      break;  // one match is enough
    }
  }
}
// Rewrites one LoD level: each input offset is shifted left by the number
// of erased elements that precede it (num_erased is the inclusive prefix
// count produced from LabelErasedIdx's marks).
__global__ void GetOutLod(const size_t* num_erased,
                          const size_t* in_lod,
                          const size_t lod_len,
                          size_t* out_lod0) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < lod_len) {
    const size_t offset = in_lod[tid];
    out_lod0[tid] = offset - num_erased[offset];
  }
}
// Compacts the input: every surviving element is copied to its final slot,
// shifted left by the count of erased elements before it.
template <typename T>
__global__ void SetOutput(const T* in_dat,
                          const int64_t in_len,
                          const size_t* num_erased,
                          T* out_dat) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= in_len) {
    return;
  }
  const size_t shift = num_erased[tid];
  // Equal prefix counts at tid and tid+1 mean element tid was not marked.
  if (shift == num_erased[tid + 1]) {
    out_dat[tid - shift] = in_dat[tid];
  }
}
// GPU implementation of the sequence_erase op: removes every element of X
// whose value appears in the `tokens` attribute, and rewrites the last
// LoD level to match.
//
// Pipeline (launches ordered on the op's stream):
//   1. LabelErasedIdx marks matching positions into num_erased[i + 1].
//   2. thrust::inclusive_scan turns the marks into a prefix count
//      (num_erased[i] = elements erased before position i).
//   3. GetOutLod shifts each LoD offset left by that count.
//   4. SetOutput compacts the surviving elements into Out.
template <typename T>
class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<phi::DenseTensor>("X");
auto* out = ctx.Output<phi::DenseTensor>("Out");
auto lod = in->lod();
// The last (finest) LoD level must cover the whole tensor.
PADDLE_ENFORCE_EQ(
lod[lod.size() - 1].back(),
(size_t)in->numel(),
platform::errors::InvalidArgument(
"The actual size mismatches with the LoD information."));
auto tokens = ctx.Attr<std::vector<int>>("tokens");
auto in_len = in->numel();
auto in_dat = in->data<T>();
// Copy tokens to GPU
thrust::device_vector<int> dev_tokens(tokens.begin(), tokens.end());
int* dev_tokens_ptr = thrust::raw_pointer_cast(dev_tokens.data());
// Count number of elements to be erased; one extra leading slot so the
// inclusive scan over [1, in_len] yields prefix counts.
thrust::device_vector<size_t> num_erased(in_len + 1, 0);
size_t* num_erased_ptr = thrust::raw_pointer_cast(num_erased.data());
auto stream = ctx.cuda_device_context().stream();
hipLaunchKernelGGL(( LabelErasedIdx), dim3((in_len - 1) / PADDLE_CUDA_NUM_THREADS + 1),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
stream,
in_dat, in_len, dev_tokens_ptr, tokens.size(), num_erased_ptr);
thrust::inclusive_scan(
num_erased.begin() + 1, num_erased.end(), num_erased.begin() + 1);
// Copy LoD to GPU
auto last_lod = lod[lod.size() - 1];
auto lod_len = last_lod.size();
phi::MixVector<size_t> mixv_last_lod(&last_lod);
const size_t* dev_in_lod_ptr = mixv_last_lod.CUDAData(ctx.GetPlace());
// Calc output LoD
thrust::device_vector<size_t> dev_out_lod(lod_len);
size_t* dev_out_lod_ptr = thrust::raw_pointer_cast(dev_out_lod.data());
hipLaunchKernelGGL(( GetOutLod), dim3((lod_len - 1) / PADDLE_CUDA_NUM_THREADS + 1),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
stream,
num_erased_ptr, dev_in_lod_ptr, lod_len, dev_out_lod_ptr);
// Set LoD for output: keep the coarser levels, replace only the last one.
// NOTE(review): this device-to-host thrust copy is assumed to synchronise
// with the preceding work on `stream` — confirm against thrust's stream
// semantics in this Paddle build.
std::vector<size_t> out_last_lod(dev_out_lod.begin(), dev_out_lod.end());
framework::LoD out_lod;
for (size_t i = 0; i < lod.size() - 1; ++i) {
out_lod.push_back(lod[i]);
}
out_lod.push_back(out_last_lod);
out->set_lod(out_lod);
// Set output: the compacted tensor has out_last_lod.back() rows.
out->Resize({static_cast<int64_t>(out_last_lod.back()), 1});
auto out_dat = out->mutable_data<T>(ctx.GetPlace());
hipLaunchKernelGGL(( SetOutput), dim3((in_len - 1) / PADDLE_CUDA_NUM_THREADS + 1),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
stream, in_dat, in_len, num_erased_ptr, out_dat);
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(sequence_erase,
paddle::operators::SequenceEraseOpCUDAKernel<int32_t>,
paddle::operators::SequenceEraseOpCUDAKernel<int64_t>);
| 3244e1f61e1e0ad67867fbd248bccee6aab46c90.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "paddle/fluid/operators/sequence_ops/sequence_erase_op.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using phi::PADDLE_CUDA_NUM_THREADS;
// Marks the positions of X whose value matches any erase token.
// One thread per input element: when in_dat[i] equals one of `tokens`,
// num_erased[i + 1] is set to 1 (the array is later prefix-summed into a
// running count of erased elements).
template <typename T>
__global__ void LabelErasedIdx(const T* in_dat,
                               const int64_t in_len,
                               const int* tokens,
                               const size_t tokens_len,
                               size_t* num_erased) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= in_len) {
    return;  // tail threads of the last block have no element
  }
  for (size_t t = 0; t < tokens_len; ++t) {
    if (in_dat[tid] == tokens[t]) {
      num_erased[tid + 1] = 1;
      break;  // one match is enough
    }
  }
}
// Rewrites one LoD level: each input offset is shifted left by the number
// of erased elements that precede it (num_erased is the inclusive prefix
// count produced from LabelErasedIdx's marks).
__global__ void GetOutLod(const size_t* num_erased,
                          const size_t* in_lod,
                          const size_t lod_len,
                          size_t* out_lod0) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < lod_len) {
    const size_t offset = in_lod[tid];
    out_lod0[tid] = offset - num_erased[offset];
  }
}
// Compacts the input: every surviving element is copied to its final slot,
// shifted left by the count of erased elements before it.
template <typename T>
__global__ void SetOutput(const T* in_dat,
                          const int64_t in_len,
                          const size_t* num_erased,
                          T* out_dat) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= in_len) {
    return;
  }
  const size_t shift = num_erased[tid];
  // Equal prefix counts at tid and tid+1 mean element tid was not marked.
  if (shift == num_erased[tid + 1]) {
    out_dat[tid - shift] = in_dat[tid];
  }
}
// GPU implementation of the sequence_erase op: removes every element of X
// whose value appears in the `tokens` attribute, and rewrites the last
// LoD level to match.
//
// Pipeline (launches ordered on the op's stream):
//   1. LabelErasedIdx marks matching positions into num_erased[i + 1].
//   2. thrust::inclusive_scan turns the marks into a prefix count
//      (num_erased[i] = elements erased before position i).
//   3. GetOutLod shifts each LoD offset left by that count.
//   4. SetOutput compacts the surviving elements into Out.
template <typename T>
class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<phi::DenseTensor>("X");
    auto* out = ctx.Output<phi::DenseTensor>("Out");
    auto lod = in->lod();
    // The last (finest) LoD level must cover the whole tensor.
    PADDLE_ENFORCE_EQ(
        lod[lod.size() - 1].back(),
        (size_t)in->numel(),
        platform::errors::InvalidArgument(
            "The actual size mismatches with the LoD information."));
    auto tokens = ctx.Attr<std::vector<int>>("tokens");
    auto in_len = in->numel();
    auto in_dat = in->data<T>();
    // Copy tokens to GPU
    thrust::device_vector<int> dev_tokens(tokens.begin(), tokens.end());
    int* dev_tokens_ptr = thrust::raw_pointer_cast(dev_tokens.data());
    // Count number of elements to be erased; one extra leading slot so the
    // inclusive scan over [1, in_len] yields prefix counts.
    thrust::device_vector<size_t> num_erased(in_len + 1, 0);
    size_t* num_erased_ptr = thrust::raw_pointer_cast(num_erased.data());
    auto stream = ctx.cuda_device_context().stream();
    LabelErasedIdx<<<(in_len - 1) / PADDLE_CUDA_NUM_THREADS + 1,
                     PADDLE_CUDA_NUM_THREADS,
                     0,
                     stream>>>(
        in_dat, in_len, dev_tokens_ptr, tokens.size(), num_erased_ptr);
    thrust::inclusive_scan(
        num_erased.begin() + 1, num_erased.end(), num_erased.begin() + 1);
    // Copy LoD to GPU
    auto last_lod = lod[lod.size() - 1];
    auto lod_len = last_lod.size();
    phi::MixVector<size_t> mixv_last_lod(&last_lod);
    const size_t* dev_in_lod_ptr = mixv_last_lod.CUDAData(ctx.GetPlace());
    // Calc output LoD
    thrust::device_vector<size_t> dev_out_lod(lod_len);
    size_t* dev_out_lod_ptr = thrust::raw_pointer_cast(dev_out_lod.data());
    GetOutLod<<<(lod_len - 1) / PADDLE_CUDA_NUM_THREADS + 1,
                PADDLE_CUDA_NUM_THREADS,
                0,
                stream>>>(
        num_erased_ptr, dev_in_lod_ptr, lod_len, dev_out_lod_ptr);
    // Set LoD for output: keep the coarser levels, replace only the last one.
    // NOTE(review): this device-to-host thrust copy is assumed to synchronise
    // with the preceding work on `stream` — confirm against thrust's stream
    // semantics in this Paddle build.
    std::vector<size_t> out_last_lod(dev_out_lod.begin(), dev_out_lod.end());
    framework::LoD out_lod;
    for (size_t i = 0; i < lod.size() - 1; ++i) {
      out_lod.push_back(lod[i]);
    }
    out_lod.push_back(out_last_lod);
    out->set_lod(out_lod);
    // Set output: the compacted tensor has out_last_lod.back() rows.
    out->Resize({static_cast<int64_t>(out_last_lod.back()), 1});
    auto out_dat = out->mutable_data<T>(ctx.GetPlace());
    SetOutput<<<(in_len - 1) / PADDLE_CUDA_NUM_THREADS + 1,
                PADDLE_CUDA_NUM_THREADS,
                0,
                stream>>>(in_dat, in_len, num_erased_ptr, out_dat);
  }
};
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(sequence_erase,
paddle::operators::SequenceEraseOpCUDAKernel<int32_t>,
paddle::operators::SequenceEraseOpCUDAKernel<int64_t>);
|
c60d60bbd543dc7209c65585f765c7e877d8108c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mpi.h"
// #include <bits/stdc++.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
using namespace std;
#define left 0
#define right 1
#define front 2
#define back 3
#define down 4
#define up 5
#define on_x 0
#define on_y 1
#define on_z 2
const int NDIM = 3;
const int NDIM_2 = 6;
int id, ib, jb, kb;
// int dimensions[NDIM];
int npx, npy, npz;
int blocks[NDIM];
double l[NDIM];
double u[NDIM_2];
string filename;
double eps, u0;
double hx, hy, hz;
__constant__ int g_dimensions[3];
//
#define _i(i, j, k) (((k) + 1) * (npy + 2) * (npx + 2) + ((j) + 1) * (npx + 2) + (i) + 1)
#define _iz(id) (((id) / (npx + 2) / (npy + 2)) - 1)
#define _iy(id) ((((id) % ((npx + 2) * (npy + 2))) / (npx + 2)) - 1)
#define _ix(id) ((id) % (npx + 2) - 1)
// ()
#define _ib(i, j, k) ((k) * blocks[on_y] * blocks[on_x] + (j) * blocks[on_x] + (i))
#define _ibz(id) ((id) / blocks[on_x] / blocks[on_y])
#define _iby(id) (((id) % (blocks[on_x] * blocks[on_y])) / blocks[on_x])
#define _ibx(id) ((id) % blocks[on_x])
// Checks a HIP API result; on failure logs file/line/message to stderr and
// makes the enclosing int-returning function (main) return 1.
// Fixes over the previous revision: the argument is evaluated exactly once
// (previously hipGetErrorString(err) re-ran the wrapped API call on the
// failure path), the body is do/while(0)-wrapped so CUDA_ERROR(x); behaves
// as a single statement in unbraced if/else, and the dangling trailing
// backslash after the closing brace is gone.
#define CUDA_ERROR(err) do { \
    hipError_t cuda_error_err_ = (err); \
    if (cuda_error_err_ != hipSuccess) { \
        fprintf(stderr, "ERROR: CUDA failed in %s:%d: %s\n", __FILE__, __LINE__, hipGetErrorString(cuda_error_err_)); \
        return(1); \
    } \
} while (0)
// Flattened index into the locally padded grid of extent
// (g_dimensions[0]+2) x (g_dimensions[1]+2) x (g_dimensions[2]+2):
// i/j/k address interior points, while -1 and g_dimensions[*] address the
// ghost (halo) layer. g_dimensions lives in __constant__ device memory.
__device__ int _ind(int i, int j, int k) {
    const int sx = g_dimensions[0] + 2;  // padded extent along x
    const int sy = g_dimensions[1] + 2;  // padded extent along y
    return (k + 1) * sy * sx + (j + 1) * sx + (i + 1);
}
// Packs (flag == true) or unpacks (flag == false) the z == k plane of the
// padded local grid into/from the contiguous nx*ny buffer edge_xy
// (layout [j][i], row length nx). When unpacking with edge_xy == NULL the
// plane is filled with the constant boundary value u instead (physical
// boundary with no neighbouring block). Uses 2D grid-stride loops.
__global__ void kernel_copy_edge_xy(double* edge_xy, double* data, int nx, int ny, int nz, int k, bool flag, double u) {
    const int startX = blockIdx.x * blockDim.x + threadIdx.x;
    const int startY = blockIdx.y * blockDim.y + threadIdx.y;
    const int stepX = blockDim.x * gridDim.x;
    const int stepY = blockDim.y * gridDim.y;
    if (flag) {
        for (int i = startX; i < nx; i += stepX)
            for (int j = startY; j < ny; j += stepY)
                edge_xy[j * nx + i] = data[_ind(i, j, k)];
    } else if (edge_xy) {
        for (int i = startX; i < nx; i += stepX)
            for (int j = startY; j < ny; j += stepY)
                data[_ind(i, j, k)] = edge_xy[j * nx + i];
    } else {
        for (int i = startX; i < nx; i += stepX)
            for (int j = startY; j < ny; j += stepY)
                data[_ind(i, j, k)] = u;
    }
}
// Packs (flag == true) or unpacks (flag == false) the y == j plane of the
// padded local grid into/from the contiguous nx*nz buffer edge_xz
// (layout [k][i], row length nx). NULL edge_xz on unpack means a physical
// boundary: the plane is filled with the constant value u.
__global__ void kernel_copy_edge_xz(double* edge_xz, double* data, int nx, int ny, int nz, int j, bool flag, double u) {
    const int startX = blockIdx.x * blockDim.x + threadIdx.x;
    const int startY = blockIdx.y * blockDim.y + threadIdx.y;
    const int stepX = blockDim.x * gridDim.x;
    const int stepY = blockDim.y * gridDim.y;
    if (flag) {
        for (int i = startX; i < nx; i += stepX)
            for (int k = startY; k < nz; k += stepY)
                edge_xz[k * nx + i] = data[_ind(i, j, k)];
    } else if (edge_xz) {
        for (int i = startX; i < nx; i += stepX)
            for (int k = startY; k < nz; k += stepY)
                data[_ind(i, j, k)] = edge_xz[k * nx + i];
    } else {
        for (int i = startX; i < nx; i += stepX)
            for (int k = startY; k < nz; k += stepY)
                data[_ind(i, j, k)] = u;
    }
}
// Packs (flag == true) or unpacks (flag == false) the x == i plane of the
// padded local grid into/from the contiguous ny*nz buffer edge_yz
// (layout [k][j], row length ny). NULL edge_yz on unpack means a physical
// boundary: the plane is filled with the constant value u.
// Note the thread mapping: the grid's x dimension strides over z, its y
// dimension strides over y (kept identical to the original).
__global__ void kernel_copy_edge_yz(double* edge_yz, double* data, int nx, int ny, int nz, int i, bool flag, double u) {
    const int startX = blockIdx.x * blockDim.x + threadIdx.x;
    const int startY = blockIdx.y * blockDim.y + threadIdx.y;
    const int stepX = blockDim.x * gridDim.x;
    const int stepY = blockDim.y * gridDim.y;
    if (flag) {
        for (int k = startX; k < nz; k += stepX)
            for (int j = startY; j < ny; j += stepY)
                edge_yz[k * ny + j] = data[_ind(i, j, k)];
    } else if (edge_yz) {
        for (int k = startX; k < nz; k += stepX)
            for (int j = startY; j < ny; j += stepY)
                data[_ind(i, j, k)] = edge_yz[k * ny + j];
    } else {
        for (int k = startX; k < nz; k += stepX)
            for (int j = startY; j < ny; j += stepY)
                data[_ind(i, j, k)] = u;
    }
}
// One Jacobi relaxation sweep over the nx*ny*nz interior of the padded
// local grid: each point of `next` becomes the weighted average of its six
// neighbours in `data`. hx/hy/hz are per-axis weights and divisor their
// combined normaliser (the host precomputes 1/h^2 factors and
// divisor = 2*(h2x+h2y+h2z); the actual call site lies outside this
// excerpt — confirm the arguments there). `data` is left untouched so the
// convergence check can diff the two buffers. 3D grid-stride loops.
__global__ void kernel_computation(double* next, double* data, int nx, int ny, int nz, double hx, double hy, double hz, double divisor) {
    const int x0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int y0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int z0 = blockIdx.z * blockDim.z + threadIdx.z;
    const int dx = blockDim.x * gridDim.x;
    const int dy = blockDim.y * gridDim.y;
    const int dz = blockDim.z * gridDim.z;
    for (int i = x0; i < nx; i += dx) {
        for (int j = y0; j < ny; j += dy) {
            for (int k = z0; k < nz; k += dz) {
                // Same operation order as the original expression so the
                // floating-point result is bit-identical.
                const double sx = (data[_ind(i - 1, j, k)] + data[_ind(i + 1, j, k)]) * hx;
                const double sy = (data[_ind(i, j - 1, k)] + data[_ind(i, j + 1, k)]) * hy;
                const double sz = (data[_ind(i, j, k - 1)] + data[_ind(i, j, k + 1)]) * hz;
                next[_ind(i, j, k)] = (sx + sy + sz) / divisor;
            }
        }
    }
}
// Writes the per-point absolute difference |next - data| into `diff` for
// the convergence test (presumably max-reduced elsewhere via thrust, which
// is included above — the reduction is outside this excerpt).
// The loops deliberately cover the ghost layer too ([-1, n] on each axis);
// the boolean factor zeroes the entries of ghost cells, so the whole
// padded buffer can later be reduced without masking out the halo.
__global__ void kernel_error(double* next, double* data, double* diff, int nx, int ny, int nz) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int idz = blockIdx.z * blockDim.z + threadIdx.z;
int offsetX = blockDim.x * gridDim.x;
int offsetY = blockDim.y * gridDim.y;
int offsetZ = blockDim.z * gridDim.z;
int i, j, k;
for (i = idx - 1; i < nx + 1; i += offsetX) {
for (j = idy - 1; j < ny + 1; j += offsetY) {
for (k = idz - 1; k < nz + 1; k += offsetZ) {
// Factor is 0 on ghost cells, 1 on interior points.
diff[_ind(i, j, k)] = (i != -1 && j != -1 && k != -1 && i != nx && j != ny && k != nz) * abs(next[_ind(i, j, k)] - data[_ind(i, j, k)]);
}
}
}
}
int main(int argc, char *argv[]) {
ios_base::sync_with_stdio(false);
cin.tie(NULL);
cout.tie(NULL);
cout << fixed;
cout.precision(7);
// Input
int i, j, k;
double *data, *temp, *next;
double *edge_xy, *edge_xz, *edge_yz;
char proc_name[MPI_MAX_PROCESSOR_NAME];
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
int numproc, proc_name_len;
// MPI initialisation
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numproc);
MPI_Comm_rank(MPI_COMM_WORLD, &id);
MPI_Get_processor_name(proc_name, &proc_name_len);
fprintf(stderr, "proc %2d(%d) on %s(%s)\n", id, numproc, proc_name, devProp.name);
fflush(stderr);
// int device_cnt;
// hipGetDeviceCount(&device_cnt);
// hipSetDevice(id % device_cnt);
if (id == 0) {
cin >> blocks[on_x] >> blocks[on_y] >> blocks[on_z];
cin >> npx >> npy >> npz;
cin >> filename;
cin >> eps;
cin >> l[on_x] >> l[on_y] >> l[on_z];
cin >> u[down] >> u[up];
cin >> u[left] >> u[right];
cin >> u[front] >> u[back];
cin >> u0;
}
//
// MPI_Bcast(dimensions, NDIM, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&npx, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&npy, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&npz, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(blocks, NDIM, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&eps, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(l, NDIM, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(u, NDIM_2, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&u0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// filename sending:
int filename_size = filename.size();
MPI_Bcast(&filename_size, 1, MPI_INT, 0, MPI_COMM_WORLD);
filename.resize(filename_size);
MPI_Bcast((char*) filename.c_str(), filename_size, MPI_CHAR, 0, MPI_COMM_WORLD);
if (blocks[on_x] * blocks[on_y] * blocks[on_z] * npx * npy * npz == 0) {
fprintf(stderr, "Error at proc %d on %s\n", id, proc_name);
if (blocks[on_x] * blocks[on_y] * blocks[on_z] != numproc) {
fprintf(stderr, "Dead because of blocks\n");
fprintf(stderr, "blocks[on_x]=%d, blocks[on_y]=%d, blocks[on_z]=%d, numproc=%d\n", blocks[on_x], blocks[on_y], blocks[on_z], numproc);
}
fflush(stderr);
MPI_Finalize();
return 0;
}
hx = l[on_x] / (double)(npx * blocks[on_x]);
hy = l[on_y] / (double)(npy * blocks[on_y]);
hz = l[on_z] / (double)(npz * blocks[on_z]);
// We need hx^2 hy^2 hz^2
double h2x = hx, h2y = hy, h2z = hz;
h2x *= hx;
h2y *= hy;
h2z *= hz;
// To a negative degree
h2x = 1.0 / h2x;
h2y = 1.0 / h2y;
h2z = 1.0 / h2z;
// Divisor as well
double divisor = 2 * (h2x + h2y + h2z);
// fprintf(stderr, "h2x=%f\n", h2x);
// fprintf(stderr, "h2y=%f\n", h2y);
// fprintf(stderr, "h2z=%f\n", h2z);
// fprintf(stderr, "divisor=%f\n", divisor);
// initiale bloks ids 3D
ib = _ibx(id);
jb = _iby(id);
kb = _ibz(id);
double* gpu_data, *gpu_next, *gpu_edge_xy, *gpu_edge_xz, *gpu_edge_yz;
CUDA_ERROR(hipMalloc(&gpu_data, sizeof(double) * (npx + 2) * (npy + 2) * (npz + 2)));
CUDA_ERROR(hipMalloc(&gpu_next, sizeof(double) * (npx + 2) * (npy + 2) * (npz + 2)));
CUDA_ERROR(hipMalloc(&gpu_edge_xy, sizeof(double) * npx * npy));
CUDA_ERROR(hipMalloc(&gpu_edge_xz, sizeof(double) * npx * npz));
CUDA_ERROR(hipMalloc(&gpu_edge_yz, sizeof(double) * npy * npz));
// Buffer initialisation
data = (double *)malloc(sizeof(double) * (npx + 2) * \
(npy + 2) * (npz + 2));
next = (double *)malloc(sizeof(double) * (npx + 2) * \
(npy + 2) * (npz + 2));
edge_xy = (double *)malloc(sizeof(double) * npx * npy);
edge_xz = (double *)malloc(sizeof(double) * npx * npz);
edge_yz = (double *)malloc(sizeof(double) * npy * npz);
CUDA_ERROR(hipMemcpy(gpu_edge_xy, edge_xy, sizeof(double) * npx * npy, hipMemcpyHostToDevice));
CUDA_ERROR(hipMemcpy(gpu_edge_xz, edge_xz, sizeof(double) * npx * npz, hipMemcpyHostToDevice));
CUDA_ERROR(hipMemcpy(gpu_edge_yz, edge_yz, sizeof(double) * npy * npz, hipMemcpyHostToDevice));
for (i = 0; i < npx; ++i) {
for (j = 0; j < npy; ++j) {
for (k = 0; k < npz; ++k) {
data[_i(i, j, k)] = u0;
// fprintf(stderr, "%e ", data[_i(i, j, k)]);
}
// fprintf(stderr, "\n");
}
}
// fflush(stderr);
CUDA_ERROR(hipMemcpy(gpu_data, data, sizeof(double) * (npx + 2) * (npy + 2) * (npz + 2), hipMemcpyHostToDevice));
CUDA_ERROR(hipMemcpy(gpu_next, data, sizeof(double) * (npx + 2) * (npy + 2) * (npz + 2), hipMemcpyHostToDevice));
dim3 gblocks(32, 32);
dim3 threads(32, 32);
int dimensions[3];
dimensions[0] = npx;
dimensions[1] = npy;
dimensions[2] = npz;
CUDA_ERROR(hipMemcpyToSymbol(g_dimensions, dimensions, 3 * sizeof(int)));
double difference = 0.0;
do {
if (ib + 1 < blocks[on_x]) {
hipLaunchKernelGGL(( kernel_copy_edge_yz), dim3(gblocks), dim3(threads), 0, 0, gpu_edge_yz, gpu_data, npx, npy, npz, npx - 1, true, u0);
CUDA_ERROR(hipGetLastError());
CUDA_ERROR(hipMemcpy(edge_yz, gpu_edge_yz, sizeof(double) * npy * npz, hipMemcpyDeviceToHost));
MPI_Send(edge_yz, npy * npz, MPI_DOUBLE, _ib(ib + 1, jb, kb), 0, MPI_COMM_WORLD);
}
// Back
if (jb + 1 < blocks[on_y]) {
hipLaunchKernelGGL(( kernel_copy_edge_xz), dim3(gblocks), dim3(threads), 0, 0, gpu_edge_xz, gpu_data, npx, npy, npz, npy - 1, true, u0);
CUDA_ERROR(hipGetLastError());
CUDA_ERROR(hipMemcpy(edge_xz, gpu_edge_xz, sizeof(double) * npx * npz, hipMemcpyDeviceToHost));
MPI_Send(edge_xz, npx * npz, MPI_DOUBLE, _ib(ib, jb + 1, kb), 0, MPI_COMM_WORLD);
}
// Up
if (kb + 1 < blocks[on_z]) {
hipLaunchKernelGGL(( kernel_copy_edge_xy), dim3(gblocks), dim3(threads), 0, 0, gpu_edge_xy, gpu_data, npx, npy, npz, npz - 1, true, u0);
CUDA_ERROR(hipGetLastError());
CUDA_ERROR(hipMemcpy(edge_xy, gpu_edge_xy, sizeof(double) * npx * npy, hipMemcpyDeviceToHost));
MPI_Send(edge_xy, npx * npy, MPI_DOUBLE, _ib(ib, jb, kb + 1), 0, MPI_COMM_WORLD);
}
// Data recieve
if (ib > 0) {
MPI_Recv(edge_yz, npy * npz, MPI_DOUBLE, _ib(ib - 1, jb, kb), 0, MPI_COMM_WORLD, &status);
CUDA_ERROR(hipMemcpy(gpu_edge_yz, edge_yz, sizeof(double) * npy * npz, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_copy_edge_yz), dim3(gblocks), dim3(threads), 0, 0, gpu_edge_yz, gpu_data, npx, npy, npz, - 1, false, u0);
} else {
hipLaunchKernelGGL(( kernel_copy_edge_yz), dim3(gblocks), dim3(threads), 0, 0, NULL, gpu_data, npx, npy, npz, - 1, false, u[left]);
}
CUDA_ERROR(hipGetLastError());
if (jb > 0) {
MPI_Recv(edge_xz, npx * npz, MPI_DOUBLE, _ib(ib, jb - 1, kb), 0, MPI_COMM_WORLD, &status);
CUDA_ERROR(hipMemcpy(gpu_edge_xz, edge_xz, sizeof(double) * npx * npz, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_copy_edge_xz), dim3(gblocks), dim3(threads), 0, 0, gpu_edge_xz, gpu_data, npx, npy, npz, - 1, false, u0);
} else {
hipLaunchKernelGGL(( kernel_copy_edge_xz), dim3(gblocks), dim3(threads), 0, 0, NULL, gpu_data, npx, npy, npz, - 1, false, u[front]);
}
CUDA_ERROR(hipGetLastError());
if (kb > 0) {
MPI_Recv(edge_xy, npx * npy, MPI_DOUBLE, _ib(ib, jb, kb - 1), 0, MPI_COMM_WORLD, &status);
CUDA_ERROR(hipMemcpy(gpu_edge_xy, edge_xy, sizeof(double) * npx * npy, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_copy_edge_xy), dim3(gblocks), dim3(threads), 0, 0, gpu_edge_xy, gpu_data, npx, npy, npz, - 1, false, u0);
} else {
hipLaunchKernelGGL(( kernel_copy_edge_xy), dim3(gblocks), dim3(threads), 0, 0, NULL, gpu_data, npx, npy, npz, - 1, false, u[down]);
}
CUDA_ERROR(hipGetLastError());
// Left
if (ib > 0) {
hipLaunchKernelGGL(( kernel_copy_edge_yz), dim3(gblocks), dim3(threads), 0, 0, gpu_edge_yz, gpu_data, npx, npy, npz, 0, true, u0);
CUDA_ERROR(hipGetLastError());
CUDA_ERROR(hipMemcpy(edge_yz, gpu_edge_yz, sizeof(double) * npy * npz, hipMemcpyDeviceToHost));
MPI_Send(edge_yz, npy * npz, MPI_DOUBLE, _ib(ib - 1, jb, kb), 0, MPI_COMM_WORLD);
}
// Front
if (jb > 0) {
hipLaunchKernelGGL(( kernel_copy_edge_xz), dim3(gblocks), dim3(threads), 0, 0, gpu_edge_xz, gpu_data, npx, npy, npz, 0, true, u0);
CUDA_ERROR(hipGetLastError());
CUDA_ERROR(hipMemcpy(edge_xz, gpu_edge_xz, sizeof(double) * npx * npz, hipMemcpyDeviceToHost));
MPI_Send(edge_xz, npx * npz, MPI_DOUBLE, _ib(ib, jb - 1, kb), 0, MPI_COMM_WORLD);
}
// Down
if (kb > 0) {
hipLaunchKernelGGL(( kernel_copy_edge_xy), dim3(gblocks), dim3(threads), 0, 0, gpu_edge_xy, gpu_data, npx, npy, npz, 0, true, u0);
CUDA_ERROR(hipGetLastError());
CUDA_ERROR(hipMemcpy(edge_xy, gpu_edge_xy, sizeof(double) * npx * npy, hipMemcpyDeviceToHost));
MPI_Send(edge_xy, npx * npy, MPI_DOUBLE, _ib(ib, jb, kb - 1), 0, MPI_COMM_WORLD);
}
// Data recieve
if (ib + 1 < blocks[on_x]) {
MPI_Recv(edge_yz, npy * npz, MPI_DOUBLE, _ib(ib + 1, jb, kb), 0, MPI_COMM_WORLD, &status);
CUDA_ERROR(hipMemcpy(gpu_edge_yz, edge_yz, sizeof(double) * npy * npz, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_copy_edge_yz), dim3(gblocks), dim3(threads), 0, 0, gpu_edge_yz, gpu_data, npx, npy, npz, npx, false, u0);
} else {
hipLaunchKernelGGL(( kernel_copy_edge_yz), dim3(gblocks), dim3(threads), 0, 0, NULL, gpu_data, npx, npy, npz, npx, false, u[right]);
}
CUDA_ERROR(hipGetLastError());
if (jb + 1 < blocks[on_y]) {
MPI_Recv(edge_xz, npx * npz, MPI_DOUBLE, _ib(ib, jb + 1, kb), 0, MPI_COMM_WORLD, &status);
CUDA_ERROR(hipMemcpy(gpu_edge_xz, edge_xz, sizeof(double) * npx * npz, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_copy_edge_xz), dim3(gblocks), dim3(threads), 0, 0, gpu_edge_xz, gpu_data, npx, npy, npz, npy, false, u0);
} else {
hipLaunchKernelGGL(( kernel_copy_edge_xz), dim3(gblocks), dim3(threads), 0, 0, NULL, gpu_data, npx, npy, npz, npy, false, u[back]);
}
CUDA_ERROR(hipGetLastError());
if (kb + 1 < blocks[on_z]) {
MPI_Recv(edge_xy, npx * npy, MPI_DOUBLE, _ib(ib, jb, kb + 1), 0, MPI_COMM_WORLD, &status);
CUDA_ERROR(hipMemcpy(gpu_edge_xy, edge_xy, sizeof(double) * npx * npy, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_copy_edge_xy), dim3(gblocks), dim3(threads), 0, 0, gpu_edge_xy, gpu_data, npx, npy, npz, npz, false, u0);
} else {
hipLaunchKernelGGL(( kernel_copy_edge_xy), dim3(gblocks), dim3(threads), 0, 0, NULL, gpu_data, npx, npy, npz, npz, false, u[up]);
}
CUDA_ERROR(hipGetLastError());
hipDeviceSynchronize();
// Recomputation
hipLaunchKernelGGL(( kernel_computation), dim3(dim3(8, 8, 8)), dim3(dim3(32, 4, 4)), 0, 0, gpu_next, gpu_data, npx, npy, npz, h2x, h2y, h2z, divisor);
CUDA_ERROR(hipGetLastError());
hipDeviceSynchronize();
// Error
double* gpu_difference;
CUDA_ERROR(hipMalloc((void**)&gpu_difference, sizeof(double) * (npx + 2) * (npy + 2) * (npz + 2)));
hipLaunchKernelGGL(( kernel_error), dim3(dim3(8, 8, 8)), dim3(dim3(32, 4, 4)), 0, 0, gpu_next, gpu_data, gpu_difference, npx, npy, npz);
CUDA_ERROR(hipGetLastError());
// fprintf(stderr, "Done gpu\n");
// fflush(stderr);
// Cast to thrust
thrust::device_ptr< double > pointers = thrust::device_pointer_cast(gpu_difference);
// Pointer of error
thrust::device_ptr< double > res = thrust::max_element(pointers, pointers + (npx + 2) * (npy + 2) * (npz + 2));
difference = 0.0;
double gpu_diff = 0.0;
// Get data from pointer
gpu_diff = *res;
temp = gpu_data;
gpu_data = gpu_next;
gpu_next = temp;
MPI_Allreduce(&gpu_diff, &difference, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
// fprintf(stderr, "difference=%f\n", difference);
// fflush(stderr);
CUDA_ERROR(hipFree(gpu_difference));
} while (difference > eps);
fprintf(stderr, "Done computation\n");
fflush(stderr);
CUDA_ERROR(hipMemcpy(data, gpu_data, sizeof(double) * (npx + 2) * (npy + 2) * (npz + 2), hipMemcpyDeviceToHost));
CUDA_ERROR(hipFree(gpu_data));
CUDA_ERROR(hipFree(gpu_next));
CUDA_ERROR(hipFree(gpu_edge_xy));
CUDA_ERROR(hipFree(gpu_edge_xz));
CUDA_ERROR(hipFree(gpu_edge_yz));
// for (i = 0; i < dimensions[on_x]; ++i) {
// for (j = 0; j < dimensions[on_y]; ++j) {
// for (k = 0; k < dimensions[on_z]; ++k) {
// // data[_i(i, j, k)] = u0;
// fprintf(stderr, "%e ", data[_i(i, j, k)]);
// }
// fprintf(stderr, "\n");
// }
// }
// fprintf(stderr, "\n");
// fflush(stderr);
int buff_size = (npx + 2) * (npy + 2) * (npz + 2);
int new_symbol_size = 14;
// Allocate mem
char* buff = new char[buff_size * new_symbol_size];
memset(buff, (char)' ', buff_size * new_symbol_size * sizeof(char));
for (k = 0; k < dimensions[on_z]; ++k) {
for (j = 0; j < dimensions[on_y]; ++j) {
int len_new_symbol;
for (i = 0; i < dimensions[on_x] - 1; ++i) {
len_new_symbol = sprintf(&buff[_i(i, j, k) * new_symbol_size], "%.6e", data[_i(i, j, k)]);
// '\0' to ' ' (coz of new len_new_symbol)
if (len_new_symbol < new_symbol_size) {
buff[_i(i, j, k) * new_symbol_size + len_new_symbol] = ' ';
}
}
len_new_symbol = sprintf(&buff[_i(i, j, k) * new_symbol_size], "%.6e\n", data[_i(i, j, k)]);
if(len_new_symbol < new_symbol_size){
buff[_i(i, j, k) * new_symbol_size + len_new_symbol] = ' ';
}
}
}
/*
for(i = 0; i < buff_size * new_symbol_size; ++i) {
if (buff[i] == '\0') {
buff[i] = ' ';
}
fprintf(stderr, "% ", buff[i]);
}
*/
fprintf(stderr, "Done writting\n");
fflush(stderr);
MPI_Datatype new_representation;
MPI_Datatype memtype;
MPI_Datatype filetype;
int sizes[NDIM], starts[NDIM], f_sizes[NDIM], f_starts[NDIM];
MPI_Type_contiguous(new_symbol_size, MPI_CHAR, &new_representation);
MPI_Type_commit(&new_representation);
// Sizes for memtype
sizes[on_x] = npx + 2;
sizes[on_y] = npy + 2;
sizes[on_z] = npz + 2;
starts[on_x] = starts[on_y] = starts[on_z] = 1;
// Sizes for filetype
f_sizes[on_x] = dimensions[on_x] * blocks[on_x];
f_sizes[on_y] = dimensions[on_y] * blocks[on_y];
f_sizes[on_z] = dimensions[on_z] * blocks[on_z];
f_starts[on_x] = dimensions[on_x] * ib;
f_starts[on_y] = dimensions[on_y] * jb;
f_starts[on_z] = dimensions[on_z] * kb;
// Writting types
// Memtype
MPI_Type_create_subarray(3, sizes, dimensions, starts, MPI_ORDER_FORTRAN, new_representation, &memtype);
MPI_Type_commit(&memtype);
// Filetype
MPI_Type_create_subarray(3, f_sizes, dimensions, f_starts, MPI_ORDER_FORTRAN, new_representation, &filetype);
MPI_Type_commit(&filetype);
fprintf(stderr, "Done creating\n");
fflush(stderr);
// Create and open file
MPI_File fp;
MPI_File_delete(filename.c_str(), MPI_INFO_NULL);
MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fp);
MPI_File_set_view(fp, 0, MPI_CHAR, filetype, "native", MPI_INFO_NULL);
MPI_File_write_all(fp, buff, 1, memtype, MPI_STATUS_IGNORE);
MPI_File_close(&fp);
fprintf(stderr, "Done writting in file\n");
fflush(stderr);
MPI_Finalize();
if (id == 0) {
fprintf(stderr, "%d %d %d\n", blocks[on_x], blocks[on_y], blocks[on_z]);
fprintf(stderr, "%d %d %d\n", npx, npy, npz);
fprintf(stderr, "%s\n", filename.c_str());
fprintf(stderr, "%f\n", eps);
fprintf(stderr, "%f %f %f\n", l[on_x], l[on_y], l[on_z]);
fprintf(stderr, "%f %f\n", u[down], u[up]);
fprintf(stderr, "%f %f\n", u[left], u[right]);
fprintf(stderr, "%f %f\n", u[front], u[back]);
fprintf(stderr, "%f\n", u0);
}
free(buff);
free(data);
free(next);
free(edge_xy);
free(edge_xz);
free(edge_yz);
return 0;
}
| c60d60bbd543dc7209c65585f765c7e877d8108c.cu | #include "mpi.h"
// #include <bits/stdc++.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
using namespace std;
// Face indices into the boundary-condition array u[].
#define left 0
#define right 1
#define front 2
#define back 3
#define down 4
#define up 5
// Axis indices into the per-dimension arrays (blocks[], l[], ...).
#define on_x 0
#define on_y 1
#define on_z 2
const int NDIM = 3;   // number of spatial dimensions
const int NDIM_2 = 6; // number of faces (2 per dimension)
// Process-global state: MPI rank and this rank's 3D block coordinates.
int id, ib, jb, kb;
// int dimensions[NDIM];
// Per-block grid size (points per block along x/y/z).
int npx, npy, npz;
// Process-grid extents, domain lengths, face boundary values.
int blocks[NDIM];
double l[NDIM];
double u[NDIM_2];
string filename;
// Convergence threshold and initial interior value.
double eps, u0;
// Grid steps along each axis.
double hx, hy, hz;
// Device-constant copy of {npx, npy, npz}, set once via cudaMemcpyToSymbol.
__constant__ int g_dimensions[3];
// Indexing inside a block (host side; +1 shifts skip the one-cell halo).
#define _i(i, j, k) (((k) + 1) * (npy + 2) * (npx + 2) + ((j) + 1) * (npx + 2) + (i) + 1)
#define _iz(id) (((id) / (npx + 2) / (npy + 2)) - 1)
#define _iy(id) ((((id) % ((npx + 2) * (npy + 2))) / (npx + 2)) - 1)
#define _ix(id) ((id) % (npx + 2) - 1)
// Indexing over blocks (processes): rank <-> 3D block coordinates.
#define _ib(i, j, k) ((k) * blocks[on_y] * blocks[on_x] + (j) * blocks[on_x] + (i))
#define _ibz(id) ((id) / blocks[on_x] / blocks[on_y])
#define _iby(id) (((id) % (blocks[on_x] * blocks[on_y])) / blocks[on_x])
#define _ibx(id) ((id) % blocks[on_x])
// Checks a CUDA runtime call; on failure prints the error and does
// `return(1)` — so this macro is only usable inside int-returning functions.
#define CUDA_ERROR(err) { \
    if (err != cudaSuccess) { \
        fprintf(stderr, "ERROR: CUDA failed in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); \
        return(1); \
    } \
} \
// Flattened index into the halo-padded device grid: coordinates run over
// -1 .. g_dimensions[d], and the +1 shifts map them into the padded array.
__device__ int _ind(int i, int j, int k) {
    const int padX = g_dimensions[0] + 2;  // padded x-extent
    const int padY = g_dimensions[1] + 2;  // padded y-extent
    return ((k + 1) * padY + (j + 1)) * padX + (i + 1);
}
// Moves the xy-plane at depth k between a contiguous staging buffer and the
// halo-padded grid.
//   flag == true : gather plane k of `data` into `edge_xy`.
//   flag == false: scatter `edge_xy` into plane k of `data`; when `edge_xy`
//                  is NULL, fill the plane with the boundary value `u`.
// 2D grid-stride loops cover the full nx-by-ny plane for any launch shape.
__global__ void kernel_copy_edge_xy(double* edge_xy, double* data, int nx, int ny, int nz, int k, bool flag, double u) {
    const int startX = blockIdx.x * blockDim.x + threadIdx.x;
    const int startY = blockIdx.y * blockDim.y + threadIdx.y;
    const int stepX = blockDim.x * gridDim.x;
    const int stepY = blockDim.y * gridDim.y;
    if (flag) {
        for (int y = startY; y < ny; y += stepY)
            for (int x = startX; x < nx; x += stepX)
                edge_xy[x + y * nx] = data[_ind(x, y, k)];
    } else if (edge_xy) {
        for (int y = startY; y < ny; y += stepY)
            for (int x = startX; x < nx; x += stepX)
                data[_ind(x, y, k)] = edge_xy[x + y * nx];
    } else {
        for (int y = startY; y < ny; y += stepY)
            for (int x = startX; x < nx; x += stepX)
                data[_ind(x, y, k)] = u;
    }
}
// Moves the xz-plane at row j between a contiguous staging buffer and the
// halo-padded grid.
//   flag == true : gather plane j of `data` into `edge_xz`.
//   flag == false: scatter `edge_xz` into plane j of `data`; when `edge_xz`
//                  is NULL, fill the plane with the boundary value `u`.
// Thread x-dimension walks x, thread y-dimension walks z (as the buffer
// layout edge_xz[x + z * nx] requires).
__global__ void kernel_copy_edge_xz(double* edge_xz, double* data, int nx, int ny, int nz, int j, bool flag, double u) {
    const int startX = blockIdx.x * blockDim.x + threadIdx.x;
    const int startY = blockIdx.y * blockDim.y + threadIdx.y;
    const int stepX = blockDim.x * gridDim.x;
    const int stepY = blockDim.y * gridDim.y;
    if (flag) {
        for (int z = startY; z < nz; z += stepY)
            for (int x = startX; x < nx; x += stepX)
                edge_xz[x + z * nx] = data[_ind(x, j, z)];
    } else if (edge_xz) {
        for (int z = startY; z < nz; z += stepY)
            for (int x = startX; x < nx; x += stepX)
                data[_ind(x, j, z)] = edge_xz[x + z * nx];
    } else {
        for (int z = startY; z < nz; z += stepY)
            for (int x = startX; x < nx; x += stepX)
                data[_ind(x, j, z)] = u;
    }
}
// Moves the yz-plane at column i between a contiguous staging buffer and the
// halo-padded grid.
//   flag == true : gather plane i of `data` into `edge_yz`.
//   flag == false: scatter `edge_yz` into plane i of `data`; when `edge_yz`
//                  is NULL, fill the plane with the boundary value `u`.
// Thread x-dimension walks z, thread y-dimension walks y (buffer layout is
// edge_yz[y + z * ny]).
__global__ void kernel_copy_edge_yz(double* edge_yz, double* data, int nx, int ny, int nz, int i, bool flag, double u) {
    const int startX = blockIdx.x * blockDim.x + threadIdx.x;
    const int startY = blockIdx.y * blockDim.y + threadIdx.y;
    const int stepX = blockDim.x * gridDim.x;
    const int stepY = blockDim.y * gridDim.y;
    if (flag) {
        for (int z = startX; z < nz; z += stepX)
            for (int y = startY; y < ny; y += stepY)
                edge_yz[y + z * ny] = data[_ind(i, y, z)];
    } else if (edge_yz) {
        for (int z = startX; z < nz; z += stepX)
            for (int y = startY; y < ny; y += stepY)
                data[_ind(i, y, z)] = edge_yz[y + z * ny];
    } else {
        for (int z = startX; z < nz; z += stepX)
            for (int y = startY; y < ny; y += stepY)
                data[_ind(i, y, z)] = u;
    }
}
// One Jacobi sweep over the interior cells: each cell of `next` becomes the
// weighted average of its six face neighbours in `data`.
// NOTE: despite their names, hx/hy/hz here receive the INVERSE squared grid
// steps (the caller passes h2x = 1/hx^2, etc.), and `divisor` is
// 2*(h2x + h2y + h2z).  Halo cells are read but never written.
__global__ void kernel_computation(double* next, double* data, int nx, int ny, int nz, double hx, double hy, double hz, double divisor) {
    const int x0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int y0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int z0 = blockIdx.z * blockDim.z + threadIdx.z;
    const int sx = blockDim.x * gridDim.x;
    const int sy = blockDim.y * gridDim.y;
    const int sz = blockDim.z * gridDim.z;
    // 3D grid-stride loops: correct for any launch configuration.
    for (int x = x0; x < nx; x += sx)
        for (int y = y0; y < ny; y += sy)
            for (int z = z0; z < nz; z += sz)
                next[_ind(x, y, z)] = ((data[_ind(x - 1, y, z)] + data[_ind(x + 1, y, z)]) * hx + \
                                       (data[_ind(x, y - 1, z)] + data[_ind(x, y + 1, z)]) * hy + \
                                       (data[_ind(x, y, z - 1)] + data[_ind(x, y, z + 1)]) * hz) / \
                                       divisor;
}
// Fills `diff` over the FULL padded volume (indices -1 .. n per axis) with
// |next - data| for interior cells and 0.0 for halo cells, so a single
// max-reduction over `diff` yields the largest interior change.
// The halo mask is applied by multiplying with the 0/1 predicate rather
// than branching, exactly as in the original formulation.
__global__ void kernel_error(double* next, double* data, double* diff, int nx, int ny, int nz) {
    const int x0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int y0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int z0 = blockIdx.z * blockDim.z + threadIdx.z;
    const int sx = blockDim.x * gridDim.x;
    const int sy = blockDim.y * gridDim.y;
    const int sz = blockDim.z * gridDim.z;
    // Start one cell before the interior so halo entries are also written.
    for (int x = x0 - 1; x < nx + 1; x += sx) {
        for (int y = y0 - 1; y < ny + 1; y += sy) {
            for (int z = z0 - 1; z < nz + 1; z += sz) {
                const int interior = (x != -1 && y != -1 && z != -1 && x != nx && y != ny && z != nz);
                diff[_ind(x, y, z)] = interior * abs(next[_ind(x, y, z)] - data[_ind(x, y, z)]);
            }
        }
    }
}
// Distributed 3D Jacobi solver.  The domain is split over a
// blocks[x] x blocks[y] x blocks[z] MPI process grid; each rank keeps its
// (npx x npy x npz) sub-block with a one-cell halo on the GPU.  Every
// iteration exchanges halo planes with the six neighbours through host
// staging buffers, runs the Jacobi kernel, and reduces the global maximum
// change (thrust::max_element + MPI_Allreduce) until it drops below eps.
// The converged field is written as fixed-width text via MPI collective I/O.
//
// Fixes relative to the previous revision:
//  * gpu_difference is allocated once, not cudaMalloc/cudaFree'd per iteration;
//  * the whole host buffer (halo included) is initialised before upload, so
//    kernel_error never reads uninitialised memory (NaN garbage would poison
//    its 0-masked product);
//  * removed uploads of uninitialised edge staging buffers (each device edge
//    buffer is fully written by a kernel or a receive before any read);
//  * deprecated cudaThreadSynchronize replaced by cudaDeviceSynchronize;
//  * buff (allocated with new[]) is released with delete[], not free().
int main(int argc, char *argv[]) {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    cout.tie(NULL);
    cout << fixed;
    cout.precision(7);
    // Input
    int i, j, k;
    double *data, *temp, *next;
    double *edge_xy, *edge_xz, *edge_yz;
    char proc_name[MPI_MAX_PROCESSOR_NAME];
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, 0);
    int numproc, proc_name_len;
    // MPI initialisation
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &id);
    MPI_Get_processor_name(proc_name, &proc_name_len);
    fprintf(stderr, "proc %2d(%d) on %s(%s)\n", id, numproc, proc_name, devProp.name);
    fflush(stderr);
    // int device_cnt;
    // cudaGetDeviceCount(&device_cnt);
    // cudaSetDevice(id % device_cnt);
    // Rank 0 reads the whole configuration from stdin.
    if (id == 0) {
        cin >> blocks[on_x] >> blocks[on_y] >> blocks[on_z];
        cin >> npx >> npy >> npz;
        cin >> filename;
        cin >> eps;
        cin >> l[on_x] >> l[on_y] >> l[on_z];
        cin >> u[down] >> u[up];
        cin >> u[left] >> u[right];
        cin >> u[front] >> u[back];
        cin >> u0;
    }
    // Broadcast the run parameters to every process.
    // MPI_Bcast(dimensions, NDIM, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&npx, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&npy, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&npz, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(blocks, NDIM, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&eps, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(l, NDIM, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(u, NDIM_2, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&u0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    // filename sending: length first, then the raw characters.
    int filename_size = filename.size();
    MPI_Bcast(&filename_size, 1, MPI_INT, 0, MPI_COMM_WORLD);
    filename.resize(filename_size);
    MPI_Bcast((char*) filename.c_str(), filename_size, MPI_CHAR, 0, MPI_COMM_WORLD);
    // Sanity check: every extent must be non-zero, and the process grid must
    // match the number of MPI ranks.
    if (blocks[on_x] * blocks[on_y] * blocks[on_z] * npx * npy * npz == 0) {
        fprintf(stderr, "Error at proc %d on %s\n", id, proc_name);
        if (blocks[on_x] * blocks[on_y] * blocks[on_z] != numproc) {
            fprintf(stderr, "Dead because of blocks\n");
            fprintf(stderr, "blocks[on_x]=%d, blocks[on_y]=%d, blocks[on_z]=%d, numproc=%d\n", blocks[on_x], blocks[on_y], blocks[on_z], numproc);
        }
        fflush(stderr);
        MPI_Finalize();
        return 0;
    }
    // Grid steps over the whole domain.
    hx = l[on_x] / (double)(npx * blocks[on_x]);
    hy = l[on_y] / (double)(npy * blocks[on_y]);
    hz = l[on_z] / (double)(npz * blocks[on_z]);
    // We need hx^2 hy^2 hz^2
    double h2x = hx, h2y = hy, h2z = hz;
    h2x *= hx;
    h2y *= hy;
    h2z *= hz;
    // To a negative degree (inverse squared steps used by the kernel)
    h2x = 1.0 / h2x;
    h2y = 1.0 / h2y;
    h2z = 1.0 / h2z;
    // Divisor as well
    double divisor = 2 * (h2x + h2y + h2z);
    // This rank's 3D block coordinates.
    ib = _ibx(id);
    jb = _iby(id);
    kb = _ibz(id);
    double* gpu_data, *gpu_next, *gpu_edge_xy, *gpu_edge_xz, *gpu_edge_yz;
    CUDA_ERROR(cudaMalloc(&gpu_data, sizeof(double) * (npx + 2) * (npy + 2) * (npz + 2)));
    CUDA_ERROR(cudaMalloc(&gpu_next, sizeof(double) * (npx + 2) * (npy + 2) * (npz + 2)));
    CUDA_ERROR(cudaMalloc(&gpu_edge_xy, sizeof(double) * npx * npy));
    CUDA_ERROR(cudaMalloc(&gpu_edge_xz, sizeof(double) * npx * npz));
    CUDA_ERROR(cudaMalloc(&gpu_edge_yz, sizeof(double) * npy * npz));
    // Scratch buffer for per-cell |next - data|, allocated ONCE here instead
    // of every iteration (cudaMalloc/cudaFree are expensive, synchronous).
    double* gpu_difference;
    CUDA_ERROR(cudaMalloc((void**)&gpu_difference, sizeof(double) * (npx + 2) * (npy + 2) * (npz + 2)));
    // Host buffer initialisation.
    // NOTE(review): host `next` is allocated and freed but never touched in
    // between; the current/next swap happens entirely on the device.
    data = (double *)malloc(sizeof(double) * (npx + 2) * \
                            (npy + 2) * (npz + 2));
    next = (double *)malloc(sizeof(double) * (npx + 2) * \
                            (npy + 2) * (npz + 2));
    edge_xy = (double *)malloc(sizeof(double) * npx * npy);
    edge_xz = (double *)malloc(sizeof(double) * npx * npz);
    edge_yz = (double *)malloc(sizeof(double) * npy * npz);
    // (No upload of the edge buffers here: each device edge buffer is fully
    // written by a copy kernel or an incoming halo before it is ever read.)
    // Fill the WHOLE padded buffer -- halo included -- with u0 so no
    // uninitialised bytes reach the GPU: kernel_error reads halo cells, and
    // malloc garbage that happens to be NaN would survive the 0-mask
    // (0 * NaN == NaN) and break convergence.
    for (i = 0; i < (npx + 2) * (npy + 2) * (npz + 2); ++i) {
        data[i] = u0;
    }
    CUDA_ERROR(cudaMemcpy(gpu_data, data, sizeof(double) * (npx + 2) * (npy + 2) * (npz + 2), cudaMemcpyHostToDevice));
    CUDA_ERROR(cudaMemcpy(gpu_next, data, sizeof(double) * (npx + 2) * (npy + 2) * (npz + 2), cudaMemcpyHostToDevice));
    dim3 gblocks(32, 32);
    dim3 threads(32, 32);
    int dimensions[3];
    dimensions[0] = npx;
    dimensions[1] = npy;
    dimensions[2] = npz;
    CUDA_ERROR(cudaMemcpyToSymbol(g_dimensions, dimensions, 3 * sizeof(int)));
    double difference = 0.0;
    do {
        // Send the right face to the +x neighbour.
        if (ib + 1 < blocks[on_x]) {
            kernel_copy_edge_yz<<<gblocks, threads>>>(gpu_edge_yz, gpu_data, npx, npy, npz, npx - 1, true, u0);
            CUDA_ERROR(cudaGetLastError());
            CUDA_ERROR(cudaMemcpy(edge_yz, gpu_edge_yz, sizeof(double) * npy * npz, cudaMemcpyDeviceToHost));
            MPI_Send(edge_yz, npy * npz, MPI_DOUBLE, _ib(ib + 1, jb, kb), 0, MPI_COMM_WORLD);
        }
        // Back
        if (jb + 1 < blocks[on_y]) {
            kernel_copy_edge_xz<<<gblocks, threads>>>(gpu_edge_xz, gpu_data, npx, npy, npz, npy - 1, true, u0);
            CUDA_ERROR(cudaGetLastError());
            CUDA_ERROR(cudaMemcpy(edge_xz, gpu_edge_xz, sizeof(double) * npx * npz, cudaMemcpyDeviceToHost));
            MPI_Send(edge_xz, npx * npz, MPI_DOUBLE, _ib(ib, jb + 1, kb), 0, MPI_COMM_WORLD);
        }
        // Up
        if (kb + 1 < blocks[on_z]) {
            kernel_copy_edge_xy<<<gblocks, threads>>>(gpu_edge_xy, gpu_data, npx, npy, npz, npz - 1, true, u0);
            CUDA_ERROR(cudaGetLastError());
            CUDA_ERROR(cudaMemcpy(edge_xy, gpu_edge_xy, sizeof(double) * npx * npy, cudaMemcpyDeviceToHost));
            MPI_Send(edge_xy, npx * npy, MPI_DOUBLE, _ib(ib, jb, kb + 1), 0, MPI_COMM_WORLD);
        }
        // Receive from -x/-y/-z neighbours (or apply the boundary value when
        // there is no neighbour on that side).
        if (ib > 0) {
            MPI_Recv(edge_yz, npy * npz, MPI_DOUBLE, _ib(ib - 1, jb, kb), 0, MPI_COMM_WORLD, &status);
            CUDA_ERROR(cudaMemcpy(gpu_edge_yz, edge_yz, sizeof(double) * npy * npz, cudaMemcpyHostToDevice));
            kernel_copy_edge_yz<<<gblocks, threads>>>(gpu_edge_yz, gpu_data, npx, npy, npz, - 1, false, u0);
        } else {
            kernel_copy_edge_yz<<<gblocks, threads>>>(NULL, gpu_data, npx, npy, npz, - 1, false, u[left]);
        }
        CUDA_ERROR(cudaGetLastError());
        if (jb > 0) {
            MPI_Recv(edge_xz, npx * npz, MPI_DOUBLE, _ib(ib, jb - 1, kb), 0, MPI_COMM_WORLD, &status);
            CUDA_ERROR(cudaMemcpy(gpu_edge_xz, edge_xz, sizeof(double) * npx * npz, cudaMemcpyHostToDevice));
            kernel_copy_edge_xz<<<gblocks, threads>>>(gpu_edge_xz, gpu_data, npx, npy, npz, - 1, false, u0);
        } else {
            kernel_copy_edge_xz<<<gblocks, threads>>>(NULL, gpu_data, npx, npy, npz, - 1, false, u[front]);
        }
        CUDA_ERROR(cudaGetLastError());
        if (kb > 0) {
            MPI_Recv(edge_xy, npx * npy, MPI_DOUBLE, _ib(ib, jb, kb - 1), 0, MPI_COMM_WORLD, &status);
            CUDA_ERROR(cudaMemcpy(gpu_edge_xy, edge_xy, sizeof(double) * npx * npy, cudaMemcpyHostToDevice));
            kernel_copy_edge_xy<<<gblocks, threads>>>(gpu_edge_xy, gpu_data, npx, npy, npz, - 1, false, u0);
        } else {
            kernel_copy_edge_xy<<<gblocks, threads>>>(NULL, gpu_data, npx, npy, npz, - 1, false, u[down]);
        }
        CUDA_ERROR(cudaGetLastError());
        // Left
        if (ib > 0) {
            kernel_copy_edge_yz<<<gblocks, threads>>>(gpu_edge_yz, gpu_data, npx, npy, npz, 0, true, u0);
            CUDA_ERROR(cudaGetLastError());
            CUDA_ERROR(cudaMemcpy(edge_yz, gpu_edge_yz, sizeof(double) * npy * npz, cudaMemcpyDeviceToHost));
            MPI_Send(edge_yz, npy * npz, MPI_DOUBLE, _ib(ib - 1, jb, kb), 0, MPI_COMM_WORLD);
        }
        // Front
        if (jb > 0) {
            kernel_copy_edge_xz<<<gblocks, threads>>>(gpu_edge_xz, gpu_data, npx, npy, npz, 0, true, u0);
            CUDA_ERROR(cudaGetLastError());
            CUDA_ERROR(cudaMemcpy(edge_xz, gpu_edge_xz, sizeof(double) * npx * npz, cudaMemcpyDeviceToHost));
            MPI_Send(edge_xz, npx * npz, MPI_DOUBLE, _ib(ib, jb - 1, kb), 0, MPI_COMM_WORLD);
        }
        // Down
        if (kb > 0) {
            kernel_copy_edge_xy<<<gblocks, threads>>>(gpu_edge_xy, gpu_data, npx, npy, npz, 0, true, u0);
            CUDA_ERROR(cudaGetLastError());
            CUDA_ERROR(cudaMemcpy(edge_xy, gpu_edge_xy, sizeof(double) * npx * npy, cudaMemcpyDeviceToHost));
            MPI_Send(edge_xy, npx * npy, MPI_DOUBLE, _ib(ib, jb, kb - 1), 0, MPI_COMM_WORLD);
        }
        // Receive from +x/+y/+z neighbours (or apply the boundary value).
        if (ib + 1 < blocks[on_x]) {
            MPI_Recv(edge_yz, npy * npz, MPI_DOUBLE, _ib(ib + 1, jb, kb), 0, MPI_COMM_WORLD, &status);
            CUDA_ERROR(cudaMemcpy(gpu_edge_yz, edge_yz, sizeof(double) * npy * npz, cudaMemcpyHostToDevice));
            kernel_copy_edge_yz<<<gblocks, threads>>>(gpu_edge_yz, gpu_data, npx, npy, npz, npx, false, u0);
        } else {
            kernel_copy_edge_yz<<<gblocks, threads>>>(NULL, gpu_data, npx, npy, npz, npx, false, u[right]);
        }
        CUDA_ERROR(cudaGetLastError());
        if (jb + 1 < blocks[on_y]) {
            MPI_Recv(edge_xz, npx * npz, MPI_DOUBLE, _ib(ib, jb + 1, kb), 0, MPI_COMM_WORLD, &status);
            CUDA_ERROR(cudaMemcpy(gpu_edge_xz, edge_xz, sizeof(double) * npx * npz, cudaMemcpyHostToDevice));
            kernel_copy_edge_xz<<<gblocks, threads>>>(gpu_edge_xz, gpu_data, npx, npy, npz, npy, false, u0);
        } else {
            kernel_copy_edge_xz<<<gblocks, threads>>>(NULL, gpu_data, npx, npy, npz, npy, false, u[back]);
        }
        CUDA_ERROR(cudaGetLastError());
        if (kb + 1 < blocks[on_z]) {
            MPI_Recv(edge_xy, npx * npy, MPI_DOUBLE, _ib(ib, jb, kb + 1), 0, MPI_COMM_WORLD, &status);
            CUDA_ERROR(cudaMemcpy(gpu_edge_xy, edge_xy, sizeof(double) * npx * npy, cudaMemcpyHostToDevice));
            kernel_copy_edge_xy<<<gblocks, threads>>>(gpu_edge_xy, gpu_data, npx, npy, npz, npz, false, u0);
        } else {
            kernel_copy_edge_xy<<<gblocks, threads>>>(NULL, gpu_data, npx, npy, npz, npz, false, u[up]);
        }
        CUDA_ERROR(cudaGetLastError());
        cudaDeviceSynchronize();
        // Jacobi update of the interior.
        kernel_computation<<<dim3(8, 8, 8), dim3(32, 4, 4)>>> (gpu_next, gpu_data, npx, npy, npz, h2x, h2y, h2z, divisor);
        CUDA_ERROR(cudaGetLastError());
        cudaDeviceSynchronize();
        // Per-cell change, max-reduced on the device.
        kernel_error<<<dim3(8, 8, 8), dim3(32, 4, 4)>>> (gpu_next, gpu_data, gpu_difference, npx, npy, npz);
        CUDA_ERROR(cudaGetLastError());
        // Cast to thrust
        thrust::device_ptr< double > pointers = thrust::device_pointer_cast(gpu_difference);
        // Pointer to the largest local change
        thrust::device_ptr< double > res = thrust::max_element(pointers, pointers + (npx + 2) * (npy + 2) * (npz + 2));
        difference = 0.0;
        double gpu_diff = 0.0;
        // Dereferencing a device_ptr copies the value to the host
        gpu_diff = *res;
        // Swap current/next device buffers for the following iteration
        temp = gpu_data;
        gpu_data = gpu_next;
        gpu_next = temp;
        MPI_Allreduce(&gpu_diff, &difference, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
    } while (difference > eps);
    CUDA_ERROR(cudaFree(gpu_difference));
    fprintf(stderr, "Done computation\n");
    fflush(stderr);
    CUDA_ERROR(cudaMemcpy(data, gpu_data, sizeof(double) * (npx + 2) * (npy + 2) * (npz + 2), cudaMemcpyDeviceToHost));
    CUDA_ERROR(cudaFree(gpu_data));
    CUDA_ERROR(cudaFree(gpu_next));
    CUDA_ERROR(cudaFree(gpu_edge_xy));
    CUDA_ERROR(cudaFree(gpu_edge_xz));
    CUDA_ERROR(cudaFree(gpu_edge_yz));
    // Format the local block as fixed-width text: one 14-char slot per value,
    // '\n' after the last value of each x-row, ' ' elsewhere.
    // NOTE(review): "%.6e" with a 3-digit exponent and a sign needs 15 bytes
    // including the NUL and would overflow a 14-char slot -- confirm the
    // value range, or widen new_symbol_size.
    int buff_size = (npx + 2) * (npy + 2) * (npz + 2);
    int new_symbol_size = 14;
    // Allocate mem
    char* buff = new char[buff_size * new_symbol_size];
    memset(buff, (char)' ', buff_size * new_symbol_size * sizeof(char));
    for (k = 0; k < dimensions[on_z]; ++k) {
        for (j = 0; j < dimensions[on_y]; ++j) {
            int len_new_symbol;
            for (i = 0; i < dimensions[on_x] - 1; ++i) {
                len_new_symbol = sprintf(&buff[_i(i, j, k) * new_symbol_size], "%.6e", data[_i(i, j, k)]);
                // '\0' to ' ' (sprintf NUL-terminates; pad the slot instead)
                if (len_new_symbol < new_symbol_size) {
                    buff[_i(i, j, k) * new_symbol_size + len_new_symbol] = ' ';
                }
            }
            // Last value of the row carries the newline.
            len_new_symbol = sprintf(&buff[_i(i, j, k) * new_symbol_size], "%.6e\n", data[_i(i, j, k)]);
            if(len_new_symbol < new_symbol_size){
                buff[_i(i, j, k) * new_symbol_size + len_new_symbol] = ' ';
            }
        }
    }
    /*
    for(i = 0; i < buff_size * new_symbol_size; ++i) {
        if (buff[i] == '\0') {
            buff[i] = ' ';
        }
        fprintf(stderr, "%c ", buff[i]);
    }
    */
    fprintf(stderr, "Done writting\n");
    fflush(stderr);
    // Collective MPI-IO: each rank writes its interior sub-block (halo
    // stripped by `memtype`) into its slot of the global array (`filetype`).
    MPI_Datatype new_representation;
    MPI_Datatype memtype;
    MPI_Datatype filetype;
    int sizes[NDIM], starts[NDIM], f_sizes[NDIM], f_starts[NDIM];
    MPI_Type_contiguous(new_symbol_size, MPI_CHAR, &new_representation);
    MPI_Type_commit(&new_representation);
    // Sizes for memtype
    sizes[on_x] = npx + 2;
    sizes[on_y] = npy + 2;
    sizes[on_z] = npz + 2;
    starts[on_x] = starts[on_y] = starts[on_z] = 1;
    // Sizes for filetype
    f_sizes[on_x] = dimensions[on_x] * blocks[on_x];
    f_sizes[on_y] = dimensions[on_y] * blocks[on_y];
    f_sizes[on_z] = dimensions[on_z] * blocks[on_z];
    f_starts[on_x] = dimensions[on_x] * ib;
    f_starts[on_y] = dimensions[on_y] * jb;
    f_starts[on_z] = dimensions[on_z] * kb;
    // Memtype: interior of the local padded block
    MPI_Type_create_subarray(3, sizes, dimensions, starts, MPI_ORDER_FORTRAN, new_representation, &memtype);
    MPI_Type_commit(&memtype);
    // Filetype: this rank's region of the global file
    MPI_Type_create_subarray(3, f_sizes, dimensions, f_starts, MPI_ORDER_FORTRAN, new_representation, &filetype);
    MPI_Type_commit(&filetype);
    fprintf(stderr, "Done creating\n");
    fflush(stderr);
    // Create and open file
    MPI_File fp;
    MPI_File_delete(filename.c_str(), MPI_INFO_NULL);
    MPI_File_open(MPI_COMM_WORLD, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fp);
    MPI_File_set_view(fp, 0, MPI_CHAR, filetype, "native", MPI_INFO_NULL);
    MPI_File_write_all(fp, buff, 1, memtype, MPI_STATUS_IGNORE);
    MPI_File_close(&fp);
    fprintf(stderr, "Done writting in file\n");
    fflush(stderr);
    MPI_Finalize();
    if (id == 0) {
        fprintf(stderr, "%d %d %d\n", blocks[on_x], blocks[on_y], blocks[on_z]);
        fprintf(stderr, "%d %d %d\n", npx, npy, npz);
        fprintf(stderr, "%s\n", filename.c_str());
        fprintf(stderr, "%f\n", eps);
        fprintf(stderr, "%f %f %f\n", l[on_x], l[on_y], l[on_z]);
        fprintf(stderr, "%f %f\n", u[down], u[up]);
        fprintf(stderr, "%f %f\n", u[left], u[right]);
        fprintf(stderr, "%f %f\n", u[front], u[back]);
        fprintf(stderr, "%f\n", u0);
    }
    // buff came from new[], so it must be released with delete[] (the old
    // free(buff) was undefined behaviour).
    delete[] buff;
    free(data);
    free(next);
    free(edge_xy);
    free(edge_xz);
    free(edge_yz);
    return 0;
}
|
f20d508650e74b6968a4e9347f5088d2894f574c.hip | // !!! This is a file automatically generated by hipify!!!
#include "memBenchmark.h"
#include "termcolor.hpp"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// NVTX Dir: C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt
#include <roctracer/roctx.h>
#include <cmath>
#include <cstdio>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <random>
#include <string>
// Number of elements to reduce
static const int n_elements = 8 * 1024 * 1024;
// Number of threads per block to use for all kernels
static const int threads = 256;
// 1D kernel launch configuration.
struct DIMS1D
{
    int dimThreads; // threads per block
    int dimBlocks;  // blocks in the grid
};
// Checks a CUDA runtime call; on failure prints file:line plus the error
// string and terminates the process.  The do/while(0) wrapper makes the
// macro usable as a single statement.
#define CUDA(call) do {                         \
    cudaError_t e = (call);                     \
    if (e == cudaSuccess) break;                \
    fprintf(stderr, __FILE__":%d: %s (%d)\n",   \
            __LINE__, cudaGetErrorString(e), e); \
    exit(1);                                    \
} while (0)
// Integer ceiling division: the smallest number of chunks of size `div`
// needed to cover `n` items (e.g. blocks for n threads).  Computed as
// quotient plus a remainder carry so it cannot overflow for n close to
// UINT_MAX, unlike the usual (n + div - 1) / div form.
inline unsigned divup(unsigned n, unsigned div)
{
    return n / div + (n % div != 0 ? 1u : 0u);
}
// Reports elapsed time and the effective memory bandwidth for `iterations`
// passes over the n_elements float buffer.  The bandwidth expression is the
// exact original one (GB moved across all runs / elapsed seconds).
void printResults(double timeInMilliseconds, int iterations)
{
    const double gbps = (iterations * 1e-9 * (double)(n_elements * sizeof(float)))
                        / (timeInMilliseconds * 1e-3);
    std::cout << "Elapsed Time for " << iterations << " runs = " << round(timeInMilliseconds) << "ms" << std::endl;
    std::cout << termcolor::bold << termcolor::red << termcolor::on_white
              << "Bandwidth (GB/s) = " << std::setprecision(4) << gbps
              << termcolor::reset << std::endl;
    // Restore any error state left on the stream by the color manipulators.
    std::cout.clear();
}
// Compares a device result against the CPU reference element by element.
// The per-element error is scaled by n_elements; the first element exceeding
// 1e-6 is reported and the check fails immediately.
bool postprocess(const float *ref, const float *res, int n)
{
    for (int idx = 0; idx < n; idx++)
    {
        if (std::abs(res[idx] - ref[idx]) / n_elements <= 1e-6)
            continue;
        // Mismatch: show the offending element and bail out.
        std::cout.precision(6);
        std::cout << "ID: " << idx << " \t Res: " << res[idx] << " \t Ref: " << ref[idx] << std::endl;
        std::cout << termcolor::blink << termcolor::white << termcolor::on_red << "*** FAILED ***" << termcolor::reset << std::endl;
        return false;
    }
    std::cout << termcolor::green << "Post process check passed!!" << termcolor::reset << std::endl;
    return true;
}
// CPU reference sum of the first n floats of `data`.
// Accumulates in double: with n up to 8M elements a float accumulator loses
// significant precision to rounding, which would make the reference itself
// the largest source of error in postprocess().
static float reduce_cpu(const float *data, int n)
{
    double sum = 0.0;
    for (int i = 0; i < n; i++)
        sum += data[i];
    return (float)sum;
}
////////////////////////////////////////////////////////////////////////////////
// Reduction Stage 0: Interleaved Addressing
// d_idata : Device pointer to input
// d_odata : Device pointer to output
// n : Number of elements to reduce
////////////////////////////////////////////////////////////////////////////////
__global__ void reduce_stage0(const float* d_idata, float* d_odata, int n)
{
    // Dynamic shared memory: one float per thread (size set at launch).
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    // 1D global index of the element this thread loads.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Load into shared memory; threads past the end contribute 0 so the
    // block reduction stays correct at the grid tail.
    smem[tid] = (idx < n) ? d_idata[idx] : 0.0f;
    // All loads must be visible to the whole block before reducing.
    __syncthreads();
    // Interleaved addressing: offset c doubles each step; only threads whose
    // id is a multiple of 2*c are active -> heavy warp divergence (stage1
    // fixes this).
    for (int c = 1; c < blockDim.x; c *= 2)
    {
        if (tid % (2 * c) == 0)
            smem[tid] += smem[tid + c];
        // Barrier outside the branch so every thread reaches it.
        __syncthreads();
    }
    // smem[0] now holds the block total; one thread publishes the partial
    // sum for this block.
    if (tid == 0)
        d_odata[blockIdx.x] = smem[0];
}
////////////////////////////////////////////////////////////////////////////////
// Reduction Stage 1: Non-divergent Addressing
// d_idata : Device pointer to input
// d_odata : Device pointer to output
// n : Number of elements to reduce
//
// The only difference between stage0 and stage1 is the reduction for loop
////////////////////////////////////////////////////////////////////////////////
__global__ void reduce_stage1(const float* d_idata, float* d_odata, int n)
{
    // Load phase is identical to reduce_stage0.
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    smem[tid] = (idx < n) ? d_idata[idx] : 0.0f;
    __syncthreads();
    // Non-divergent addressing: the first blockDim.x/(2c) threads do all the
    // work, so active threads are contiguous and whole warps retire early.
    // The strided shared-memory index still causes bank conflicts (stage2
    // fixes this).
    for (int c = 1; c < blockDim.x; c *= 2)
    {
        int i = 2 * c * tid;
        if (i < blockDim.x)
            smem[i] += smem[i + c];
        __syncthreads();
    }
    // Publish the block's partial sum.
    if (tid == 0)
        d_odata[blockIdx.x] = smem[0];
}
////////////////////////////////////////////////////////////////////////////////
// Reduction Stage 2: Warp Management without Bank Conflicts
// d_idata : Device pointer to input
// d_odata : Device pointer to output
// n : Number of elements to reduce
//
// The only difference between stage1 and stage2 is the reduction for loop
// This time, we reduce start from blockDim.x and divide by 2 in each iteration
////////////////////////////////////////////////////////////////////////////////
__global__ void reduce_stage2(const float* d_idata, float* d_odata, int n)
{
    // Load phase is identical to reduce_stage1.
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    smem[tid] = (idx < n) ? d_idata[idx] : 0.0f;
    __syncthreads();
    // Sequential addressing: halve the active range each step. Active threads
    // stay contiguous (no divergence within full warps) and consecutive
    // threads touch consecutive shared-memory words (no bank conflicts).
    for (int c = blockDim.x / 2; c > 0; c >>= 1)
    {
        if (tid < c)
            smem[tid] += smem[tid + c];
        __syncthreads();
    }
    // Publish the block's partial sum.
    if (tid == 0)
        d_odata[blockIdx.x] = smem[0];
}
////////////////////////////////////////////////////////////////////////////////
// Reduction Stage 3: Add During Load, Use tile to reduce number of blocks
// d_idata : Device pointer to input
// d_odata : Device pointer to output
// n : Number of elements to reduce
// stage3_TILE : Tiling factor
//
// In this kernel, we will add on load when copying data into shared memory
// The difference between stage3 and stage2 is how we load data into shared memory
// Each block does work of stage3_TILE * blockDim.x elements
////////////////////////////////////////////////////////////////////////////////
// Tiling factor: each block covers stage3_TILE * blockDim.x input elements.
const int stage3_TILE = 2;
__global__ void reduce_stage3(const float* d_idata, float* d_odata, int n)
{
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    // Base index of this thread's first element within the block's tile.
    int idx = blockIdx.x * blockDim.x * stage3_TILE + threadIdx.x;
    // Add-on-load: sum stage3_TILE elements into a register before touching
    // shared memory. Striding by blockDim.x keeps global loads coalesced.
    float sum = 0.0f;
    for (int t = 0; t < stage3_TILE; t++)
    {
        int i = idx + t * blockDim.x;
        if (i < n)
            sum += d_idata[i];
    }
    smem[tid] = sum;
    __syncthreads();
    // Block reduction: same sequential addressing as reduce_stage2.
    for (int c = blockDim.x / 2; c > 0; c >>= 1)
    {
        if (tid < c)
            smem[tid] += smem[tid + c];
        __syncthreads();
    }
    // Publish the block's partial sum.
    if (tid == 0)
        d_odata[blockIdx.x] = smem[0];
}
// warpReduce function for reduce_stag4 that reduces 2 warps into one value
// Warp-level finish: folds smem[0..63] down into smem[0].
// Precondition: called only by threads with tid < 32, and blockDim.x >= 64
// (the first statement reads smem[tid + 32]).
// `volatile` forces every shared-memory access to be issued in order; the
// absence of barriers relies on the lanes executing these statements in
// lockstep. NOTE(review): implicit warp synchrony is not guaranteed on
// architectures with independent thread scheduling (Volta+) — confirm the
// target, or switch to __syncwarp()/shuffle-based reduction there.
__device__ void warpReduce(volatile float* smem, int tid)
{
    smem[tid] += smem[tid + 32];
    smem[tid] += smem[tid + 16];
    smem[tid] += smem[tid + 8 ];
    smem[tid] += smem[tid + 4 ];
    smem[tid] += smem[tid + 2 ];
    smem[tid] += smem[tid + 1 ];
}
////////////////////////////////////////////////////////////////////////////////
// Reduction Stage 4: Warp Loop Unrolling
// d_idata : Device pointer to input
// d_odata : Device pointer to output
// n : Number of elements to reduce
// stage4_TILE : Tiling factor - How does tuning this change performance?
//
// The reduce_stage4 kernel improves on reduce_stage3 by unrolling the block
// reduction by unrolling the loop that operates within a warp.
// Each block does work of stage4_TILE * blockDim.x elements
//
// This kernel also uses the warpReduce device function above
////////////////////////////////////////////////////////////////////////////////
// Tiling factor: each block covers stage4_TILE * blockDim.x input elements.
const int stage4_TILE = 2;
__global__ void reduce_stage4(const float* d_idata, float* d_odata, int n)
{
    // Load phase: add-on-load tile, same as reduce_stage3 with stage4_TILE.
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x * stage4_TILE + threadIdx.x;
    float sum = 0.0f;
    for (int t = 0; t < stage4_TILE; t++)
    {
        int i = idx + t * blockDim.x;
        if (i < n)
            sum += d_idata[i];
    }
    smem[tid] = sum;
    __syncthreads();
    // Part 1: sequential addressing down to 64 remaining elements. Stopping
    // at c > 32 leaves exactly the two-warp tail that warpReduce handles.
    for (int c = blockDim.x / 2; c > 32; c >>= 1)
    {
        if (tid < c)
            smem[tid] += smem[tid + c];
        __syncthreads();
    }
    // Part 2: the last 64 elements are reduced by a single warp, barrier-free.
    if (tid < 32)
        warpReduce(smem, tid);
    // Publish the block's partial sum.
    if (tid == 0)
        d_odata[blockIdx.x] = smem[0];
}
////////////////////////////////////////////////////////////////////////////////
// Reduction Stage 5: Completely unrolled blocks using templates
// d_idata : Device pointer to input
// d_odata : Device pointer to output
// n : Number of elements to reduce
// stage5_TILE : Tiling factor - How does tuning this change performance?
//
// The reduce_stage5 kernel is the same as reduce_stage4 except part 1 of block reduction
// We simply unroll the entire for loop into individual statements wrapper by if conditions
// Why do we need to use templates? How do they improve performance?
// Each block does work of stage5_TILE * blockDim.x elements
//
// This kernel also uses the warpReduce device function above
////////////////////////////////////////////////////////////////////////////////
// Tiling factor: each block covers stage5_TILE * blockDim.x input elements.
const int stage5_TILE = 2;
// blockSize must equal the launch's blockDim.x; making it a template
// parameter turns every size test below into a compile-time constant, so the
// dead branches are eliminated and the whole block reduction is unrolled.
template<unsigned int blockSize>
__global__ void reduce_stage5(const float* d_idata, float* d_odata, int n)
{
    // Load phase: add-on-load tile, same as reduce_stage4 with stage5_TILE.
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockSize * stage5_TILE + threadIdx.x;
    float sum = 0.0f;
#pragma unroll
    for (int t = 0; t < stage5_TILE; t++)
    {
        int i = idx + t * blockSize;
        if (i < n)
            sum += d_idata[i];
    }
    smem[tid] = sum;
    __syncthreads();
    // Part 1: fully unrolled sequential addressing. Each `blockSize >= X`
    // test resolves at compile time - no loop overhead at runtime.
    if (blockSize >= 1024) { if (tid < 512) smem[tid] += smem[tid + 512]; __syncthreads(); }
    if (blockSize >= 512)  { if (tid < 256) smem[tid] += smem[tid + 256]; __syncthreads(); }
    if (blockSize >= 256)  { if (tid < 128) smem[tid] += smem[tid + 128]; __syncthreads(); }
    if (blockSize >= 128)  { if (tid <  64) smem[tid] += smem[tid +  64]; __syncthreads(); }
    // Part 2: warp-level finish, same as reduce_stage4.
    if (tid < 32)
        warpReduce(smem, tid);
    // Publish the block's partial sum.
    if (tid == 0)
        d_odata[blockIdx.x] = smem[0];
}
////////////////////////////////////////////////////////////////////////////////
// Runs one GPU reduction stage end to end:
//   grid setup -> input upload -> one verification launch -> CPU check of the
//   block partials -> 100-iteration benchmark -> cleanup.
// name        : label used for console banners and NVTX/rocTX ranges
// tile        : the stage's per-thread tiling factor (1 for stages 0-2)
// launch      : callable launching the stage's kernel for the given grid
// The benchmark only runs if the verification launch produced the gold result.
////////////////////////////////////////////////////////////////////////////////
template <typename KernelLauncher>
static void runReductionStage(const char* name, int tile,
                              float* d_idata, const float* h_idata,
                              unsigned bytes, float gold_result,
                              hipEvent_t start, hipEvent_t stop,
                              KernelLauncher launch)
{
    nvtxRangeId_t range = roctxRangeStart(name);
    // Threads per block and total blocks; each block covers tile*threads elements.
    DIMS1D dims;
    dims.dimThreads = threads;
    dims.dimBlocks = divup(n_elements, dims.dimThreads * tile);
    // Re-upload the input so every stage starts from pristine data.
    CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
    // Output: one partial sum per block, zeroed before the first launch.
    size_t block_bytes = dims.dimBlocks * sizeof(float);
    float *d_odata = NULL;
    CUDA(hipMalloc((void**)&d_odata, block_bytes));
    CUDA(hipMemset(d_odata, 0, block_bytes));
    // Verification launch.
    launch(dims, d_idata, d_odata);
    // Copy block partials back and finish the reduction on the CPU.
    float *h_blocks = (float *)malloc(block_bytes);
    CUDA(hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost));
    float gpu_result = 0;
    for (int i = 0; i < dims.dimBlocks; i++)
        gpu_result += h_blocks[i];
    // Only benchmark a kernel that produces the right answer.
    if (postprocess(&gpu_result, &gold_result, 1))
    {
        std::string benchName = std::string(name) + " Benchmark";
        nvtxRangeId_t rangeBenchmark = roctxRangeStart(benchName.c_str());
        int iterations = 100;
        CUDA(hipEventRecord(start, 0));
        // Run multiple times for a stable measurement; the timed loop
        // includes the device->host copy and the CPU finish, as in the
        // original per-stage harness.
        for (int k = 0; k < iterations; k++)
        {
            launch(dims, d_idata, d_odata);
            CUDA(hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost));
            for (int i = 0; i < dims.dimBlocks; i++)
                gpu_result += h_blocks[i];
        }
        CUDA(hipEventRecord(stop, 0));
        CUDA(hipEventSynchronize(stop));
        roctxRangeStop(rangeBenchmark);
        float time_ms;
        CUDA(hipEventElapsedTime(&time_ms, start, stop));
        printResults(time_ms, iterations);
    }
    // Cleanup.
    free(h_blocks);
    CUDA(hipFree(d_odata));
    roctxRangeStop(range);
}

// Prints the banner opening a stage section.
static void printStageBanner(const char* title)
{
    std::cout << "******************************************" << std::endl;
    std::cout << "***" << title << "***" << std::endl;
}

// Prints the banner closing a stage section.
static void printStageFooter()
{
    std::cout << "******************************************" << std::endl << std::endl;
}

int main()
{
    // Bytes needed for the input array.
    const unsigned bytes = n_elements * sizeof(float);
    // Pinned host memory: faster transfers and required for async copies.
    float *h_idata;
    CUDA(hipHostMalloc((void**)&h_idata, bytes));
    // Fill the input with uniform random values in [-1, 1).
    {
        std::random_device randomDevice;
        std::mt19937 generator(randomDevice());
        std::uniform_real_distribution<float> distribution(-1, 1);
        for (int i = 0; i < n_elements; i++) {
            h_idata[i] = distribution(generator);
        }
    }
    // Device copy of the input.
    float *d_idata = NULL;
    CUDA(hipMalloc((void **)&d_idata, bytes));
    CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
    // Gold standard computed once on the CPU.
    const float gold_result = reduce_cpu(h_idata, n_elements);
    // Events for GPU timing (error-checked: creation can fail too).
    hipEvent_t start, stop;
    CUDA(hipEventCreate(&start));
    CUDA(hipEventCreate(&stop));
    ////////////////////////////////////////////////////////////
    printStageBanner("CPU Reduce");
    {
        nvtxRangeId_t range = roctxRangeStart("CPU Reduce");
        float cpu_result = 0;
        int iterations = 100;
        // Wall-clock timer for the CPU baseline.
        Timer hTimer;
        nvtxRangeId_t rangeBenchmark = roctxRangeStart("CPU Reduce Benchmark");
        for (int k = 0; k < iterations; k++)
        {
            cpu_result = reduce_cpu(h_idata, n_elements);
        }
        roctxRangeStop(rangeBenchmark);
        double time = hTimer.elapsed() * 1000; // ms
        if (postprocess(&cpu_result, &gold_result, 1))
            printResults(time, iterations);
        roctxRangeStop(range);
    }
    printStageFooter();
    ////////////////////////////////////////////////////////////
    // GPU stages: each goes through the same verify-then-benchmark harness;
    // only the kernel and the tiling factor differ.
    printStageBanner("Reduction Stage 0");
    runReductionStage("Reduction Stage 0", 1, d_idata, h_idata, bytes, gold_result, start, stop,
        [](const DIMS1D& dims, const float* in, float* out) {
            hipLaunchKernelGGL(( reduce_stage0), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, in, out, n_elements);
        });
    printStageFooter();
    ////////////////////////////////////////////////////////////
    printStageBanner("Reduction Stage 1");
    runReductionStage("Reduction Stage 1", 1, d_idata, h_idata, bytes, gold_result, start, stop,
        [](const DIMS1D& dims, const float* in, float* out) {
            hipLaunchKernelGGL(( reduce_stage1), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, in, out, n_elements);
        });
    printStageFooter();
    ////////////////////////////////////////////////////////////
    printStageBanner("Reduction Stage 2");
    runReductionStage("Reduction Stage 2", 1, d_idata, h_idata, bytes, gold_result, start, stop,
        [](const DIMS1D& dims, const float* in, float* out) {
            hipLaunchKernelGGL(( reduce_stage2), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, in, out, n_elements);
        });
    printStageFooter();
    ////////////////////////////////////////////////////////////
    printStageBanner("Reduction Stage 3");
    runReductionStage("Reduction Stage 3", stage3_TILE, d_idata, h_idata, bytes, gold_result, start, stop,
        [](const DIMS1D& dims, const float* in, float* out) {
            hipLaunchKernelGGL(( reduce_stage3), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, in, out, n_elements);
        });
    printStageFooter();
    ////////////////////////////////////////////////////////////
    printStageBanner("Reduction Stage 4");
    runReductionStage("Reduction Stage 4", stage4_TILE, d_idata, h_idata, bytes, gold_result, start, stop,
        [](const DIMS1D& dims, const float* in, float* out) {
            hipLaunchKernelGGL(( reduce_stage4), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, in, out, n_elements);
        });
    printStageFooter();
    ////////////////////////////////////////////////////////////
    printStageBanner("Reduction Stage 5");
    runReductionStage("Reduction Stage 5", stage5_TILE, d_idata, h_idata, bytes, gold_result, start, stop,
        [](const DIMS1D& dims, const float* in, float* out) {
            // blockSize template argument must match the launch's blockDim.x.
            hipLaunchKernelGGL(( reduce_stage5<threads>), dim3(dims.dimBlocks), dim3(dims.dimThreads), sizeof(float) * dims.dimThreads, 0, in, out, n_elements);
        });
    printStageFooter();
    ////////////////////////////////////////////////////////////
    // Cleanup
    CUDA(hipEventDestroy(start));
    CUDA(hipEventDestroy(stop));
    CUDA(hipHostFree(h_idata));
    CUDA(hipFree(d_idata));
    return 0;
}
| f20d508650e74b6968a4e9347f5088d2894f574c.cu | #include "memBenchmark.h"
#include "termcolor.hpp"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// NVTX Dir: C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt
#include <nvToolsExt.h>
#include <cmath>
#include <cstdio>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <random>
#include <string>
// Number of elements to reduce (8M floats = 32 MB of input).
static const int n_elements = 8 * 1024 * 1024;
// Number of threads per block to use for all kernels
static const int threads = 256;
// 1D kernel launch configuration.
struct DIMS1D
{
    int dimThreads; // threads per block
    int dimBlocks;  // blocks in the grid
};
// Check a CUDA runtime call: on failure print file/line plus the runtime's
// error string and exit. do/while(0) makes the macro a single statement.
#define CUDA(call) do { \
    cudaError_t e = (call); \
    if (e == cudaSuccess) break; \
    fprintf(stderr, __FILE__":%d: %s (%d)\n", \
            __LINE__, cudaGetErrorString(e), e); \
    exit(1); \
} while (0)
// Integer ceiling division: the number of chunks of size `div`
// needed to cover `n` items.
inline unsigned divup(unsigned n, unsigned div)
{
    const unsigned rounded_up = n + div - 1;
    return rounded_up / div;
}
// Print the elapsed time for `iterations` runs and the effective memory
// bandwidth, assuming each run reads the full n_elements float input once.
void printResults(double timeInMilliseconds, int iterations)
{
    // GB/s = total bytes moved (all iterations) / total seconds.
    double bandwidth = (iterations * 1e-9 * (double)(n_elements * sizeof(float)))
                     / (timeInMilliseconds * 1e-3);
    std::cout << "Elapsed Time for " << iterations << " runs = " << round(timeInMilliseconds) << "ms" << std::endl;
    // Highlighted so the headline number stands out in the console log.
    std::cout << termcolor::bold << termcolor::red << termcolor::on_white
              << "Bandwidth (GB/s) = " << std::setprecision(4) << bandwidth
              << termcolor::reset << std::endl;
    // Clear any stream error/format state left by the colored output.
    std::cout.clear();
}
// Check errors
// Compare computed results `res` against reference values `ref`, element by
// element. Differences are scaled by n_elements, so the 1e-6 threshold acts
// as a per-element bound on the rounding error accumulated while summing
// n_elements floats. Prints the first mismatch (if any) and returns whether
// all n entries passed.
bool postprocess(const float *ref, const float *res, int n)
{
    bool passed = true;
    for(int i = 0; i < n; i++)
    {
        // Scale by the reduction length: a float sum over 8M values
        // legitimately drifts by far more than raw 1e-6.
        if (std::abs(res[i] - ref[i]) / n_elements > 1e-6)
        {
            std::cout.precision(6);
            std::cout << "ID: " << i << " \t Res: " << res[i] << " \t Ref: " << ref[i] << std::endl;
            std::cout << termcolor::blink << termcolor::white << termcolor::on_red << "*** FAILED ***" << termcolor::reset << std::endl;
            passed = false;
            break;
        }
    }
    if(passed)
        std::cout << termcolor::green << "Post process check passed!!" << termcolor::reset << std::endl;
    return passed;
}
// Sequential CPU reduction: sum of the first n floats in `data`.
// Serves as the gold-standard reference for all GPU kernels.
static float reduce_cpu(const float *data, int n)
{
    float total = 0;
    int i = 0;
    while (i < n)
    {
        total += data[i];
        ++i;
    }
    return total;
}
////////////////////////////////////////////////////////////////////////////////
// Reduction Stage 0: Interleaved Addressing
// d_idata : Device pointer to input
// d_odata : Device pointer to output
// n : Number of elements to reduce
////////////////////////////////////////////////////////////////////////////////
__global__ void reduce_stage0(const float* d_idata, float* d_odata, int n)
{
    // Dynamic shared memory: one float per thread (size set at launch).
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    // 1D global index of the element this thread loads.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Load into shared memory; threads past the end contribute 0 so the
    // block reduction stays correct at the grid tail.
    smem[tid] = (idx < n) ? d_idata[idx] : 0.0f;
    // All loads must be visible to the whole block before reducing.
    __syncthreads();
    // Interleaved addressing: offset c doubles each step; only threads whose
    // id is a multiple of 2*c are active -> heavy warp divergence (stage1
    // fixes this).
    for (int c = 1; c < blockDim.x; c *= 2)
    {
        if (tid % (2 * c) == 0)
            smem[tid] += smem[tid + c];
        // Barrier outside the branch so every thread reaches it.
        __syncthreads();
    }
    // smem[0] now holds the block total; one thread publishes the partial
    // sum for this block.
    if (tid == 0)
        d_odata[blockIdx.x] = smem[0];
}
////////////////////////////////////////////////////////////////////////////////
// Reduction Stage 1: Non-divergent Addressing
// d_idata : Device pointer to input
// d_odata : Device pointer to output
// n : Number of elements to reduce
//
// The only difference between stage0 and stage1 is the reduction for loop
////////////////////////////////////////////////////////////////////////////////
__global__ void reduce_stage1(const float* d_idata, float* d_odata, int n)
{
    // Load phase is identical to reduce_stage0.
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    smem[tid] = (idx < n) ? d_idata[idx] : 0.0f;
    __syncthreads();
    // Non-divergent addressing: the first blockDim.x/(2c) threads do all the
    // work, so active threads are contiguous and whole warps retire early.
    // The strided shared-memory index still causes bank conflicts (stage2
    // fixes this).
    for (int c = 1; c < blockDim.x; c *= 2)
    {
        int i = 2 * c * tid;
        if (i < blockDim.x)
            smem[i] += smem[i + c];
        __syncthreads();
    }
    // Publish the block's partial sum.
    if (tid == 0)
        d_odata[blockIdx.x] = smem[0];
}
////////////////////////////////////////////////////////////////////////////////
// Reduction Stage 2: Warp Management without Bank Conflicts
// d_idata : Device pointer to input
// d_odata : Device pointer to output
// n : Number of elements to reduce
//
// The only difference between stage1 and stage2 is the reduction for loop
// This time, we reduce start from blockDim.x and divide by 2 in each iteration
////////////////////////////////////////////////////////////////////////////////
__global__ void reduce_stage2(const float* d_idata, float* d_odata, int n)
{
    // Load phase is identical to reduce_stage1.
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    smem[tid] = (idx < n) ? d_idata[idx] : 0.0f;
    __syncthreads();
    // Sequential addressing: halve the active range each step. Active threads
    // stay contiguous (no divergence within full warps) and consecutive
    // threads touch consecutive shared-memory words (no bank conflicts).
    for (int c = blockDim.x / 2; c > 0; c >>= 1)
    {
        if (tid < c)
            smem[tid] += smem[tid + c];
        __syncthreads();
    }
    // Publish the block's partial sum.
    if (tid == 0)
        d_odata[blockIdx.x] = smem[0];
}
////////////////////////////////////////////////////////////////////////////////
// Reduction Stage 3: Add During Load, Use tile to reduce number of blocks
// d_idata : Device pointer to input
// d_odata : Device pointer to output
// n : Number of elements to reduce
// stage3_TILE : Tiling factor
//
// In this kernel, we will add on load when copying data into shared memory
// The difference between stage3 and stage2 is how we load data into shared memory
// Each block does work of stage3_TILE * blockDim.x elements
////////////////////////////////////////////////////////////////////////////////
// Tiling factor: each block covers stage3_TILE * blockDim.x input elements.
const int stage3_TILE = 2;
__global__ void reduce_stage3(const float* d_idata, float* d_odata, int n)
{
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    // Base index of this thread's first element within the block's tile.
    int idx = blockIdx.x * blockDim.x * stage3_TILE + threadIdx.x;
    // Add-on-load: sum stage3_TILE elements into a register before touching
    // shared memory. Striding by blockDim.x keeps global loads coalesced.
    float sum = 0.0f;
    for (int t = 0; t < stage3_TILE; t++)
    {
        int i = idx + t * blockDim.x;
        if (i < n)
            sum += d_idata[i];
    }
    smem[tid] = sum;
    __syncthreads();
    // Block reduction: same sequential addressing as reduce_stage2.
    for (int c = blockDim.x / 2; c > 0; c >>= 1)
    {
        if (tid < c)
            smem[tid] += smem[tid + c];
        __syncthreads();
    }
    // Publish the block's partial sum.
    if (tid == 0)
        d_odata[blockIdx.x] = smem[0];
}
// warpReduce function for reduce_stag4 that reduces 2 warps into one value
// Warp-level finish: folds smem[0..63] down into smem[0].
// Precondition: called only by threads with tid < 32, and blockDim.x >= 64
// (the first statement reads smem[tid + 32]).
// `volatile` forces every shared-memory access to be issued in order; the
// absence of barriers relies on the lanes executing these statements in
// lockstep. NOTE(review): implicit warp synchrony is not guaranteed on
// architectures with independent thread scheduling (Volta+) — confirm the
// target, or switch to __syncwarp()/shuffle-based reduction there.
__device__ void warpReduce(volatile float* smem, int tid)
{
    smem[tid] += smem[tid + 32];
    smem[tid] += smem[tid + 16];
    smem[tid] += smem[tid + 8 ];
    smem[tid] += smem[tid + 4 ];
    smem[tid] += smem[tid + 2 ];
    smem[tid] += smem[tid + 1 ];
}
////////////////////////////////////////////////////////////////////////////////
// Reduction Stage 4: Warp Loop Unrolling
// d_idata : Device pointer to input
// d_odata : Device pointer to output
// n : Number of elements to reduce
// stage4_TILE : Tiling factor - How does tuning this change performance?
//
// The reduce_stage4 kernel improves on reduce_stage3 by unrolling the block
// reduction by unrolling the loop that operates within a warp.
// Each block does work of stage4_TILE * blockDim.x elements
//
// This kernel also uses the warpReduce device function above
////////////////////////////////////////////////////////////////////////////////
// Tiling factor: each block covers stage4_TILE * blockDim.x input elements.
const int stage4_TILE = 2;
__global__ void reduce_stage4(const float* d_idata, float* d_odata, int n)
{
    // Load phase: add-on-load tile, same as reduce_stage3 with stage4_TILE.
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x * stage4_TILE + threadIdx.x;
    float sum = 0.0f;
    for (int t = 0; t < stage4_TILE; t++)
    {
        int i = idx + t * blockDim.x;
        if (i < n)
            sum += d_idata[i];
    }
    smem[tid] = sum;
    __syncthreads();
    // Part 1: sequential addressing down to 64 remaining elements. Stopping
    // at c > 32 leaves exactly the two-warp tail that warpReduce handles.
    for (int c = blockDim.x / 2; c > 32; c >>= 1)
    {
        if (tid < c)
            smem[tid] += smem[tid + c];
        __syncthreads();
    }
    // Part 2: the last 64 elements are reduced by a single warp, barrier-free.
    if (tid < 32)
        warpReduce(smem, tid);
    // Publish the block's partial sum.
    if (tid == 0)
        d_odata[blockIdx.x] = smem[0];
}
////////////////////////////////////////////////////////////////////////////////
// Reduction Stage 5: Completely unrolled blocks using templates
// d_idata : Device pointer to input
// d_odata : Device pointer to output
// n : Number of elements to reduce
// stage5_TILE : Tiling factor - How does tuning this change performance?
//
// The reduce_stage5 kernel is the same as reduce_stage4 except part 1 of block reduction
// We simply unroll the entire for loop into individual statements wrapper by if conditions
// Why do we need to use templates? How do they improve performance?
// Each block does work of stage5_TILE * blockDim.x elements
//
// This kernel also uses the warpReduce device function above
////////////////////////////////////////////////////////////////////////////////
// Tiling factor: each block covers stage5_TILE * blockDim.x input elements.
const int stage5_TILE = 2;
// blockSize must equal the launch's blockDim.x; making it a template
// parameter turns every size test below into a compile-time constant, so the
// dead branches are eliminated and the whole block reduction is unrolled.
template<unsigned int blockSize>
__global__ void reduce_stage5(const float* d_idata, float* d_odata, int n)
{
    // Load phase: add-on-load tile, same as reduce_stage4 with stage5_TILE.
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockSize * stage5_TILE + threadIdx.x;
    float sum = 0.0f;
#pragma unroll
    for (int t = 0; t < stage5_TILE; t++)
    {
        int i = idx + t * blockSize;
        if (i < n)
            sum += d_idata[i];
    }
    smem[tid] = sum;
    __syncthreads();
    // Part 1: fully unrolled sequential addressing. Each `blockSize >= X`
    // test resolves at compile time - no loop overhead at runtime.
    if (blockSize >= 1024) { if (tid < 512) smem[tid] += smem[tid + 512]; __syncthreads(); }
    if (blockSize >= 512)  { if (tid < 256) smem[tid] += smem[tid + 256]; __syncthreads(); }
    if (blockSize >= 256)  { if (tid < 128) smem[tid] += smem[tid + 128]; __syncthreads(); }
    if (blockSize >= 128)  { if (tid <  64) smem[tid] += smem[tid +  64]; __syncthreads(); }
    // Part 2: warp-level finish, same as reduce_stage4.
    if (tid < 32)
        warpReduce(smem, tid);
    // Publish the block's partial sum.
    if (tid == 0)
        d_odata[blockIdx.x] = smem[0];
}
int main()
{
// Calculate bytes needed for input
const unsigned bytes = n_elements * sizeof(float);
// Allocate memory and initialize elements
// Let's use pinned memory for host
float *h_idata;
CUDA(cudaMallocHost((void**)&h_idata, bytes));
// Fill random values into the host array
{
std::random_device randomDevice;
std::mt19937 generator(randomDevice());
std::uniform_real_distribution<float> distribution(-1, 1);
for (int i = 0; i < n_elements; i++) {
h_idata[i] = distribution(generator);
}
}
// Copy input data into device memory
float *d_idata = NULL;
CUDA(cudaMalloc((void **)&d_idata, bytes));
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
// Compute Gold Standard using CPU
const float gold_result = reduce_cpu(h_idata, n_elements);
// Create CUDA events for timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
////////////////////////////////////////////////////////////
std::cout << "******************************************" << std::endl;
std::cout << "***CPU Reduce***" << std::endl;
{
nvtxRangeId_t range = nvtxRangeStart("CPU Reduce");
float cpu_result = 0;
int iterations = 100;
// start the timer
Timer hTimer;
nvtxRangeId_t rangeBenchmark = nvtxRangeStart("CPU Reduce Benchmark");
for(int k = 0; k < iterations; k++)
{
cpu_result = reduce_cpu(h_idata, n_elements);
}
nvtxRangeEnd(rangeBenchmark);
// stop the timer
double time = hTimer.elapsed() * 1000; //ms
if(postprocess(&cpu_result, &gold_result, 1))
printResults(time, iterations);
nvtxRangeEnd(range);
}
std::cout << "******************************************" << std::endl << std::endl;
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
std::cout << "******************************************" << std::endl;
std::cout << "***Reduction Stage 0***" << std::endl;
{
nvtxRangeId_t range = nvtxRangeStart("Reduction Stage 0");
//Calculate Threads per block and total blocks required
DIMS1D dims;
dims.dimThreads = threads;
dims.dimBlocks = divup(n_elements, dims.dimThreads);
// Copy input data to device
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
// Calculate bytes needed for output
size_t block_bytes = dims.dimBlocks * sizeof(float);
// Allocate memory for output on device
float *d_odata = NULL;
CUDA(cudaMalloc((void**)&d_odata, block_bytes));
CUDA(cudaMemset(d_odata, 0, block_bytes));
// Call the kernel. Allocate dynamic shared memory
reduce_stage0<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements);
// Copy result of block reduce to CPU and run CPU reduce
float *h_blocks = (float *)malloc(dims.dimBlocks * sizeof(float));
CUDA(cudaMemcpy(h_blocks, d_odata, dims.dimBlocks * sizeof(float), cudaMemcpyDeviceToHost));
// Secondary reduce on CPU
float gpu_result = 0;
for(int i = 0; i < dims.dimBlocks; i++)
gpu_result += h_blocks[i];
// Check the result and then run the benchmark.
if(postprocess(&gpu_result, &gold_result, 1))
{
nvtxRangeId_t rangeBenchmark = nvtxRangeStart("Reduction Stage 0 Benchmark");
//Start Benchmark
int iterations = 100;
CUDA(cudaEventRecord(start, 0));
// Run multiple times for a good benchmark
for(int i = 0; i < iterations; i++)
{
reduce_stage0<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements);
cudaMemcpy(h_blocks, d_odata, dims.dimBlocks * sizeof(float), cudaMemcpyDeviceToHost);
for(int i = 0; i < dims.dimBlocks; i++)
gpu_result += h_blocks[i];
}
CUDA(cudaEventRecord(stop, 0));
CUDA(cudaEventSynchronize(stop));
nvtxRangeEnd(rangeBenchmark);
float time_ms;
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
printResults(time_ms, iterations);
}
// Cleanup
free(h_blocks);
cudaFree(d_odata);
nvtxRangeEnd(range);
}
std::cout << "******************************************" << std::endl << std::endl;
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
std::cout << "******************************************" << std::endl;
std::cout << "***Reduction Stage 1***" << std::endl;
{
nvtxRangeId_t range = nvtxRangeStart("Reduction Stage 1");
//Calculate Threads per block and total blocks required
DIMS1D dims;
dims.dimThreads = threads;
dims.dimBlocks = divup(n_elements, dims.dimThreads);
// Copy input data to device
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
// Calculate bytes needed for output
size_t block_bytes = dims.dimBlocks * sizeof(float);
// Allocate memory for output on device
float *d_odata = NULL;
CUDA(cudaMalloc((void**)&d_odata, block_bytes));
CUDA(cudaMemset(d_odata, 0, block_bytes));
// Call the kernel. Allocate dynamic shared memory
reduce_stage1<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements);
// Copy result of block reduce to CPU and run CPU reduce
float *h_blocks = (float *)malloc(block_bytes);
CUDA(cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost));
// Secondary reduce on CPU
float gpu_result = 0;
for(int i = 0; i < dims.dimBlocks; i++)
gpu_result += h_blocks[i];
// Check the result and then run the benchmark.
if(postprocess(&gpu_result, &gold_result, 1))
{
nvtxRangeId_t rangeBenchmark = nvtxRangeStart("Reduction Stage 1 Benchmark");
//Start Benchmark
int iterations = 100;
CUDA(cudaEventRecord(start, 0));
// Run multiple times for a good benchmark
for(int i = 0; i < iterations; i++)
{
reduce_stage1<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements);
cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < dims.dimBlocks; i++)
gpu_result += h_blocks[i];
}
CUDA(cudaEventRecord(stop, 0));
CUDA(cudaEventSynchronize(stop));
nvtxRangeEnd(rangeBenchmark);
float time_ms;
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
printResults(time_ms, iterations);
}
// Cleanup
free(h_blocks);
cudaFree(d_odata);
nvtxRangeEnd(range);
}
std::cout << "******************************************" << std::endl << std::endl;
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
std::cout << "******************************************" << std::endl;
std::cout << "***Reduction Stage 2***" << std::endl;
{
nvtxRangeId_t range = nvtxRangeStart("Reduction Stage 2");
//Calculate Threads per block and total blocks required
DIMS1D dims;
dims.dimThreads = threads;
dims.dimBlocks = divup(n_elements, dims.dimThreads);
// Copy input data to device
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
// Calculate bytes needed for output
size_t block_bytes = dims.dimBlocks * sizeof(float);
// Allocate memory for output on device
float *d_odata = NULL;
CUDA(cudaMalloc((void**)&d_odata, block_bytes));
CUDA(cudaMemset(d_odata, 0, block_bytes));
// Call the kernel. Allocate dynamic shared memory
reduce_stage2<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements);
// Copy result of block reduce to CPU and run CPU reduce
float *h_blocks = (float *)malloc(block_bytes);
CUDA(cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost));
// Secondary reduce on CPU
float gpu_result = 0;
for(int i = 0; i < dims.dimBlocks; i++)
gpu_result += h_blocks[i];
// Check the result and then run the benchmark.
if(postprocess(&gpu_result, &gold_result, 1))
{
nvtxRangeId_t rangeBenchmark = nvtxRangeStart("Reduction Stage 2 Benchmark");
//Start Benchmark
int iterations = 100;
CUDA(cudaEventRecord(start, 0));
// Run multiple times for a good benchmark
for(int i = 0; i < iterations; i++)
{
reduce_stage2<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements);
cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < dims.dimBlocks; i++)
gpu_result += h_blocks[i];
}
CUDA(cudaEventRecord(stop, 0));
CUDA(cudaEventSynchronize(stop));
nvtxRangeEnd(rangeBenchmark);
float time_ms;
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
printResults(time_ms, iterations);
}
// Cleanup
free(h_blocks);
cudaFree(d_odata);
nvtxRangeEnd(range);
}
std::cout << "******************************************" << std::endl << std::endl;
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
std::cout << "******************************************" << std::endl;
std::cout << "***Reduction Stage 3***" << std::endl;
{
nvtxRangeId_t range = nvtxRangeStart("Reduction Stage 3");
// Calculate Threads per block and total blocks required
// Use stage3_TILE in your grid calculation
DIMS1D dims;
dims.dimThreads = threads;
dims.dimBlocks = divup(n_elements, dims.dimThreads * stage3_TILE);
// Copy input data to device
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
// Calculate bytes needed for output
size_t block_bytes = dims.dimBlocks * sizeof(float);
// Allocate memory for output on device
float *d_odata = NULL;
CUDA(cudaMalloc((void**)&d_odata, block_bytes));
CUDA(cudaMemset(d_odata, 0, block_bytes));
// Call the kernel. Allocate dynamic shared memory
reduce_stage3<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements);
// Copy result of block reduce to CPU and run CPU reduce
float *h_blocks = (float *)malloc(block_bytes);
CUDA(cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost));
// Secondary reduce on CPU
float gpu_result = 0;
for(int i = 0; i < dims.dimBlocks; i++)
gpu_result += h_blocks[i];
// Check the result and then run the benchmark.
if(postprocess(&gpu_result, &gold_result, 1))
{
nvtxRangeId_t rangeBenchmark = nvtxRangeStart("Reduction Stage 3 Benchmark");
//Start Benchmark
int iterations = 100;
CUDA(cudaEventRecord(start, 0));
// Run multiple times for a good benchmark
for(int i = 0; i < iterations; i++)
{
reduce_stage3<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements);
cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < dims.dimBlocks; i++)
gpu_result += h_blocks[i];
}
CUDA(cudaEventRecord(stop, 0));
CUDA(cudaEventSynchronize(stop));
nvtxRangeEnd(rangeBenchmark);
float time_ms;
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
printResults(time_ms, iterations);
}
// Cleanup
free(h_blocks);
cudaFree(d_odata);
nvtxRangeEnd(range);
}
std::cout << "******************************************" << std::endl << std::endl;
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
std::cout << "******************************************" << std::endl;
std::cout << "***Reduction Stage 4***" << std::endl;
{
nvtxRangeId_t range = nvtxRangeStart("Reduction Stage 4");
// Calculate Threads per block and total blocks required
// Use stage4_TILE in your grid calculation
DIMS1D dims;
dims.dimThreads = threads;
dims.dimBlocks = divup(n_elements, dims.dimThreads * stage4_TILE);
// Copy input data to device
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
// Calculate bytes needed for output
size_t block_bytes = dims.dimBlocks * sizeof(float);
// Allocate memory for output on device
float *d_odata = NULL;
CUDA(cudaMalloc((void**)&d_odata, block_bytes));
CUDA(cudaMemset(d_odata, 0, block_bytes));
// Call the kernel. Allocate dynamic shared memory
reduce_stage4<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements);
// Copy result of block reduce to CPU and run CPU reduce
float *h_blocks = (float *)malloc(block_bytes);
CUDA(cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost));
// Secondary reduce on CPU
float gpu_result = 0;
for(int i = 0; i < dims.dimBlocks; i++)
gpu_result += h_blocks[i];
// Check the result and then run the benchmark.
if(postprocess(&gpu_result, &gold_result, 1))
{
nvtxRangeId_t rangeBenchmark = nvtxRangeStart("Reduction Stage 4 Benchmark");
//Start Benchmark
int iterations = 100;
CUDA(cudaEventRecord(start, 0));
// Run multiple times for a good benchmark
for(int i = 0; i < iterations; i++)
{
reduce_stage4<<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements);
cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < dims.dimBlocks; i++)
gpu_result += h_blocks[i];
}
CUDA(cudaEventRecord(stop, 0));
CUDA(cudaEventSynchronize(stop));
nvtxRangeEnd(rangeBenchmark);
float time_ms;
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
printResults(time_ms, iterations);
}
// Cleanup
free(h_blocks);
cudaFree(d_odata);
nvtxRangeEnd(range);
}
std::cout << "******************************************" << std::endl << std::endl;
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
std::cout << "******************************************" << std::endl;
std::cout << "***Reduction Stage 5***" << std::endl;
{
nvtxRangeId_t range = nvtxRangeStart("Reduction Stage 5");
// Calculate Threads per block and total blocks required
// Use stage5_TILE in your grid calculation
DIMS1D dims;
dims.dimThreads = threads;
dims.dimBlocks = divup(n_elements, dims.dimThreads * stage5_TILE);
// Copy input data to device
CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
// Calculate bytes needed for output
size_t block_bytes = dims.dimBlocks * sizeof(float);
// Allocate memory for output on device
float *d_odata = NULL;
CUDA(cudaMalloc((void**)&d_odata, block_bytes));
CUDA(cudaMemset(d_odata, 0, block_bytes));
// Call the kernel. Allocate dynamic shared memory
// Don't forget to add the template
reduce_stage5<threads><<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements);
// Copy result of block reduce to CPU and run CPU reduce
float *h_blocks = (float *)malloc(block_bytes);
CUDA(cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost));
// Secondary reduce on CPU
float gpu_result = 0;
for(int i = 0; i < dims.dimBlocks; i++)
gpu_result += h_blocks[i];
// Check the result and then run the benchmark.
if(postprocess(&gpu_result, &gold_result, 1))
{
nvtxRangeId_t rangeBenchmark = nvtxRangeStart("Reduction Stage 5 Benchmark");
//Start Benchmark
int iterations = 100;
CUDA(cudaEventRecord(start, 0));
// Run multiple times for a good benchmark
for(int i = 0; i < iterations; i++)
{
reduce_stage5<threads><<<dims.dimBlocks, dims.dimThreads, sizeof(float) * dims.dimThreads>>>(d_idata, d_odata, n_elements);
cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < dims.dimBlocks; i++)
gpu_result += h_blocks[i];
}
CUDA(cudaEventRecord(stop, 0));
CUDA(cudaEventSynchronize(stop));
nvtxRangeEnd(rangeBenchmark);
float time_ms;
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
printResults(time_ms, iterations);
}
// Cleanup
free(h_blocks);
cudaFree(d_odata);
nvtxRangeEnd(range);
}
std::cout << "******************************************" << std::endl << std::endl;
////////////////////////////////////////////////////////////
// Cleanup
CUDA(cudaEventDestroy(start));
CUDA(cudaEventDestroy(stop));
CUDA(cudaFreeHost(h_idata));
CUDA(cudaFree(d_idata));
return 0;
}
|
81d476579fc1d35fc43a939267497087a33e8e5c.hip | // !!! This is a file automatically generated by hipify!!!
/**
* atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "./polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size. */
#define NX 16384
#define NY 16384
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *x, DATA_TYPE *A)
{
int i, j;
for (i = 0; i < NX; i++)
{
x[i] = i * M_PI;
for (j = 0; j < NY; j++)
{
A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
}
}
}
void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu)
{
int i, fail;
fail = 0;
for (i=0; i<NY; i++)
{
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
__global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NX)
{
int j;
for(j=0; j < NY; j++)
{
tmp[i] += A[i * NY + j] * x[j];
}
}
}
__global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < NY)
{
int i;
for(i=0; i < NX; i++)
{
y[j] += A[i * NY + j] * tmp[i];
}
}
}
void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp)
{
int i,j;
for (i= 0; i < NY; i++)
{
y[i] = 0;
}
for (i = 0; i < NX; i++)
{
tmp[i] = 0;
for (j = 0; j < NY; j++)
{
tmp[i] = tmp[i] + A[i*NY + j] * x[j];
}
for (j = 0; j < NY; j++)
{
y[j] = y[j] + A[i*NY + j] * tmp[i];
}
}
}
void ataxGpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu)
{
/*
DATA_TYPE *A_gpu;
DATA_TYPE *x_gpu;
DATA_TYPE *y_gpu;
DATA_TYPE *tmp_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
hipMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY);
hipMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY);
hipMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX);
*/
hipStream_t streams[1];
hipStreamCreate(&(streams[0]));
hipEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
/*
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, hipMemcpyHostToDevice);
hipMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
hipMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, hipMemcpyHostToDevice);
hipMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, hipMemcpyHostToDevice);
*/
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
hipMemPrefetchAsync(A, sizeof(DATA_TYPE) * NX * NY, 0,streams[0]);
hipLaunchKernelGGL(( atax_kernel1), dim3(grid1), dim3(block) , 0, 0, A,x,tmp);
hipDeviceSynchronize();
hipLaunchKernelGGL(( atax_kernel2), dim3(grid2), dim3(block) , 0, 0, A,y,tmp);
hipDeviceSynchronize();
//hipMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NX, hipMemcpyDeviceToHost);
hipMemPrefetchAsync(y, sizeof(DATA_TYPE) * NX, hipCpuDeviceId,streams[0]);
hipEventRecord(stop);
hipDeviceSynchronize();
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
/*
hipFree(A_gpu);
hipFree(x_gpu);
hipFree(y_gpu);
hipFree(tmp_gpu);
*/
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* x;
DATA_TYPE* y;
DATA_TYPE* y_outputFromGpu;
DATA_TYPE* tmp;
/*
A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
*/
/*
hipHostMalloc((void **)&A, sizeof(DATA_TYPE) * NX * NY, hipHostMallocPortable);
hipHostMalloc((void **)&x, sizeof(DATA_TYPE) * NY, hipHostMallocPortable);
hipHostMalloc((void **)&y, sizeof(DATA_TYPE) * NY, hipHostMallocPortable);
hipHostMalloc((void **)&y_outputFromGpu, sizeof(DATA_TYPE) * NY, hipHostMallocPortable);
hipHostMalloc((void **)&tmp, sizeof(DATA_TYPE) * NX, hipHostMallocPortable);
*/
hipMallocManaged((void **)&A, sizeof(DATA_TYPE) * NX * NY);
hipMallocManaged((void **)&x, sizeof(DATA_TYPE) * NY);
hipMallocManaged((void **)&y, sizeof(DATA_TYPE) * NY);
hipMallocManaged((void **)&y_outputFromGpu, sizeof(DATA_TYPE) * NY);
hipMallocManaged((void **)&tmp, sizeof(DATA_TYPE) * NX);
init_array(x, A);
GPU_argv_init();
ataxGpu(A, x, y, tmp, y_outputFromGpu);
/*
t_start = rtclock();
atax_cpu(A, x, y, tmp);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(y, y_outputFromGpu);
*/
hipFree(A);
hipFree(x);
hipFree(y);
hipFree(y_outputFromGpu);
hipFree(tmp);
return 0;
}
| 81d476579fc1d35fc43a939267497087a33e8e5c.cu | /**
* atax.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "./polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size. */
#define NX 16384
#define NY 16384
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_array(DATA_TYPE *x, DATA_TYPE *A)
{
int i, j;
for (i = 0; i < NX; i++)
{
x[i] = i * M_PI;
for (j = 0; j < NY; j++)
{
A[i*NY + j] = ((DATA_TYPE) i*(j)) / NX;
}
}
}
void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu)
{
int i, fail;
fail = 0;
for (i=0; i<NY; i++)
{
if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void atax_kernel1(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *tmp)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NX)
{
int j;
for(j=0; j < NY; j++)
{
tmp[i] += A[i * NY + j] * x[j];
}
}
}
__global__ void atax_kernel2(DATA_TYPE *A, DATA_TYPE *y, DATA_TYPE *tmp)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < NY)
{
int i;
for(i=0; i < NX; i++)
{
y[j] += A[i * NY + j] * tmp[i];
}
}
}
void atax_cpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp)
{
int i,j;
for (i= 0; i < NY; i++)
{
y[i] = 0;
}
for (i = 0; i < NX; i++)
{
tmp[i] = 0;
for (j = 0; j < NY; j++)
{
tmp[i] = tmp[i] + A[i*NY + j] * x[j];
}
for (j = 0; j < NY; j++)
{
y[j] = y[j] + A[i*NY + j] * tmp[i];
}
}
}
void ataxGpu(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu)
{
/*
DATA_TYPE *A_gpu;
DATA_TYPE *x_gpu;
DATA_TYPE *y_gpu;
DATA_TYPE *tmp_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NX * NY);
cudaMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * NY);
cudaMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * NY);
cudaMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * NX);
*/
cudaStream_t streams[1];
cudaStreamCreate(&(streams[0]));
cudaEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
/*
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice);
cudaMemcpy(x_gpu, x, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
cudaMemcpy(y_gpu, y, sizeof(DATA_TYPE) * NY, cudaMemcpyHostToDevice);
cudaMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * NX, cudaMemcpyHostToDevice);
*/
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NX) / ((float)block.x) )), 1);
dim3 grid2((size_t)(ceil( ((float)NY) / ((float)block.x) )), 1);
cudaMemPrefetchAsync(A, sizeof(DATA_TYPE) * NX * NY, 0,streams[0]);
atax_kernel1<<< grid1, block >>>(A,x,tmp);
cudaThreadSynchronize();
atax_kernel2<<< grid2, block >>>(A,y,tmp);
cudaThreadSynchronize();
//cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * NX, cudaMemcpyDeviceToHost);
cudaMemPrefetchAsync(y, sizeof(DATA_TYPE) * NX, cudaCpuDeviceId,streams[0]);
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
/*
cudaFree(A_gpu);
cudaFree(x_gpu);
cudaFree(y_gpu);
cudaFree(tmp_gpu);
*/
}
int main(int argc, char** argv)
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* x;
DATA_TYPE* y;
DATA_TYPE* y_outputFromGpu;
DATA_TYPE* tmp;
/*
A = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE));
x = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
y_outputFromGpu = (DATA_TYPE*)malloc(NY*sizeof(DATA_TYPE));
tmp = (DATA_TYPE*)malloc(NX*sizeof(DATA_TYPE));
*/
/*
cudaHostAlloc((void **)&A, sizeof(DATA_TYPE) * NX * NY, cudaHostAllocPortable);
cudaHostAlloc((void **)&x, sizeof(DATA_TYPE) * NY, cudaHostAllocPortable);
cudaHostAlloc((void **)&y, sizeof(DATA_TYPE) * NY, cudaHostAllocPortable);
cudaHostAlloc((void **)&y_outputFromGpu, sizeof(DATA_TYPE) * NY, cudaHostAllocPortable);
cudaHostAlloc((void **)&tmp, sizeof(DATA_TYPE) * NX, cudaHostAllocPortable);
*/
cudaMallocManaged((void **)&A, sizeof(DATA_TYPE) * NX * NY);
cudaMallocManaged((void **)&x, sizeof(DATA_TYPE) * NY);
cudaMallocManaged((void **)&y, sizeof(DATA_TYPE) * NY);
cudaMallocManaged((void **)&y_outputFromGpu, sizeof(DATA_TYPE) * NY);
cudaMallocManaged((void **)&tmp, sizeof(DATA_TYPE) * NX);
init_array(x, A);
GPU_argv_init();
ataxGpu(A, x, y, tmp, y_outputFromGpu);
/*
t_start = rtclock();
atax_cpu(A, x, y, tmp);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(y, y_outputFromGpu);
*/
cudaFree(A);
cudaFree(x);
cudaFree(y);
cudaFree(y_outputFromGpu);
cudaFree(tmp);
return 0;
}
|
487062f9af55bb656978590f54a967c21a015d19.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.hu"
#include <cmath>
#define PI 3.141592654f
float calc_w(float r, float n)
{
return (- (2 * PI * r) / n);
}
float bluestein(hipEvent_t start, hipEvent_t stop,
DATA_TYPE *dev_data, DATA_TYPE *dev_middle)
{
float time;
size_t N = DATA_SIZE;
size_t M = pow(2.0, ceil(log2((double)(N - 1)) + 1));
hipfftHandle plan;
cufftCheckReturn(hipfftCreate(&plan));
long long len = M;
size_t ws = 0;
cufftCheckReturn(
cufftXtMakePlanMany(
plan, 1, &len,
NULL, 1, 1, HIP_C_32F,
NULL, 1, 1, HIP_C_32F,
1, &ws, HIP_C_32F));
cudaCheckReturn(hipDeviceSynchronize());
cudaCheckReturn(hipEventRecord(start));
DATA_TYPE *h, *hh, *x, *y, *CY, *hCZ, *CZ, *CX;
cudaCheckReturn(hipHostMalloc(&h, M * sizeof(DATA_TYPE)));
cudaCheckReturn(hipHostMalloc(&hh, M * sizeof(DATA_TYPE)));
cudaCheckReturn(hipHostMalloc(&x, N * sizeof(DATA_TYPE)));
cudaCheckReturn(hipHostMalloc(&y, M * sizeof(DATA_TYPE)));
cudaCheckReturn(hipHostMalloc(&CY, M * sizeof(DATA_TYPE)));
cudaCheckReturn(hipHostMalloc(&hCZ, M * sizeof(DATA_TYPE)));
cudaCheckReturn(hipHostMalloc(&CZ, M * sizeof(DATA_TYPE)));
cudaCheckReturn(hipHostMalloc(&CX, N * sizeof(DATA_TYPE)));
for (int l = 0; l <= N-1; l++) {
float p = calc_w((- 1 / (float)2) * (l * l), N);;
h[l].x = cosf(p);
h[l].y = sinf(p);
}
for (int l = M - N + 1; l <= M - 1; l++) {
h[l] = h[M - l];
}
for (int l = N; l <= M - N; l++) {
h[l].x = 0.f;
h[l].y = 0.f;
}
for (int r = 0; r <= M - 1; r++) {
hh[r].x = 0.f;
hh[r].y = 0.f;
for (int l = 0; l <= M - 1; l++) {
float p = calc_w(r * l, M);
hh[r].x += h[l].x * cosf(p) - h[l].y * sinf(p);
hh[r].y += h[l].x * sinf(p) + h[l].y * cosf(p);
}
}
cudaCheckReturn(hipMemcpy(x, dev_data, N * sizeof(DATA_TYPE),
hipMemcpyDeviceToHost));
for (int l = 0; l <= N - 1; l++) {
float p = calc_w((1 / (float)2) * l * l, N);
y[l].x = x[l].x * cosf(p) - x[l].y * sinf(p);
y[l].y = x[l].x * sinf(p) + x[l].y * cosf(p);
}
for (int l = N; l <= M - 1; l++) {
y[l].x = 0.f;
y[l].y = 0.f;
}
for (int r = 0; r <= M - 1; r++) {
CY[r].x = 0.f;
CY[r].y = 0.f;
for (int l = 0; l <= M - 1; l++) {
float p = calc_w(r * l, M);
CY[r].x += y[l].x * cosf(p) - y[l].y * sinf(p);
CY[r].y += y[l].x * sinf(p) + y[l].y * cosf(p);
}
}
for (int r = 0; r <= M - 1; r++) {
hCZ[r].x = CY[r].x * hh[r].x - CY[r].y * hh[r].y;
hCZ[r].y = CY[r].x * hh[r].y + CY[r].y * hh[r].x;
}
for (int r = 0; r <= M - 1; r++) {
CZ[r].x = 0.f;
CZ[r].y = 0.f;
for (int l = 0; l <= M - 1; l++) {
float p = calc_w(- r * l, M);
CZ[r].x += hCZ[l].x * cosf(p) - hCZ[l].y * sinf(p);
CZ[r].y += hCZ[l].x * sinf(p) + hCZ[l].y * cosf(p);
}
CZ[r].x /= M;
CZ[r].y /= M;
}
for (int r = 0; r <= N - 1; r++) {
float p = calc_w((1 / (float)2) * r * r, N);
CX[r].x = CZ[r].x * cosf(p) - CZ[r].y * sinf(p);
CX[r].y = CZ[r].x * sinf(p) + CZ[r].y * cosf(p);
}
cudaCheckReturn(hipMemcpy(dev_middle, CX, N * sizeof(DATA_TYPE),
hipMemcpyHostToDevice));
// cufftCheckReturn(cufftXtExec(plan, dev_data, dev_middle, HIPFFT_FORWARD));
cudaCheckReturn(hipEventRecord(stop));
cudaCheckReturn(hipEventSynchronize(stop));
cudaCheckReturn(hipEventElapsedTime(&time, start, stop));
return time;
}
std::vector<float> benchmark(DATA_TYPE *output,
DATA_TYPE *data,
hipEvent_t start, hipEvent_t stop)
{
DATA_TYPE *dev_output, *dev_middle, *dev_data, *middle;
std::vector<float> time(2);
size_t N = DATA_SIZE;
size_t M = pow(2.0, ceil(log2((double)(N - 1)) + 1));
/*
Setup
*/
cudaCheckReturn(hipHostMalloc(&middle, M * sizeof(DATA_TYPE)));
cudaCheckReturn(hipMalloc(&dev_data, M * sizeof(DATA_TYPE)));
cudaCheckReturn(hipMalloc(&dev_middle, M * sizeof(DATA_TYPE)));
cudaCheckReturn(hipMalloc(&dev_output, N * sizeof(DATA_TYPE)));
cudaCheckReturn(hipMemcpy(dev_data, data, N * sizeof(DATA_TYPE),
hipMemcpyHostToDevice));
hipfftHandle plan;
cufftCheckReturn(hipfftCreate(&plan));
long long len = N;
size_t ws = 0;
cufftCheckReturn(
cufftXtMakePlanMany(
plan, 1, &len,
NULL, 1, 1, HIP_C_32F,
NULL, 1, 1, HIP_C_32F,
1, &ws, HIP_C_32F));
/*
FFT
*/
time[0] = bluestein(start, stop, dev_data, dev_middle);
cudaCheckKernel();
/*
Scaling
*/
cudaCheckReturn(hipMemcpy(middle, dev_middle, N * sizeof(DATA_TYPE),
hipMemcpyDeviceToHost));
for (size_t i = 0; i < DATA_SIZE; i++) {
float2 m = middle[i];
m.x /= DATA_SIZE;
m.y /= DATA_SIZE;
middle[i] = m;
}
cudaCheckReturn(hipMemcpy(dev_middle, middle, N * sizeof(DATA_TYPE),
hipMemcpyHostToDevice));
/*
IFFT
*/
cudaCheckReturn(hipDeviceSynchronize());
cudaCheckReturn(hipEventRecord(start));
cufftCheckReturn(cufftXtExec(plan, dev_middle, dev_output, HIPFFT_BACKWARD));
cudaCheckReturn(hipEventRecord(stop));
cudaCheckReturn(hipEventSynchronize(stop));
cudaCheckKernel();
cudaCheckReturn(hipEventElapsedTime(&time[1], start, stop));
/*
Close
*/
cufftCheckReturn(hipfftDestroy(plan));
cudaCheckReturn(hipMemcpy(output, dev_output, N * sizeof(DATA_TYPE),
hipMemcpyDeviceToHost));
cudaCheckReturn(hipHostFree(middle));
cudaCheckReturn(hipFree(dev_output));
cudaCheckReturn(hipFree(dev_middle));
cudaCheckReturn(hipFree(dev_data));
return time;
}
| 487062f9af55bb656978590f54a967c21a015d19.cu | #include "common.hu"
#include <cmath>
#define PI 3.141592654f
float calc_w(float r, float n)
{
return (- (2 * PI * r) / n);
}
float bluestein(cudaEvent_t start, cudaEvent_t stop,
DATA_TYPE *dev_data, DATA_TYPE *dev_middle)
{
float time;
size_t N = DATA_SIZE;
size_t M = pow(2.0, ceil(log2((double)(N - 1)) + 1));
cufftHandle plan;
cufftCheckReturn(cufftCreate(&plan));
long long len = M;
size_t ws = 0;
cufftCheckReturn(
cufftXtMakePlanMany(
plan, 1, &len,
NULL, 1, 1, CUDA_C_32F,
NULL, 1, 1, CUDA_C_32F,
1, &ws, CUDA_C_32F));
cudaCheckReturn(cudaDeviceSynchronize());
cudaCheckReturn(cudaEventRecord(start));
DATA_TYPE *h, *hh, *x, *y, *CY, *hCZ, *CZ, *CX;
cudaCheckReturn(cudaMallocHost(&h, M * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMallocHost(&hh, M * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMallocHost(&x, N * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMallocHost(&y, M * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMallocHost(&CY, M * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMallocHost(&hCZ, M * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMallocHost(&CZ, M * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMallocHost(&CX, N * sizeof(DATA_TYPE)));
for (int l = 0; l <= N-1; l++) {
float p = calc_w((- 1 / (float)2) * (l * l), N);;
h[l].x = cosf(p);
h[l].y = sinf(p);
}
for (int l = M - N + 1; l <= M - 1; l++) {
h[l] = h[M - l];
}
for (int l = N; l <= M - N; l++) {
h[l].x = 0.f;
h[l].y = 0.f;
}
for (int r = 0; r <= M - 1; r++) {
hh[r].x = 0.f;
hh[r].y = 0.f;
for (int l = 0; l <= M - 1; l++) {
float p = calc_w(r * l, M);
hh[r].x += h[l].x * cosf(p) - h[l].y * sinf(p);
hh[r].y += h[l].x * sinf(p) + h[l].y * cosf(p);
}
}
cudaCheckReturn(cudaMemcpy(x, dev_data, N * sizeof(DATA_TYPE),
cudaMemcpyDeviceToHost));
for (int l = 0; l <= N - 1; l++) {
float p = calc_w((1 / (float)2) * l * l, N);
y[l].x = x[l].x * cosf(p) - x[l].y * sinf(p);
y[l].y = x[l].x * sinf(p) + x[l].y * cosf(p);
}
for (int l = N; l <= M - 1; l++) {
y[l].x = 0.f;
y[l].y = 0.f;
}
for (int r = 0; r <= M - 1; r++) {
CY[r].x = 0.f;
CY[r].y = 0.f;
for (int l = 0; l <= M - 1; l++) {
float p = calc_w(r * l, M);
CY[r].x += y[l].x * cosf(p) - y[l].y * sinf(p);
CY[r].y += y[l].x * sinf(p) + y[l].y * cosf(p);
}
}
for (int r = 0; r <= M - 1; r++) {
hCZ[r].x = CY[r].x * hh[r].x - CY[r].y * hh[r].y;
hCZ[r].y = CY[r].x * hh[r].y + CY[r].y * hh[r].x;
}
for (int r = 0; r <= M - 1; r++) {
CZ[r].x = 0.f;
CZ[r].y = 0.f;
for (int l = 0; l <= M - 1; l++) {
float p = calc_w(- r * l, M);
CZ[r].x += hCZ[l].x * cosf(p) - hCZ[l].y * sinf(p);
CZ[r].y += hCZ[l].x * sinf(p) + hCZ[l].y * cosf(p);
}
CZ[r].x /= M;
CZ[r].y /= M;
}
for (int r = 0; r <= N - 1; r++) {
float p = calc_w((1 / (float)2) * r * r, N);
CX[r].x = CZ[r].x * cosf(p) - CZ[r].y * sinf(p);
CX[r].y = CZ[r].x * sinf(p) + CZ[r].y * cosf(p);
}
cudaCheckReturn(cudaMemcpy(dev_middle, CX, N * sizeof(DATA_TYPE),
cudaMemcpyHostToDevice));
// cufftCheckReturn(cufftXtExec(plan, dev_data, dev_middle, CUFFT_FORWARD));
cudaCheckReturn(cudaEventRecord(stop));
cudaCheckReturn(cudaEventSynchronize(stop));
cudaCheckReturn(cudaEventElapsedTime(&time, start, stop));
return time;
}
std::vector<float> benchmark(DATA_TYPE *output,
DATA_TYPE *data,
cudaEvent_t start, cudaEvent_t stop)
{
DATA_TYPE *dev_output, *dev_middle, *dev_data, *middle;
std::vector<float> time(2);
size_t N = DATA_SIZE;
size_t M = pow(2.0, ceil(log2((double)(N - 1)) + 1));
/*
Setup
*/
cudaCheckReturn(cudaMallocHost(&middle, M * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMalloc(&dev_data, M * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMalloc(&dev_middle, M * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMalloc(&dev_output, N * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMemcpy(dev_data, data, N * sizeof(DATA_TYPE),
cudaMemcpyHostToDevice));
cufftHandle plan;
cufftCheckReturn(cufftCreate(&plan));
long long len = N;
size_t ws = 0;
cufftCheckReturn(
cufftXtMakePlanMany(
plan, 1, &len,
NULL, 1, 1, CUDA_C_32F,
NULL, 1, 1, CUDA_C_32F,
1, &ws, CUDA_C_32F));
/*
FFT
*/
time[0] = bluestein(start, stop, dev_data, dev_middle);
cudaCheckKernel();
/*
Scaling
*/
cudaCheckReturn(cudaMemcpy(middle, dev_middle, N * sizeof(DATA_TYPE),
cudaMemcpyDeviceToHost));
for (size_t i = 0; i < DATA_SIZE; i++) {
float2 m = middle[i];
m.x /= DATA_SIZE;
m.y /= DATA_SIZE;
middle[i] = m;
}
cudaCheckReturn(cudaMemcpy(dev_middle, middle, N * sizeof(DATA_TYPE),
cudaMemcpyHostToDevice));
/*
IFFT
*/
cudaCheckReturn(cudaDeviceSynchronize());
cudaCheckReturn(cudaEventRecord(start));
cufftCheckReturn(cufftXtExec(plan, dev_middle, dev_output, CUFFT_INVERSE));
cudaCheckReturn(cudaEventRecord(stop));
cudaCheckReturn(cudaEventSynchronize(stop));
cudaCheckKernel();
cudaCheckReturn(cudaEventElapsedTime(&time[1], start, stop));
/*
Close
*/
cufftCheckReturn(cufftDestroy(plan));
cudaCheckReturn(cudaMemcpy(output, dev_output, N * sizeof(DATA_TYPE),
cudaMemcpyDeviceToHost));
cudaCheckReturn(cudaFreeHost(middle));
cudaCheckReturn(cudaFree(dev_output));
cudaCheckReturn(cudaFree(dev_middle));
cudaCheckReturn(cudaFree(dev_data));
return time;
}
|
570732a51ab27b192fe2fde340c46dc49a074431.hip | // !!! This is a file automatically generated by hipify!!!
#include <builtin_types.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <unistd.h>
#include "cuda_retina_kernels.cuh"
#include "hip/hip_runtime_api.h"
namespace gpu {
constexpr std::size_t PolarPixelMappingSize = 21;
constexpr float MaxPolarRadiusSquared = 2.0 * 2.0 + 1.0;
constexpr float ConeRadiusSquared = 0.5 * 0.5;
__constant__ int PolarPixelXMappingE[PolarPixelMappingSize] = {0, 1, 0, -1, 0, 1, -1, -1, 1, 2, 0,
-2, 0, 2, 1, -1, -2, -2, -1, 1, 2};
__constant__ int PolarPixelYMappingE[PolarPixelMappingSize] = {0, 0, 1, 0, -1, 1, 1, -1, -1, 0, 2,
0, -2, 1, 2, 2, 1, 1, -2, -2, -1};
Ganglionar* loadCellsArrayToGPU(Ganglionar* cellsArrayHost, int width, int height) {
int size = width * height * sizeof(Ganglionar);
Ganglionar* cellsArrayDevice;
hipMalloc((void**)&cellsArrayDevice, size);
hipMemcpy(cellsArrayDevice, cellsArrayHost, size, hipMemcpyHostToDevice);
return cellsArrayDevice;
}
void unloadArray(Ganglionar* cell) {
// destruction des matrices, dsormais inutilises
hipFree(cell);
}
__global__ void photoreceptorSamplingKernel1C(cv::cuda::PtrStepSz<u_char> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst,
Cone* conesArray, int conesArrayWidth, int /*conesArrayHeight*/) {
// Get our global thread ID
int xdst = blockIdx.x * blockDim.x + threadIdx.x;
int ydst = blockIdx.y * blockDim.y + threadIdx.y;
Cone cone = conesArray[(xdst + ydst * conesArrayWidth)];
if (cone.type == PHOTO_TYPE::NONE) {
return;
}
int x = cone.center_x;
int y = cone.center_y;
// if(xdst<imgDst.cols && ydst < imgDst.rows)
imgDst(ydst, xdst) = imgSrc(y, x);
}
__global__ void photoreceptorSamplingKernel3C(cv::cuda::PtrStepSz<uchar3> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst,
Cone* conesArray, int conesArrayWidth, int /*conesArrayHeight*/) {
// Get our global thread ID
int xdst = blockIdx.x * blockDim.x + threadIdx.x;
int ydst = blockIdx.y * blockDim.y + threadIdx.y;
Cone cone = conesArray[(xdst + ydst * conesArrayWidth)];
if (cone.type == PHOTO_TYPE::NONE) {
return;
}
int x = cone.center_x;
int y = cone.center_y;
uchar3 pixel = imgSrc(y, x);
switch (cone.type) {
case PHOTO_TYPE::S_CONE:
imgDst(ydst, xdst) = pixel.x;
break;
case PHOTO_TYPE::M_CONE:
imgDst(ydst, xdst) = pixel.y;
break;
case PHOTO_TYPE::L_CONE:
imgDst(ydst, xdst) = pixel.z;
break;
default:
break;
}
}
__global__ void multiConvolveKernel(cv::cuda::PtrStepSz<u_char> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst,
Ganglionar* cellsArray, int cellsArrayWidth, int cellsArrayHeight) {
// Get our global thread ID
int xdst = blockIdx.x * blockDim.x + threadIdx.x;
int ydst = blockIdx.y * blockDim.y + threadIdx.y;
int nbcols = imgSrc.cols;
int nbrows = imgSrc.rows;
Ganglionar cell = cellsArray[(xdst + ydst * cellsArrayWidth)];
if (cell.type == GC_RESPONSE_TYPE::NONE) {
imgDst(ydst, xdst) = 0;
return;
}
int x = cell.center_x;
int y = cell.center_y;
float in_radius_squarred = cell.intern_radius * cell.intern_radius;
float ex_radius_squarred = cell.extern_radius * cell.extern_radius;
int xi;
int yi;
int value_center = 0;
int value_ext = 0;
int nbCenter = 0;
int nbOut = 0;
if (ex_radius_squarred < ConeRadiusSquared) {
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_center += imgSrc(y, x);
} else {
value_center -= imgSrc(y, x);
}
nbCenter = 1;
imgDst(ydst, xdst) = 10;
return;
} else if (ex_radius_squarred < MaxPolarRadiusSquared) {
// If the kernel are too small, use polar loop
int inside_cones = in_radius_squarred / ConeRadiusSquared;
int all_cones = ex_radius_squarred / ConeRadiusSquared;
int cone_index = 0;
while (cone_index < inside_cones) {
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_center += imgSrc(y + PolarPixelYMappingE[cone_index], x + PolarPixelXMappingE[cone_index]);
} else {
value_center -= imgSrc(y + PolarPixelYMappingE[cone_index], x + PolarPixelXMappingE[cone_index]);
}
nbCenter++;
cone_index++;
};
while (cone_index < all_cones) {
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_ext -= imgSrc(y + PolarPixelYMappingE[cone_index], x + PolarPixelXMappingE[cone_index]);
} else {
value_ext += imgSrc(y + PolarPixelYMappingE[cone_index], x + PolarPixelXMappingE[cone_index]);
}
nbOut++;
cone_index++;
};
} else {
for (xi = -cell.extern_radius; xi <= cell.extern_radius; xi++) {
for (yi = -cell.extern_radius; yi <= cell.extern_radius; yi++) {
if (x + xi > 0 && x + xi < nbcols && y + yi > 0 && y + yi < nbrows) {
if (xi * xi + yi * yi <= in_radius_squarred) { // if we are in the radius
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_center += imgSrc(y + yi, x + xi);
} else {
value_center -= imgSrc(y + yi, x + xi);
}
nbCenter++;
} else if (xi * xi + yi * yi <= ex_radius_squarred) {
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_ext -= imgSrc(y + yi, x + xi);
} else {
value_ext += imgSrc(y + yi, x + xi);
}
nbOut++;
}
} else {
// receptive field outside cone map
imgDst(ydst, xdst) = 255;
return;
}
}
}
}
int total_value;
// the ganglionar response is centred on 128
// [0,128[ low pulsing frequencies
// ]128,255] high pulsing frequencies
if (nbOut == 0) {
total_value = value_center / (float)nbCenter / 2.0 + 128;
} else if (nbCenter == 0) {
total_value = 128;
} else {
total_value = (value_center / (float)nbCenter + value_ext / (float)nbOut) / 2.0 + 128; //*cell.extern_radius;
// total_value = (value_center/(float)nbCenter + value_ext/(float)nbOut)/2.0;//*cell.extern_radius;
}
if (total_value < 0) {
total_value = 0;
} else if (total_value > 255) {
total_value = 255;
}
if (xdst < imgDst.cols && ydst < imgDst.rows)
imgDst(ydst, xdst) = total_value;
}
__global__ void legacyMultiConvolveKernel(cv::cuda::PtrStepSz<u_char> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst,
Ganglionar* cellsArray, int cellsArrayWidth, int cellsArrayHeight) {
// Get our global thread ID
int xdst = blockIdx.x * blockDim.x + threadIdx.x;
int ydst = blockIdx.y * blockDim.y + threadIdx.y;
int nbcols = imgSrc.cols;
int nbrows = imgSrc.rows;
Ganglionar cell = cellsArray[(xdst + ydst * cellsArrayWidth)];
if (cell.type == GC_RESPONSE_TYPE::NONE) {
imgDst(ydst, xdst) = 0;
}
int x = cell.center_x;
int y = cell.center_y;
int in_radius_squarred = cell.intern_radius * cell.intern_radius;
int ex_radius_squarred = cell.extern_radius * cell.extern_radius;
int xi;
int yi;
int value_center = 0;
int value_ext = 0;
int nbCenter = 0;
int nbOut = 0;
if (ex_radius_squarred == 1) {
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_center += (imgSrc(y, x) - 128);
} else {
value_center -= (imgSrc(y, x) - 128);
}
nbCenter = 1;
} else {
for (xi = -cell.extern_radius; xi <= cell.extern_radius; xi++) {
for (yi = -cell.extern_radius; yi <= cell.extern_radius; yi++) {
if (x + xi > 0 && x + xi < nbcols && y + yi > 0 && y + yi < nbrows) {
if (xi * xi + yi * yi < in_radius_squarred) { // if we are in the radius
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_center += (imgSrc(y + yi, x + xi) - 128);
} else {
value_center -= (imgSrc(y + yi, x + xi) - 128);
}
nbCenter++;
} else if (xi * xi + yi * yi < ex_radius_squarred) {
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_ext -= (imgSrc(y + yi, x + xi) - 128);
} else {
value_ext += (imgSrc(y + yi, x + xi) - 128);
}
nbOut++;
}
}
}
}
}
int total_value;
if (nbOut == 0) {
nbOut = 1;
}
if (nbCenter == 0) {
total_value = 128;
} else {
total_value = 128 + (value_center / (float)nbCenter + value_ext / (float)nbOut) / 2.0; //*cell.extern_radius;
}
if (total_value < 0) {
total_value = 0;
} else if (total_value > 255) {
total_value = 255;
}
if (xdst < imgDst.cols && ydst < imgDst.rows)
imgDst(ydst, xdst) = total_value;
}
__global__ void directionSelectiveKernel(cv::cuda::PtrStepSz<u_char> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst,
cv::cuda::PtrStepSz<u_char> imgPrev, Point* directiveMappingSrc,
Point* directiveMappingDst, int size) {
// Get our global thread ID
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= size) {
return;
}
Point pointSrc = directiveMappingSrc[id];
Point pointDst = directiveMappingDst[id];
int xdst = pointSrc.x;
int ydst = pointSrc.y;
int nbcols = imgSrc.cols;
int nbrows = imgSrc.rows;
int type = xdst % 4; // There are 4 types, to top, left right bottom
int response = 0;
int delta = 0;
if (xdst % 2 == 0) {
delta += 1;
} else {
delta -= 1;
}
int dx = xdst + delta;
int dy = ydst + delta;
if (type < 2 && dx < nbcols && dx >= 0) {
response = ((int)imgPrev(ydst, dx) + (int)imgSrc(ydst, xdst)) -
((int)imgSrc(ydst, dx) + (int)imgPrev(ydst, xdst)); // on directive
} else if (dy < nbrows && dy >= 0) {
response = ((int)imgPrev(dy, xdst) + (int)imgSrc(ydst, xdst)) -
((int)imgSrc(dy, xdst) + (int)imgPrev(ydst, xdst)); // on directive
}
if (response < 0) {
response = 0;
} else if (response > 255) {
response = 255;
}
imgDst(pointDst.y, pointDst.x) = response;
}
void photoreceptorSampling1C(cv::cuda::PtrStepSz<uchar> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst, Cone* coneArrayGPU,
int conesWidth, int conesHeight, hipStream_t stream) {
dim3 grid, block;
// Number of threads in each thread block
block.x = BLOCK_SIZE;
block.y = BLOCK_SIZE;
grid.x = (int)ceil((float)(imgDst.cols) / block.x);
grid.y = (int)ceil((float)(imgDst.rows) / block.y);
hipLaunchKernelGGL(( photoreceptorSamplingKernel1C), dim3(grid), dim3(block), 0, 0, imgSrc, imgDst, coneArrayGPU, conesWidth, conesHeight);
}
void photoreceptorSampling3C(cv::cuda::PtrStepSz<uchar3> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst, Cone* coneArrayGPU,
int conesWidth, int conesHeight, hipStream_t stream) {
dim3 grid, block;
// Number of threads in each thread block
block.x = BLOCK_SIZE;
block.y = BLOCK_SIZE;
grid.x = (int)ceil((float)(imgDst.cols) / block.x);
grid.y = (int)ceil((float)(imgDst.rows) / block.y);
hipLaunchKernelGGL(( photoreceptorSamplingKernel3C), dim3(grid), dim3(block), 0, 0, imgSrc, imgDst, coneArrayGPU, conesWidth, conesHeight);
}
void multiConvolve(cv::cuda::PtrStepSz<u_char> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst, Ganglionar* cellsArrayGPU,
int cellsArrayWidth, int cellsArrayHeight, hipStream_t stream) {
dim3 grid, block;
// Number of threads in each thread block
block.x = BLOCK_SIZE;
block.y = BLOCK_SIZE;
grid.x = (int)ceil((float)(imgDst.cols) / block.x);
grid.y = (int)ceil((float)(imgDst.rows) / block.y);
hipLaunchKernelGGL(( multiConvolveKernel), dim3(grid), dim3(block), 0, 0, imgSrc, imgDst, cellsArrayGPU, cellsArrayWidth, cellsArrayHeight);
}
void directionSelectiveComputation(cv::cuda::PtrStepSz<u_char> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst,
cv::cuda::PtrStepSz<u_char> imgPrev, Point* directiveMappingSrc,
Point* directiveMappingDst, int directiveMappingSize, hipStream_t stream) {
dim3 grid, block;
// Number of threads in each thread block
block.x = 256;
grid.x = (int)ceil((float)(directiveMappingSize) / (float)block.x);
hipLaunchKernelGGL(( directionSelectiveKernel), dim3(grid), dim3(block), 0, 0, imgSrc, imgDst, imgPrev, directiveMappingSrc, directiveMappingDst,
directiveMappingSize);
}
} // namespace gpu
| 570732a51ab27b192fe2fde340c46dc49a074431.cu | #include <builtin_types.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <unistd.h>
#include "cuda_retina_kernels.cuh"
#include "cuda_runtime_api.h"
namespace gpu {
constexpr std::size_t PolarPixelMappingSize = 21;
constexpr float MaxPolarRadiusSquared = 2.0 * 2.0 + 1.0;
constexpr float ConeRadiusSquared = 0.5 * 0.5;
__constant__ int PolarPixelXMappingE[PolarPixelMappingSize] = {0, 1, 0, -1, 0, 1, -1, -1, 1, 2, 0,
-2, 0, 2, 1, -1, -2, -2, -1, 1, 2};
__constant__ int PolarPixelYMappingE[PolarPixelMappingSize] = {0, 0, 1, 0, -1, 1, 1, -1, -1, 0, 2,
0, -2, 1, 2, 2, 1, 1, -2, -2, -1};
Ganglionar* loadCellsArrayToGPU(Ganglionar* cellsArrayHost, int width, int height) {
int size = width * height * sizeof(Ganglionar);
Ganglionar* cellsArrayDevice;
cudaMalloc((void**)&cellsArrayDevice, size);
cudaMemcpy(cellsArrayDevice, cellsArrayHost, size, cudaMemcpyHostToDevice);
return cellsArrayDevice;
}
void unloadArray(Ganglionar* cell) {
// destruction des matrices, désormais inutilisées
cudaFree(cell);
}
__global__ void photoreceptorSamplingKernel1C(cv::cuda::PtrStepSz<u_char> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst,
Cone* conesArray, int conesArrayWidth, int /*conesArrayHeight*/) {
// Get our global thread ID
int xdst = blockIdx.x * blockDim.x + threadIdx.x;
int ydst = blockIdx.y * blockDim.y + threadIdx.y;
Cone cone = conesArray[(xdst + ydst * conesArrayWidth)];
if (cone.type == PHOTO_TYPE::NONE) {
return;
}
int x = cone.center_x;
int y = cone.center_y;
// if(xdst<imgDst.cols && ydst < imgDst.rows)
imgDst(ydst, xdst) = imgSrc(y, x);
}
__global__ void photoreceptorSamplingKernel3C(cv::cuda::PtrStepSz<uchar3> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst,
Cone* conesArray, int conesArrayWidth, int /*conesArrayHeight*/) {
// Get our global thread ID
int xdst = blockIdx.x * blockDim.x + threadIdx.x;
int ydst = blockIdx.y * blockDim.y + threadIdx.y;
Cone cone = conesArray[(xdst + ydst * conesArrayWidth)];
if (cone.type == PHOTO_TYPE::NONE) {
return;
}
int x = cone.center_x;
int y = cone.center_y;
uchar3 pixel = imgSrc(y, x);
switch (cone.type) {
case PHOTO_TYPE::S_CONE:
imgDst(ydst, xdst) = pixel.x;
break;
case PHOTO_TYPE::M_CONE:
imgDst(ydst, xdst) = pixel.y;
break;
case PHOTO_TYPE::L_CONE:
imgDst(ydst, xdst) = pixel.z;
break;
default:
break;
}
}
__global__ void multiConvolveKernel(cv::cuda::PtrStepSz<u_char> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst,
Ganglionar* cellsArray, int cellsArrayWidth, int cellsArrayHeight) {
// Get our global thread ID
int xdst = blockIdx.x * blockDim.x + threadIdx.x;
int ydst = blockIdx.y * blockDim.y + threadIdx.y;
int nbcols = imgSrc.cols;
int nbrows = imgSrc.rows;
Ganglionar cell = cellsArray[(xdst + ydst * cellsArrayWidth)];
if (cell.type == GC_RESPONSE_TYPE::NONE) {
imgDst(ydst, xdst) = 0;
return;
}
int x = cell.center_x;
int y = cell.center_y;
float in_radius_squarred = cell.intern_radius * cell.intern_radius;
float ex_radius_squarred = cell.extern_radius * cell.extern_radius;
int xi;
int yi;
int value_center = 0;
int value_ext = 0;
int nbCenter = 0;
int nbOut = 0;
if (ex_radius_squarred < ConeRadiusSquared) {
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_center += imgSrc(y, x);
} else {
value_center -= imgSrc(y, x);
}
nbCenter = 1;
imgDst(ydst, xdst) = 10;
return;
} else if (ex_radius_squarred < MaxPolarRadiusSquared) {
// If the kernel are too small, use polar loop
int inside_cones = in_radius_squarred / ConeRadiusSquared;
int all_cones = ex_radius_squarred / ConeRadiusSquared;
int cone_index = 0;
while (cone_index < inside_cones) {
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_center += imgSrc(y + PolarPixelYMappingE[cone_index], x + PolarPixelXMappingE[cone_index]);
} else {
value_center -= imgSrc(y + PolarPixelYMappingE[cone_index], x + PolarPixelXMappingE[cone_index]);
}
nbCenter++;
cone_index++;
};
while (cone_index < all_cones) {
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_ext -= imgSrc(y + PolarPixelYMappingE[cone_index], x + PolarPixelXMappingE[cone_index]);
} else {
value_ext += imgSrc(y + PolarPixelYMappingE[cone_index], x + PolarPixelXMappingE[cone_index]);
}
nbOut++;
cone_index++;
};
} else {
for (xi = -cell.extern_radius; xi <= cell.extern_radius; xi++) {
for (yi = -cell.extern_radius; yi <= cell.extern_radius; yi++) {
if (x + xi > 0 && x + xi < nbcols && y + yi > 0 && y + yi < nbrows) {
if (xi * xi + yi * yi <= in_radius_squarred) { // if we are in the radius
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_center += imgSrc(y + yi, x + xi);
} else {
value_center -= imgSrc(y + yi, x + xi);
}
nbCenter++;
} else if (xi * xi + yi * yi <= ex_radius_squarred) {
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_ext -= imgSrc(y + yi, x + xi);
} else {
value_ext += imgSrc(y + yi, x + xi);
}
nbOut++;
}
} else {
// receptive field outside cone map
imgDst(ydst, xdst) = 255;
return;
}
}
}
}
int total_value;
// the ganglionar response is centred on 128
// [0,128[ low pulsing frequencies
// ]128,255] high pulsing frequencies
if (nbOut == 0) {
total_value = value_center / (float)nbCenter / 2.0 + 128;
} else if (nbCenter == 0) {
total_value = 128;
} else {
total_value = (value_center / (float)nbCenter + value_ext / (float)nbOut) / 2.0 + 128; //*cell.extern_radius;
// total_value = (value_center/(float)nbCenter + value_ext/(float)nbOut)/2.0;//*cell.extern_radius;
}
if (total_value < 0) {
total_value = 0;
} else if (total_value > 255) {
total_value = 255;
}
if (xdst < imgDst.cols && ydst < imgDst.rows)
imgDst(ydst, xdst) = total_value;
}
__global__ void legacyMultiConvolveKernel(cv::cuda::PtrStepSz<u_char> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst,
Ganglionar* cellsArray, int cellsArrayWidth, int cellsArrayHeight) {
// Get our global thread ID
int xdst = blockIdx.x * blockDim.x + threadIdx.x;
int ydst = blockIdx.y * blockDim.y + threadIdx.y;
int nbcols = imgSrc.cols;
int nbrows = imgSrc.rows;
Ganglionar cell = cellsArray[(xdst + ydst * cellsArrayWidth)];
if (cell.type == GC_RESPONSE_TYPE::NONE) {
imgDst(ydst, xdst) = 0;
}
int x = cell.center_x;
int y = cell.center_y;
int in_radius_squarred = cell.intern_radius * cell.intern_radius;
int ex_radius_squarred = cell.extern_radius * cell.extern_radius;
int xi;
int yi;
int value_center = 0;
int value_ext = 0;
int nbCenter = 0;
int nbOut = 0;
if (ex_radius_squarred == 1) {
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_center += (imgSrc(y, x) - 128);
} else {
value_center -= (imgSrc(y, x) - 128);
}
nbCenter = 1;
} else {
for (xi = -cell.extern_radius; xi <= cell.extern_radius; xi++) {
for (yi = -cell.extern_radius; yi <= cell.extern_radius; yi++) {
if (x + xi > 0 && x + xi < nbcols && y + yi > 0 && y + yi < nbrows) {
if (xi * xi + yi * yi < in_radius_squarred) { // if we are in the radius
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_center += (imgSrc(y + yi, x + xi) - 128);
} else {
value_center -= (imgSrc(y + yi, x + xi) - 128);
}
nbCenter++;
} else if (xi * xi + yi * yi < ex_radius_squarred) {
if (cell.type == GC_RESPONSE_TYPE::ON) {
value_ext -= (imgSrc(y + yi, x + xi) - 128);
} else {
value_ext += (imgSrc(y + yi, x + xi) - 128);
}
nbOut++;
}
}
}
}
}
int total_value;
if (nbOut == 0) {
nbOut = 1;
}
if (nbCenter == 0) {
total_value = 128;
} else {
total_value = 128 + (value_center / (float)nbCenter + value_ext / (float)nbOut) / 2.0; //*cell.extern_radius;
}
if (total_value < 0) {
total_value = 0;
} else if (total_value > 255) {
total_value = 255;
}
if (xdst < imgDst.cols && ydst < imgDst.rows)
imgDst(ydst, xdst) = total_value;
}
__global__ void directionSelectiveKernel(cv::cuda::PtrStepSz<u_char> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst,
cv::cuda::PtrStepSz<u_char> imgPrev, Point* directiveMappingSrc,
Point* directiveMappingDst, int size) {
// Get our global thread ID
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= size) {
return;
}
Point pointSrc = directiveMappingSrc[id];
Point pointDst = directiveMappingDst[id];
int xdst = pointSrc.x;
int ydst = pointSrc.y;
int nbcols = imgSrc.cols;
int nbrows = imgSrc.rows;
int type = xdst % 4; // There are 4 types, to top, left right bottom
int response = 0;
int delta = 0;
if (xdst % 2 == 0) {
delta += 1;
} else {
delta -= 1;
}
int dx = xdst + delta;
int dy = ydst + delta;
if (type < 2 && dx < nbcols && dx >= 0) {
response = ((int)imgPrev(ydst, dx) + (int)imgSrc(ydst, xdst)) -
((int)imgSrc(ydst, dx) + (int)imgPrev(ydst, xdst)); // on directive
} else if (dy < nbrows && dy >= 0) {
response = ((int)imgPrev(dy, xdst) + (int)imgSrc(ydst, xdst)) -
((int)imgSrc(dy, xdst) + (int)imgPrev(ydst, xdst)); // on directive
}
if (response < 0) {
response = 0;
} else if (response > 255) {
response = 255;
}
imgDst(pointDst.y, pointDst.x) = response;
}
void photoreceptorSampling1C(cv::cuda::PtrStepSz<uchar> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst, Cone* coneArrayGPU,
int conesWidth, int conesHeight, cudaStream_t stream) {
dim3 grid, block;
// Number of threads in each thread block
block.x = BLOCK_SIZE;
block.y = BLOCK_SIZE;
grid.x = (int)ceil((float)(imgDst.cols) / block.x);
grid.y = (int)ceil((float)(imgDst.rows) / block.y);
photoreceptorSamplingKernel1C<<<grid, block>>>(imgSrc, imgDst, coneArrayGPU, conesWidth, conesHeight);
}
void photoreceptorSampling3C(cv::cuda::PtrStepSz<uchar3> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst, Cone* coneArrayGPU,
int conesWidth, int conesHeight, cudaStream_t stream) {
dim3 grid, block;
// Number of threads in each thread block
block.x = BLOCK_SIZE;
block.y = BLOCK_SIZE;
grid.x = (int)ceil((float)(imgDst.cols) / block.x);
grid.y = (int)ceil((float)(imgDst.rows) / block.y);
photoreceptorSamplingKernel3C<<<grid, block>>>(imgSrc, imgDst, coneArrayGPU, conesWidth, conesHeight);
}
void multiConvolve(cv::cuda::PtrStepSz<u_char> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst, Ganglionar* cellsArrayGPU,
int cellsArrayWidth, int cellsArrayHeight, cudaStream_t stream) {
dim3 grid, block;
// Number of threads in each thread block
block.x = BLOCK_SIZE;
block.y = BLOCK_SIZE;
grid.x = (int)ceil((float)(imgDst.cols) / block.x);
grid.y = (int)ceil((float)(imgDst.rows) / block.y);
multiConvolveKernel<<<grid, block>>>(imgSrc, imgDst, cellsArrayGPU, cellsArrayWidth, cellsArrayHeight);
}
void directionSelectiveComputation(cv::cuda::PtrStepSz<u_char> imgSrc, cv::cuda::PtrStepSz<u_char> imgDst,
cv::cuda::PtrStepSz<u_char> imgPrev, Point* directiveMappingSrc,
Point* directiveMappingDst, int directiveMappingSize, cudaStream_t stream) {
dim3 grid, block;
// Number of threads in each thread block
block.x = 256;
grid.x = (int)ceil((float)(directiveMappingSize) / (float)block.x);
directionSelectiveKernel<<<grid, block>>>(imgSrc, imgDst, imgPrev, directiveMappingSrc, directiveMappingDst,
directiveMappingSize);
}
} // namespace gpu
|
30067cb2f6b2b961c35e81f7d3c76edcde24cedc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel_y2;
int xdim0_advec_mom_kernel_y2_h = -1;
__constant__ int ydim0_advec_mom_kernel_y2;
int ydim0_advec_mom_kernel_y2_h = -1;
__constant__ int xdim1_advec_mom_kernel_y2;
int xdim1_advec_mom_kernel_y2_h = -1;
__constant__ int ydim1_advec_mom_kernel_y2;
int ydim1_advec_mom_kernel_y2_h = -1;
__constant__ int xdim2_advec_mom_kernel_y2;
int xdim2_advec_mom_kernel_y2_h = -1;
__constant__ int ydim2_advec_mom_kernel_y2;
int ydim2_advec_mom_kernel_y2_h = -1;
__constant__ int xdim3_advec_mom_kernel_y2;
int xdim3_advec_mom_kernel_y2_h = -1;
__constant__ int ydim3_advec_mom_kernel_y2;
int ydim3_advec_mom_kernel_y2_h = -1;
__constant__ int xdim4_advec_mom_kernel_y2;
int xdim4_advec_mom_kernel_y2_h = -1;
__constant__ int ydim4_advec_mom_kernel_y2;
int ydim4_advec_mom_kernel_y2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel_y2 * (y) + \
xdim0_advec_mom_kernel_y2 * ydim0_advec_mom_kernel_y2 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel_y2 * (y) + \
xdim1_advec_mom_kernel_y2 * ydim1_advec_mom_kernel_y2 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_mom_kernel_y2 * (y) + \
xdim2_advec_mom_kernel_y2 * ydim2_advec_mom_kernel_y2 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_mom_kernel_y2 * (y) + \
xdim3_advec_mom_kernel_y2 * ydim3_advec_mom_kernel_y2 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_advec_mom_kernel_y2 * (y) + \
xdim4_advec_mom_kernel_y2 * ydim4_advec_mom_kernel_y2 * (z))
// user function
__device__
inline void
advec_mom_kernel_y2_gpu(double *pre_vol, double *post_vol,
const double *volume, const double *vol_flux_x,
const double *vol_flux_y) {
post_vol[OPS_ACC1(0, 0, 0)] = volume[OPS_ACC2(0, 0, 0)] +
vol_flux_x[OPS_ACC3(1, 0, 0)] -
vol_flux_x[OPS_ACC3(0, 0, 0)];
pre_vol[OPS_ACC0(0, 0, 0)] = post_vol[OPS_ACC1(0, 0, 0)] +
vol_flux_y[OPS_ACC4(0, 1, 0)] -
vol_flux_y[OPS_ACC4(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
__global__ void ops_advec_mom_kernel_y2(double *__restrict arg0,
double *__restrict arg1,
const double *__restrict arg2,
const double *__restrict arg3,
const double *__restrict arg4,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_y2 +
idx_z * 1 * 1 * xdim0_advec_mom_kernel_y2 * ydim0_advec_mom_kernel_y2;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_y2 +
idx_z * 1 * 1 * xdim1_advec_mom_kernel_y2 * ydim1_advec_mom_kernel_y2;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel_y2 +
idx_z * 1 * 1 * xdim2_advec_mom_kernel_y2 * ydim2_advec_mom_kernel_y2;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel_y2 +
idx_z * 1 * 1 * xdim3_advec_mom_kernel_y2 * ydim3_advec_mom_kernel_y2;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_mom_kernel_y2 +
idx_z * 1 * 1 * xdim4_advec_mom_kernel_y2 * ydim4_advec_mom_kernel_y2;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel_y2_gpu(arg0, arg1, arg2, arg3, arg4);
}
}
// host stub function
// Host stub for the advec_mom_kernel_y2 OPS parallel loop (HIP backend,
// auto-generated by ops.py -- prefer fixing the generator over hand edits).
// Steps: (1) compute the locally owned 3D iteration range (MPI-decomposed
// or whole range), (2) re-upload the __constant__ leading-dimension symbols
// when any dataset shape changed, (3) compute each dataset's device base
// pointer from its stencil stride / base / halo offsets, (4) perform halo
// and host<->device exchanges, (5) launch the device wrapper over a
// (x_size, y_size, z_size) space, (6) update timing and dirty bits.
void ops_par_loop_advec_mom_kernel_y2(char const *name, ops_block block,
                                      int dim, int *range, ops_arg arg0,
                                      ops_arg arg1, ops_arg arg2, ops_arg arg3,
                                      ops_arg arg4) {
  // Timing
  double t1, t2, c1, c2;
  ops_arg args[5] = {arg0, arg1, arg2, arg3, arg4};
#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 5, range, 22))
    return;
#endif
  if (OPS_diags > 1) {
    ops_timing_realloc(22, "advec_mom_kernel_y2");
    OPS_kernels[22].count++;
    ops_timers_core(&c1, &t1);
  }
  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#ifdef OPS_MPI
  // clip the requested global range against this rank's decomposition;
  // ranks that own no part of the block return early
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 3; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif
  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);
  // leading dimensions of each dataset, mirrored in device __constant__
  // memory; only re-uploaded when they differ from the cached *_h copies
  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0];
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0];
  int ydim3 = args[3].dat->size[1];
  int xdim4 = args[4].dat->size[0];
  int ydim4 = args[4].dat->size[1];
  if (xdim0 != xdim0_advec_mom_kernel_y2_h ||
      ydim0 != ydim0_advec_mom_kernel_y2_h ||
      xdim1 != xdim1_advec_mom_kernel_y2_h ||
      ydim1 != ydim1_advec_mom_kernel_y2_h ||
      xdim2 != xdim2_advec_mom_kernel_y2_h ||
      ydim2 != ydim2_advec_mom_kernel_y2_h ||
      xdim3 != xdim3_advec_mom_kernel_y2_h ||
      ydim3 != ydim3_advec_mom_kernel_y2_h ||
      xdim4 != xdim4_advec_mom_kernel_y2_h ||
      ydim4 != ydim4_advec_mom_kernel_y2_h) {
    hipMemcpyToSymbol(xdim0_advec_mom_kernel_y2, &xdim0, sizeof(int));
    xdim0_advec_mom_kernel_y2_h = xdim0;
    hipMemcpyToSymbol(ydim0_advec_mom_kernel_y2, &ydim0, sizeof(int));
    ydim0_advec_mom_kernel_y2_h = ydim0;
    hipMemcpyToSymbol(xdim1_advec_mom_kernel_y2, &xdim1, sizeof(int));
    xdim1_advec_mom_kernel_y2_h = xdim1;
    hipMemcpyToSymbol(ydim1_advec_mom_kernel_y2, &ydim1, sizeof(int));
    ydim1_advec_mom_kernel_y2_h = ydim1;
    hipMemcpyToSymbol(xdim2_advec_mom_kernel_y2, &xdim2, sizeof(int));
    xdim2_advec_mom_kernel_y2_h = xdim2;
    hipMemcpyToSymbol(ydim2_advec_mom_kernel_y2, &ydim2, sizeof(int));
    ydim2_advec_mom_kernel_y2_h = ydim2;
    hipMemcpyToSymbol(xdim3_advec_mom_kernel_y2, &xdim3, sizeof(int));
    xdim3_advec_mom_kernel_y2_h = xdim3;
    hipMemcpyToSymbol(ydim3_advec_mom_kernel_y2, &ydim3, sizeof(int));
    ydim3_advec_mom_kernel_y2_h = ydim3;
    hipMemcpyToSymbol(xdim4_advec_mom_kernel_y2, &xdim4, sizeof(int));
    xdim4_advec_mom_kernel_y2_h = xdim4;
    hipMemcpyToSymbol(ydim4_advec_mom_kernel_y2, &ydim4, sizeof(int));
    ydim4_advec_mom_kernel_y2_h = ydim4;
  }
  // one z-slice per block in z; ceil-division for x/y
  dim3 grid((x_size - 1) / OPS_block_size_x + 1,
            (y_size - 1) / OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  int dat2 = args[2].dat->elem_size;
  int dat3 = args[3].dat->elem_size;
  int dat4 = args[4].dat->elem_size;
  char *p_a[5];
  // set up initial pointers
  int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[0].dat->d_m[d];
#endif
  // byte offset of the iteration-range origin within dataset 0
  int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
                          args[0].dat->base[0] - d_m[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
                                         args[0].dat->base[1] - d_m[1]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
              (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
               d_m[2]);
  p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[1].dat->d_m[d];
#endif
  int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
                          args[1].dat->base[0] - d_m[0]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
                                         args[1].dat->base[1] - d_m[1]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
              (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
               d_m[2]);
  p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[2].dat->d_m[d];
#endif
  int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
                          args[2].dat->base[0] - d_m[0]);
  base2 = base2 +
          dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
                                         args[2].dat->base[1] - d_m[1]);
  base2 = base2 +
          dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
              (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
               d_m[2]);
  p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[3].dat->d_m[d];
#endif
  int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
                          args[3].dat->base[0] - d_m[0]);
  base3 = base3 +
          dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
                                         args[3].dat->base[1] - d_m[1]);
  base3 = base3 +
          dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
              (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
               d_m[2]);
  p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[4].dat->d_m[d];
#endif
  int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
                          args[4].dat->base[0] - d_m[0]);
  base4 = base4 +
          dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
                                         args[4].dat->base[1] - d_m[1]);
  base4 = base4 +
          dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
              (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
               d_m[2]);
  p_a[4] = (char *)args[4].data_d + base4;
  ops_H_D_exchanges_device(args, 5);
  ops_halo_exchanges(args, 5, range);
  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[22].mpi_time += t2 - t1;
  }
  // call kernel wrapper function, passing in pointers to data
  hipLaunchKernelGGL(( ops_advec_mom_kernel_y2), dim3(grid), dim3(tblock), 0, 0,
      (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
      (double *)p_a[4], x_size, y_size, z_size);
  if (OPS_diags > 1) {
    // synchronize only when timing, so the kernel cost is attributed here
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[22].time += t1 - t2;
  }
  ops_set_dirtybit_device(args, 5);
  // args 0 and 1 are written by the kernel: mark their halos stale
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);
  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[22].mpi_time += t2 - t1;
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg4);
  }
}
| 30067cb2f6b2b961c35e81f7d3c76edcde24cedc.cu | //
// auto-generated by ops.py
//
// Leading dimensions (x,y) of the 5 datasets used by advec_mom_kernel_y2.
// The __constant__ copies are read by the device wrapper for index
// arithmetic; the *_h ints are host-side caches used to skip redundant
// cudaMemcpyToSymbol uploads (-1 == never uploaded).
__constant__ int xdim0_advec_mom_kernel_y2;
int xdim0_advec_mom_kernel_y2_h = -1;
__constant__ int ydim0_advec_mom_kernel_y2;
int ydim0_advec_mom_kernel_y2_h = -1;
__constant__ int xdim1_advec_mom_kernel_y2;
int xdim1_advec_mom_kernel_y2_h = -1;
__constant__ int ydim1_advec_mom_kernel_y2;
int ydim1_advec_mom_kernel_y2_h = -1;
__constant__ int xdim2_advec_mom_kernel_y2;
int xdim2_advec_mom_kernel_y2_h = -1;
__constant__ int ydim2_advec_mom_kernel_y2;
int ydim2_advec_mom_kernel_y2_h = -1;
__constant__ int xdim3_advec_mom_kernel_y2;
int xdim3_advec_mom_kernel_y2_h = -1;
__constant__ int ydim3_advec_mom_kernel_y2;
int ydim3_advec_mom_kernel_y2_h = -1;
__constant__ int xdim4_advec_mom_kernel_y2;
int xdim4_advec_mom_kernel_y2_h = -1;
__constant__ int ydim4_advec_mom_kernel_y2;
int ydim4_advec_mom_kernel_y2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
// Row-major 3D indexing macros: flatten a relative (x,y,z) offset using each
// dataset's device-resident leading dimensions uploaded above.
#define OPS_ACC0(x, y, z)                                                      \
  (x + xdim0_advec_mom_kernel_y2 * (y) +                                       \
   xdim0_advec_mom_kernel_y2 * ydim0_advec_mom_kernel_y2 * (z))
#define OPS_ACC1(x, y, z)                                                      \
  (x + xdim1_advec_mom_kernel_y2 * (y) +                                       \
   xdim1_advec_mom_kernel_y2 * ydim1_advec_mom_kernel_y2 * (z))
#define OPS_ACC2(x, y, z)                                                      \
  (x + xdim2_advec_mom_kernel_y2 * (y) +                                       \
   xdim2_advec_mom_kernel_y2 * ydim2_advec_mom_kernel_y2 * (z))
#define OPS_ACC3(x, y, z)                                                      \
  (x + xdim3_advec_mom_kernel_y2 * (y) +                                       \
   xdim3_advec_mom_kernel_y2 * ydim3_advec_mom_kernel_y2 * (z))
#define OPS_ACC4(x, y, z)                                                      \
  (x + xdim4_advec_mom_kernel_y2 * (y) +                                       \
   xdim4_advec_mom_kernel_y2 * ydim4_advec_mom_kernel_y2 * (z))
// user function
// Updates the pre/post advection volumes for the y-direction sweep of the
// momentum advection:
//   post_vol = volume + x-direction volume-flux divergence
//   pre_vol  = post_vol + y-direction volume-flux divergence
// All accesses are relative to the current grid point via the OPS_ACC macros.
__device__
    inline void
    advec_mom_kernel_y2_gpu(double *pre_vol, double *post_vol,
                            const double *volume, const double *vol_flux_x,
                            const double *vol_flux_y) {
  post_vol[OPS_ACC1(0, 0, 0)] = volume[OPS_ACC2(0, 0, 0)] +
                                vol_flux_x[OPS_ACC3(1, 0, 0)] -
                                vol_flux_x[OPS_ACC3(0, 0, 0)];
  pre_vol[OPS_ACC0(0, 0, 0)] = post_vol[OPS_ACC1(0, 0, 0)] +
                               vol_flux_y[OPS_ACC4(0, 1, 0)] -
                               vol_flux_y[OPS_ACC4(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
// Device wrapper: one thread per (x,y,z) point of the iteration range.
// Each dataset pointer is offset to this thread's point using the
// __constant__ leading dimensions (unit stride in every direction here),
// then the user kernel runs under a bounds guard since the grid may
// overshoot the requested (size0, size1, size2) range.
__global__ void ops_advec_mom_kernel_y2(double *__restrict arg0,
                                        double *__restrict arg1,
                                        const double *__restrict arg2,
                                        const double *__restrict arg3,
                                        const double *__restrict arg4,
                                        int size0, int size1, int size2) {
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_y2 +
          idx_z * 1 * 1 * xdim0_advec_mom_kernel_y2 * ydim0_advec_mom_kernel_y2;
  arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_y2 +
          idx_z * 1 * 1 * xdim1_advec_mom_kernel_y2 * ydim1_advec_mom_kernel_y2;
  arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel_y2 +
          idx_z * 1 * 1 * xdim2_advec_mom_kernel_y2 * ydim2_advec_mom_kernel_y2;
  arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel_y2 +
          idx_z * 1 * 1 * xdim3_advec_mom_kernel_y2 * ydim3_advec_mom_kernel_y2;
  arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_mom_kernel_y2 +
          idx_z * 1 * 1 * xdim4_advec_mom_kernel_y2 * ydim4_advec_mom_kernel_y2;
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    advec_mom_kernel_y2_gpu(arg0, arg1, arg2, arg3, arg4);
  }
}
// host stub function
// Host stub for the advec_mom_kernel_y2 OPS parallel loop (CUDA backend,
// auto-generated by ops.py -- mirror any fix into the generator).
// Steps: (1) compute the locally owned 3D iteration range, (2) refresh the
// __constant__ leading-dimension symbols when a dataset shape changed,
// (3) compute each dataset's device base pointer, (4) perform halo and
// host<->device exchanges, (5) launch the device wrapper, (6) update
// timing and dirty bits.
// Fixes vs. the generated original: the kernel is only launched when the
// local range is non-empty (under MPI decomposition z_size may be 0, and a
// gridDim.z of 0 is an invalid launch configuration), and launch errors are
// surfaced immediately via cudaGetLastError instead of only at the next
// synchronizing call.
void ops_par_loop_advec_mom_kernel_y2(char const *name, ops_block block,
                                      int dim, int *range, ops_arg arg0,
                                      ops_arg arg1, ops_arg arg2, ops_arg arg3,
                                      ops_arg arg4) {
  // Timing
  double t1, t2, c1, c2;
  ops_arg args[5] = {arg0, arg1, arg2, arg3, arg4};
#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 5, range, 22))
    return;
#endif
  if (OPS_diags > 1) {
    ops_timing_realloc(22, "advec_mom_kernel_y2");
    OPS_kernels[22].count++;
    ops_timers_core(&c1, &t1);
  }
  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#ifdef OPS_MPI
  // clip the requested global range against this rank's decomposition
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 3; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif
  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);
  // leading dimensions of each dataset, mirrored in __constant__ memory;
  // only re-uploaded when they differ from the cached *_h host copies
  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0];
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0];
  int ydim3 = args[3].dat->size[1];
  int xdim4 = args[4].dat->size[0];
  int ydim4 = args[4].dat->size[1];
  if (xdim0 != xdim0_advec_mom_kernel_y2_h ||
      ydim0 != ydim0_advec_mom_kernel_y2_h ||
      xdim1 != xdim1_advec_mom_kernel_y2_h ||
      ydim1 != ydim1_advec_mom_kernel_y2_h ||
      xdim2 != xdim2_advec_mom_kernel_y2_h ||
      ydim2 != ydim2_advec_mom_kernel_y2_h ||
      xdim3 != xdim3_advec_mom_kernel_y2_h ||
      ydim3 != ydim3_advec_mom_kernel_y2_h ||
      xdim4 != xdim4_advec_mom_kernel_y2_h ||
      ydim4 != ydim4_advec_mom_kernel_y2_h) {
    cudaMemcpyToSymbol(xdim0_advec_mom_kernel_y2, &xdim0, sizeof(int));
    xdim0_advec_mom_kernel_y2_h = xdim0;
    cudaMemcpyToSymbol(ydim0_advec_mom_kernel_y2, &ydim0, sizeof(int));
    ydim0_advec_mom_kernel_y2_h = ydim0;
    cudaMemcpyToSymbol(xdim1_advec_mom_kernel_y2, &xdim1, sizeof(int));
    xdim1_advec_mom_kernel_y2_h = xdim1;
    cudaMemcpyToSymbol(ydim1_advec_mom_kernel_y2, &ydim1, sizeof(int));
    ydim1_advec_mom_kernel_y2_h = ydim1;
    cudaMemcpyToSymbol(xdim2_advec_mom_kernel_y2, &xdim2, sizeof(int));
    xdim2_advec_mom_kernel_y2_h = xdim2;
    cudaMemcpyToSymbol(ydim2_advec_mom_kernel_y2, &ydim2, sizeof(int));
    ydim2_advec_mom_kernel_y2_h = ydim2;
    cudaMemcpyToSymbol(xdim3_advec_mom_kernel_y2, &xdim3, sizeof(int));
    xdim3_advec_mom_kernel_y2_h = xdim3;
    cudaMemcpyToSymbol(ydim3_advec_mom_kernel_y2, &ydim3, sizeof(int));
    ydim3_advec_mom_kernel_y2_h = ydim3;
    cudaMemcpyToSymbol(xdim4_advec_mom_kernel_y2, &xdim4, sizeof(int));
    xdim4_advec_mom_kernel_y2_h = xdim4;
    cudaMemcpyToSymbol(ydim4_advec_mom_kernel_y2, &ydim4, sizeof(int));
    ydim4_advec_mom_kernel_y2_h = ydim4;
  }
  // one z-slice per block in z; ceil-division for x/y
  dim3 grid((x_size - 1) / OPS_block_size_x + 1,
            (y_size - 1) / OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  int dat2 = args[2].dat->elem_size;
  int dat3 = args[3].dat->elem_size;
  int dat4 = args[4].dat->elem_size;
  char *p_a[5];
  // set up initial pointers
  int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[0].dat->d_m[d];
#endif
  // byte offset of the iteration-range origin within each dataset
  int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
                          args[0].dat->base[0] - d_m[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
                                         args[0].dat->base[1] - d_m[1]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
              (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
               d_m[2]);
  p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[1].dat->d_m[d];
#endif
  int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
                          args[1].dat->base[0] - d_m[0]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
                                         args[1].dat->base[1] - d_m[1]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
              (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
               d_m[2]);
  p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[2].dat->d_m[d];
#endif
  int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
                          args[2].dat->base[0] - d_m[0]);
  base2 = base2 +
          dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
                                         args[2].dat->base[1] - d_m[1]);
  base2 = base2 +
          dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
              (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
               d_m[2]);
  p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[3].dat->d_m[d];
#endif
  int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
                          args[3].dat->base[0] - d_m[0]);
  base3 = base3 +
          dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
                                         args[3].dat->base[1] - d_m[1]);
  base3 = base3 +
          dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
              (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
               d_m[2]);
  p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[4].dat->d_m[d];
#endif
  int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
                          args[4].dat->base[0] - d_m[0]);
  base4 = base4 +
          dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
                                         args[4].dat->base[1] - d_m[1]);
  base4 = base4 +
          dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
              (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
               d_m[2]);
  p_a[4] = (char *)args[4].data_d + base4;
  ops_H_D_exchanges_device(args, 5);
  ops_halo_exchanges(args, 5, range);
  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[22].mpi_time += t2 - t1;
  }
  // call kernel wrapper function, passing in pointers to data;
  // skip the launch entirely when the local range is empty -- a zero grid
  // dimension would otherwise yield cudaErrorInvalidConfiguration
  if (x_size > 0 && y_size > 0 && z_size > 0)
    ops_advec_mom_kernel_y2<<<grid, tblock>>>(
        (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
        (double *)p_a[4], x_size, y_size, z_size);
  // surface launch-configuration errors immediately rather than at the
  // next synchronizing call
  cutilSafeCall(cudaGetLastError());
  if (OPS_diags > 1) {
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[22].time += t1 - t2;
  }
  ops_set_dirtybit_device(args, 5);
  // args 0 and 1 are written by the kernel: mark their halos stale
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);
  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[22].mpi_time += t2 - t1;
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[22].transfer += ops_compute_transfer(dim, start, end, &arg4);
  }
}
|
f1b578a0555a6bc7ce4f2bab49ba1ecf17767a02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Depthwise 3D convolution, forward pass (one filter bin per neighbor).
// nnIndex : B*M*K   neighbor (input point) indices per output point
// nnCount : B*M     number of valid neighbors per output point
// binIndex: B*M*K   filter-bin index of each neighbor
// input   : B*N*C   input point features
// filter  : filter_size*C*r  depthwise weights (r = channel multiplier)
// output  : B*M*(C*r)  accumulated into -- assumes zero-initialized by the
//                      caller (TODO confirm at the call site)
// Fix vs. original: the neighborhood sum is accumulated in a register and
// committed with a single global write per output element, instead of one
// global read-modify-write per neighbor; the summation order (and thus the
// float result) is unchanged.
__global__ void depthwise_conv3d_forward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                         const int* nnCount, const int* binIndex, const float* input,
                                         const float* filter, float* output)
{
    for(int i=blockIdx.x;i<B;i+=gridDim.x) // batch element
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            float sum = 0.0f; // register accumulator for this output element
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                int f = binIndex[i*M*K+m*K+k];
                sum += input[i*N*C+n*C+cin]*filter[f*C*r+cout]/nnSize;
            }
            output[i*M*C*r+j] += sum;
        }
    }
}
// Backward pass of depthwise_conv3d_forward w.r.t. the input features.
// Each thread owns one (batch, output point, output channel) gradient and
// scatters its contribution to each neighbor's input feature; atomicAdd is
// required because different output points can share an input neighbor.
// NOTE(review): gradInput is accumulated into -- assumes it is
// zero-initialized by the caller; confirm at the call site.
__global__ void depthwise_input_backward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                         const int* nnCount, const int* binIndex, const float* input,
                                         const float* filter, const float* gradOutput, float* gradInput)
{
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                int f = binIndex[i*M*K+m*K+k];
                float derIn = gradOutput[i*M*C*r+j]*filter[f*C*r+cout]/nnSize;
                atomicAdd(&gradInput[i*N*C+n*C+cin],derIn);
            }
        }
    }
}
// Backward pass of depthwise_conv3d_forward w.r.t. the filter weights.
// To cut global-memory atomic contention, each block accumulates partial
// filter gradients in shared memory for the window
// [startIdx, startIdx + sharedMemSize) of the flattened filter, then flushes
// that window to gradFilter with one atomicAdd per element. The host
// launcher slides this window across the whole filter in successive launches.
__global__ void depthwise_filter_backward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                          const int* nnCount, const int* binIndex, const float* input,
                                          const float* gradOutput, float* gradFilter, int sharedMemSize,
                                          int startIdx)
{
    extern __shared__ float gradPerBlock[]; // the gradient on each block
    // shared memory is uninitialized: zero the window before accumulating
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        gradPerBlock[i] = 0; // for 1D block
    }
    __syncthreads();
    int endIdx = sharedMemSize+startIdx;
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                int f = binIndex[i*M*K+m*K+k];
                float derFilt = gradOutput[i*M*C*r+j]*input[i*N*C+n*C+cin]/nnSize;
                int currIdx = f*C*r+cout;
                if((currIdx>=startIdx) && (currIdx<endIdx)) // within the shared memory
                {
                    atomicAdd(&gradPerBlock[currIdx-startIdx],derFilt);
                }
            }
        }
    }
    __syncthreads();
    // flush the block-local window into the global filter gradient
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        atomicAdd(&gradFilter[i+startIdx],gradPerBlock[i]); // for 1D block
    }
}
// nnIndex: B*M*K;
// nnCount: B*M;
// input: B*N*C;
// filter: filter_size*C*r;
// output: B*M*(C*r)
// Fuzzy depthwise 3D convolution, forward pass: each neighbor contributes
// through up to T=4 fuzzy filter bins, weighted by binCoeff (entries with a
// non-positive coefficient are skipped).
// output is accumulated into -- assumes zero-initialized by the caller
// (TODO confirm at the call site).
// Fix vs. original: the neighborhood sum is accumulated in a register and
// committed with a single global write per output element, instead of one
// global read-modify-write per neighbor; summation order is unchanged.
__global__ void fuzzy_depthwise_conv3d_forward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                               const int* nnCount, const int* binIndex, const float* binCoeff,
                                               const float* input, const float* filter, float* output)
{
    // T is the number of fuzzy bins each neighbor locates in
    const int T = 4;
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            float sum = 0.0f; // register accumulator for this output element
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                float weight = 0;
                for(int idx=0;idx<T;idx++)
                {
                    int f = binIndex[i*M*K*T+m*K*T+k*T+idx];
                    float coeff = binCoeff[i*M*K*T+m*K*T+k*T+idx];
                    if (coeff>0)
                    {
                        weight += coeff*filter[f*C*r+cout];
                    }
                }
                sum += input[i*N*C+n*C+cin]*weight/nnSize;
            }
            output[i*M*C*r+j] += sum;
        }
    }
}
// Backward pass of fuzzy_depthwise_conv3d_forward w.r.t. the input features.
// The effective weight of a neighbor is the binCoeff-weighted sum of its
// T=4 fuzzy filter bins; the gradient is scattered to the neighbor's input
// feature with atomicAdd (input points are shared between output points).
// NOTE(review): gradInput is accumulated into -- assumes it is
// zero-initialized by the caller; confirm at the call site.
__global__ void fuzzy_depthwise_input_backward(int B, int N, int M, int C, int r, int K,
                                               const int* nnIndex, const int* nnCount, const int* binIndex,
                                               const float* binCoeff, const float* input, const float* filter,
                                               const float* gradOutput, float* gradInput)
{
    const int T = 4;
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                float weight = 0;
                for(int idx=0;idx<T;idx++)
                {
                    int f = binIndex[i*M*K*T+m*K*T+k*T+idx];
                    float coeff = binCoeff[i*M*K*T+m*K*T+k*T+idx];
                    if (coeff>0)
                    {
                        weight += coeff*filter[f*C*r+cout];
                    }
                }
                float derIn = gradOutput[i*M*C*r+j]*weight/nnSize;
                atomicAdd(&gradInput[i*N*C+n*C+cin],derIn);
            }
        }
    }
}
// Backward pass of fuzzy_depthwise_conv3d_forward w.r.t. the filter weights.
// Like depthwise_filter_backward, partial gradients are staged in a shared
// memory window [startIdx, startIdx + sharedMemSize) of the flattened
// filter and flushed once per block; here each neighbor spreads its
// gradient over up to T=4 fuzzy bins scaled by binCoeff.
__global__ void fuzzy_depthwise_filter_backward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                                const int* nnCount, const int* binIndex, const float* binCoeff,
                                                const float* input, const float* gradOutput, float* gradFilter,
                                                int sharedMemSize, int startIdx)
{
    const int T = 4;
    extern __shared__ float gradPerBlock[]; // the gradient on each block
    // shared memory is uninitialized: zero the window before accumulating
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        gradPerBlock[i] = 0; // for 1D block
    }
    __syncthreads();
    int endIdx = sharedMemSize+startIdx;
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                float derFilt = gradOutput[i*M*C*r+j]*input[i*N*C+n*C+cin]/nnSize;
                for(int idx=0;idx<T;idx++)
                {
                    int f = binIndex[i*M*K*T+m*K*T+k*T+idx];
                    float coeff = binCoeff[i*M*K*T+m*K*T+k*T+idx];
                    if (coeff>0)
                    {
                        int currIdx = f*C*r+cout;
                        if((currIdx>=startIdx) && (currIdx<endIdx)) // within the shared memory
                        {
                            atomicAdd(&gradPerBlock[currIdx-startIdx],coeff*derFilt);
                        }
                    }
                }
            }
        }
    }
    __syncthreads();
    // flush the block-local window into the global filter gradient
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        atomicAdd(&gradFilter[i+startIdx],gradPerBlock[i]); // for 1D block
    }
}
// Host launcher for the depthwise forward kernel: one block per batch
// element, 1024 threads striding over the M*(C*r) output elements
// (gridDim.y is 1, so the y-stride loop runs once per thread slot).
// NOTE(review): no hipGetLastError() check after launch -- launch errors
// only surface at the next synchronizing call.
void depthwiseConv3dLauncher(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                             const int* nnCount, const int* binIndex, const float* input,
                             const float* filter, float* output)
{
    hipLaunchKernelGGL(( depthwise_conv3d_forward), dim3(B),dim3(1024), 0, 0, B, N, M, C, r, K, nnIndex, nnCount, binIndex,
                                               input, filter, output);
}
// Host launcher for the depthwise backward pass: the input gradient runs in
// a single launch; the filter gradient (F*C*r floats) is computed in
// ceil(F*C*r / maxSharedMemSize) launches, each covering one shared-memory
// window of the flattened filter.
// NOTE(review): 49152 bytes is the default per-block shared memory on many
// GPUs (e.g. Titan Xp) -- verify for the deployment architecture.
void depthwiseConv3dGradLauncher(int B, int N, int M, int F, int C, int r, int K,
                                 const int* nnIndex, const int* nnCount, const int* binIndex,
                                 const float* input, const float* filter, const float* gradOutput,
                                 float* gradInput, float* gradFilter)
{
    // titan xp has shared memory of 49152 bytes, each float value takes 4 bytes in the memory
    int maxSharedMemSize = int(49152/sizeof(float));
    hipLaunchKernelGGL(( depthwise_input_backward), dim3(B),dim3(1024), 0, 0, B, N, M, C, r, K, nnIndex, nnCount, binIndex,
                                               input, filter, gradOutput, gradInput);
    int maxIter = (F*C*r)/maxSharedMemSize;
    int remainder = (F*C*r)%maxSharedMemSize;
    for(int iter=0;iter<maxIter;iter++)
    {
        hipLaunchKernelGGL(( depthwise_filter_backward), dim3(B),dim3(1024),sizeof(float)*maxSharedMemSize, 0, B, N, M, C, r, K, nnIndex, nnCount,
                                                                                  binIndex, input, gradOutput, gradFilter,
                                                                                  maxSharedMemSize, maxSharedMemSize*iter);
    }
    if(remainder>0) // fill the remainder
    {
        hipLaunchKernelGGL(( depthwise_filter_backward), dim3(B),dim3(1024),sizeof(float)*remainder, 0, B, N, M, C, r, K, nnIndex, nnCount,
                                                                           binIndex, input, gradOutput, gradFilter,
                                                                           remainder, maxSharedMemSize*maxIter);
    }
}
// Host launcher for the fuzzy depthwise forward kernel: one block per batch
// element, 1024 threads striding over the M*(C*r) output elements.
// NOTE(review): no hipGetLastError() check after launch -- launch errors
// only surface at the next synchronizing call.
void fuzzyDepthwiseConv3dLauncher(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                  const int* nnCount, const int* binIndex, const float* binCoeff,
                                  const float* input, const float* filter, float* output)
{
    hipLaunchKernelGGL(( fuzzy_depthwise_conv3d_forward), dim3(B),dim3(1024), 0, 0, B, N, M, C, r, K, nnIndex, nnCount, binIndex,
                                                     binCoeff, input, filter, output);
}
// Host launcher for the fuzzy depthwise backward pass: input gradient in one
// launch, then the filter gradient in ceil(F*C*r / maxSharedMemSize)
// launches, each covering one shared-memory window of the flattened filter.
// NOTE(review): 49152 bytes is the default per-block shared memory on many
// GPUs (e.g. Titan Xp) -- verify for the deployment architecture.
void fuzzyDepthwiseConv3dGradLauncher(int B, int N, int M, int F, int C, int r, int K, const int* nnIndex,
                                      const int* nnCount, const int* binIndex, const float* binCoeff,
                                      const float* input, const float* filter, const float* gradOutput,
                                      float* gradInput, float* gradFilter)
{
    // titan xp has shared memory of 49152 bytes, each float value takes 4 bytes in the memory
    int maxSharedMemSize = int(49152/sizeof(float));
    hipLaunchKernelGGL(( fuzzy_depthwise_input_backward), dim3(B),dim3(1024), 0, 0, B, N, M, C, r, K, nnIndex, nnCount, binIndex, binCoeff,
                                                     input, filter, gradOutput, gradInput);
    int maxIter = (F*C*r)/maxSharedMemSize;
    int remainder = (F*C*r)%maxSharedMemSize;
    for(int iter=0;iter<maxIter;iter++)
    {
        hipLaunchKernelGGL(( fuzzy_depthwise_filter_backward), dim3(B),dim3(1024),sizeof(float)*maxSharedMemSize, 0, B, N, M, C, r, K, nnIndex, nnCount,
                                                                                        binIndex, binCoeff, input, gradOutput,
                                                                                        gradFilter, maxSharedMemSize,
                                                                                        maxSharedMemSize*iter);
    }
    if(remainder>0) // fill the remainder
    {
        hipLaunchKernelGGL(( fuzzy_depthwise_filter_backward), dim3(B),dim3(1024),sizeof(float)*remainder, 0, B, N, M, C, r, K, nnIndex, nnCount,
                                                                                 binIndex, binCoeff, input, gradOutput,
                                                                                 gradFilter, remainder,
                                                                                 maxSharedMemSize*maxIter);
    }
}
// nnCount: B*M;
// input: B*N*C;
// filter: filter_size*C*r;
// output: B*M*(C*r)
// Depthwise 3D convolution, forward pass (CUDA build; see the shape comment
// block above for tensor layouts). output is accumulated into -- assumes
// zero-initialized by the caller (TODO confirm at the call site).
// Fix vs. original: the neighborhood sum is accumulated in a register and
// committed with a single global write per output element, instead of one
// global read-modify-write per neighbor; summation order is unchanged.
__global__ void depthwise_conv3d_forward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                         const int* nnCount, const int* binIndex, const float* input,
                                         const float* filter, float* output)
{
    for(int i=blockIdx.x;i<B;i+=gridDim.x) // batch element
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            float sum = 0.0f; // register accumulator for this output element
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                int f = binIndex[i*M*K+m*K+k];
                sum += input[i*N*C+n*C+cin]*filter[f*C*r+cout]/nnSize;
            }
            output[i*M*C*r+j] += sum;
        }
    }
}
// Backward pass of depthwise_conv3d_forward w.r.t. the input features
// (CUDA build). Each thread owns one (batch, output point, output channel)
// gradient and scatters it to each neighbor's input feature with atomicAdd
// (input points are shared between output points).
// NOTE(review): gradInput is accumulated into -- assumes it is
// zero-initialized by the caller; confirm at the call site.
__global__ void depthwise_input_backward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                         const int* nnCount, const int* binIndex, const float* input,
                                         const float* filter, const float* gradOutput, float* gradInput)
{
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                int f = binIndex[i*M*K+m*K+k];
                float derIn = gradOutput[i*M*C*r+j]*filter[f*C*r+cout]/nnSize;
                atomicAdd(&gradInput[i*N*C+n*C+cin],derIn);
            }
        }
    }
}
// Backward pass of depthwise_conv3d_forward w.r.t. the filter weights
// (CUDA build). Partial filter gradients are staged in a shared-memory
// window [startIdx, startIdx + sharedMemSize) of the flattened filter and
// flushed to gradFilter once per block; the host launcher slides the window
// across the whole filter in successive launches.
__global__ void depthwise_filter_backward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                          const int* nnCount, const int* binIndex, const float* input,
                                          const float* gradOutput, float* gradFilter, int sharedMemSize,
                                          int startIdx)
{
    extern __shared__ float gradPerBlock[]; // the gradient on each block
    // shared memory is uninitialized: zero the window before accumulating
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        gradPerBlock[i] = 0; // for 1D block
    }
    __syncthreads();
    int endIdx = sharedMemSize+startIdx;
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                int f = binIndex[i*M*K+m*K+k];
                float derFilt = gradOutput[i*M*C*r+j]*input[i*N*C+n*C+cin]/nnSize;
                int currIdx = f*C*r+cout;
                if((currIdx>=startIdx) && (currIdx<endIdx)) // within the shared memory
                {
                    atomicAdd(&gradPerBlock[currIdx-startIdx],derFilt);
                }
            }
        }
    }
    __syncthreads();
    // flush the block-local window into the global filter gradient
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        atomicAdd(&gradFilter[i+startIdx],gradPerBlock[i]); // for 1D block
    }
}
// nnIndex: B*M*K;
// nnCount: B*M;
// input: B*N*C;
// filter: filter_size*C*r;
// output: B*M*(C*r)
// Fuzzy depthwise 3D convolution, forward pass (CUDA build): each neighbor
// contributes through up to T=4 fuzzy filter bins, weighted by binCoeff
// (entries with a non-positive coefficient are skipped).
// output is accumulated into -- assumes zero-initialized by the caller
// (TODO confirm at the call site).
// Fix vs. original: the neighborhood sum is accumulated in a register and
// committed with a single global write per output element, instead of one
// global read-modify-write per neighbor; summation order is unchanged.
__global__ void fuzzy_depthwise_conv3d_forward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                               const int* nnCount, const int* binIndex, const float* binCoeff,
                                               const float* input, const float* filter, float* output)
{
    // T is the number of fuzzy bins each neighbor locates in
    const int T = 4;
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            float sum = 0.0f; // register accumulator for this output element
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                float weight = 0;
                for(int idx=0;idx<T;idx++)
                {
                    int f = binIndex[i*M*K*T+m*K*T+k*T+idx];
                    float coeff = binCoeff[i*M*K*T+m*K*T+k*T+idx];
                    if (coeff>0)
                    {
                        weight += coeff*filter[f*C*r+cout];
                    }
                }
                sum += input[i*N*C+n*C+cin]*weight/nnSize;
            }
            output[i*M*C*r+j] += sum;
        }
    }
}
// Backward pass of fuzzy_depthwise_conv3d_forward w.r.t. the input features
// (CUDA build). A neighbor's effective weight is the binCoeff-weighted sum
// of its T=4 fuzzy filter bins; the gradient is scattered to the neighbor's
// input feature with atomicAdd.
// NOTE(review): gradInput is accumulated into -- assumes it is
// zero-initialized by the caller; confirm at the call site.
__global__ void fuzzy_depthwise_input_backward(int B, int N, int M, int C, int r, int K,
                                               const int* nnIndex, const int* nnCount, const int* binIndex,
                                               const float* binCoeff, const float* input, const float* filter,
                                               const float* gradOutput, float* gradInput)
{
    const int T = 4;
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                float weight = 0;
                for(int idx=0;idx<T;idx++)
                {
                    int f = binIndex[i*M*K*T+m*K*T+k*T+idx];
                    float coeff = binCoeff[i*M*K*T+m*K*T+k*T+idx];
                    if (coeff>0)
                    {
                        weight += coeff*filter[f*C*r+cout];
                    }
                }
                float derIn = gradOutput[i*M*C*r+j]*weight/nnSize;
                atomicAdd(&gradInput[i*N*C+n*C+cin],derIn);
            }
        }
    }
}
// Backward pass of fuzzy_depthwise_conv3d_forward w.r.t. the filter weights
// (CUDA build). Partial gradients are staged in a shared-memory window
// [startIdx, startIdx + sharedMemSize) of the flattened filter and flushed
// once per block; each neighbor spreads its gradient over up to T=4 fuzzy
// bins scaled by binCoeff.
__global__ void fuzzy_depthwise_filter_backward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                                const int* nnCount, const int* binIndex, const float* binCoeff,
                                                const float* input, const float* gradOutput, float* gradFilter,
                                                int sharedMemSize, int startIdx)
{
    const int T = 4;
    extern __shared__ float gradPerBlock[]; // the gradient on each block
    // shared memory is uninitialized: zero the window before accumulating
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        gradPerBlock[i] = 0; // for 1D block
    }
    __syncthreads();
    int endIdx = sharedMemSize+startIdx;
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                float derFilt = gradOutput[i*M*C*r+j]*input[i*N*C+n*C+cin]/nnSize;
                for(int idx=0;idx<T;idx++)
                {
                    int f = binIndex[i*M*K*T+m*K*T+k*T+idx];
                    float coeff = binCoeff[i*M*K*T+m*K*T+k*T+idx];
                    if (coeff>0)
                    {
                        int currIdx = f*C*r+cout;
                        if((currIdx>=startIdx) && (currIdx<endIdx)) // within the shared memory
                        {
                            atomicAdd(&gradPerBlock[currIdx-startIdx],coeff*derFilt);
                        }
                    }
                }
            }
        }
    }
    __syncthreads();
    // flush the block-local window into the global filter gradient
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        atomicAdd(&gradFilter[i+startIdx],gradPerBlock[i]); // for 1D block
    }
}
}
// Host wrapper for the depthwise 3D convolution forward kernel.
// Launch layout: one block per batch element, 1024 threads per block
// sweeping the M*(C*r) output slots via the kernel's grid-stride loops.
void depthwiseConv3dLauncher(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                             const int* nnCount, const int* binIndex, const float* input,
                             const float* filter, float* output)
{
    const int numBlocks = B;
    const int threadsPerBlock = 1024;
    depthwise_conv3d_forward<<<numBlocks, threadsPerBlock>>>(
        B, N, M, C, r, K, nnIndex, nnCount, binIndex, input, filter, output);
}
// Host wrapper for the depthwise 3D convolution backward pass.
// First computes the input gradient in one launch, then accumulates the
// filter gradient (F*C*r floats) in shared-memory chunks so each launch's
// scratch buffer fits in the 48 KB of shared memory available per block.
void depthwiseConv3dGradLauncher(int B, int N, int M, int F, int C, int r, int K,
                                 const int* nnIndex, const int* nnCount, const int* binIndex,
                                 const float* input, const float* filter, const float* gradOutput,
                                 float* gradInput, float* gradFilter)
{
    // titan xp has shared memory of 49152 bytes, each float value takes 4 bytes
    const int chunkFloats = int(49152 / sizeof(float));
    depthwise_input_backward<<<B, 1024>>>(B, N, M, C, r, K, nnIndex, nnCount, binIndex,
                                          input, filter, gradOutput, gradInput);
    const int totalFilterSize = F * C * r;
    // full chunks of chunkFloats filter entries each
    for (int start = 0; start + chunkFloats <= totalFilterSize; start += chunkFloats)
    {
        depthwise_filter_backward<<<B, 1024, sizeof(float) * chunkFloats>>>(
            B, N, M, C, r, K, nnIndex, nnCount, binIndex, input, gradOutput,
            gradFilter, chunkFloats, start);
    }
    const int remainder = totalFilterSize % chunkFloats;
    if (remainder > 0) // fill the remainder
    {
        depthwise_filter_backward<<<B, 1024, sizeof(float) * remainder>>>(
            B, N, M, C, r, K, nnIndex, nnCount, binIndex, input, gradOutput,
            gradFilter, remainder, totalFilterSize - remainder);
    }
}
// Host wrapper for the fuzzy depthwise 3D convolution forward kernel.
// Launch layout: one block per batch element, 1024 threads per block
// sweeping the M*(C*r) output slots via the kernel's grid-stride loops.
void fuzzyDepthwiseConv3dLauncher(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                  const int* nnCount, const int* binIndex, const float* binCoeff,
                                  const float* input, const float* filter, float* output)
{
    const int numBlocks = B;
    const int threadsPerBlock = 1024;
    fuzzy_depthwise_conv3d_forward<<<numBlocks, threadsPerBlock>>>(
        B, N, M, C, r, K, nnIndex, nnCount, binIndex, binCoeff, input, filter, output);
}
// Host wrapper for the fuzzy depthwise 3D convolution backward pass.
// Computes the input gradient in one launch, then accumulates the filter
// gradient (F*C*r floats) chunk by chunk so each launch's shared-memory
// scratch buffer stays within 48 KB per block.
void fuzzyDepthwiseConv3dGradLauncher(int B, int N, int M, int F, int C, int r, int K, const int* nnIndex,
                                      const int* nnCount, const int* binIndex, const float* binCoeff,
                                      const float* input, const float* filter, const float* gradOutput,
                                      float* gradInput, float* gradFilter)
{
    // titan xp has shared memory of 49152 bytes, each float value takes 4 bytes
    const int chunkFloats = int(49152 / sizeof(float));
    fuzzy_depthwise_input_backward<<<B, 1024>>>(B, N, M, C, r, K, nnIndex, nnCount, binIndex, binCoeff,
                                                input, filter, gradOutput, gradInput);
    const int totalFilterSize = F * C * r;
    // full chunks of chunkFloats filter entries each
    for (int start = 0; start + chunkFloats <= totalFilterSize; start += chunkFloats)
    {
        fuzzy_depthwise_filter_backward<<<B, 1024, sizeof(float) * chunkFloats>>>(
            B, N, M, C, r, K, nnIndex, nnCount, binIndex, binCoeff, input, gradOutput,
            gradFilter, chunkFloats, start);
    }
    const int remainder = totalFilterSize % chunkFloats;
    if (remainder > 0) // fill the remainder
    {
        fuzzy_depthwise_filter_backward<<<B, 1024, sizeof(float) * remainder>>>(
            B, N, M, C, r, K, nnIndex, nnCount, binIndex, binCoeff, input, gradOutput,
            gradFilter, remainder, totalFilterSize - remainder);
    }
}
3e71b5161b8e51d70df99ea23a16cc4497ad8d83.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
using namespace std;
// Loads a grayscale image and a convolution kernel from a whitespace-
// separated text file.
//
// Expected file layout:
//   imgRows imgCols
//   imgRows*imgCols image floats
//   convKernelSize convKernelCoeff
//   convKernelSize*convKernelSize kernel floats
//
// On success *imageData and *convKernelData point to malloc'ed buffers the
// caller must free(). On failure a message is printed and the function
// returns early without allocating (or after releasing) the buffers, so
// callers can detect failure by NULL-initialising the output pointers.
void load(const char* path, float** imageData, int* imgRows, int* imgCols, float** convKernelData, int* convKernelSize, float* convKernelCoeff)
{
    FILE* file = fopen(path, "r");
    if (file == NULL)
    {
        printf("Cannot open file.\n");
        return;
    }
    // image header: dimensions must parse and be positive
    if (fscanf(file, "%d %d ", imgRows, imgCols) != 2 || *imgRows <= 0 || *imgCols <= 0)
    {
        printf("Invalid image header.\n");
        fclose(file);
        return;
    }
    *imageData = (float*)malloc((size_t)(*imgRows) * (*imgCols) * sizeof(float));
    if (*imageData == NULL)
    {
        printf("Out of memory.\n");
        fclose(file);
        return;
    }
    for (int i = 0; i < *imgRows * *imgCols; i++)
        fscanf(file, "%f ", &(*imageData)[i]);
    // kernel header: size and scaling coefficient
    if (fscanf(file, "%d %f ", convKernelSize, convKernelCoeff) != 2 || *convKernelSize <= 0)
    {
        printf("Invalid kernel header.\n");
        free(*imageData);
        *imageData = NULL;
        fclose(file);
        return;
    }
    *convKernelData = (float*)malloc((size_t)(*convKernelSize) * (*convKernelSize) * sizeof(float));
    if (*convKernelData == NULL)
    {
        printf("Out of memory.\n");
        free(*imageData);
        *imageData = NULL;
        fclose(file);
        return;
    }
    for (int i = 0; i < *convKernelSize * *convKernelSize; i++)
        fscanf(file, "%f ", &(*convKernelData)[i]);
    fclose(file);
}
// One thread per output pixel: convolve the row-major source image with a
// square convKernelSize x convKernelSize kernel and scale by convKernelCoeff.
// Border pixels (within convKernelSize/2 of any edge) are left untouched,
// which is why the host pre-fills resultImageData with the source image.
__global__ void applyConvolution_GPU(float* resultImageData, const float* sourceImageData, const int imageRowsSize, const int imageColsSize,
                                     const float* convKernelData, const int convKernelSize, const float convKernelCoeff)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int row = index / imageColsSize;
    int col = index % imageColsSize;
    int halo = convKernelSize / 2;
    // Skip the border and any out-of-range thread. The previous test used
    // '>' here, which admitted row/col == size - halo and then read one
    // row/column past the end of the image; '>=' is the correct bound.
    if (row < halo || col < halo || row >= imageRowsSize - halo || col >= imageColsSize - halo)
    {
        return;
    }
    float roiSum = 0;
    for (int roiRow = 0; roiRow < convKernelSize; roiRow++)
    {
        for (int roiCol = 0; roiCol < convKernelSize; roiCol++)
        {
            int imageRow = row - halo + roiRow;
            int imageCol = col - halo + roiCol;
            roiSum += sourceImageData[imageRow * imageColsSize + imageCol] * convKernelData[roiRow * convKernelSize + roiCol];
        }
    }
    resultImageData[row * imageColsSize + col] = roiSum * convKernelCoeff;
}
// Host driver: loads the image + kernel from "srcImgData1.txt", runs the
// convolution on the GPU and copies the result back.
// Returns 0 on success, 1 on any setup failure.
int main()
{
    float* imageData = NULL;
    float* convKernelData = NULL;
    int imgRows, imgCols, convKernelSize;
    float convKernelCoeff;
    load("srcImgData1.txt", &imageData, &imgRows, &imgCols, &convKernelData, &convKernelSize, &convKernelCoeff);
    // load() leaves the buffers NULL on failure; without this check the
    // dimensions below would be read uninitialised.
    if (imageData == NULL || convKernelData == NULL)
    {
        fprintf(stderr, "Failed to load input data!");
        return 1;
    }
    unsigned int arraySize = imgRows * imgCols;
    unsigned int numOfThreadsInBlock = 512;
    unsigned int numOfBlocks = (arraySize + numOfThreadsInBlock - 1) / numOfThreadsInBlock; // ceil-div
    float *hostSourceImageData, *hostConvKernelData, *hostResultImageData;
    float *devSourceImageData, *devConvKernelData, *devResultImageData;
    // Choose which GPU to run on, change this on a multi-GPU system.
    hipError_t cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess)
    {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
        free(imageData);
        free(convKernelData);
        return 1;
    }
    // Allocate memory on GPU
    hipMalloc((void**)&devSourceImageData, arraySize * sizeof(float));
    hipMalloc((void**)&devConvKernelData, convKernelSize * convKernelSize * sizeof(float));
    hipMalloc((void**)&devResultImageData, arraySize * sizeof(float));
    // Allocate pinned memory on CPU for faster async-capable transfers
    hipHostMalloc((void**)&hostSourceImageData, arraySize * sizeof(float), hipHostMallocDefault);
    hipHostMalloc((void**)&hostConvKernelData, convKernelSize * convKernelSize * sizeof(float), hipHostMallocDefault);
    hipHostMalloc((void**)&hostResultImageData, arraySize * sizeof(float), hipHostMallocDefault);
    // Initialize arrays on the host; the result buffer starts as a copy of
    // the source so untouched border pixels keep their original value.
    for (unsigned int i = 0; i < arraySize; i++)
    {
        hostSourceImageData[i] = imageData[i];
        hostResultImageData[i] = imageData[i];
    }
    for (int i = 0; i < convKernelSize * convKernelSize; i++)
        hostConvKernelData[i] = convKernelData[i];
    // Copy data CPU -> GPU
    hipMemcpy(devSourceImageData, hostSourceImageData, arraySize * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(devConvKernelData, hostConvKernelData, convKernelSize * convKernelSize * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(devResultImageData, hostResultImageData, arraySize * sizeof(float), hipMemcpyHostToDevice);
    // Launch the kernel on the GPU.
    // NOTE(review): the coefficient read from the file (convKernelCoeff) is
    // ignored; a hard-coded 1/256.0 is passed instead -- confirm intended.
    hipLaunchKernelGGL(( applyConvolution_GPU), dim3(numOfBlocks), dim3(numOfThreadsInBlock), 0, 0, devResultImageData, devSourceImageData, imgRows, imgCols,
                       devConvKernelData, convKernelSize, 1 / 256.0);
    hipDeviceSynchronize(); // wait for kernel end
    // Copy data GPU -> CPU
    hipMemcpy(hostResultImageData, devResultImageData, arraySize * sizeof(float), hipMemcpyDeviceToHost);
    // free memory blocks on CPU
    hipHostFree(hostSourceImageData);
    hipHostFree(hostConvKernelData);
    hipHostFree(hostResultImageData);
    // free memory blocks on GPU
    hipFree(devSourceImageData);
    hipFree(devConvKernelData);
    hipFree(devResultImageData);
    // free the buffers allocated by load() (previously leaked)
    free(imageData);
    free(convKernelData);
    return 0;
}
| 3e71b5161b8e51d70df99ea23a16cc4497ad8d83.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
using namespace std;
// Loads a grayscale image and a convolution kernel from a whitespace-
// separated text file.
//
// Expected file layout:
//   imgRows imgCols
//   imgRows*imgCols image floats
//   convKernelSize convKernelCoeff
//   convKernelSize*convKernelSize kernel floats
//
// On success *imageData and *convKernelData point to malloc'ed buffers the
// caller must free(). On failure a message is printed and the function
// returns early without allocating (or after releasing) the buffers, so
// callers can detect failure by NULL-initialising the output pointers.
void load(const char* path, float** imageData, int* imgRows, int* imgCols, float** convKernelData, int* convKernelSize, float* convKernelCoeff)
{
    FILE* file = fopen(path, "r");
    if (file == NULL)
    {
        printf("Cannot open file.\n");
        return;
    }
    // image header: dimensions must parse and be positive
    if (fscanf(file, "%d %d ", imgRows, imgCols) != 2 || *imgRows <= 0 || *imgCols <= 0)
    {
        printf("Invalid image header.\n");
        fclose(file);
        return;
    }
    *imageData = (float*)malloc((size_t)(*imgRows) * (*imgCols) * sizeof(float));
    if (*imageData == NULL)
    {
        printf("Out of memory.\n");
        fclose(file);
        return;
    }
    for (int i = 0; i < *imgRows * *imgCols; i++)
        fscanf(file, "%f ", &(*imageData)[i]);
    // kernel header: size and scaling coefficient
    if (fscanf(file, "%d %f ", convKernelSize, convKernelCoeff) != 2 || *convKernelSize <= 0)
    {
        printf("Invalid kernel header.\n");
        free(*imageData);
        *imageData = NULL;
        fclose(file);
        return;
    }
    *convKernelData = (float*)malloc((size_t)(*convKernelSize) * (*convKernelSize) * sizeof(float));
    if (*convKernelData == NULL)
    {
        printf("Out of memory.\n");
        free(*imageData);
        *imageData = NULL;
        fclose(file);
        return;
    }
    for (int i = 0; i < *convKernelSize * *convKernelSize; i++)
        fscanf(file, "%f ", &(*convKernelData)[i]);
    fclose(file);
}
// One thread per output pixel: convolve the row-major source image with a
// square convKernelSize x convKernelSize kernel and scale by convKernelCoeff.
// Border pixels (within convKernelSize/2 of any edge) are left untouched,
// which is why the host pre-fills resultImageData with the source image.
__global__ void applyConvolution_GPU(float* resultImageData, const float* sourceImageData, const int imageRowsSize, const int imageColsSize,
                                     const float* convKernelData, const int convKernelSize, const float convKernelCoeff)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int row = index / imageColsSize;
    int col = index % imageColsSize;
    int halo = convKernelSize / 2;
    // Skip the border and any out-of-range thread. The previous test used
    // '>' here, which admitted row/col == size - halo and then read one
    // row/column past the end of the image; '>=' is the correct bound.
    if (row < halo || col < halo || row >= imageRowsSize - halo || col >= imageColsSize - halo)
    {
        return;
    }
    float roiSum = 0;
    for (int roiRow = 0; roiRow < convKernelSize; roiRow++)
    {
        for (int roiCol = 0; roiCol < convKernelSize; roiCol++)
        {
            int imageRow = row - halo + roiRow;
            int imageCol = col - halo + roiCol;
            roiSum += sourceImageData[imageRow * imageColsSize + imageCol] * convKernelData[roiRow * convKernelSize + roiCol];
        }
    }
    resultImageData[row * imageColsSize + col] = roiSum * convKernelCoeff;
}
// Host driver: loads the image + kernel from "srcImgData1.txt", runs the
// convolution on the GPU and copies the result back.
// Returns 0 on success, 1 on any setup failure.
int main()
{
    float* imageData = NULL;
    float* convKernelData = NULL;
    int imgRows, imgCols, convKernelSize;
    float convKernelCoeff;
    load("srcImgData1.txt", &imageData, &imgRows, &imgCols, &convKernelData, &convKernelSize, &convKernelCoeff);
    // load() leaves the buffers NULL on failure; without this check the
    // dimensions below would be read uninitialised.
    if (imageData == NULL || convKernelData == NULL)
    {
        fprintf(stderr, "Failed to load input data!");
        return 1;
    }
    unsigned int arraySize = imgRows * imgCols;
    unsigned int numOfThreadsInBlock = 512;
    unsigned int numOfBlocks = (arraySize + numOfThreadsInBlock - 1) / numOfThreadsInBlock; // ceil-div
    float *hostSourceImageData, *hostConvKernelData, *hostResultImageData;
    float *devSourceImageData, *devConvKernelData, *devResultImageData;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaError_t cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        free(imageData);
        free(convKernelData);
        return 1;
    }
    // Allocate memory on GPU
    cudaMalloc((void**)&devSourceImageData, arraySize * sizeof(float));
    cudaMalloc((void**)&devConvKernelData, convKernelSize * convKernelSize * sizeof(float));
    cudaMalloc((void**)&devResultImageData, arraySize * sizeof(float));
    // Allocate pinned memory on CPU for faster async-capable transfers
    cudaHostAlloc((void**)&hostSourceImageData, arraySize * sizeof(float), cudaHostAllocDefault);
    cudaHostAlloc((void**)&hostConvKernelData, convKernelSize * convKernelSize * sizeof(float), cudaHostAllocDefault);
    cudaHostAlloc((void**)&hostResultImageData, arraySize * sizeof(float), cudaHostAllocDefault);
    // Initialize arrays on the host; the result buffer starts as a copy of
    // the source so untouched border pixels keep their original value.
    for (unsigned int i = 0; i < arraySize; i++)
    {
        hostSourceImageData[i] = imageData[i];
        hostResultImageData[i] = imageData[i];
    }
    for (int i = 0; i < convKernelSize * convKernelSize; i++)
        hostConvKernelData[i] = convKernelData[i];
    // Copy data CPU -> GPU
    cudaMemcpy(devSourceImageData, hostSourceImageData, arraySize * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(devConvKernelData, hostConvKernelData, convKernelSize * convKernelSize * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(devResultImageData, hostResultImageData, arraySize * sizeof(float), cudaMemcpyHostToDevice);
    // Launch the kernel on the GPU.
    // NOTE(review): the coefficient read from the file (convKernelCoeff) is
    // ignored; a hard-coded 1/256.0 is passed instead -- confirm intended.
    applyConvolution_GPU<<<numOfBlocks, numOfThreadsInBlock>>>(devResultImageData, devSourceImageData, imgRows, imgCols,
                                                               devConvKernelData, convKernelSize, 1 / 256.0);
    cudaDeviceSynchronize(); // wait for kernel end
    // Copy data GPU -> CPU
    cudaMemcpy(hostResultImageData, devResultImageData, arraySize * sizeof(float), cudaMemcpyDeviceToHost);
    // free memory blocks on CPU
    cudaFreeHost(hostSourceImageData);
    cudaFreeHost(hostConvKernelData);
    cudaFreeHost(hostResultImageData);
    // free memory blocks on GPU
    cudaFree(devSourceImageData);
    cudaFree(devConvKernelData);
    cudaFree(devResultImageData);
    // free the buffers allocated by load() (previously leaked)
    free(imageData);
    free(convKernelData);
    return 0;
}
|
8bae2b5a4b82f61d3a69e635bc364b2f0430fd0d.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <vector>
#include "paddle/fluid/framework/fleet/heter_ps/feature_value.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_comm.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h"
#include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
using paddle::framework;
// Integration test for GpuPsGraphTable across 3 GPUs.
// A 10-node graph is sharded round-robin over the GPUs (node i -> GPU i%3);
// node i gets i+1 neighbours with globally increasing ids, so node x's
// neighbour list is [(1+x)*x/2, ..., (1+x)*x/2 + x]. The test then checks
// query_node_list() and graph_neighbor_sample() against hand-computed
// expectations (see the inline block comments).
TEST(TEST_FLEET, graph_comm) {
  int gpu_count = 3;
  std::vector<int> dev_ids;
  dev_ids.push_back(0);
  dev_ids.push_back(1);
  dev_ids.push_back(2);
  std::shared_ptr<HeterPsResource> resource =
      std::make_shared<HeterPsResource>(dev_ids);
  resource->enable_p2p();
  GpuPsGraphTable g(resource);
  int node_count = 10;
  std::vector<std::vector<int64_t>> neighbors(node_count);
  int ind = 0;
  int64_t node_id = 0;
  std::vector<GpuPsCommGraph> graph_list(gpu_count);
  // First pass: count nodes/neighbours per shard and build the CPU-side
  // neighbour lists with monotonically increasing ids.
  while (ind < node_count) {
    int neighbor_size = ind + 1;
    graph_list[ind % gpu_count].node_size++;
    graph_list[ind % gpu_count].neighbor_size += neighbor_size;
    while (neighbor_size--) {
      neighbors[ind].push_back(node_id++);
    }
    ind++;
  }
  std::vector<int> neighbor_offset(gpu_count, 0), node_index(gpu_count, 0);
  // NOTE(review): these heap arrays are never freed in this test.
  for (int i = 0; i < graph_list.size(); i++) {
    graph_list[i].node_list = new uint64_t[graph_list[i].node_size];
    graph_list[i].node_info_list = new GpuPsNodeInfo[graph_list[i].node_size];
    graph_list[i].neighbor_list = new int64_t[graph_list[i].neighbor_size];
  }
  // Second pass: fill each shard's CSR-style node/offset/neighbour arrays.
  for (int i = 0; i < node_count; i++) {
    ind = i % gpu_count;
    graph_list[ind].node_list[node_index[ind]] = i;
    graph_list[ind].node_info_list[node_index[ind]].neighbor_offset =
        neighbor_offset[ind];
    graph_list[ind].node_info_list[node_index[ind]].neighbor_size =
        neighbors[i].size();
    for (auto x : neighbors[i]) {
      graph_list[ind].neighbor_list[neighbor_offset[ind]++] = x;
    }
    node_index[ind]++;
  }
  g.build_graph_from_cpu(graph_list);
  /*
   gpu 0:
   0,3,6,9
   gpu 1:
   1,4,7
   gpu 2:
   2,5,8
   query(2,6) returns nodes [6,9,1,4,7,2]
  */
  int64_t answer[6] = {6, 9, 1, 4, 7, 2};
  int64_t *res = new int64_t[6];
  auto query_res = g.query_node_list(0, 2, 6);
  // 48 bytes == 6 * sizeof(int64_t)
  hipMemcpy(res, query_res->val, 48, hipMemcpyDeviceToHost);
  ASSERT_EQ(query_res->actual_sample_size, 6);
  for (int i = 0; i < 6; i++) {
    ASSERT_EQ(res[i], answer[i]);
  }
  delete[] res;
  delete query_res;
  /*
   node x's neighbor list = [(1+x)*x/2,(1+x)*x/2 + 1,.....,(1+x)*x/2 + x]
   so node 6's neighbors are [21,22...,27]
   node 7's neighbors are [28,29,..35]
   node 0's neighbors are [0]
   query([7,0,6],sample_size=3) should return [28,29,30,0,x,x,21,22,23]
   6 --index-->2
   0 --index--->0
   7 --index-->2
  */
  int64_t cpu_key[3] = {7, 0, 6};
  void *key;
  // NOTE(review): 'key' is allocated but never hipFree'd in this test.
  hipMalloc(reinterpret_cast<void **>(&key), 3 * sizeof(int64_t));
  hipMemcpy(key, cpu_key, 3 * sizeof(int64_t), hipMemcpyHostToDevice);
  auto neighbor_sample_res =
      g.graph_neighbor_sample(0, reinterpret_cast<int64_t *>(key), 3, 3);
  res = new int64_t[7];
  // 56 bytes == 7 * sizeof(int64_t): 3 + 1 + 3 sampled neighbour ids
  hipMemcpy(res, neighbor_sample_res->val, 56, hipMemcpyDeviceToHost);
  int *actual_sample_size = new int[3];
  // 12 bytes == 3 * sizeof(int)
  hipMemcpy(actual_sample_size,
            neighbor_sample_res->actual_sample_size,
            12,
            hipMemcpyDeviceToHost); // 3, 1, 3
  int *cumsum_sample_size = new int[3];
  hipMemcpy(cumsum_sample_size,
            neighbor_sample_res->offset,
            12,
            hipMemcpyDeviceToHost); // 0, 3, 4
  std::vector<std::vector<int64_t>> neighbors_;
  std::vector<int64_t> neighbors_7 = {28, 29, 30, 31, 32, 33, 34, 35};
  std::vector<int64_t> neighbors_0 = {0};
  std::vector<int64_t> neighbors_6 = {21, 22, 23, 24, 25, 26, 27};
  neighbors_.push_back(neighbors_7);
  neighbors_.push_back(neighbors_0);
  neighbors_.push_back(neighbors_6);
  // Sampling order is not deterministic, so only membership is checked:
  // every sampled id must belong to the true neighbour set of its query.
  for (int i = 0; i < 3; i++) {
    for (int j = cumsum_sample_size[i];
         j < cumsum_sample_size[i] + actual_sample_size[i];
         j++) {
      bool flag = false;
      for (int k = 0; k < neighbors_[i].size(); k++) {
        if (res[j] == neighbors_[i][k]) {
          flag = true;
          break;
        }
      }
      ASSERT_EQ(flag, true);
    }
  }
  delete[] res;
  delete[] actual_sample_size;
  delete[] cumsum_sample_size;
  delete neighbor_sample_res;
}
| 8bae2b5a4b82f61d3a69e635bc364b2f0430fd0d.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <vector>
#include "paddle/fluid/framework/fleet/heter_ps/feature_value.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_comm.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h"
#include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
using paddle::framework;
// Integration test for GpuPsGraphTable across 3 GPUs.
// A 10-node graph is sharded round-robin over the GPUs (node i -> GPU i%3);
// node i gets i+1 neighbours with globally increasing ids, so node x's
// neighbour list is [(1+x)*x/2, ..., (1+x)*x/2 + x]. The test then checks
// query_node_list() and graph_neighbor_sample() against hand-computed
// expectations (see the inline block comments).
TEST(TEST_FLEET, graph_comm) {
  int gpu_count = 3;
  std::vector<int> dev_ids;
  dev_ids.push_back(0);
  dev_ids.push_back(1);
  dev_ids.push_back(2);
  std::shared_ptr<HeterPsResource> resource =
      std::make_shared<HeterPsResource>(dev_ids);
  resource->enable_p2p();
  GpuPsGraphTable g(resource);
  int node_count = 10;
  std::vector<std::vector<int64_t>> neighbors(node_count);
  int ind = 0;
  int64_t node_id = 0;
  std::vector<GpuPsCommGraph> graph_list(gpu_count);
  // First pass: count nodes/neighbours per shard and build the CPU-side
  // neighbour lists with monotonically increasing ids.
  while (ind < node_count) {
    int neighbor_size = ind + 1;
    graph_list[ind % gpu_count].node_size++;
    graph_list[ind % gpu_count].neighbor_size += neighbor_size;
    while (neighbor_size--) {
      neighbors[ind].push_back(node_id++);
    }
    ind++;
  }
  std::vector<int> neighbor_offset(gpu_count, 0), node_index(gpu_count, 0);
  // NOTE(review): these heap arrays are never freed in this test.
  for (int i = 0; i < graph_list.size(); i++) {
    graph_list[i].node_list = new uint64_t[graph_list[i].node_size];
    graph_list[i].node_info_list = new GpuPsNodeInfo[graph_list[i].node_size];
    graph_list[i].neighbor_list = new int64_t[graph_list[i].neighbor_size];
  }
  // Second pass: fill each shard's CSR-style node/offset/neighbour arrays.
  for (int i = 0; i < node_count; i++) {
    ind = i % gpu_count;
    graph_list[ind].node_list[node_index[ind]] = i;
    graph_list[ind].node_info_list[node_index[ind]].neighbor_offset =
        neighbor_offset[ind];
    graph_list[ind].node_info_list[node_index[ind]].neighbor_size =
        neighbors[i].size();
    for (auto x : neighbors[i]) {
      graph_list[ind].neighbor_list[neighbor_offset[ind]++] = x;
    }
    node_index[ind]++;
  }
  g.build_graph_from_cpu(graph_list);
  /*
   gpu 0:
   0,3,6,9
   gpu 1:
   1,4,7
   gpu 2:
   2,5,8
   query(2,6) returns nodes [6,9,1,4,7,2]
  */
  int64_t answer[6] = {6, 9, 1, 4, 7, 2};
  int64_t *res = new int64_t[6];
  auto query_res = g.query_node_list(0, 2, 6);
  // 48 bytes == 6 * sizeof(int64_t)
  cudaMemcpy(res, query_res->val, 48, cudaMemcpyDeviceToHost);
  ASSERT_EQ(query_res->actual_sample_size, 6);
  for (int i = 0; i < 6; i++) {
    ASSERT_EQ(res[i], answer[i]);
  }
  delete[] res;
  delete query_res;
  /*
   node x's neighbor list = [(1+x)*x/2,(1+x)*x/2 + 1,.....,(1+x)*x/2 + x]
   so node 6's neighbors are [21,22...,27]
   node 7's neighbors are [28,29,..35]
   node 0's neighbors are [0]
   query([7,0,6],sample_size=3) should return [28,29,30,0,x,x,21,22,23]
   6 --index-->2
   0 --index--->0
   7 --index-->2
  */
  int64_t cpu_key[3] = {7, 0, 6};
  void *key;
  // NOTE(review): 'key' is allocated but never cudaFree'd in this test.
  cudaMalloc(reinterpret_cast<void **>(&key), 3 * sizeof(int64_t));
  cudaMemcpy(key, cpu_key, 3 * sizeof(int64_t), cudaMemcpyHostToDevice);
  auto neighbor_sample_res =
      g.graph_neighbor_sample(0, reinterpret_cast<int64_t *>(key), 3, 3);
  res = new int64_t[7];
  // 56 bytes == 7 * sizeof(int64_t): 3 + 1 + 3 sampled neighbour ids
  cudaMemcpy(res, neighbor_sample_res->val, 56, cudaMemcpyDeviceToHost);
  int *actual_sample_size = new int[3];
  // 12 bytes == 3 * sizeof(int)
  cudaMemcpy(actual_sample_size,
             neighbor_sample_res->actual_sample_size,
             12,
             cudaMemcpyDeviceToHost); // 3, 1, 3
  int *cumsum_sample_size = new int[3];
  cudaMemcpy(cumsum_sample_size,
             neighbor_sample_res->offset,
             12,
             cudaMemcpyDeviceToHost); // 0, 3, 4
  std::vector<std::vector<int64_t>> neighbors_;
  std::vector<int64_t> neighbors_7 = {28, 29, 30, 31, 32, 33, 34, 35};
  std::vector<int64_t> neighbors_0 = {0};
  std::vector<int64_t> neighbors_6 = {21, 22, 23, 24, 25, 26, 27};
  neighbors_.push_back(neighbors_7);
  neighbors_.push_back(neighbors_0);
  neighbors_.push_back(neighbors_6);
  // Sampling order is not deterministic, so only membership is checked:
  // every sampled id must belong to the true neighbour set of its query.
  for (int i = 0; i < 3; i++) {
    for (int j = cumsum_sample_size[i];
         j < cumsum_sample_size[i] + actual_sample_size[i];
         j++) {
      bool flag = false;
      for (int k = 0; k < neighbors_[i].size(); k++) {
        if (res[j] == neighbors_[i][k]) {
          flag = true;
          break;
        }
      }
      ASSERT_EQ(flag, true);
    }
  }
  delete[] res;
  delete[] actual_sample_size;
  delete[] cumsum_sample_size;
  delete neighbor_sample_res;
}
|
2d7166de3dcde7a29695cd21740d0dda608709a0.hip | // !!! This is a file automatically generated by hipify!!!
#include <float.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed HIP API call with its source location; terminates the
// process with the error code unless abort is false.
void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
   if (code == hipSuccess)
      return;
   fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
   if (abort)
      exit(code);
}
// Returns the smallest power of two that is >= val (1 for val <= 1).
int infTwoExp(int val)
{
    int pow2 = 1;
    for (; pow2 < val; pow2 <<= 1)
        ;
    return pow2;
}
// Computes a 3D launch layout for a dim0 x dim1 x dim2 problem.
// Threads per block (tdim*) are powers of two chosen so that
// tdim0*tdim1*tdim2 <= 1024 with at most 64 threads on the innermost axis;
// blocks (bdim*) cover the remainder via ceiling division.
void getGPULayout(
    int dim0,int dim1,int dim2,
    int* bdim0,int* bdim1,int* bdim2,
    int* tdim0,int* tdim1,int* tdim2
)
{
    // innermost axis: cap at 64 threads, round small dims up to a power of 2
    int t2 = 64;
    if (dim2 < t2) { t2 = 1; while (t2 < dim2) t2 <<= 1; }
    *tdim2 = t2;
    *bdim2 = (dim2 + t2 - 1) / t2;

    // middle axis: whatever thread budget remains out of 1024
    int t1 = 1024 / t2;
    if (dim1 < t1) { t1 = 1; while (t1 < dim1) t1 <<= 1; }
    *tdim1 = t1;
    *bdim1 = (dim1 + t1 - 1) / t1;

    // outermost axis: remaining budget after the two inner axes
    int t0 = 1024 / (t1 * t2);
    if (dim0 < t0) { t0 = 1; while (t0 < dim0) t0 <<= 1; }
    *tdim0 = t0;
    *bdim0 = (dim0 + t0 - 1) / t0;
}
// Brute-force 1-NN search: for each query point que_pts[bi][p2i], scan all
// pn1 reference points of the same batch and store the index of the one
// with the smallest squared Euclidean distance in idxs[bi*pn2+p2i].
// Thread mapping (set up by getGPULayout in the launcher): x axis covers
// the batch dimension b, y axis covers the pn2 query points.
// If exclude_self is non-zero the reference with the query's own index is
// skipped -- presumably used when ref_pts and que_pts are the same cloud,
// TODO confirm with callers.
__global__
void findNearestFeatureIdxKernel(
    float* ref_pts, // [b,pn1,dim]
    float* que_pts, // [b,pn2,dim]
    int* idxs,      // [b,pn2]
    int b,
    int pn1,
    int pn2,
    int dim,
    int exclude_self
)
{
    int bi = threadIdx.x + blockIdx.x*blockDim.x;
    int p2i = threadIdx.y + blockIdx.y*blockDim.y;
    if(p2i>=pn2||bi>=b) return; // padded-grid bounds guard
    float* que_pt=&que_pts[bi*pn2*dim+p2i*dim];
    float min_dist=FLT_MAX;
    int min_idx=0;
    for(int p1i=0;p1i<pn1;p1i++)
    {
        if(exclude_self&&p1i==p2i) continue;
        float* ref_pt=&ref_pts[bi*pn1*dim+p1i*dim];
        // squared Euclidean distance (sqrt not needed for an argmin)
        float dist=0.f;
        for(int di=0;di<dim;di++)
            dist+=(ref_pt[di]-que_pt[di])*(ref_pt[di]-que_pt[di]);
        if(dist<min_dist)
        {
            min_dist=dist;
            min_idx=p1i;
        }
    }
    idxs[bi*pn2+p2i]=min_idx;
}
// Brute-force 2-NN search: for each query point que_pts[bi][p2i], find the
// nearest and second-nearest reference points by squared Euclidean
// distance. Their indices go to idxs[bi][p2i][0..1] and the corresponding
// squared distances to dists[bi][p2i][0..1].
// Thread mapping: x axis covers batches b, y axis covers pn2 query points.
// exclude_self skips the reference with the query's own index -- presumably
// used when ref_pts == que_pts, TODO confirm with callers.
__global__
void findFirstAndSecondNearestFeatureIdxKernel(
    float* ref_pts, // [b,pn1,dim]
    float* que_pts, // [b,pn2,dim]
    int* idxs,      // [b,pn2,2]
    float* dists,   // [b,pn2,2]
    int b,
    int pn1,
    int pn2,
    int dim,
    int exclude_self
)
{
    int bi = threadIdx.x + blockIdx.x*blockDim.x;
    int p2i = threadIdx.y + blockIdx.y*blockDim.y;
    if(p2i>=pn2||bi>=b) return; // padded-grid bounds guard
    float* que_pt=&que_pts[bi*pn2*dim+p2i*dim];
    float min_dist=FLT_MAX,min_dist2=FLT_MAX;
    int min_idx=0, min_idx2=0;
    for(int p1i=0;p1i<pn1;p1i++)
    {
        if(exclude_self&&p1i==p2i) continue;
        float* ref_pt=&ref_pts[bi*pn1*dim+p1i*dim];
        float dist=0.f;
        for(int di=0;di<dim;di++)
            dist+=(ref_pt[di]-que_pt[di])*(ref_pt[di]-que_pt[di]);
        if(dist<min_dist)
        {
            // new best: the previous best becomes the second best
            min_dist2=min_dist;
            min_idx2=min_idx;
            min_dist=dist;
            min_idx=p1i;
        }
        else if(dist<min_dist2)
        {
            min_dist2=dist;
            min_idx2=p1i;
        }
    }
    idxs[bi*pn2*2+p2i*2]=min_idx;
    idxs[bi*pn2*2+p2i*2+1]=min_idx2;
    dists[bi*pn2*2+p2i*2]=min_dist;
    dists[bi*pn2*2+p2i*2+1]=min_dist2;
}
#ifdef __cplusplus
extern "C" {
#endif
// Host wrapper for findNearestFeatureIdxKernel.
// Allocates device buffers, uploads both point clouds, launches the kernel
// with a (b, pn2, 1) layout from getGPULayout, downloads the per-query
// nearest-neighbour indices into 'idxs' and frees the device memory.
// Every HIP call is checked through gpuErrchk.
void findNearestPointIdxLauncher(
    float* ref_pts, // [b,pn1,dim]
    float* que_pts, // [b,pn2,dim]
    int* idxs,      // [b,pn2]
    int b,
    int pn1,
    int pn2,
    int dim,
    int exclude_self
)
{
    float* ref_pts_dev,* que_pts_dev;
    int* idxs_dev;
    gpuErrchk(hipMalloc(&ref_pts_dev,b*pn1*sizeof(float)*dim))
    gpuErrchk(hipMalloc(&que_pts_dev,b*pn2*sizeof(float)*dim))
    gpuErrchk(hipMalloc(&idxs_dev,b*pn2*sizeof(int)))
    gpuErrchk(hipMemcpy(ref_pts_dev,ref_pts,b*pn1*sizeof(float)*dim,hipMemcpyHostToDevice))
    gpuErrchk(hipMemcpy(que_pts_dev,que_pts,b*pn2*sizeof(float)*dim,hipMemcpyHostToDevice))
    // batch on x, query points on y, singleton z
    int bdim0,bdim1,bdim2;
    int tdim0,tdim1,tdim2;
    getGPULayout(b,pn2,1,&bdim0,&bdim1,&bdim2,&tdim0,&tdim1,&tdim2);
    dim3 bdim(bdim0,bdim1,bdim2);
    dim3 tdim(tdim0,tdim1,tdim2);
    hipLaunchKernelGGL(( findNearestFeatureIdxKernel), dim3(bdim),dim3(tdim), 0, 0, ref_pts_dev,que_pts_dev,idxs_dev,b,pn1,pn2,dim,exclude_self);
    gpuErrchk(hipGetLastError())
    gpuErrchk(hipMemcpy(idxs,idxs_dev,b*pn2*sizeof(int),hipMemcpyDeviceToHost))
    gpuErrchk(hipFree(ref_pts_dev))
    gpuErrchk(hipFree(que_pts_dev))
    gpuErrchk(hipFree(idxs_dev))
}
// Host wrapper for findFirstAndSecondNearestFeatureIdxKernel.
// Allocates device buffers, uploads both point clouds, launches the kernel
// with a (b, pn2, 1) layout from getGPULayout, then downloads the 1st/2nd
// nearest indices into 'idxs' and their SQUARED distances into 'dists'
// before freeing the device memory. HIP calls are checked via gpuErrchk.
void findFirstAndSecondNearestFeatureIdxLauncher(
    float* ref_pts, // [b,pn1,dim]
    float* que_pts, // [b,pn2,dim]
    int* idxs,      // [b,pn2,2]
    float* dists,   // [b,pn2,2]
    int b,
    int pn1,
    int pn2,
    int dim,
    int exclude_self
)
{
    float* ref_pts_dev,* que_pts_dev;
    int* idxs_dev;
    float* dists_dev;
    gpuErrchk(hipMalloc(&ref_pts_dev,b*pn1*sizeof(float)*dim))
    gpuErrchk(hipMalloc(&que_pts_dev,b*pn2*sizeof(float)*dim))
    gpuErrchk(hipMalloc(&idxs_dev,b*pn2*2*sizeof(int)))
    gpuErrchk(hipMalloc(&dists_dev,b*pn2*2*sizeof(float)))
    gpuErrchk(hipMemcpy(ref_pts_dev,ref_pts,b*pn1*sizeof(float)*dim,hipMemcpyHostToDevice))
    gpuErrchk(hipMemcpy(que_pts_dev,que_pts,b*pn2*sizeof(float)*dim,hipMemcpyHostToDevice))
    // batch on x, query points on y, singleton z
    int bdim0,bdim1,bdim2;
    int tdim0,tdim1,tdim2;
    getGPULayout(b,pn2,1,&bdim0,&bdim1,&bdim2,&tdim0,&tdim1,&tdim2);
    dim3 bdim(bdim0,bdim1,bdim2);
    dim3 tdim(tdim0,tdim1,tdim2);
    hipLaunchKernelGGL(( findFirstAndSecondNearestFeatureIdxKernel), dim3(bdim),dim3(tdim), 0, 0, ref_pts_dev,que_pts_dev,idxs_dev,
                       dists_dev,b,pn1,pn2,dim,exclude_self);
    gpuErrchk(hipGetLastError())
    gpuErrchk(hipMemcpy(idxs,idxs_dev,b*pn2*2*sizeof(int),hipMemcpyDeviceToHost))
    gpuErrchk(hipMemcpy(dists,dists_dev,b*pn2*2*sizeof(float),hipMemcpyDeviceToHost))
    gpuErrchk(hipFree(ref_pts_dev))
    gpuErrchk(hipFree(que_pts_dev))
    gpuErrchk(hipFree(idxs_dev))
    gpuErrchk(hipFree(dists_dev))
}
#ifdef __cplusplus
}
#endif
| 2d7166de3dcde7a29695cd21740d0dda608709a0.cu | #include <float.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA API call with its source location; terminates the
// process with the error code unless abort is false.
void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code == cudaSuccess)
      return;
   fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
   if (abort)
      exit(code);
}
// Returns the smallest power of two that is >= val (1 for val <= 1).
int infTwoExp(int val)
{
    int pow2 = 1;
    for (; pow2 < val; pow2 <<= 1)
        ;
    return pow2;
}
// Computes a 3D launch layout for a dim0 x dim1 x dim2 problem.
// Threads per block (tdim*) are powers of two chosen so that
// tdim0*tdim1*tdim2 <= 1024 with at most 64 threads on the innermost axis;
// blocks (bdim*) cover the remainder via ceiling division.
void getGPULayout(
    int dim0,int dim1,int dim2,
    int* bdim0,int* bdim1,int* bdim2,
    int* tdim0,int* tdim1,int* tdim2
)
{
    // innermost axis: cap at 64 threads, round small dims up to a power of 2
    int t2 = 64;
    if (dim2 < t2) { t2 = 1; while (t2 < dim2) t2 <<= 1; }
    *tdim2 = t2;
    *bdim2 = (dim2 + t2 - 1) / t2;

    // middle axis: whatever thread budget remains out of 1024
    int t1 = 1024 / t2;
    if (dim1 < t1) { t1 = 1; while (t1 < dim1) t1 <<= 1; }
    *tdim1 = t1;
    *bdim1 = (dim1 + t1 - 1) / t1;

    // outermost axis: remaining budget after the two inner axes
    int t0 = 1024 / (t1 * t2);
    if (dim0 < t0) { t0 = 1; while (t0 < dim0) t0 <<= 1; }
    *tdim0 = t0;
    *bdim0 = (dim0 + t0 - 1) / t0;
}
// Brute-force 1-NN search: for each query point que_pts[bi][p2i], scan all
// pn1 reference points of the same batch and store the index of the one
// with the smallest squared Euclidean distance in idxs[bi*pn2+p2i].
// Thread mapping (set up by getGPULayout in the launcher): x axis covers
// the batch dimension b, y axis covers the pn2 query points.
// If exclude_self is non-zero the reference with the query's own index is
// skipped -- presumably used when ref_pts and que_pts are the same cloud,
// TODO confirm with callers.
__global__
void findNearestFeatureIdxKernel(
    float* ref_pts, // [b,pn1,dim]
    float* que_pts, // [b,pn2,dim]
    int* idxs,      // [b,pn2]
    int b,
    int pn1,
    int pn2,
    int dim,
    int exclude_self
)
{
    int bi = threadIdx.x + blockIdx.x*blockDim.x;
    int p2i = threadIdx.y + blockIdx.y*blockDim.y;
    if(p2i>=pn2||bi>=b) return; // padded-grid bounds guard
    float* que_pt=&que_pts[bi*pn2*dim+p2i*dim];
    float min_dist=FLT_MAX;
    int min_idx=0;
    for(int p1i=0;p1i<pn1;p1i++)
    {
        if(exclude_self&&p1i==p2i) continue;
        float* ref_pt=&ref_pts[bi*pn1*dim+p1i*dim];
        // squared Euclidean distance (sqrt not needed for an argmin)
        float dist=0.f;
        for(int di=0;di<dim;di++)
            dist+=(ref_pt[di]-que_pt[di])*(ref_pt[di]-que_pt[di]);
        if(dist<min_dist)
        {
            min_dist=dist;
            min_idx=p1i;
        }
    }
    idxs[bi*pn2+p2i]=min_idx;
}
// Brute-force 2-NN search: for each query point que_pts[bi][p2i], find the
// nearest and second-nearest reference points by squared Euclidean
// distance. Their indices go to idxs[bi][p2i][0..1] and the corresponding
// squared distances to dists[bi][p2i][0..1].
// Thread mapping: x axis covers batches b, y axis covers pn2 query points.
// exclude_self skips the reference with the query's own index -- presumably
// used when ref_pts == que_pts, TODO confirm with callers.
__global__
void findFirstAndSecondNearestFeatureIdxKernel(
    float* ref_pts, // [b,pn1,dim]
    float* que_pts, // [b,pn2,dim]
    int* idxs,      // [b,pn2,2]
    float* dists,   // [b,pn2,2]
    int b,
    int pn1,
    int pn2,
    int dim,
    int exclude_self
)
{
    int bi = threadIdx.x + blockIdx.x*blockDim.x;
    int p2i = threadIdx.y + blockIdx.y*blockDim.y;
    if(p2i>=pn2||bi>=b) return; // padded-grid bounds guard
    float* que_pt=&que_pts[bi*pn2*dim+p2i*dim];
    float min_dist=FLT_MAX,min_dist2=FLT_MAX;
    int min_idx=0, min_idx2=0;
    for(int p1i=0;p1i<pn1;p1i++)
    {
        if(exclude_self&&p1i==p2i) continue;
        float* ref_pt=&ref_pts[bi*pn1*dim+p1i*dim];
        float dist=0.f;
        for(int di=0;di<dim;di++)
            dist+=(ref_pt[di]-que_pt[di])*(ref_pt[di]-que_pt[di]);
        if(dist<min_dist)
        {
            // new best: the previous best becomes the second best
            min_dist2=min_dist;
            min_idx2=min_idx;
            min_dist=dist;
            min_idx=p1i;
        }
        else if(dist<min_dist2)
        {
            min_dist2=dist;
            min_idx2=p1i;
        }
    }
    idxs[bi*pn2*2+p2i*2]=min_idx;
    idxs[bi*pn2*2+p2i*2+1]=min_idx2;
    dists[bi*pn2*2+p2i*2]=min_dist;
    dists[bi*pn2*2+p2i*2+1]=min_dist2;
}
#ifdef __cplusplus
extern "C" {
#endif
// Host wrapper for findNearestFeatureIdxKernel.
// Allocates device buffers, uploads both point clouds, launches the kernel
// with a (b, pn2, 1) layout from getGPULayout, downloads the per-query
// nearest-neighbour indices into 'idxs' and frees the device memory.
// Every CUDA call is checked through gpuErrchk.
void findNearestPointIdxLauncher(
    float* ref_pts, // [b,pn1,dim]
    float* que_pts, // [b,pn2,dim]
    int* idxs,      // [b,pn2]
    int b,
    int pn1,
    int pn2,
    int dim,
    int exclude_self
)
{
    float* ref_pts_dev,* que_pts_dev;
    int* idxs_dev;
    gpuErrchk(cudaMalloc(&ref_pts_dev,b*pn1*sizeof(float)*dim))
    gpuErrchk(cudaMalloc(&que_pts_dev,b*pn2*sizeof(float)*dim))
    gpuErrchk(cudaMalloc(&idxs_dev,b*pn2*sizeof(int)))
    gpuErrchk(cudaMemcpy(ref_pts_dev,ref_pts,b*pn1*sizeof(float)*dim,cudaMemcpyHostToDevice))
    gpuErrchk(cudaMemcpy(que_pts_dev,que_pts,b*pn2*sizeof(float)*dim,cudaMemcpyHostToDevice))
    // batch on x, query points on y, singleton z
    int bdim0,bdim1,bdim2;
    int tdim0,tdim1,tdim2;
    getGPULayout(b,pn2,1,&bdim0,&bdim1,&bdim2,&tdim0,&tdim1,&tdim2);
    dim3 bdim(bdim0,bdim1,bdim2);
    dim3 tdim(tdim0,tdim1,tdim2);
    findNearestFeatureIdxKernel<<<bdim,tdim>>>(ref_pts_dev,que_pts_dev,idxs_dev,b,pn1,pn2,dim,exclude_self);
    gpuErrchk(cudaGetLastError())
    gpuErrchk(cudaMemcpy(idxs,idxs_dev,b*pn2*sizeof(int),cudaMemcpyDeviceToHost))
    gpuErrchk(cudaFree(ref_pts_dev))
    gpuErrchk(cudaFree(que_pts_dev))
    gpuErrchk(cudaFree(idxs_dev))
}
// Host wrapper for findFirstAndSecondNearestFeatureIdxKernel.
// Allocates device buffers, uploads both point clouds, launches the kernel
// with a (b, pn2, 1) layout from getGPULayout, then downloads the 1st/2nd
// nearest indices into 'idxs' and their SQUARED distances into 'dists'
// before freeing the device memory. CUDA calls are checked via gpuErrchk.
void findFirstAndSecondNearestFeatureIdxLauncher(
    float* ref_pts, // [b,pn1,dim]
    float* que_pts, // [b,pn2,dim]
    int* idxs,      // [b,pn2,2]
    float* dists,   // [b,pn2,2]
    int b,
    int pn1,
    int pn2,
    int dim,
    int exclude_self
)
{
    float* ref_pts_dev,* que_pts_dev;
    int* idxs_dev;
    float* dists_dev;
    gpuErrchk(cudaMalloc(&ref_pts_dev,b*pn1*sizeof(float)*dim))
    gpuErrchk(cudaMalloc(&que_pts_dev,b*pn2*sizeof(float)*dim))
    gpuErrchk(cudaMalloc(&idxs_dev,b*pn2*2*sizeof(int)))
    gpuErrchk(cudaMalloc(&dists_dev,b*pn2*2*sizeof(float)))
    gpuErrchk(cudaMemcpy(ref_pts_dev,ref_pts,b*pn1*sizeof(float)*dim,cudaMemcpyHostToDevice))
    gpuErrchk(cudaMemcpy(que_pts_dev,que_pts,b*pn2*sizeof(float)*dim,cudaMemcpyHostToDevice))
    // batch on x, query points on y, singleton z
    int bdim0,bdim1,bdim2;
    int tdim0,tdim1,tdim2;
    getGPULayout(b,pn2,1,&bdim0,&bdim1,&bdim2,&tdim0,&tdim1,&tdim2);
    dim3 bdim(bdim0,bdim1,bdim2);
    dim3 tdim(tdim0,tdim1,tdim2);
    findFirstAndSecondNearestFeatureIdxKernel<<<bdim,tdim>>>(ref_pts_dev,que_pts_dev,idxs_dev,
                                                             dists_dev,b,pn1,pn2,dim,exclude_self);
    gpuErrchk(cudaGetLastError())
    gpuErrchk(cudaMemcpy(idxs,idxs_dev,b*pn2*2*sizeof(int),cudaMemcpyDeviceToHost))
    gpuErrchk(cudaMemcpy(dists,dists_dev,b*pn2*2*sizeof(float),cudaMemcpyDeviceToHost))
    gpuErrchk(cudaFree(ref_pts_dev))
    gpuErrchk(cudaFree(que_pts_dev))
    gpuErrchk(cudaFree(idxs_dev))
    gpuErrchk(cudaFree(dists_dev))
}
#ifdef __cplusplus
}
#endif
|
6783c3f54bd8469b5592590c94e330fd8f90bcf5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************
* Sparse Auto-Encoder
* by
* David Klaus and Alex Welles
* EC527 Final Project
*
* Serial Implementation With Timing Code
*
* Compile with:
*
* nvcc -o cudaCHUNK sparseAutoencoder_CUDA_CHUNK.cu
*
* for rho_forwardProp2 variant (atomicAdd) use:
* nvcc -arch=sm_20 -o sparseAutoencoder_cuda sparseAutoencoder_cuda.cu
*
*******************************************************************/
#include <cstdio>//<stdio.h>
#include <cstdlib>//<stdio.h>
#include <time.h>
#include <math.h>
#include <string>
#include <sstream>
#include <fstream>
#include <iostream>
#include <string>
#include <numeric>
#include "cuPrintf.hip"
#include "cuPrintf_hip.cuh"
#define GIG 1000000000
#define CPG 2.527
#define OPTIONS 1
#define TOL 0.000001
#define PRINT_TIME 1
//Parameters necessary to set up network
#define PATCHES_PATH "c_patches.csv"//For DEBUG
#define W1_PATH "W1.csv"//For DEBUG
#define W2_PATH "W2.csv"//For DEBUG
#define IMAGE_DIM 512 //pixels in 1 dimension (assumes square)
#define SAMPLE_SIZE 10000 //number of input patches
#define FORWARD_CHUNK_SIZE 1024 //maximum size that thread in forward propagate can fit into contents of shared mem (estimate based on tests)
#define BACKWARD_CHUNK_SIZE 1024 //maximum size that thread in backward propagate can fit into contents of shared mem (estimate based on tests)
#define SPARSITY_CHUNK_SIZE 1024 // maximum size that thread in sparsity enforment block can fit into the contents of shared mem (estimate based on tests)
#define HIDDEN_LAYERS 1 //number hidden layers (NON-FUNCTIONAL)
#define NUM_SAMPLE_ELEMENTS SAMPLE_SIZE * visible_size
//desired average activation of hidden nodes
#define SPARSITY_PARAM 0.01
#define SPARSITY_COMPLEMENT 1-SPARSITY_PARAM
//weight decay paramater
#define LAMBDA 0.0001
//weight of sparsity penalty term
#define BETA 3.0
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Abort (or merely report, when abort==false) on a failed HIP runtime call.
// 'file'/'line' identify the call site (supplied by CUDA_SAFE_CALL via
// __FILE__/__LINE__).  The file parameter must be const char*: string
// literals no longer convert to char* in modern C++.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
	if (code != hipSuccess)
	{
		// Human-readable error string plus the offending source location.
		fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line);
		if (abort) exit(code);
	}
}
using namespace std;
/***********************************
KERNEL VECTOR OPS
***********************************/
//http://stackoverflow.com/questions/14291233/confusion-about-cuda-partial-sum-codes-threadidx-x-blockdim-x-and-a-2
//https://code.google.com/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/sum_reduction.cu
// Per-block sum reduction: block i writes the sum of its slice of `input`
// into per_block_results[i].  Dynamic shared memory must hold blockDim.x
// floats; the halving loop below assumes blockDim.x is a power of two.
__global__ void kernel_block_sum(const float *input, float *per_block_results, const size_t n)
{
    extern __shared__ float partials[];
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;

    // Each thread stages one element; lanes past the end contribute zero.
    partials[threadIdx.x] = (gid < n) ? input[gid] : 0.0f;
    __syncthreads();

    // Contiguous-range tree reduction: halve the active span each pass.
    for (int span = blockDim.x / 2; span > 0; span >>= 1)
    {
        if (threadIdx.x < span)
        {
            // fold the upstream partial into our own
            partials[threadIdx.x] += partials[threadIdx.x + span];
        }
        // all lanes must finish this pass before the next one reads the sums
        __syncthreads();
    }

    // Lane 0 publishes the block total.
    if (threadIdx.x == 0)
    {
        per_block_results[blockIdx.x] = partials[0];
    }
}
/////////////////////////////////////////////////////////////////////////
// Chunked wave functions implementation
// all kernel calls broken into chunks. Alows for any size HIDDEN - VISIBLE - HIDDEN neural network
/////////////////////////////////////////////////////////////////////////
//dim3 gridDim(hidden_size,1,1);
//dim3 blockDim(CHUNKED_SIZE,1,1);
// Launched with gridDim (output_size,1,1), blockDim (FORWARD_CHUNK_SIZE,1,1).
// One layer of forward propagation: for output unit j (= blockIdx.x),
//   output[j] (+)= sigmoid( dot(W1 row j, input) + b1[j] )
// COLUMN_SIZE is the input length / W1 row length.  whichProp == 0 selects
// the rho accumulation pass (+= across samples); any other value overwrites
// the output (ordinary forward propagation).
// The dot product is processed in FORWARD_CHUNK_SIZE slices.  Out-of-range
// lanes contribute 0 so every thread reaches every __syncthreads(): the
// original remainder path ran barriers inside a divergent branch, which is
// undefined behaviour.
__global__ void kernel_forwardProp_chunked(float *input, float* W1, float* b1, float* output, int whichProp, int COLUMN_SIZE)
{
    __shared__ float prodS[FORWARD_CHUNK_SIZE];
    float runningSum = 0.0f;
    for (int ii = 0; ii < COLUMN_SIZE; ii += FORWARD_CHUNK_SIZE) {
        int col = ii + threadIdx.x;
        // Stage this slice of W1-row * input products; zero-pad past the
        // end so the power-of-two tree reduction below stays valid.
        if (col < COLUMN_SIZE) {
            prodS[threadIdx.x] = W1[blockIdx.x * COLUMN_SIZE + col] * input[col];
        } else {
            prodS[threadIdx.x] = 0.0f;
        }
        __syncthreads();
        // Tree-sum the slice (FORWARD_CHUNK_SIZE is a power of two).
        for (int offset = FORWARD_CHUNK_SIZE / 2; offset > 0; offset >>= 1)
        {
            if (threadIdx.x < offset)
            {
                // add a partial sum upstream to our own
                prodS[threadIdx.x] += prodS[threadIdx.x + offset];
            }
            // wait until all threads have updated their partial sums
            __syncthreads();
        }
        if (threadIdx.x == 0) { // fold this slice into the running total
            runningSum += prodS[0];
        }
        __syncthreads(); // slice total consumed before prodS is reused
    }
    if (threadIdx.x == 0) // one lane finalizes the unit
    {
        // add the bias vector value
        runningSum += b1[blockIdx.x];
        // apply the logistic sigmoid (float intrinsic, was double exp)
        runningSum = 1.0f / (1.0f + expf(-runningSum));
        if (whichProp == 0) { // rho pass accumulates across samples
            output[blockIdx.x] += runningSum;
        } else { // forward propagation 1 or 2 overwrites
            output[blockIdx.x] = runningSum;
        }
    }
}
//dim3 gridDim(1,1,1);
//dim3 blockDim(BACKWARD_CHUNK_SIZE,1,1);
// Launched with gridDim (1,1,1), blockDim (BACKWARD_CHUNK_SIZE,1,1).
// Output-layer backprop for one sample:
//   d3     = -(xM - a3) .* (a3 .* (1 - a3))
//   b2grad = b2grad + d3
//   cost   = norm(a3 - xM)^2      (squared reconstruction error)
// NOTE(review): cost[0] is overwritten each launch, not accumulated, so
// after the host's per-sample loop it holds only the last sample's error --
// confirm intended semantics before relying on it.
// hidden_size is unused but kept for a uniform kernel interface.
// Out-of-range lanes contribute 0 to the reduction so every thread reaches
// every __syncthreads() (the original remainder path ran barriers inside a
// divergent branch, which is undefined behaviour).
__global__ void kernel_backProp1_chunked(float* input, float* a3, float* d3, float* b2grad, float* cost, int visible_size, int hidden_size)
{
    __shared__ float errS[BACKWARD_CHUNK_SIZE];
    float runningSum = 0.0f;
    for (int ii = 0; ii < visible_size; ii += BACKWARD_CHUNK_SIZE) {
        int v = ii + threadIdx.x;
        if (v < visible_size) {
            float x = input[v];
            float a = a3[v];
            // d3 = -(xM - a3) .* (a3 .* (1 - a3));
            float delta = -(x - a) * (a * (1.0f - a));
            d3[v] = delta;
            // b2grad = b2grad + d3;
            b2grad[v] += delta;
            // contribution to norm(a3 - xM)^2
            float diff = a - x;
            errS[threadIdx.x] = diff * diff;
        } else {
            errS[threadIdx.x] = 0.0f; // pad for the power-of-two reduction
        }
        __syncthreads();
        for (int offset = BACKWARD_CHUNK_SIZE / 2; offset > 0; offset >>= 1)
        {
            if (threadIdx.x < offset)
            {
                // add a partial sum upstream to our own
                errS[threadIdx.x] += errS[threadIdx.x + offset];
            }
            // wait until all threads have updated their partial sums
            __syncthreads();
        }
        if (threadIdx.x == 0) { // fold this slice into the sample total
            runningSum += errS[0];
        }
        __syncthreads(); // slice total consumed before errS is reused
    }
    if (threadIdx.x == 0)
    {
        cost[0] = runningSum;
    }
}
//dim3 gridDim(hidden_size,1,1);
//dim3 blockDim(BACKWARD_CHUNK_SIZE,1,1);
// Launched with gridDim (hidden_size,1,1), blockDim (BACKWARD_CHUNK_SIZE,1,1).
// First half of hidden-layer backprop: d2 = W2' * d3.
// W2 is visible_size x hidden_size row-major, so column blockIdx.x is
// addressed as W2[v * hidden_size + blockIdx.x].
// Fixes vs. the original: the chunk offset is applied to the row index
// (it was previously added to the already-flattened index); the main loop
// no longer reads past the end of W2/d3 (out-of-range lanes contribute 0);
// the reduction never sums uninitialized shared memory; and every thread
// reaches every __syncthreads() (the original remainder path ran barriers
// inside a divergent branch).
__global__ void kernel_backProp2_chunked(float* W2, float* d2, float* d3, int visible_size, int hidden_size)
{
    __shared__ float prodS[BACKWARD_CHUNK_SIZE];
    float runningSum = 0.0f;
    for (int ii = 0; ii < visible_size; ii += BACKWARD_CHUNK_SIZE) {
        int v = ii + threadIdx.x;
        if (v < visible_size) {
            // column blockIdx.x of W2 (i.e. row of W2') times d3, element v
            prodS[threadIdx.x] = W2[v * hidden_size + blockIdx.x] * d3[v];
        } else {
            prodS[threadIdx.x] = 0.0f; // pad for the power-of-two reduction
        }
        __syncthreads();
        for (int offset = BACKWARD_CHUNK_SIZE / 2; offset > 0; offset >>= 1)
        {
            if (threadIdx.x < offset)
            {
                // add a partial sum upstream to our own
                prodS[threadIdx.x] += prodS[threadIdx.x + offset];
            }
            // wait until all threads have updated their partial sums
            __syncthreads();
        }
        if (threadIdx.x == 0) { // fold this slice into the dot product
            runningSum += prodS[0];
        }
        __syncthreads(); // slice total consumed before prodS is reused
    }
    if (threadIdx.x == 0)
    {
        // interim value of d2 = (W2' * d3); the sparsity term is applied
        // afterwards by kernel_backProp3_chunked
        d2[blockIdx.x] = runningSum;
    }
}
//dim3 gridDim(1,1,1);
//dim3 blockDim(BACKWARD_CHUNK_SIZEE,1,1);
// Launched with gridDim (1,1,1), blockDim (BACKWARD_CHUNK_SIZE,1,1).
// Second half of hidden-layer backprop:
//   d2     = (W2'*d3 + beta*(-rho./rhoHat + (1-rho)./(1-rhoHat))) .* a2 .* (1-a2)
//   b1grad = b1grad + d2
// d2 arrives holding the interim W2'*d3 from kernel_backProp2_chunked.
// Fixes vs. the original: BETA now multiplies the entire KL sparsity
// penalty (it previously grouped so that BETA applied only to -rho/rhoHat),
// and the purely per-element work no longer stages through shared memory,
// which removes the __syncthreads() that were executed inside a divergent
// branch.  visible_size is unused but kept for a uniform kernel interface.
__global__ void kernel_backProp3_chunked(float* a2, float* d2, float* rhoHat, float* b1grad, int visible_size, int hidden_size)
{
    for (int ii = 0; ii < hidden_size; ii += BACKWARD_CHUNK_SIZE) {
        int h = ii + threadIdx.x;
        if (h < hidden_size) {
            float rho = rhoHat[h];
            float act = a2[h];
            // KL-divergence sparsity penalty, weighted by BETA as a whole
            float penalty = BETA * (-(SPARSITY_PARAM / rho)
                    + (1.0f - SPARSITY_PARAM) / (1.0f - rho));
            // calculate d2
            float delta = (d2[h] + penalty) * (act * (1.0f - act));
            d2[h] = delta;
            // update b1 gradient: b1grad = b1grad + d2
            b1grad[h] += delta;
        }
    }
}
//dim3 gridDim(hidden_size,1,1);
//dim3 blockDim(BACKWARD_CHUNK_SIZEE,1,1);
// Launched with gridDim (hidden_size,1,1), blockDim (BACKWARD_CHUNK_SIZE,1,1).
// Weight-gradient accumulation; block blockIdx.x handles hidden unit h:
//   W2grad = W2grad + d3 * a2'   ->  W2grad[v][h] += d3[v] * a2[h]
//   W1grad = W1grad + d2 * xM'   ->  W1grad[h][v] += d2[h] * input[v]
// W1grad is hidden x visible row-major; W2grad is visible x hidden row-major.
// Fixes vs. the original (which was marked "not completed yet"): the chunk
// offset is applied to the d3/input loads, the write-back uses the same
// indices as the read (the original wrote to a flat [threadIdx.x + ii]
// offset), the loop no longer runs past visible_size, and the per-unit
// a2/d2 values are plain per-thread reads instead of an unsynchronized
// shared-memory broadcast from lane 0.
__global__ void kernel_backProp4_chunked(float* input, float* a2, float* d2, float* d3, float* W1grad, float* W2grad, int visible_size, int hidden_size)
{
    // every thread reads the same element; the load is served by the cache
    float a2H = a2[blockIdx.x];
    float d2H = d2[blockIdx.x];
    for (int ii = 0; ii < visible_size; ii += BACKWARD_CHUNK_SIZE) {
        int v = ii + threadIdx.x;
        if (v < visible_size) {
            // W2grad = W2grad + d3 * a2';  (column blockIdx.x of W2grad)
            W2grad[v * hidden_size + blockIdx.x] += d3[v] * a2H;
            // W1grad = W1grad + d2 * xM';  (row blockIdx.x of W1grad)
            W1grad[blockIdx.x * visible_size + v] += d2H * input[v];
        }
    }
}
//dim3 gridDim(hidden_size,1,1);
//dim3 blockDim(visible_size,1,1);
// Launched with gridDim (hidden_size,1,1), blockDim (SPARSITY_CHUNK_SIZE,1,1).
// Final gradient normalization; block blockIdx.x handles hidden unit h:
//   W1grad = W1grad ./ M + lambda .* W1
//   W2grad = W2grad ./ M + lambda .* W2
// W1/W1grad are hidden x visible row-major (row h); W2/W2grad are
// visible x hidden row-major (column h).
// Fixes vs. the original: the write-back now uses the same visible_size /
// hidden_size strides as the read (it previously used blockDim.x and
// gridDim.x, which scatter the results whenever the launch shape differs
// from the matrix shape), and the purely per-element work no longer stages
// through shared memory, removing the __syncthreads() that were executed
// inside a divergent branch.
__global__ void kernel_sparsityEnforcement_chunked(float* W1, float* W2, float* W1grad, float* W2grad, int visible_size, int hidden_size)
{
    for (int ii = 0; ii < visible_size; ii += SPARSITY_CHUNK_SIZE) {
        int v = ii + threadIdx.x;
        if (v < visible_size) {
            int w1Idx = blockIdx.x * visible_size + v;  // row blockIdx.x
            int w2Idx = v * hidden_size + blockIdx.x;   // column blockIdx.x
            W1grad[w1Idx] = W1grad[w1Idx] / SAMPLE_SIZE + LAMBDA * W1[w1Idx];
            W2grad[w2Idx] = W2grad[w2Idx] / SAMPLE_SIZE + LAMBDA * W2[w2Idx];
        }
    }
}
/**********************************
SERIAL VECTOR OPS
***********************************/
void initializeMatrixWeightsRand(float *arr, int rows, int cols, int seed);
void initializeMatrixWeightsZero(float *arr, int rows, int cols);
void initializeVectorWeightsZero(float *arr, int numElements);
void mmm_kij(float* src1, float* src2, float* dest, int row1, int col1, int row2,int col2);
void mmm_ijk(float* src1, float* src2, float* dest, int row1, int col1, int row2,int col2);
void dotPdt(float* src1,float* src2, float* dest, int length);
void readCSV(float* array, int numElements, string filename);
void addVectors(float* src1, float* src2, float* dest, int length);
void subVectors(float* src1, float* src2, float* dest, int length);
void vectElemSigmoid(float* src,float* dest,int length);
void vectElemIntDiv(float* src, float* dest,int length,int divisor);
void vectElemFloatDiv(float* src, float* dest,int length,float divisor);
void vectElemVectDiv(float* src1,float* src2,float* dest,int length);
void initializeVector(float *array, int length, float val);
void vectElemVectMult(float* src1, float* src2, float* dest, int length);
void vectElemFloatMult(float* src, float* dest, int length,float multiplicand);
void matrixTranspose(float* src,float* dest,int rows, int cols);
float normVector(float* src,int length);
void vectElemLog(float* src,float* dest,int length);
float sumVector(float* src,int length);
/* PRINTOUT, DEBUG, AND TIMING FUNCTIONS */
void printVector(float* A, int length);
void printMatrix(float* A, int rows, int cols);
void printTiming(struct timespec* time_stamp,int numTimings);
/* Allocate n floats on the host.  malloc failure is not checked, matching
 * the original code's behaviour. */
static float* hostAllocFloats(size_t n)
{
    return (float *) malloc(n * sizeof(float));
}

/* Allocate n floats on the device, aborting via CUDA_SAFE_CALL on failure. */
static float* deviceAllocFloats(size_t n)
{
    float *ptr = NULL;
    CUDA_SAFE_CALL(hipMalloc((void **)&ptr, n * sizeof(float)));
    return ptr;
}

/* Copy n floats host -> device. */
static void copyFloatsToDevice(float *dst, const float *src, size_t n)
{
    CUDA_SAFE_CALL(hipMemcpy(dst, src, n * sizeof(float), hipMemcpyHostToDevice));
}

/* Copy n floats device -> host. */
static void copyFloatsToHost(float *dst, const float *src, size_t n)
{
    CUDA_SAFE_CALL(hipMemcpy(dst, src, n * sizeof(float), hipMemcpyDeviceToHost));
}

// Trains one gradient pass of a sparse autoencoder on the GPU.
//   argv[1] = visible_size (input/output units), argv[2] = hidden_size.
// Matrix layouts follow the MATLAB reference: W1/W1grad are hidden x
// visible row-major, W2/W2grad are visible x hidden row-major.
int main(int argc, char *argv[])
{
    if (argc < 3) {
        fprintf(stderr, "usage: %s <visible_size> <hidden_size>\n", argv[0]);
        return 1;
    }
    int visible_size;
    int hidden_size;
    sscanf (argv[1],"%d",&visible_size);
    sscanf (argv[2],"%d",&hidden_size);

    // GPU timing variables
    hipEvent_t start, stop;
    float elapsed_gpu;

    /***********************************
        ALLOCATE HOST MEMORY
    ************************************/
    // (The original also allocated z2/z3 and four temp buffers that were
    // never read; those dead allocations have been dropped.)
    float *h_inputs    = hostAllocFloats((size_t)visible_size * SAMPLE_SIZE); // training patches
    float *h_rhoHat    = hostAllocFloats(hidden_size);   // average hidden activation
    float *h_W1        = hostAllocFloats((size_t)visible_size * hidden_size);
    float *h_W2        = hostAllocFloats((size_t)visible_size * hidden_size);
    float *h_W1grad    = hostAllocFloats((size_t)visible_size * hidden_size);
    float *h_W2grad    = hostAllocFloats((size_t)visible_size * hidden_size);
    float *h_b1        = hostAllocFloats(hidden_size);   // layer-1 bias
    float *h_b2        = hostAllocFloats(visible_size);  // layer-2 bias
    float *h_b1grad    = hostAllocFloats(hidden_size);
    float *h_b2grad    = hostAllocFloats(visible_size);
    float *h_a2        = hostAllocFloats(hidden_size);   // hidden activations
    float *h_a3        = hostAllocFloats(visible_size);  // output activations
    float *h_d2        = hostAllocFloats(hidden_size);   // hidden deltas
    float *h_d3        = hostAllocFloats(visible_size);  // output deltas
    float *h_sparsePen = hostAllocFloats(hidden_size);   // sparsity penalty
    float *h_cost      = hostAllocFloats(1);

    /***********************************
        ALLOCATE DEVICE MEMORY
    ************************************/
    float *d_inputs    = deviceAllocFloats((size_t)visible_size * SAMPLE_SIZE);
    float *d_rhoHat    = deviceAllocFloats(hidden_size);
    float *d_W1        = deviceAllocFloats((size_t)visible_size * hidden_size);
    float *d_W2        = deviceAllocFloats((size_t)visible_size * hidden_size);
    float *d_W1grad    = deviceAllocFloats((size_t)visible_size * hidden_size);
    float *d_W2grad    = deviceAllocFloats((size_t)visible_size * hidden_size);
    float *d_b1        = deviceAllocFloats(hidden_size);
    float *d_b2        = deviceAllocFloats(visible_size);
    float *d_b1grad    = deviceAllocFloats(hidden_size);
    float *d_b2grad    = deviceAllocFloats(visible_size);
    float *d_a2        = deviceAllocFloats(hidden_size);
    float *d_a3        = deviceAllocFloats(visible_size);
    float *d_d2        = deviceAllocFloats(hidden_size);
    float *d_d3        = deviceAllocFloats(visible_size);
    float *d_sparsePen = deviceAllocFloats(hidden_size);
    float *d_cost      = deviceAllocFloats(1);

    /***********************************
        INITIALIZE NETWORK WEIGHTS
    ************************************/
    initializeMatrixWeightsRand(h_W1, hidden_size, visible_size, 2254);
    initializeMatrixWeightsRand(h_W2, visible_size, hidden_size, 1345);
    initializeMatrixWeightsZero(h_W2grad, visible_size, hidden_size);
    initializeMatrixWeightsZero(h_W1grad, hidden_size, visible_size);
    initializeVectorWeightsZero(h_b1, hidden_size);
    initializeVectorWeightsZero(h_b2, visible_size);
    initializeVectorWeightsZero(h_rhoHat, hidden_size);
    initializeVectorWeightsZero(h_a2, hidden_size);
    initializeVectorWeightsZero(h_a3, visible_size);
    // The bias gradients are accumulated into by the kernels, so they must
    // start at zero (the original copied uninitialized memory to the GPU).
    initializeVectorWeightsZero(h_b1grad, hidden_size);
    initializeVectorWeightsZero(h_b2grad, visible_size);

    /***********************************
        READ IN SAMPLE PATCHES
    ************************************/
    readCSV(h_inputs, NUM_SAMPLE_ELEMENTS, PATCHES_PATH);
    //the following are for debug only
    readCSV(h_W1, hidden_size*visible_size, W1_PATH);
    readCSV(h_W2, hidden_size*visible_size, W2_PATH);

    /***************************************
        BEGIN CUDA TIMING
    ****************************************/
#if PRINT_TIME
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
#endif

    // Transfer everything the kernels read to the GPU.  (d_d2/d_d3 are
    // fully written by the kernels before being read, so they need no
    // initial copy; d_sparsePen/d_cost are outputs only.)
    copyFloatsToDevice(d_inputs, h_inputs, (size_t)visible_size * SAMPLE_SIZE);
    copyFloatsToDevice(d_W1, h_W1, (size_t)visible_size * hidden_size);
    copyFloatsToDevice(d_W2, h_W2, (size_t)visible_size * hidden_size);
    copyFloatsToDevice(d_W1grad, h_W1grad, (size_t)visible_size * hidden_size);
    copyFloatsToDevice(d_W2grad, h_W2grad, (size_t)visible_size * hidden_size);
    copyFloatsToDevice(d_rhoHat, h_rhoHat, hidden_size);
    copyFloatsToDevice(d_b1, h_b1, hidden_size);
    copyFloatsToDevice(d_b1grad, h_b1grad, hidden_size);
    copyFloatsToDevice(d_a2, h_a2, hidden_size);
    copyFloatsToDevice(d_b2, h_b2, visible_size);
    copyFloatsToDevice(d_b2grad, h_b2grad, visible_size);
    copyFloatsToDevice(d_a3, h_a3, visible_size);

    // Launch shapes: one block per output element, CHUNK-wide thread
    // blocks (the chunked kernels require blockDim.x == their CHUNK size).
    dim3 gridHidden(hidden_size, 1, 1);
    dim3 gridVisible(visible_size, 1, 1);
    dim3 gridOne(1, 1, 1);
    dim3 forwardBlock(FORWARD_CHUNK_SIZE, 1, 1);
    dim3 backwardBlock(BACKWARD_CHUNK_SIZE, 1, 1);
    dim3 sparsityBlock(SPARSITY_CHUNK_SIZE, 1, 1);

    //**************************************
    //  rho forward propagation: accumulate hidden activations over samples
    //**************************************
    for(int i = 0; i < NUM_SAMPLE_ELEMENTS; i += visible_size)
    {
        hipLaunchKernelGGL(( kernel_forwardProp_chunked), dim3(gridHidden), dim3(forwardBlock), 0, 0, &d_inputs[i], d_W1, d_b1, d_rhoHat, 0, visible_size);
        CUDA_SAFE_CALL(hipPeekAtLastError());
    }

    // rhoHat = rhoHat / M  (averaged on the host)
    copyFloatsToHost(h_rhoHat, d_rhoHat, hidden_size);
    vectElemFloatDiv(h_rhoHat, h_rhoHat, hidden_size, SAMPLE_SIZE);
    copyFloatsToDevice(d_rhoHat, h_rhoHat, hidden_size);
    cout <<"GPU rhoHat" << endl;//DEBUG
    printVector(h_rhoHat,1);//DEBUG

    for(int i = 0; i < NUM_SAMPLE_ELEMENTS; i += visible_size)
    {
        //***************************************
        //  FORWARD PROPAGATION a(1) --> a(2)
        //***************************************
        hipLaunchKernelGGL(( kernel_forwardProp_chunked), dim3(gridHidden), dim3(forwardBlock), 0, 0, &d_inputs[i], d_W1, d_b1, d_a2, 1, visible_size);
        CUDA_SAFE_CALL(hipPeekAtLastError());
        //***************************************
        //  FORWARD PROPAGATION a(2) --> a(3)
        //***************************************
        // One block per visible unit; the layer-2 input is a2, weighted by
        // W2 and biased by b2.  (The original relaunched this kernel with
        // the layer-1 arguments, recomputing a2 instead of a3.)
        hipLaunchKernelGGL(( kernel_forwardProp_chunked), dim3(gridVisible), dim3(forwardBlock), 0, 0, d_a2, d_W2, d_b2, d_a3, 2, hidden_size);
        CUDA_SAFE_CALL(hipPeekAtLastError());
        //***************************************
        //  BACK PROPAGATION: d(3), b2grad, cost
        //***************************************
        hipLaunchKernelGGL(( kernel_backProp1_chunked), dim3(gridOne), dim3(backwardBlock), 0, 0, &d_inputs[i], d_a3, d_d3, d_b2grad, d_cost, visible_size, hidden_size);
        CUDA_SAFE_CALL(hipPeekAtLastError());
        //***************************************
        //  BACK PROPAGATION: d(2) = W2'*d3, then sparsity term and b1grad
        //***************************************
        hipLaunchKernelGGL(( kernel_backProp2_chunked), dim3(gridHidden), dim3(backwardBlock), 0, 0, d_W2, d_d2, d_d3, visible_size, hidden_size);
        CUDA_SAFE_CALL(hipPeekAtLastError());
        hipLaunchKernelGGL(( kernel_backProp3_chunked), dim3(gridOne), dim3(backwardBlock), 0, 0, d_a2, d_d2, d_rhoHat, d_b1grad, visible_size, hidden_size);
        CUDA_SAFE_CALL(hipPeekAtLastError());
        hipLaunchKernelGGL(( kernel_backProp4_chunked), dim3(gridHidden), dim3(backwardBlock), 0, 0, &d_inputs[i], d_a2, d_d2, d_d3, d_W1grad, d_W2grad, visible_size, hidden_size);
        CUDA_SAFE_CALL(hipPeekAtLastError());
    }

    // W1grad = W1grad ./ M + lambda .* W1  (and likewise for W2grad)
    hipLaunchKernelGGL(( kernel_sparsityEnforcement_chunked), dim3(gridHidden), dim3(sparsityBlock), 0, 0, d_W1, d_W2, d_W1grad, d_W2grad, visible_size, hidden_size);
    CUDA_SAFE_CALL(hipPeekAtLastError());

    // Pull a few results back for inspection.
    copyFloatsToHost(h_a2, d_a2, hidden_size);
    cout <<"GPU a2" << endl;//DEBUG
    printVector(h_a2,1);
    copyFloatsToHost(h_a3, d_a3, visible_size);
    cout <<"GPU a3" << endl;//DEBUG
    printVector(h_a3,1);
    copyFloatsToHost(h_d3, d_d3, visible_size);
    cout << "GPU d3" << endl;//DEBUG
    printVector(h_d3,1);//DEBUG

#if PRINT_TIME
    // Stop and destroy the timer
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed_gpu, start, stop); // milliseconds
    // ms * 1e6 / 1e9 == seconds
    printf("\nGPU time: %f (sec)\n", elapsed_gpu*1000000/GIG);
    hipEventDestroy(start);
    hipEventDestroy(stop);
#endif

    /***************************************
        FREEING HOST MEMORY
    ****************************************/
    free(h_inputs);
    free(h_rhoHat);
    free(h_W1);
    free(h_W2);
    free(h_b1);
    free(h_b2);
    free(h_W1grad);
    free(h_W2grad);
    free(h_b1grad);
    free(h_b2grad);
    free(h_a2);
    free(h_a3);
    free(h_d2);
    free(h_d3);
    free(h_sparsePen);
    free(h_cost);

    /***************************************
        FREEING DEVICE MEMORY
    ****************************************/
    CUDA_SAFE_CALL(hipFree(d_inputs));
    CUDA_SAFE_CALL(hipFree(d_rhoHat));
    CUDA_SAFE_CALL(hipFree(d_W1));
    CUDA_SAFE_CALL(hipFree(d_W2));
    CUDA_SAFE_CALL(hipFree(d_b1));
    CUDA_SAFE_CALL(hipFree(d_b2));
    CUDA_SAFE_CALL(hipFree(d_W1grad));
    CUDA_SAFE_CALL(hipFree(d_W2grad));
    CUDA_SAFE_CALL(hipFree(d_b1grad));
    CUDA_SAFE_CALL(hipFree(d_b2grad));
    CUDA_SAFE_CALL(hipFree(d_a2));
    CUDA_SAFE_CALL(hipFree(d_a3));
    CUDA_SAFE_CALL(hipFree(d_d2));
    CUDA_SAFE_CALL(hipFree(d_d3));
    CUDA_SAFE_CALL(hipFree(d_sparsePen));
    CUDA_SAFE_CALL(hipFree(d_cost));
    return 0;
}
/***********************************************
TIMING FUNCTIONS AND STRUCTS
***********************************************/
/* Returns end - start as a normalized timespec (tv_nsec in [0, 1e9)). */
struct timespec diff(struct timespec start, struct timespec end)
{
	struct timespec result;
	long nsec = end.tv_nsec - start.tv_nsec;
	time_t sec = end.tv_sec - start.tv_sec;
	if (nsec < 0)
	{
		/* borrow one second so the nanosecond field stays non-negative */
		sec -= 1;
		nsec += 1000000000;
	}
	result.tv_sec = sec;
	result.tv_nsec = nsec;
	return result;
}
/* Prints every recorded CPU timing, converted to seconds, one per line. */
void printTiming(struct timespec* time_stamp, int numTimings)
{
	int j = 0;
	while (j < numTimings)
	{
		if (j > 0)
			printf(", ");
		double ticks = (double)(GIG * time_stamp[j].tv_sec + time_stamp[j].tv_nsec);
		printf("\nCPU time: %f (sec)", (double)(CPG) * ticks / GIG);
		j++;
	}
	printf("\n");
}
/***********************************************
NAIVE VECTOR OPERATIONS
***********************************************/
/* Returns the sum of the first `length` elements of src.
 * Accumulates in order (float addition is order-sensitive). */
float sumVector(float* src, int length)
{
	float total = 0.0f;
	int idx = 0;
	while (idx < length)
	{
		total += src[idx];
		idx++;
	}
	return total;
}
/* Element-wise natural logarithm: dest[i] = log(src[i]). */
void vectElemLog(float* src, float* dest, int length)
{
	int idx = 0;
	while (idx < length)
	{
		dest[idx] = log(src[idx]);
		idx++;
	}
}
/* Returns the Euclidean (L2) norm of the first `length` elements of src. */
float normVector(float* src, int length)
{
	float sumSquares = 0.0f;
	for (int idx = 0; idx < length; idx++)
	{
		float v = src[idx];
		sumSquares += v * v;
	}
	return sqrt(sumSquares);
}
/* Element-wise logistic sigmoid: dest[i] = 1 / (1 + e^(-src[i])). */
void vectElemSigmoid(float* src, float* dest, int length)
{
	int idx = 0;
	while (idx < length)
	{
		dest[idx] = float(1/(1+exp(-src[idx])));
		idx++;
	}
}
/* Element-wise product: dest[i] = src1[i] * src2[i]. */
void vectElemVectMult(float* src1, float* src2, float* dest, int length)
{
	int idx = 0;
	while (idx < length)
	{
		dest[idx] = src1[idx] * src2[idx];
		idx++;
	}
}
//faster if float is used instead?
/* Divides every element by an integer divisor: dest[i] = src[i] / divisor. */
void vectElemIntDiv(float* src, float* dest, int length, int divisor)
{
	int idx = 0;
	while (idx < length)
	{
		dest[idx] = float(src[idx]/divisor);
		idx++;
	}
}
/* Divides every element by a float divisor: dest[i] = src[i] / divisor. */
void vectElemFloatDiv(float* src, float* dest, int length, float divisor)
{
	int idx = 0;
	while (idx < length)
	{
		dest[idx] = float(src[idx]/divisor);
		idx++;
	}
}
/* Scales every element by a scalar: dest[i] = src[i] * multiplicand. */
void vectElemFloatMult(float* src, float* dest, int length, float multiplicand)
{
	int idx = 0;
	while (idx < length)
	{
		dest[idx] = src[idx] * multiplicand;
		idx++;
	}
}
/* Element-wise quotient: dest[i] = src1[i] / src2[i].
 * No zero-check on src2 (matches existing callers' expectations). */
void vectElemVectDiv(float* src1, float* src2, float* dest, int length)
{
	int idx = 0;
	while (idx < length)
	{
		dest[idx] = (src1[idx]/src2[idx]);
		idx++;
	}
}
//Just for debugging eh?
/* Debug helper: prints one element per line to stdout. */
void printVector(float* A, int length)
{
	for (int idx = 0; idx < length; idx++)
	{
		std::cout << A[idx] << std::endl;
	}
}
/* Fills the first `length` elements of `array` with `val`. */
void initializeVector(float *array, int length, float val)
{
	int idx = 0;
	while (idx < length)
	{
		array[idx] = val;
		idx++;
	}
}
//Just for debugging eh?
/* Debug helper: prints a rows x cols row-major matrix, tab-separated.
 * Fix: a row-major rows x cols matrix has row stride `cols`; the old code
 * indexed A[i*rows+j], which only worked for square matrices. */
void printMatrix(float* A, int rows, int cols)
{
	for (int i = 0; i < rows; i++)
	{
		for (int j = 0; j < cols; j++)
		{
			std::cout << A[i*cols+j] << "\t";
		}
		std::cout << std::endl;
	}
}
/* Element-wise sum: dest[i] = src1[i] + src2[i]. */
void addVectors(float* src1, float* src2, float* dest, int length)
{
	int idx = 0;
	while (idx < length)
	{
		dest[idx] = src1[idx] + src2[idx];
		idx++;
	}
}
/* Element-wise difference: dest[i] = src1[i] - src2[i]. */
void subVectors(float* src1, float* src2, float* dest, int length)
{
	int idx = 0;
	while (idx < length)
	{
		dest[idx] = src1[idx] - src2[idx];
		idx++;
	}
}
/* Dot product of src1 and src2 (length elements); result stored in *dest. */
void dotPdt(float* src1, float* src2, float *dest, int length)
{
	float total = 0.0f;
	int idx = 0;
	while (idx < length)
	{
		total += src1[idx] * src2[idx];
		idx++;
	}
	*dest = total;
}
/* Writes the transpose of src (rows x cols, row-major) into dest
 * (cols x rows, row-major). src and dest must not alias. */
void matrixTranspose(float* src, float* dest, int rows, int cols)
{
	for (int c = 0; c < cols; c++)
	{
		for (int r = 0; r < rows; r++)
		{
			dest[c*rows + r] = src[r*cols + c];
		}
	}
}
/* Fills arr (rows x cols, row-major) with weights drawn uniformly from
 * [-r, r], where r = sqrt(6)/sqrt(rows+cols+1), using rand() seeded with
 * `seed` so the sequence is reproducible.
 * Fix: r does not depend on the loop index, so it is computed once instead
 * of being recomputed every iteration (behavior is unchanged). */
void initializeMatrixWeightsRand(float *arr, int rows, int cols, int seed)
{
	srand(seed);
	//Choose weights uniformly from the interval [-r, r]
	float r = sqrt(6) / sqrt(rows+cols+1);
	int numElements = rows*cols;
	for (int i = 0; i < numElements; i++)
	{
		//rand()%10000/10000 yields a uniform value in [0, 1)
		float randNum = float(rand()%10000)/10000;
		arr[i] = randNum * 2 * r - r;
	}
}
/* Zero-fills a rows x cols weight matrix stored as a flat array. */
void initializeMatrixWeightsZero(float *arr, int rows, int cols)
{
	int total = rows * cols;
	int idx = 0;
	while (idx < total)
	{
		arr[idx] = 0.0f;
		idx++;
	}
}
//initialize the vector weights to 0
/* Zero-fills the first numElements entries of arr. */
void initializeVectorWeightsZero(float *arr, int numElements)
{
	int idx = 0;
	while (idx < numElements)
	{
		arr[idx] = 0.0f;
		idx++;
	}
}
/* Matrix-matrix multiply, kij loop order: dest += src1 * src2.
 * src1 is row1 x col1, src2 is row2 x col2 (requires col1 == row2);
 * dest is row1 x col2 and must be zero-initialized by the caller, since
 * results are accumulated with +=.
 * Fix (was marked BROKEN): dest must be indexed with row stride col2
 * (was row1) and src2 with row stride col2 (was row2); both were wrong
 * for non-square shapes. */
void mmm_kij(float* src1, float* src2, float* dest, int row1, int col1, int row2, int col2)
{
	for (int k = 0; k < row2; k++)
	{
		for (int i = 0; i < row1; i++)
		{
			float r = src1[i*col1 + k];
			for (int j = 0; j < col2; j++)
			{
				dest[i*col2 + j] += r * src2[k*col2 + j];
			}
		}
	}
}
/* Matrix-matrix multiply, ijk loop order: dest += src1 * src2.
 * src1 is row1 x col1, src2 is row2 x col2 (col1 must equal row2);
 * dest is row1 x col2, accumulated in place (caller zero-initializes). */
void mmm_ijk(float* src1, float* src2, float* dest, int row1, int col1, int row2, int col2)
{
	int r = 0;
	while (r < row1)
	{
		int inner = 0;
		while (inner < col1) /* equivalently row2 */
		{
			float lhs = src1[r*col1 + inner];
			int c = 0;
			while (c < col2)
			{
				dest[r*col2 + c] += lhs * src2[inner*col2 + c];
				c++;
			}
			inner++;
		}
		r++;
	}
}
//http://www.cplusplus.com/forum/general/13087/
//http://www.cplusplus.com/forum/general/17771/
//http://www.cplusplus.com/forum/beginner/24906/
/* Loads comma-separated floats from `filename` into `array`, row by row.
 * NOTE: numElements is not used for bounds checking; the caller must
 * guarantee the file contains at most numElements values.  Any field that
 * parses to exactly 0 has its index printed (debug aid, preserved). */
void readCSV(float* array, int numElements, std::string filename)
{
	std::ifstream infile(filename.c_str());
	if (!infile)
		return;
	int index = 0;
	std::string line;
	while (std::getline(infile, line))
	{
		std::istringstream fields(line);
		std::string token;
		while (std::getline(fields, token, ','))
		{
			array[index] = atof(token.c_str());
			if (array[index] == 0)
			{
				std::cout << index << std::endl;
			}
			index++;
		}
	}
}
| 6783c3f54bd8469b5592590c94e330fd8f90bcf5.cu | /*******************************************************************
* Sparse Auto-Encoder
* by
* David Klaus and Alex Welles
* EC527 Final Project
*
 * CUDA Chunked Implementation With Timing Code
*
* Compile with:
*
* nvcc -o cudaCHUNK sparseAutoencoder_CUDA_CHUNK.cu
*
* for rho_forwardProp2 variant (atomicAdd) use:
* nvcc -arch=sm_20 -o sparseAutoencoder_cuda sparseAutoencoder_cuda.cu
*
*******************************************************************/
#include <cstdio>//<stdio.h>
#include <cstdlib>//<stdio.h>
#include <time.h>
#include <math.h>
#include <string>
#include <sstream>
#include <fstream>
#include <iostream>
#include <string>
#include <numeric>
#include "cuPrintf.cu"
#include "cuPrintf.cuh"
#define GIG 1000000000
#define CPG 2.527
#define OPTIONS 1
#define TOL 0.000001
#define PRINT_TIME 1
//Parameters necessary to set up network
#define PATCHES_PATH "c_patches.csv"//For DEBUG
#define W1_PATH "W1.csv"//For DEBUG
#define W2_PATH "W2.csv"//For DEBUG
#define IMAGE_DIM 512 //pixels in 1 dimension (assumes square)
#define SAMPLE_SIZE 10000 //number of input patches
#define FORWARD_CHUNK_SIZE 1024 //maximum size that thread in forward propagate can fit into contents of shared mem (estimate based on tests)
#define BACKWARD_CHUNK_SIZE 1024 //maximum size that thread in backward propagate can fit into contents of shared mem (estimate based on tests)
#define SPARSITY_CHUNK_SIZE 1024 // maximum size that thread in sparsity enforment block can fit into the contents of shared mem (estimate based on tests)
#define HIDDEN_LAYERS 1 //number hidden layers (NON-FUNCTIONAL)
#define NUM_SAMPLE_ELEMENTS SAMPLE_SIZE * visible_size
//desired average activation of hidden nodes
#define SPARSITY_PARAM 0.01
#define SPARSITY_COMPLEMENT 1-SPARSITY_PARAM
//weight decay paramater
#define LAMBDA 0.0001
//weight of sparsity penalty term
#define BETA 3.0
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace std;
/***********************************
KERNEL VECTOR OPS
***********************************/
//http://stackoverflow.com/questions/14291233/confusion-about-cuda-partial-sum-codes-threadidx-x-blockdim-x-and-a-2
//https://code.google.com/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/sum_reduction.cu
// Tree-reduces `input` (length n) into one partial sum per block:
// per_block_results[blockIdx.x] = sum of this block's slice of input.
// Launch requirements: dynamic shared memory of blockDim.x * sizeof(float);
// blockDim.x should be a power of two so the halving loop covers all lanes.
// A second pass (or host-side sum) over per_block_results finishes the total.
__global__ void kernel_block_sum(const float *input, float *per_block_results, const size_t n)
{
	extern __shared__ float sdata[];
	unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
	// load input into __shared__ memory; out-of-range threads contribute 0
	float x = 0;
	if(tid < n)
	{
		x = input[tid];
	}
	sdata[threadIdx.x] = x;
	__syncthreads();
	// contiguous range pattern --> halve the active range each step
	for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
	{
		if(threadIdx.x < offset)
		{
			// add a partial sum upstream to our own
			sdata[threadIdx.x] += sdata[threadIdx.x + offset];
		}
		// wait until all threads in the block have
		// updated their partial sums
		__syncthreads();
	}
	// thread 0 writes the final result
	if(threadIdx.x == 0)
	{
		per_block_results[blockIdx.x] = sdata[0];
	}
}
/////////////////////////////////////////////////////////////////////////
// Chunked wave functions implementation
// all kernel calls broken into chunks. Alows for any size HIDDEN - VISIBLE - HIDDEN neural network
/////////////////////////////////////////////////////////////////////////
//dim3 gridDim(hidden_size,1,1);
//dim3 blockDim(CHUNKED_SIZE,1,1);
// One block per output unit (launched as gridDim(hidden or visible size),
// blockDim(FORWARD_CHUNK_SIZE)).  Computes
//   sigmoid( dot(W1 row blockIdx.x, input) + b1[blockIdx.x] )
// by walking the COLUMN_SIZE-long row in shared-memory chunks.
// whichProp == 0 accumulates into output (rho-estimation pass over many
// samples); any other value overwrites output.
__global__ void kernel_forwardProp_chunked(float *input, float* W1, float* b1, float* output, int whichProp, int COLUMN_SIZE)
{
	//load shared memory with what you need
	__shared__ float inputS[FORWARD_CHUNK_SIZE];
	__shared__ float W1S[FORWARD_CHUNK_SIZE];
	__shared__ float b1S;
	// NOTE(review): every thread writes b1S with the same value -- benign in
	// practice, but a single-thread write followed by a barrier would be cleaner.
	b1S = b1[blockIdx.x];
	float runningSum = 0.0;
	int ii = 0;
	for(; (ii + FORWARD_CHUNK_SIZE) < COLUMN_SIZE; ii+=FORWARD_CHUNK_SIZE) {
		W1S[threadIdx.x] = W1[blockIdx.x * COLUMN_SIZE + threadIdx.x + ii];
		inputS[threadIdx.x] = input[threadIdx.x + ii];
		//multiplication
		__syncthreads();
		W1S[threadIdx.x] = W1S[threadIdx.x] * inputS[threadIdx.x];
		__syncthreads();
		//tree reduction of the chunk; assumes FORWARD_CHUNK_SIZE is a power of two
		for(int offset = FORWARD_CHUNK_SIZE / 2; offset > 0; offset >>= 1)
		{
			if(threadIdx.x < offset)
			{
				// add a partial sum upstream to our own
				W1S[threadIdx.x] += W1S[threadIdx.x + offset];
			}
			// wait until all threads in the block have updated partial sums
			__syncthreads();
		}
		if(threadIdx.x == 0) { //only done once per cycle
			runningSum += W1S[0];
		}
	}
	int remainder = (COLUMN_SIZE - ii);
	// NOTE(review): the __syncthreads() calls below sit inside a branch taken
	// only by threads with threadIdx.x < remainder; threads outside skip the
	// barrier, which is undefined behavior in CUDA -- confirm on target HW.
	// NOTE(review): the halving loop over `remainder` only sums every element
	// when remainder (after the odd-element fold) is a power of two -- confirm
	// launch sizes avoid other even values.
	if(threadIdx.x < remainder ) { //clean up slop
		W1S[threadIdx.x] = W1[blockIdx.x * COLUMN_SIZE + threadIdx.x + ii];
		inputS[threadIdx.x] = input[threadIdx.x + ii];
		__syncthreads();
		W1S[threadIdx.x] = W1S[threadIdx.x] * inputS[threadIdx.x];
		__syncthreads();
		//fold the odd element so the tree reduction sees an even count
		if( (remainder%2 == 1) && (threadIdx.x == (remainder-1) ) && (remainder != 1)) {
			W1S[threadIdx.x-1] += W1S[threadIdx.x];
		}
		__syncthreads();
		//smart summation of remainder
		for(int offset = remainder / 2; offset > 0; offset >>= 1)
		{
			if(threadIdx.x < offset)
			{
				// add a partial sum upstream to our own
				W1S[threadIdx.x] += W1S[threadIdx.x + offset];
			}
			// wait until all threads in the block have updated their partial sums
			__syncthreads();
		}
		if(threadIdx.x == 0) { //only done once per cycle
			runningSum += W1S[0];
		}
	}
	if( threadIdx.x == 0 ) //only needs to be done once
	{
		//add the bias vector value
		runningSum += b1S;
		//apply sigma function
		runningSum = float(1/(1+exp(-runningSum)));
		//set into output
		if(whichProp == 0) { // this is rho forward propagation
			output[blockIdx.x] += runningSum;
		} else { // this is forward propagation 1 or 2
			output[blockIdx.x] = runningSum;
		}
	}
}
//dim3 gridDim(1,1,1);
//dim3 blockDim(BACKWARD_CHUNK_SIZE,1,1);
// Output-layer backprop, launched as a single block of BACKWARD_CHUNK_SIZE
// threads.  Walks the visible dimension in chunks computing
//   d3 = -(x - a3) .* a3 .* (1 - a3)      (stored to d3)
//   b2grad += d3                          (accumulated in place)
// and tree-reduces ||a3 - x||^2 into cost[0].
__global__ void kernel_backProp1_chunked(float* input, float* a3, float* d3, float* b2grad, float* cost, int visible_size, int hidden_size)
{
	__shared__ float a3S[BACKWARD_CHUNK_SIZE];
	__shared__ float b2gradS[BACKWARD_CHUNK_SIZE];
	__shared__ float d3S[BACKWARD_CHUNK_SIZE];
	__shared__ float inputS[BACKWARD_CHUNK_SIZE];
	int ii = 0;
	float runningSum = 0.0;
	for(; (ii+BACKWARD_CHUNK_SIZE) < visible_size; ii+=BACKWARD_CHUNK_SIZE) { //move through chunks until slop
		inputS[threadIdx.x] = input[threadIdx.x + ii];
		b2gradS[threadIdx.x] = b2grad[threadIdx.x + ii];
		a3S[threadIdx.x] = a3[threadIdx.x + ii];
		//begin data updates
		__syncthreads();
		//d3 = -(xM - a3) .* (a3 .* (1 - a3));
		d3S[threadIdx.x] = -(inputS[threadIdx.x] - a3S[threadIdx.x]) * (a3S[threadIdx.x] * (1-a3S[threadIdx.x]));
		//update the gradient
		//b2grad = b2grad + d3;
		b2gradS[threadIdx.x] += d3S[threadIdx.x];
		b2grad[threadIdx.x + ii] = b2gradS[threadIdx.x];
		d3[threadIdx.x + ii] = d3S[threadIdx.x];
		//cost = cost + norm(a3 - xM)^2;  (squared error folded into a3S)
		a3S[threadIdx.x] -= inputS[threadIdx.x];
		a3S[threadIdx.x] *= a3S[threadIdx.x];
		__syncthreads();
		//tree reduction of the chunk; assumes BACKWARD_CHUNK_SIZE is a power of two
		for(int offset = BACKWARD_CHUNK_SIZE / 2; offset > 0; offset >>= 1)
		{
			if(threadIdx.x < offset)
			{
				// add a partial sum upstream to our own
				a3S[threadIdx.x] += a3S[threadIdx.x + offset];
			}
			// wait until all threads in the block have updated their partial sums
			__syncthreads();
		}
		if(threadIdx.x == 0) { //only done once per cycle
			runningSum += a3S[0];
		}
	}
	int remainder = (visible_size - ii);
	// NOTE(review): the __syncthreads() calls below sit inside a divergent
	// branch (threads >= remainder skip them) -- undefined behavior in CUDA.
	if(threadIdx.x < remainder) {
		inputS[threadIdx.x] = input[threadIdx.x + ii];
		b2gradS[threadIdx.x] = b2grad[threadIdx.x + ii];
		a3S[threadIdx.x] = a3[threadIdx.x + ii];
		//begin data updates
		__syncthreads();
		//d3 = -(xM - a3) .* (a3 .* (1 - a3));
		d3S[threadIdx.x] = -(inputS[threadIdx.x] - a3S[threadIdx.x]) * (a3S[threadIdx.x] * (1-a3S[threadIdx.x]));
		//update the gradient
		//b2grad = b2grad + d3;
		b2gradS[threadIdx.x] += d3S[threadIdx.x];
		b2grad[threadIdx.x + ii] = b2gradS[threadIdx.x];
		d3[threadIdx.x + ii] = d3S[threadIdx.x];
		//cost = cost + norm(a3 - xM)^2;
		a3S[threadIdx.x] -= inputS[threadIdx.x];
		a3S[threadIdx.x] *= a3S[threadIdx.x];
		__syncthreads();
		//fold the odd element so the tree reduction sees an even count
		// NOTE(review): no barrier between this fold and the first reduction
		// read below -- potential shared-memory race across warps; verify.
		if( (remainder%2 == 1) && (threadIdx.x == (remainder-1) ) && (remainder != 1)) {
			a3S[threadIdx.x-1] += a3S[threadIdx.x];
		}
		for(int offset = remainder / 2; offset > 0; offset >>= 1)
		{
			if(threadIdx.x < offset)
			{
				// add a partial sum upstream to our own
				a3S[threadIdx.x] += a3S[threadIdx.x + offset];
			}
			// wait until all threads in the block have updated their partial sums
			__syncthreads();
		}
		if(threadIdx.x == 0) { //only done once per cycle
			runningSum += a3S[0];
		}
	}
	if( threadIdx.x == 0 )
	{
		// NOTE(review): this OVERWRITES cost[0] with this sample's squared
		// error; the MATLAB-style comment above suggests accumulation
		// (cost = cost + ...) -- confirm the host accumulates across samples.
		cost[0] = runningSum;
	}
}
//dim3 gridDim(hidden_size,1,1);
//dim3 blockDim(BACKWARD_CHUNK_SIZE,1,1);
/* Computes d2 = W2' * d3, one hidden unit per block (gridDim.x = hidden_size,
 * blockDim.x = BACKWARD_CHUNK_SIZE).  Each block walks the visible dimension
 * in chunks, multiplying the transposed W2 column against d3 in shared memory
 * and tree-reducing each chunk into runningSum.
 * Fixes vs. the previous version:
 *  - the main loop stops before a partial chunk (it previously iterated while
 *    ii < visible_size, reading past the end of W2/d3 when visible_size is
 *    not a multiple of BACKWARD_CHUNK_SIZE);
 *  - the transposed W2 element for visible row (threadIdx.x + ii) is
 *    W2[(threadIdx.x + ii) * hidden_size + blockIdx.x]; the old index added
 *    ii without the row stride, so every chunk after the first read the
 *    wrong column entries;
 *  - the tail reduction runs over `remainder` elements only (the old code
 *    reduced the whole chunk, summing stale shared-memory products left over
 *    from earlier chunks) and handles any remainder size, not just powers of
 *    two;
 *  - __syncthreads() is never executed inside a divergent branch. */
__global__ void kernel_backProp2_chunked(float* W2, float* d2, float* d3, int visible_size, int hidden_size)
{
	__shared__ float d3S[BACKWARD_CHUNK_SIZE];
	__shared__ float W2S[BACKWARD_CHUNK_SIZE];
	int ii = 0;
	float runningSum = 0.0;
	for(; (ii + BACKWARD_CHUNK_SIZE) < visible_size; ii+=BACKWARD_CHUNK_SIZE) {
		//load the transpose of W2: row (threadIdx.x + ii), column blockIdx.x
		W2S[threadIdx.x] = W2[(threadIdx.x + ii) * hidden_size + blockIdx.x];
		d3S[threadIdx.x] = d3[threadIdx.x + ii];
		__syncthreads();
		W2S[threadIdx.x] *= d3S[threadIdx.x];
		__syncthreads();
		//tree reduction of the chunk; assumes BACKWARD_CHUNK_SIZE is a power of two
		for(int offset = BACKWARD_CHUNK_SIZE / 2; offset > 0; offset >>= 1)
		{
			if(threadIdx.x < offset)
			{
				// add a partial sum upstream to our own
				W2S[threadIdx.x] += W2S[threadIdx.x + offset];
			}
			// wait until all threads in the block have updated their partial sums
			__syncthreads();
		}
		if(threadIdx.x == 0) { //only done once per cycle
			runningSum += W2S[0];
		}
	}
	//tail: 1..BACKWARD_CHUNK_SIZE elements remain
	int remainder = (visible_size - ii);
	if(threadIdx.x < remainder) {
		W2S[threadIdx.x] = W2[(threadIdx.x + ii) * hidden_size + blockIdx.x];
		d3S[threadIdx.x] = d3[threadIdx.x + ii];
	}
	__syncthreads();
	if(threadIdx.x < remainder) {
		W2S[threadIdx.x] *= d3S[threadIdx.x];
	}
	__syncthreads();
	//ceil-halving reduction: correct for ANY remainder, barriers uniform
	int count = remainder;
	while(count > 1)
	{
		int half = (count + 1) / 2;
		if(threadIdx.x < half && (threadIdx.x + half) < count)
		{
			W2S[threadIdx.x] += W2S[threadIdx.x + half];
		}
		__syncthreads();
		count = half;
	}
	if(threadIdx.x == 0)
	{
		if(remainder > 0) {
			runningSum += W2S[0];
		}
		//d2 = (W2' * d3) for this hidden unit
		d2[blockIdx.x] = runningSum;
	}
}
//dim3 gridDim(1,1,1);
//dim3 blockDim(BACKWARD_CHUNK_SIZEE,1,1);
// Hidden-layer delta + bias gradient, launched as one block of
// BACKWARD_CHUNK_SIZE threads walking the hidden dimension in chunks.
// On entry d2 holds W2'*d3 (from kernel_backProp2); this kernel applies the
// sparsity term and the sigmoid derivative, then accumulates b1grad += d2.
__global__ void kernel_backProp3_chunked(float* a2, float* d2, float* rhoHat, float* b1grad, int visible_size, int hidden_size)
{
	__shared__ float d2S[BACKWARD_CHUNK_SIZE];
	__shared__ float rhoHatS[BACKWARD_CHUNK_SIZE];
	__shared__ float a2S[BACKWARD_CHUNK_SIZE];
	__shared__ float b1gradS[BACKWARD_CHUNK_SIZE];
	int ii = 0;
	for(; (ii + BACKWARD_CHUNK_SIZE) < hidden_size; ii+=BACKWARD_CHUNK_SIZE) {
		b1gradS[threadIdx.x] = b1grad[threadIdx.x +ii];
		d2S[threadIdx.x] = d2[threadIdx.x + ii];
		rhoHatS[threadIdx.x] = rhoHat[threadIdx.x + ii];
		a2S[threadIdx.x] = a2[threadIdx.x + ii];
		__syncthreads();
		//calculate d2
		// NOTE(review): as parenthesized, BETA multiplies only the
		// -(rho/rhoHat) term; the usual sparse-autoencoder formula is
		// beta*(-rho/rhoHat + (1-rho)/(1-rhoHat)) -- confirm against the
		// reference implementation before relying on this gradient.
		d2S[threadIdx.x] = (d2S[threadIdx.x] + BETA * (-(SPARSITY_PARAM/rhoHatS[threadIdx.x]))
		+ (1-SPARSITY_PARAM)/(1-rhoHatS[threadIdx.x])) * (a2S[threadIdx.x]
		* (1 - a2S[threadIdx.x]));
		d2[threadIdx.x + ii] = d2S[threadIdx.x];
		//update b1 gradient
		b1gradS[threadIdx.x] += d2S[threadIdx.x];
		b1grad[threadIdx.x + ii] = b1gradS[threadIdx.x];
		__syncthreads();
	}
	int remainder = (hidden_size - ii);
	// NOTE(review): the __syncthreads() calls below are inside a divergent
	// branch (threads >= remainder skip them) -- undefined behavior in CUDA.
	if(threadIdx.x < remainder) { //clean up slop
		b1gradS[threadIdx.x] = b1grad[threadIdx.x +ii];
		d2S[threadIdx.x] = d2[threadIdx.x + ii];
		rhoHatS[threadIdx.x] = rhoHat[threadIdx.x + ii];
		a2S[threadIdx.x] = a2[threadIdx.x + ii];
		__syncthreads();
		//calculate d2 (same formula and caveat as above)
		d2S[threadIdx.x] = (d2S[threadIdx.x] + BETA * (-(SPARSITY_PARAM/rhoHatS[threadIdx.x]))
		+ (1-SPARSITY_PARAM)/(1-rhoHatS[threadIdx.x])) * (a2S[threadIdx.x]
		* (1 - a2S[threadIdx.x]));
		d2[threadIdx.x + ii] = d2S[threadIdx.x];
		//update b1 gradient
		b1gradS[threadIdx.x] += d2S[threadIdx.x];
		b1grad[threadIdx.x + ii] = b1gradS[threadIdx.x];
		__syncthreads();
	}
}
//dim3 gridDim(hidden_size,1,1);
//dim3 blockDim(BACKWARD_CHUNK_SIZEE,1,1);
// Weight-gradient accumulation: intends W2grad += d3 * a2' and
// W1grad += d2 * x', one hidden unit per block (gridDim.x = hidden_size).
// The author's own marker below says this kernel is unfinished; several
// indexing inconsistencies are flagged inline -- review before trusting
// its output.
__global__ void kernel_backProp4_chunked(float* input, float* a2, float* d2, float* d3, float* W1grad, float* W2grad, int visible_size, int hidden_size)
{
	__shared__ float d3S[BACKWARD_CHUNK_SIZE];
	__shared__ float a2S;
	__shared__ float d2S;
	__shared__ float W2gradS[BACKWARD_CHUNK_SIZE];
	__shared__ float W1gradS[BACKWARD_CHUNK_SIZE];
	__shared__ float inputS[BACKWARD_CHUNK_SIZE];
	//function not completed yet (author's marker)
	// NOTE(review): no __syncthreads() between this single-thread write of
	// a2S/d2S and their reads below -- shared-memory race.
	if(threadIdx.x == 0)
	{
		a2S = a2[blockIdx.x];
		d2S = d2[blockIdx.x];
	}
	int ii = 0.0;
	// NOTE(review): loop condition `ii < visible_size` processes full chunks
	// past the end when visible_size is not a chunk multiple (out-of-bounds
	// loads), and afterwards remainder <= 0, so the slop branch below is dead.
	for(; ii < visible_size; ii+=BACKWARD_CHUNK_SIZE) {
		W1gradS[threadIdx.x] = W1grad[blockIdx.x * visible_size + threadIdx.x + ii];
		W2gradS[threadIdx.x] = W2grad[(threadIdx.x + ii) * hidden_size + blockIdx.x]; //W2gradS is read in transposed
		// NOTE(review): d3/input loads ignore the chunk offset `ii`
		// (compare the + ii loads above) -- suspected missing offset.
		d3S[threadIdx.x] = d3[threadIdx.x];
		inputS[threadIdx.x] = input[threadIdx.x];
		__syncthreads(); //finished initializing variables for this loop
		//W2grad = W2grad + d3 * a2';
		W2gradS[threadIdx.x] += a2S * d3S[threadIdx.x];
		// NOTE(review): write index does not match the transposed read index
		// ((threadIdx.x + ii) * hidden_size + blockIdx.x) -- suspected bug.
		W2grad[threadIdx.x + ii] = W2gradS[threadIdx.x];
		//W1grad = W1grad + d2 * xM';
		W1gradS[threadIdx.x] += d2S * inputS[threadIdx.x];
		// NOTE(review): write index drops the blockIdx.x * visible_size row
		// offset used on the read above -- suspected bug.
		W1grad[threadIdx.x + ii] = W1gradS[threadIdx.x];
		__syncthreads();
	}
	int remainder = (visible_size - ii);
	if( threadIdx.x < remainder) { //clean up slop (unreachable, see NOTE above)
		W1gradS[threadIdx.x] = W1grad[blockIdx.x * visible_size + threadIdx.x + ii];
		W2gradS[threadIdx.x] = W2grad[(threadIdx.x + ii) * hidden_size + blockIdx.x]; //W2gradS is read in transposed
		d3S[threadIdx.x] = d3[threadIdx.x];
		inputS[threadIdx.x] = input[threadIdx.x];
		__syncthreads(); //finished initializing variables for last iteration
		//W2grad = W2grad + d3 * a2';
		W2gradS[threadIdx.x] += a2S * d3S[threadIdx.x];
		W2grad[threadIdx.x + ii] = W2gradS[threadIdx.x];
		//W1grad = W1grad + d2 * xM';
		W1gradS[threadIdx.x] += d2S * inputS[threadIdx.x];
		W1grad[threadIdx.x + ii] = W1gradS[threadIdx.x];
		__syncthreads();
	}
}
//dim3 gridDim(hidden_size,1,1);
//dim3 blockDim(visible_size,1,1);
// Finalizes the weight gradients with averaging and weight decay:
//   W1grad = W1grad / SAMPLE_SIZE + LAMBDA * W1   (likewise for W2grad)
// One hidden unit per block (launch comment above: gridDim(hidden_size)),
// walking the visible dimension in SPARSITY_CHUNK_SIZE chunks.
__global__ void kernel_sparsityEnforcement_chunked(float* W1, float* W2, float* W1grad, float* W2grad, int visible_size, int hidden_size)
{
	__shared__ float W1gradS[SPARSITY_CHUNK_SIZE];
	__shared__ float W2gradS[SPARSITY_CHUNK_SIZE];
	__shared__ float W1S[SPARSITY_CHUNK_SIZE];
	__shared__ float W2S[SPARSITY_CHUNK_SIZE];
	int ii = 0;
	for(; (ii + SPARSITY_CHUNK_SIZE) < visible_size; ii+=SPARSITY_CHUNK_SIZE) {
		W1gradS[threadIdx.x] = W1grad[blockIdx.x * visible_size + threadIdx.x + ii];
		W1S[threadIdx.x] = W1[blockIdx.x * visible_size + threadIdx.x + ii];
		//read W2 in as transpose
		W2gradS[threadIdx.x] = W2grad[(threadIdx.x + ii) * hidden_size + blockIdx.x];
		W2S[threadIdx.x] = W2[(threadIdx.x + ii) * hidden_size + blockIdx.x];
		__syncthreads();
		//W1grad = W1grad ./ M + lambda .* W1;
		W1gradS[threadIdx.x] = W1gradS[threadIdx.x]/SAMPLE_SIZE + LAMBDA * W1S[threadIdx.x];
		W2gradS[threadIdx.x] = W2gradS[threadIdx.x]/SAMPLE_SIZE + LAMBDA * W2S[threadIdx.x];
		//W2grad = W2grad ./ M + lambda .* W2;
		// NOTE(review): the W1grad write strides by blockDim.x
		// (= SPARSITY_CHUNK_SIZE) while the read above strides by
		// visible_size -- these only agree when visible_size equals the
		// chunk size; suspected write-index bug.  The W2grad write strides
		// by gridDim.x, which matches hidden_size only under the documented
		// launch configuration.
		W1grad[blockIdx.x * blockDim.x + threadIdx.x + ii] = W1gradS[threadIdx.x];
		W2grad[(threadIdx.x + ii) * gridDim.x + blockIdx.x] = W2gradS[threadIdx.x];
		__syncthreads(); //David's note not sure if this sync threads is necessary. Double check.
	}
	int remainder = (visible_size - ii);
	// NOTE(review): __syncthreads() below is inside a divergent branch
	// (threads >= remainder skip it) -- undefined behavior in CUDA.
	if(threadIdx.x < remainder) {
		W1gradS[threadIdx.x] = W1grad[blockIdx.x * visible_size + threadIdx.x + ii];
		W1S[threadIdx.x] = W1[blockIdx.x * visible_size + threadIdx.x + ii];
		//read W2 in as transpose
		W2gradS[threadIdx.x] = W2grad[(threadIdx.x + ii) * hidden_size + blockIdx.x];
		W2S[threadIdx.x] = W2[(threadIdx.x + ii) * hidden_size + blockIdx.x];
		__syncthreads();
		//W1grad = W1grad ./ M + lambda .* W1;
		W1gradS[threadIdx.x] = W1gradS[threadIdx.x]/SAMPLE_SIZE + LAMBDA * W1S[threadIdx.x];
		W2gradS[threadIdx.x] = W2gradS[threadIdx.x]/SAMPLE_SIZE + LAMBDA * W2S[threadIdx.x];
		//W2grad = W2grad ./ M + lambda .* W2;  (same write-stride caveat as above)
		W1grad[blockIdx.x * blockDim.x + threadIdx.x + ii] = W1gradS[threadIdx.x];
		W2grad[(threadIdx.x + ii) * gridDim.x + blockIdx.x] = W2gradS[threadIdx.x];
	}
}
/**********************************
SERIAL VECTOR OPS
***********************************/
void initializeMatrixWeightsRand(float *arr, int rows, int cols, int seed);
void initializeMatrixWeightsZero(float *arr, int rows, int cols);
void initializeVectorWeightsZero(float *arr, int numElements);
void mmm_kij(float* src1, float* src2, float* dest, int row1, int col1, int row2,int col2);
void mmm_ijk(float* src1, float* src2, float* dest, int row1, int col1, int row2,int col2);
void dotPdt(float* src1,float* src2, float* dest, int length);
void readCSV(float* array, int numElements, string filename);
void addVectors(float* src1, float* src2, float* dest, int length);
void subVectors(float* src1, float* src2, float* dest, int length);
void vectElemSigmoid(float* src,float* dest,int length);
void vectElemIntDiv(float* src, float* dest,int length,int divisor);
void vectElemFloatDiv(float* src, float* dest,int length,float divisor);
void vectElemVectDiv(float* src1,float* src2,float* dest,int length);
void initializeVector(float *array, int length, float val);
void vectElemVectMult(float* src1, float* src2, float* dest, int length);
void vectElemFloatMult(float* src, float* dest, int length,float multiplicand);
void matrixTranspose(float* src,float* dest,int rows, int cols);
float normVector(float* src,int length);
void vectElemLog(float* src,float* dest,int length);
float sumVector(float* src,int length);
/* PRINTOUT, DEBUG, AND TIMING FUNCTIONS */
void printVector(float* A, int length);
void printMatrix(float* A, int rows, int cols);
void printTiming(struct timespec* time_stamp,int numTimings);
int main(int argc, char *argv[])
{
int visible_size;
int hidden_size;
sscanf (argv[1],"%d",&visible_size);
sscanf (argv[2],"%d",&hidden_size);
/***********************************
TIMING STUFF
***********************************/
//CPU
struct timespec diff(struct timespec start, struct timespec end);
struct timespec time1, time2;
struct timespec time_stamp[OPTIONS];//Can be increased if necessary.
// GPU Timing variables
cudaEvent_t start, stop;
float elapsed_gpu;
/***********************************
ALLOCATE HOST MEMORY
***********************************/
//Arrays on host memory (CPU)
//input patches to train the autoencoder
float *h_inputs;// 64 x 10000 [visible x sample]
//sparsity vector
float *h_rhoHat;//hidden x 1 [25 x 1]
//weight matrices
float *h_W1;//hidden X visible [25 x 64]
float *h_W2;//visible X hidden [64 x 25]
//weight vectors
float *h_b1;//hidden X 1 [25 x 1]
float *h_b2;//visible X 1 [64 x 1]
//weight gradient matrices
float *h_W1grad;//hidden x visible [25 x 64]
float *h_W2grad;//visible x hidden [64 x 25]
//weight gradient vectors
float *h_b1grad;//hidden x 1 [25 x 1]
float *h_b2grad;//visible x 1 [64 x 1]
//z product vectors
float *h_z2;//hidden x 1 [25 x 1]
float *h_z3;//visible x 1 [64 x 1]
//a product vectors
float *h_a2;//hidden x 1 [25 x 1]
float *h_a3;//visible x 1 [64 x 1]
//partial derivatives for back prop
float *h_d2;//hidden x 1 [25 x 1]
float *h_d3;//visible x 1 [64 x 1]
//temp vectors: both are 64 elements but will not always be used
float *h_temp1;//64 x 1
float *h_temp2;//64 x1
//temp matrix
float *h_Wtemp1;//64 x 25 or 25 x 64
float *h_Wtemp2;//25 x 64 or 64 x 25
//sparsity penalty
float *h_sparsePen;//25x1
float *h_cost;
//Allocate input patches on host memory (CPU)
size_t allocSize = visible_size * SAMPLE_SIZE * sizeof(float);
h_inputs = (float *) malloc(allocSize);
//Allocate sparsity vector on host memory (CPU)
allocSize = hidden_size * sizeof(float);
h_rhoHat = (float *) malloc(allocSize);
//Alocate weight arrays on host memory (CPU)
allocSize = visible_size * hidden_size * sizeof(float);
h_W1 = (float *) malloc(allocSize);
h_W2 = (float *) malloc(allocSize);
//Alocate gradient arrays on host memory (CPU)
allocSize = visible_size * hidden_size * sizeof(float);
h_W1grad = (float *) malloc(allocSize);
h_W2grad = (float *) malloc(allocSize);
//Allocate weight vectors on host memory (CPU)
allocSize = hidden_size * sizeof(float);
h_b1 = (float *) malloc(allocSize);
allocSize = visible_size * sizeof(float);
h_b2 = (float *) malloc(allocSize);
//Allocate weight vectors on host memory (CPU)
allocSize = hidden_size * sizeof(float);
h_b1grad = (float *) malloc(allocSize);
allocSize = visible_size * sizeof(float);
h_b2grad = (float *) malloc(allocSize);
//Allocate z product vectors (CPU)
allocSize = hidden_size * sizeof(float);
h_z2 = (float *) malloc(allocSize);
allocSize = visible_size * sizeof(float);
h_z3 = (float *) malloc(allocSize);
//Allocate a product vectors (CPU)
allocSize = hidden_size * sizeof(float);
h_a2 = (float *) malloc(allocSize);
allocSize = visible_size * sizeof(float);
h_a3 = (float *) malloc(allocSize);
//Allocate partial vectors (CPU)
allocSize = hidden_size * sizeof(float);
h_d2 = (float *) malloc(allocSize);
allocSize = visible_size * sizeof(float);
h_d3 = (float *) malloc(allocSize);
//Allocate temp vectors (CPU)
allocSize = visible_size * sizeof(float);
h_temp1 = (float *) malloc(allocSize);
h_temp2 = (float *) malloc(allocSize);
//Allocate temp matrix (CPU)
allocSize = visible_size * hidden_size * sizeof(float);
h_Wtemp1 = (float *) malloc(allocSize);
h_Wtemp2 = (float *) malloc(allocSize);
//Allocate sparsity penalty vector (CPU)
allocSize = hidden_size * sizeof(float);
h_sparsePen = (float *) malloc(allocSize);
allocSize = sizeof(float);
h_cost = (float *) malloc(allocSize);
/***********************************
ALLOCATE DEVICE MEMORY
***********************************/
//input patches to train the autoencoder
float *d_inputs;// 64 x 10000 [visible x sample]
//sparsity vector
float *d_rhoHat;//hidden x 1 [25 x 1]
//weight matrices
float *d_W1;//hidden X visible [25 x 64]
float *d_W2;//visible X hidden [64 x 25]
//weight vectors
float *d_b1;//hidden X 1 [25 x 1]
float *d_b2;//visible X 1 [64 x 1]
//weight gradient matrices
float *d_W1grad;//hidden x visible [25 x 64]
float *d_W2grad;//visible x hidden [64 x 25]
//weight gradient vectors
float *d_b1grad;//hidden x 1 [25 x 1]
float *d_b2grad;//visible x 1 [64 x 1]
//a product vectors
float *d_a2;//hidden x 1 [25 x 1]
float *d_a3;//visible x 1 [64 x 1]
//partial derivatives for back prop
float *d_d2;//hidden x 1 [25 x 1]
float *d_d3;//visible x 1 [64 x 1]
//sparsity penalty
float *d_sparsePen;//25x1
float *d_cost;//1 lonely float
//Allocate input patches on device memory (GPU)
allocSize = visible_size * SAMPLE_SIZE * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_inputs,allocSize));
//Allocate sparsity vector on device memory (GPU)
allocSize = hidden_size * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_rhoHat,allocSize));
//Alocate weight arrays on device memory (GPU)
allocSize = visible_size * hidden_size * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_W1,allocSize));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_W2,allocSize));
//Alocate gradient arrays on device memory (GPU)
allocSize = visible_size * hidden_size * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_W1grad,allocSize));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_W2grad,allocSize));
//Allocate weight vectors on device memory (GPU)
allocSize = hidden_size * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_b1,allocSize));
allocSize = visible_size * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_b2,allocSize));
//Allocate weight vectors on device memory (GPU)
allocSize = hidden_size * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_b1grad,allocSize));
allocSize = visible_size * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_b2grad,allocSize));
//Allocate a product vectors (GPU)
allocSize = hidden_size * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_a2,allocSize));
allocSize = visible_size * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_a3,allocSize));
//Allocate partial vectors (GPU)
allocSize = hidden_size * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_d2,allocSize));
allocSize = visible_size * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_d3,allocSize));
//Allocate sparsity penalty vector (GPU)
allocSize = hidden_size * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_sparsePen,allocSize));
//Allocate cost (GPU)
allocSize = sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_cost,allocSize));
/***********************************
INITIALIZE NETWORK WEIGHTS
***********************************/
//Initialize the weight matrices to random values
//initializeMatrixWeightsRand(h_inputs, visible_size, SAMPLE_SIZE, 2254);
initializeMatrixWeightsRand(h_W1, hidden_size, visible_size, 2254);
initializeMatrixWeightsRand(h_W2, visible_size, hidden_size, 1345);
initializeMatrixWeightsZero(h_W2grad,visible_size,hidden_size);
initializeMatrixWeightsZero(h_W1grad,hidden_size,visible_size);
initializeVectorWeightsZero(h_b1, hidden_size);
initializeVectorWeightsZero(h_b2, visible_size);
initializeVectorWeightsZero(h_rhoHat, hidden_size);
initializeVectorWeightsZero(h_z2, hidden_size);
initializeVectorWeightsZero(h_a2, hidden_size);
initializeVectorWeightsZero(h_z3, visible_size);
initializeVectorWeightsZero(h_a3, visible_size);
/***********************************
READ IN SAMPLE PATCHES
***********************************/
readCSV(h_inputs, NUM_SAMPLE_ELEMENTS, PATCHES_PATH);
//the following are for debug only
readCSV(h_W1, hidden_size*visible_size, W1_PATH);
readCSV(h_W2, hidden_size*visible_size, W2_PATH);
/***************************************
BEGIN CUDA TIMING
****************************************/
#if PRINT_TIME
// Create the cuda events
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Record event on the default stream
cudaEventRecord(start, 0);
#endif
// Transfer the arrays to the GPU memory
allocSize = visible_size * SAMPLE_SIZE * sizeof(float);
CUDA_SAFE_CALL(cudaMemcpy(d_inputs,h_inputs, allocSize, cudaMemcpyHostToDevice));
allocSize = visible_size * hidden_size * sizeof(float);
CUDA_SAFE_CALL(cudaMemcpy(d_W1,h_W1, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_W2,h_W2, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_W1grad,h_W1grad, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_W2grad,h_W2grad, allocSize, cudaMemcpyHostToDevice));
allocSize = hidden_size * sizeof(float);
CUDA_SAFE_CALL(cudaMemcpy(d_rhoHat,h_rhoHat, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_b1,h_b1, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_b1grad,h_b1grad, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_a2,h_a2, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_d2,h_d2, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_sparsePen,h_sparsePen, allocSize, cudaMemcpyHostToDevice));
allocSize = visible_size * sizeof(float);
CUDA_SAFE_CALL(cudaMemcpy(d_b2,h_b2, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_b2grad,h_b2grad, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_a3,h_a3, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_d3,h_d3, allocSize, cudaMemcpyHostToDevice));
//cout << "CPU Inputs" <<endl;//DEBUG
//printVector(h_inputs, hidden_size);//DEBUG
//cout <<"CPU W1" << endl;//DEBUG
//printVector(h_W1,hidden_size);//DEBUG
//**************************************
// kernal_rho_forwardProp_chunked
//**************************************
dim3 gridDim1(hidden_size,1,1);
dim3 chunkBlockDim(FORWARD_CHUNK_SIZE,1,1);
dim3 blockDim1(visible_size,1,1);
for(int i = 0;i < NUM_SAMPLE_ELEMENTS; i+= visible_size)
{
kernel_forwardProp_chunked<<<gridDim1, chunkBlockDim>>>(&d_inputs[i], d_W1, d_b1, d_rhoHat, 0, visible_size);
CUDA_SAFE_CALL(cudaPeekAtLastError());
}
//***************************************
// kernel_rho_forwardProp1
//***************************************
/*
dim3 gridDim1(hidden_size,1,1);
dim3 blockDim1(visible_size,1,1);
for(int i = 0;i < NUM_SAMPLE_ELEMENTS; i+= visible_size)
{
kernel_rho_forwardProp<<<gridDim1, blockDim1>>>(&d_inputs[i], d_W1, d_b1, d_rhoHat);
CUDA_SAFE_CALL(cudaPeekAtLastError());
}*/
//average rhoHat
allocSize = hidden_size * sizeof(float);
CUDA_SAFE_CALL(cudaMemcpy(h_rhoHat,d_rhoHat, allocSize, cudaMemcpyDeviceToHost));
vectElemFloatDiv(h_rhoHat, h_rhoHat, hidden_size, SAMPLE_SIZE);
CUDA_SAFE_CALL(cudaMemcpy(d_rhoHat,h_rhoHat, allocSize, cudaMemcpyHostToDevice));
cout <<"GPU rhoHat" << endl;//DEBUG
printVector(h_rhoHat,1);//DEBUG
//printVector(h_rhoHat,hidden_size);//DEBUG
//allocSize = visible_size * sizeof(float);
//CUDA_SAFE_CALL(cudaMemcpy(h_inputs,d_inputs, allocSize, cudaMemcpyDeviceToHost));
//cout <<"GPU Inputs" << endl;//DEBUG
//printVector(h_inputs,hidden_size);//DEBUG
//allocSize = visible_size * hidden_size * sizeof(float);
//CUDA_SAFE_CALL(cudaMemcpy(h_W1,d_W1, allocSize, cudaMemcpyDeviceToHost));
//cout <<"GPU W1" << endl;//DEBUG
//printVector(h_W1,hidden_size);//DEBUG
dim3 gridDim2(visible_size,1,1);
dim3 blockDim2(hidden_size,1,1);
dim3 gridDim3(1,1,1);
dim3 blockDim3(visible_size,1,1);
dim3 gridDim4(hidden_size,1,1);
dim3 blockDim4(visible_size,1,1);
dim3 gridDim5(1,1,1);
dim3 blockDim5(hidden_size,1,1);
dim3 gridDim6(hidden_size,1,1);
dim3 blockDim6(visible_size,1,1);
dim3 gridDim7(hidden_size,1,1);
dim3 blockDim7(visible_size,1,1);
for(int i = 0;i < NUM_SAMPLE_ELEMENTS; i+= visible_size)
{
//***************************************
// FORWARD PROPAGATION a(1) --> a(2)
//***************************************
//kernel_forwardProp1<<<gridDim1, blockDim1>>>(&d_inputs[i], d_W1, d_b1, d_a2);
//CUDA_SAFE_CALL(cudaPeekAtLastError());
kernel_forwardProp_chunked<<<gridDim1, chunkBlockDim>>>(&d_inputs[i], d_W1, d_b1, d_a2,1,visible_size);
CUDA_SAFE_CALL(cudaPeekAtLastError());
//***************************************
// FORWARD PROPAGATION a(2) --> a(3)
//***************************************
//kernel_forwardProp2<<<gridDim2, blockDim2>>>(&d_inputs[i], d_W2, d_b2, d_a3);
//CUDA_SAFE_CALL(cudaPeekAtLastError());
kernel_forwardProp_chunked<<<gridDim1, chunkBlockDim>>>(&d_inputs[i], d_W1, d_b1, d_a2,2,hidden_size);
CUDA_SAFE_CALL(cudaPeekAtLastError());
//***************************************
// BACK PROPAGATION d(3) --> d(2)
//***************************************
//kernel_backProp1<<<gridDim3, blockDim3>>>(&d_inputs[i], d_a3, d_d3, d_b2grad, d_cost);
//CUDA_SAFE_CALL(cudaPeekAtLastError());
kernel_backProp1_chunked<<<gridDim3, chunkBlockDim>>>(&d_inputs[i], d_a3, d_d3, d_b2grad, d_cost, visible_size, hidden_size);
CUDA_SAFE_CALL(cudaPeekAtLastError());
//***************************************
// BACK PROPAGATION d(2) --> input
//***************************************
//kernel_backProp2<<<gridDim4, blockDim4>>>(d_W2, d_d2, d_d3);
//CUDA_SAFE_CALL(cudaPeekAtLastError());
kernel_backProp2_chunked<<<gridDim4, chunkBlockDim>>>(d_W2, d_d2, d_d3, visible_size, hidden_size);
CUDA_SAFE_CALL(cudaPeekAtLastError());
//kernel_backProp3<<<gridDim5, blockDim5>>>(d_a2, d_d2, d_rhoHat, d_b1grad);
//CUDA_SAFE_CALL(cudaPeekAtLastError());
kernel_backProp3_chunked<<<gridDim5, chunkBlockDim>>>(d_a2, d_d2, d_rhoHat, d_b1grad, visible_size, hidden_size);
CUDA_SAFE_CALL(cudaPeekAtLastError());
//kernel_backProp4<<<gridDim6, blockDim6>>>(&d_inputs[i], d_a2, d_d2, d_d3, d_W1grad, d_W2grad);
//CUDA_SAFE_CALL(cudaPeekAtLastError());
kernel_backProp4_chunked<<<gridDim6, chunkBlockDim>>>(&d_inputs[i], d_a2, d_d2, d_d3, d_W1grad, d_W2grad, visible_size, hidden_size);
CUDA_SAFE_CALL(cudaPeekAtLastError());
}
//kernel_sparsityEnforcement<<<gridDim7, blockDim7>>>(d_W1, d_W2, d_W1grad, d_W2grad);
//CUDA_SAFE_CALL(cudaPeekAtLastError());
kernel_sparsityEnforcement_chunked<<<gridDim7, chunkBlockDim>>>(d_W1, d_W2, d_W1grad, d_W2grad, visible_size, hidden_size);
CUDA_SAFE_CALL(cudaPeekAtLastError());
allocSize = hidden_size * sizeof(float);
CUDA_SAFE_CALL(cudaMemcpy(h_a2,d_a2, allocSize, cudaMemcpyDeviceToHost));
cout <<"GPU a2" << endl;//DEBUG
printVector(h_a2,1);
//printVector(h_a2,hidden_size);
allocSize = visible_size * sizeof(float);
CUDA_SAFE_CALL(cudaMemcpy(h_a3,d_a3, allocSize, cudaMemcpyDeviceToHost));
cout <<"GPU a3" << endl;//DEBUG
printVector(h_a3,1);
//printVector(h_a3,visible_size);
allocSize = visible_size * sizeof(float);
CUDA_SAFE_CALL(cudaMemcpy(h_d3,d_d3, allocSize, cudaMemcpyDeviceToHost));
cout << "GPU d3" << endl;//DEBUG
printVector(h_d3,1);//DEBUG
//printVector(h_d3,visible_size);//DEBUG
#if PRINT_TIME
// Stop and destroy the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_gpu, start, stop);
printf("\nGPU time: %f (sec)\n", elapsed_gpu*1000000/GIG);
cudaEventDestroy(start);
cudaEventDestroy(stop);
#endif
/***************************************
FREEING HOST MEMORY
****************************************/
free(h_inputs);
free(h_rhoHat);
free(h_W1);
free(h_W2);
free(h_b1);
free(h_b2);
free(h_W1grad);
free(h_W2grad);
free(h_b1grad);
free(h_b2grad);
free(h_z2);
free(h_z3);
free(h_a2);
free(h_a3);
free(h_d2);
free(h_d3);
free(h_temp1);
free(h_temp2);
free(h_Wtemp1);
free(h_Wtemp2);
free(h_sparsePen);
free(h_cost);
/***************************************
FREEING DEVICE MEMORY
****************************************/
CUDA_SAFE_CALL(cudaFree(d_inputs));
CUDA_SAFE_CALL(cudaFree(d_rhoHat));
CUDA_SAFE_CALL(cudaFree(d_W1));
CUDA_SAFE_CALL(cudaFree(d_W2));
CUDA_SAFE_CALL(cudaFree(d_b1));
CUDA_SAFE_CALL(cudaFree(d_b2));
CUDA_SAFE_CALL(cudaFree(d_W1grad));
CUDA_SAFE_CALL(cudaFree(d_W2grad));
CUDA_SAFE_CALL(cudaFree(d_b1grad));
CUDA_SAFE_CALL(cudaFree(d_b2grad));
CUDA_SAFE_CALL(cudaFree(d_a2));
CUDA_SAFE_CALL(cudaFree(d_a3));
CUDA_SAFE_CALL(cudaFree(d_d2));
CUDA_SAFE_CALL(cudaFree(d_d3));
CUDA_SAFE_CALL(cudaFree(d_sparsePen));
CUDA_SAFE_CALL(cudaFree(d_cost));
return 0;
}
/***********************************************
TIMING FUNCTIONS AND STRUCTS
***********************************************/
struct timespec diff(struct timespec start, struct timespec end)
{
    // Return (end - start) as a normalized timespec: when the nanosecond
    // difference is negative, borrow one second and add 1e9 ns.
    struct timespec result;
    long nsec = end.tv_nsec - start.tv_nsec;
    result.tv_sec = end.tv_sec - start.tv_sec;
    if (nsec < 0)
    {
        result.tv_sec -= 1;
        nsec += 1000000000;
    }
    result.tv_nsec = nsec;
    return result;
}
void printTiming(struct timespec* time_stamp,int numTimings)
{
    // Debug helper: print each recorded interval converted to seconds.
    // NOTE(review): CPG and GIG are presumably file-level timing constants
    // (cycles-per-GHz / 1e9) — confirm against the header.
    for (int idx = 0; idx < numTimings; idx++)
    {
        // Comma separator before every entry except the first.
        if (idx != 0) printf(", ");
        printf("\nCPU time: %f (sec)", ((double)(CPG)*(double)
            (GIG * time_stamp[idx].tv_sec + time_stamp[idx].tv_nsec)/GIG));
    }
    printf("\n");
}
/***********************************************
NAIVE VECTOR OPERATIONS
***********************************************/
float sumVector(float* src,int length)
{
    // Return the sum of all elements in src.
    float total = 0.0f;
    for (int idx = 0; idx < length; ++idx)
        total += src[idx];
    return total;
}
void vectElemLog(float* src,float* dest,int length)
{
    // Element-wise natural logarithm: dest[i] = ln(src[i]).
    // Non-positive inputs produce -inf/NaN, as with log().
    for (int idx = 0; idx < length; ++idx)
        dest[idx] = log(src[idx]);
}
float normVector(float* src,int length)
{
    // Euclidean (L2) norm of src: sqrt(sum of squares).
    float sumSquares = 0.0f;
    for (int idx = 0; idx < length; ++idx)
    {
        const float v = src[idx];
        sumSquares += v * v;
    }
    return sqrt(sumSquares);
}
void vectElemSigmoid(float* src,float* dest,int length)
{
    // Element-wise logistic sigmoid: dest[i] = 1 / (1 + e^-src[i]).
    for (int idx = 0; idx < length; ++idx)
    {
        dest[idx] = float(1/(1+exp(-src[idx])));
    }
}
void vectElemVectMult(float* src1, float* src2, float* dest, int length)
{
    // Element-wise (Hadamard) product: dest[i] = src1[i] * src2[i].
    int idx = 0;
    while (idx < length)
    {
        dest[idx] = src1[idx] * src2[idx];
        ++idx;
    }
}
//faster if float is used instead?
void vectElemIntDiv(float* src, float* dest,int length,int divisor)
{
    // Divide each element by an integer divisor.
    // NOTE: src[i] is float, so src[i]/divisor is floating-point division
    // despite the "IntDiv" name (e.g. 3.0f / 2 == 1.5f).
    for (int idx = 0; idx < length; ++idx)
    {
        dest[idx] = float(src[idx] / divisor);
    }
}
void vectElemFloatDiv(float* src, float* dest,int length,float divisor)
{
    // Divide each element of src by a scalar divisor.
    // No zero-check: divisor == 0 yields inf/NaN per IEEE-754.
    for (int idx = 0; idx < length; ++idx)
    {
        dest[idx] = float(src[idx] / divisor);
    }
}
void vectElemFloatMult(float* src, float* dest, int length,float multiplicand)
{
    // Scale each element of src by a scalar multiplicand.
    for (int idx = 0; idx < length; ++idx)
    {
        dest[idx] = src[idx] * multiplicand;
    }
}
void vectElemVectDiv(float* src1,float* src2,float* dest,int length)
{
    // Element-wise quotient: dest[i] = src1[i] / src2[i].
    // No zero-check on src2 — zeros yield inf/NaN per IEEE-754.
    for (int idx = 0; idx < length; ++idx)
    {
        dest[idx] = (src1[idx] / src2[idx]);
    }
}
//Just for debugging eh?
void printVector(float* A, int length)
{
    // Debug helper: dump each element on its own line.
    for (int idx = 0; idx < length; ++idx)
        cout << A[idx] << endl;
}
void initializeVector(float *array, int length, float val)
{
    // Fill the array with the constant val.
    int idx = length;
    while (idx-- > 0)
    {
        array[idx] = val;
    }
}
//Just for debugging eh?
void printMatrix(float* A, int rows, int cols)
{
    // Debug helper: print a row-major rows x cols matrix, tab-separated,
    // one matrix row per output line.
    for(int i = 0;i < rows; i++)
    {
        for(int j = 0;j < cols;j++)
        {
            // Row-major element (i, j) lives at i*cols + j. The original
            // indexed A[i*rows+j], which reads the wrong elements (and can
            // read out of bounds) whenever rows != cols.
            cout << A[i*cols+j] << "\t";
        }
        cout << endl;
    }
}
void addVectors(float* src1, float* src2, float* dest, int length)
{
    // Element-wise sum: dest[i] = src1[i] + src2[i].
    for (int idx = 0; idx < length; ++idx)
    {
        dest[idx] = src1[idx] + src2[idx];
    }
}
void subVectors(float* src1, float* src2, float* dest, int length)
{
    // Element-wise difference: dest[i] = src1[i] - src2[i].
    for (int idx = 0; idx < length; ++idx)
    {
        dest[idx] = src1[idx] - src2[idx];
    }
}
void dotPdt(float* src1,float* src2, float *dest, int length)
{
    // Inner (dot) product of src1 and src2, written to *dest.
    float total = 0.0f;
    for (int idx = 0; idx < length; ++idx)
    {
        total += src1[idx] * src2[idx];
    }
    *dest = total;
}
void matrixTranspose(float* src,float* dest,int rows,int cols)
{
    // dest (cols x rows) = transpose of src (rows x cols), both row-major.
    // src and dest must not alias.
    for (int c = 0; c < cols; ++c)
    {
        for (int r = 0; r < rows; ++r)
        {
            dest[c*rows + r] = src[r*cols + c];
        }
    }
}
void initializeMatrixWeightsRand(float *arr, int rows, int cols, int seed)
{
    // Fill a rows x cols weight matrix with values drawn uniformly from
    // [-r, r], where r = sqrt(6) / sqrt(rows + cols + 1) (standard
    // fan-in/fan-out initialization range). Reseeds rand() with seed, so
    // the same seed always reproduces the same weights.
    srand(seed);
    const int numElements = rows * cols;
    // r depends only on the dimensions; hoisted out of the loop (the
    // original recomputed the identical value every iteration).
    const float r = sqrt(6) / sqrt(rows + cols + 1);
    for (int i = 0; i < numElements; i++)
    {
        // rand() % 10000 gives 4 decimal digits of resolution in [0, 1).
        float randNum = float(rand() % 10000) / 10000;
        arr[i] = randNum * 2 * r - r;
    }
}
void initializeMatrixWeightsZero(float *arr, int rows, int cols)
{
    // Zero-fill a rows x cols weight matrix stored as a flat array.
    const int numElements = rows * cols;
    for (int idx = 0; idx < numElements; ++idx)
    {
        arr[idx] = 0.0f;
    }
}
//initialize the vector weights to 0
void initializeVectorWeightsZero(float *arr, int numElements)
{
    // Zero-fill a weight vector.
    for (int idx = 0; idx < numElements; ++idx)
    {
        arr[idx] = 0.0f;
    }
}
/* mmm kij */
// Matrix-matrix multiply, kij loop order:
//   dest (row1 x col2) += src1 (row1 x col1) * src2 (row2 x col2)
// All matrices are row-major. Requires col1 == row2 and dest to be
// zero-initialized by the caller (results accumulate into dest).
void mmm_kij(float* src1, float* src2, float* dest, int row1, int col1, int row2,int col2)
{
    for (int k = 0; k < row2; k++)
    {
        for (int i = 0; i < row1; i++)
        {
            // Loop-invariant element of src1 for the inner j loop.
            float r = src1[i*col1+k];
            for (int j = 0; j < col2; j++)
            {
                // Fixed indexing: the original wrote dest[i*row1+j] and
                // read src2[k*row2+j]; row-major strides are col2 for both
                // dest rows and src2 rows.
                dest[i*col2+j] += r*src2[k*col2+j];
            }
        }
    }
}
void mmm_ijk(float* src1, float* src2, float* dest, int row1, int col1, int row2, int col2)
{
    // Matrix-matrix multiply, ijk loop order:
    //   dest (row1 x col2) += src1 (row1 x col1) * src2 (row2 x col2)
    // All row-major. Assumes col1 == row2 (row2 is otherwise unused) and
    // that the caller zero-initialized dest (results accumulate).
    for (int i = 0; i < row1; ++i)
    {
        for (int j = 0; j < col1; ++j)
        {
            const float a = src1[i*col1 + j];
            for (int k = 0; k < col2; ++k)
            {
                dest[i*col2 + k] += a * src2[j*col2 + k];
            }
        }
    }
}
//http://www.cplusplus.com/forum/general/13087/
//http://www.cplusplus.com/forum/general/17771/
//http://www.cplusplus.com/forum/beginner/24906/
// Parse a comma-separated file of numbers into array, row by row.
// Reads at most numElements values — the original ignored numElements
// entirely and could write past the end of the destination buffer when the
// file held more values than expected. A missing/unreadable file leaves
// array untouched.
void readCSV(float* array, int numElements, std::string filename)
{
    std::ifstream infile(filename.c_str());
    int index = 0;
    if (infile)
    {
        std::string line;
        // Stop as soon as the destination is full, even mid-file.
        while (index < numElements && std::getline(infile, line))
        {
            std::istringstream sep(line);
            std::string result;
            while (index < numElements && std::getline(sep, result, ','))
            {
                array[index] = std::atof(result.c_str());
                if (array[index] == 0)
                {
                    std::cout << index << std::endl;//DEBUG
                }
                index++;
            }
        }
    }
    //cout << "COUNT WAS " << index << endl;//DEBUG
}
|
01c57127de45f95a78d9b82e9709209d383b1361.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <memory> // std::unique_ptr
#include "hip/hip_texture_types.h" // texture
#include "texture_fetch_functions.h" // tex1Dfetch
#include "cuda_includes.h"
#include "clique.cuh"
// For profiling execution times
#include <chrono>
#ifndef TIMER_END
#define TIMER_END(str, start) std::cout << std::setw(6) << std::right << \
std::chrono::duration_cast<std::chrono::milliseconds>( \
std::chrono::high_resolution_clock::now()-start).count() << \
" ms " << str << std::endl;
#endif
namespace thin
{
namespace clique
{
namespace _private
{
// When we are checking if a clique can be formed at a certain d-face, we use
// the spatial mask template from [1] Section 10.
//
// For each dimension d, a default d-face is selected, with the offsets in the
// template set to work for this d-face. For the rest d-faces of the same
// dimension, we can apply a linear transformation on the default offsets to
// obtain the correct template. After enumerating all the cases, we will need 72
// transformation matrices, which are stored in a 1D texture of size 72 (8 x 9).
const unsigned matEntryArrSize = 72U;
const OffsetCompType h_matEntryArr[matEntryArrSize] =
{
// identity
// D3::m_mat, D2::m_mat_X, D1::m_mat_1, D0::m_mat_0
1, 0, 0, 0, 1, 0, 0, 0, 1, // 0
// rotate +90 degrees around Z axis
// D2::m_mat_Y, D1::m_mat_2
0, -1, 0, 1, 0, 0, 0, 0, 1, // 9
// rotate -90 degrees around Y axis
// D2::m_mat_Z,
0, 0, -1, 0, 1, 0, 1, 0, 0, // 18
// roate -90 degrees around X axis
// D1::m_mat_5, D0::m_mat_5
1, 0, 0, 0, 0, 1, 0, -1, 0, // 27
// rotate +90 degrees around X axis
// D1::m_mat_6, D0::m_mat_2
1, 0, 0, 0, 0, -1, 0, 1, 0, // 36
// rotate 180 degrees around X axis
// D1::m_mat_9, D0::m_mat_6
1, 0, 0, 0, -1, 0, 0, 0, -1, // 45
// rotate +90 degrees around Z axis first to obtain X'Y'Z',
// then rotate 180 degrees around X' axis
// D1::m_mat_10
0, 1, 0, 1, 0, 0, 0, 0, -1, // 54
// rotate +90 degrees around Y axis
// D2::m_mat_Z_neg
0, 0, 1, 0, 1, 0, -1, 0, 0 // 63
};
// Dim 3, Voxel
//
// number of voxels to form the essential 3-clique
const uint8_t D3ecArrSize = 0;
// number of core voxels to form 3-clique
const uint8_t D3coreArrSize = 0;
// number of neighborhood offsets of 3-clique
const uint8_t D3nbOffsIndexArrSize = 26U;
// the beginning index in the device texture reference for essential clique
const uint8_t D3ecTexBegin = 0;
// the beginning index in the device texture reference for core voxels
const uint8_t D3coreTexBegin = 0;
// the beginning index in the device texture reference for the indices of the
// neighborhood offsets
const uint8_t D3nbOffsIndexTexBegin = 0;
// Dim 2, Face
//
// number of voxels to form the essential 2-clique
const uint8_t D2ecArrSize = 1U;
// number of core voxels to form 2-clique
const uint8_t D2coreArrSize = 2U;
// number of neighborhood offsets of 2-clique
const uint8_t D2nbOffsIndexArrSize = 16U;
// the beginning index in the device texture reference for essential clique
const uint8_t D2ecTexBegin = D3ecTexBegin + D3ecArrSize;
// the beginning index in the device texture reference for core voxels
const uint8_t D2coreTexBegin = D3coreTexBegin + D3coreArrSize;
// the beginning index in the device texture reference for the indices of the
// neighborhood offsets
const uint8_t D2nbOffsIndexTexBegin = D3nbOffsIndexTexBegin + D3nbOffsIndexArrSize;
// Dim 1, Edge
//
// number of voxels to form the essential 1-clique
const uint8_t D1ecArrSize = 3U;
// number of core voxels to form 1-clique
const uint8_t D1coreArrSize = 5U;
// number of neighborhood offsets of 1-clique
const uint8_t D1nbOffsIndexArrSize = 8U;
// the beginning index in the device texture reference for essential clique
const uint8_t D1ecTexBegin = D2ecTexBegin + D2ecArrSize;
// the beginning index in the device texture reference for core voxels
const uint8_t D1coreTexBegin = D2coreTexBegin + D2coreArrSize;
// the beginning index in the device texture reference for the indices of the
// neighborhood offsets
const uint8_t D1nbOffsIndexTexBegin = D2nbOffsIndexTexBegin + D2nbOffsIndexArrSize;
// Dim 0, Vertex
//
// number of voxels to form the essential 0-clique
const uint8_t D0ecArrSize = 7U;
// number of core voxels to form 0-clique
const uint8_t D0coreArrSize = 11U;
// number of neighborhood offsets of 0-clique
const uint8_t D0nbOffsIndexArrSize = 0;
// the beginning index in the device texture reference for essential clique
const uint8_t D0ecTexBegin = D1ecTexBegin + D1ecArrSize;
// the beginning index in the device texture reference for core voxels
const uint8_t D0coreTexBegin = D1coreTexBegin + D1coreArrSize;
// the beginning index in the device texture reference for the indices of the
// neighborhood offsets
const uint8_t D0nbOffsIndexTexBegin = D1nbOffsIndexTexBegin + D1nbOffsIndexArrSize;
// Essential Clique array size
const unsigned EC_ARR_SIZE = D3ecArrSize + D2ecArrSize + D1ecArrSize + D0ecArrSize;
// Core clique voxel array size
const unsigned CORE_CLQ_ARR_SIZE = D3coreArrSize + D2coreArrSize + D1coreArrSize + D0coreArrSize;
// Indices of the neighborhood offsets array size
const unsigned NB_OFFS_IDX_ARR_SIZE = D3nbOffsIndexArrSize + D2nbOffsIndexArrSize + D1nbOffsIndexArrSize + D0nbOffsIndexArrSize;
const unsigned NB_OFFS_ARR_SIZE = 26U;
// Essential clique array
const OffsetIjkType h_ecOffsetArr[EC_ARR_SIZE] =
{
// Dim 3 does not have any ec
// Dim 2
makeOffsetIjk(1,0,0),
// Dim 1
makeOffsetIjk(1,0,0), makeOffsetIjk(1,0,-1), makeOffsetIjk(0,0,-1),
// Dim 0
makeOffsetIjk(0,-1,-1), makeOffsetIjk(0,0,-1), makeOffsetIjk(1,-1,-1), makeOffsetIjk(1,0,-1),
makeOffsetIjk(0,-1,0), makeOffsetIjk(1,-1,0), makeOffsetIjk(1,0,0)
};
// Core clique voxel array.
//
// For each dimension d, the associated entry is a list of tuples with variable
// length. For this to work on the device side, there is a -1 inserted between
// every two tuples, indicating the end of the previous one.
const int8_t h_coreCliqueArr[CORE_CLQ_ARR_SIZE] =
{
// Dim 3 does not have any core clique index
// Dim 2
0 + D2ecTexBegin, -1,
// Dim 1
1 + D1ecTexBegin, -1,
0 + D1ecTexBegin, 2 + D1ecTexBegin, -1,
// Dim 0
2 + D0ecTexBegin, -1,
0 + D0ecTexBegin, 3 + D0ecTexBegin, -1,
0 + D0ecTexBegin, 5 + D0ecTexBegin, -1,
3 + D0ecTexBegin, 5 + D0ecTexBegin, -1
};
// Indices of neighborhood offsets array
const uint8_t h_nbOffsIndexArr[NB_OFFS_IDX_ARR_SIZE] =
{
// Dim 3 nb offset indices
0, 11, 3, 8, 20, 10, 1, 9, 2, 12, 24, 15, 21, 23, 13, 22, 14, 4, 19, 7, 16, 25, 18, 5, 17, 6,
// Dim 2 nb offset indices
8, 20, 10, 1, 9, 2, 21, 23, 13, 14, 16, 25, 18, 5, 17, 6,
// Dim 1 nb offset indices
8, 10, 1, 2, 21, 23, 13, 14
};
// A singleton class that unions all the device pointers for texture reference.
class DevArrPtrs
{
public:
    // Lazily creates the singleton on first use and returns it.
    // NOTE(review): not thread-safe; presumably only called from a single
    // host thread — confirm.
    static DevArrPtrs* instance()
    {
        if (!m_instance)
        {
            m_instance = std::unique_ptr<DevArrPtrs>(new DevArrPtrs);
        }
        return m_instance.get();
    }
    // Raw device pointers backing this module's texture references;
    // allocated/bound in _initDeviceTex() and freed in _clearDeviceTex().
    OffsetCompType* d_matEntryArr;   // 3x3 transformation-matrix entries
    OffsetIjkType* d_ecOffsetArr;    // essential-clique voxel offsets
    int8_t* d_coreCliqueArr;         // core-clique index tuples (-1 terminated)
    uint8_t* d_nbOffsIndexArr;       // neighborhood-offset index lists
    // OffsetIjkType* d_nbOffsetArr;
    // uint8_t* d_nbFlatIjkToIndexLut;
private:
    static std::unique_ptr<DevArrPtrs> m_instance;
};
// Out-of-class definition of the singleton storage.
std::unique_ptr<DevArrPtrs> DevArrPtrs::m_instance = nullptr;
tp::Int8TexType matEntryTex;
tp::OffsetIjkTexType ecOffsetTex;
tp::Int8TexType coreCliqueTex;
tp::Uint8TexType nbOffsIndexTex;
// Initialize the device texture references of this module.
void
_initDeviceTex(OffsetCompType** d_matEntryArr, OffsetIjkType** d_ecOffsetArr, int8_t** d_coreCliqueArr, uint8_t** d_nbOffsIndexArr)
{
    // Allocates a device buffer for each of the four host lookup tables,
    // copies the host data in, and binds each buffer to its module-level
    // texture reference. The raw device pointers are returned through the
    // out-parameters; the caller must release them via _clearDeviceTex().
    //
    // Channel formats: 8-bit signed scalar, 8-bit unsigned scalar, and a
    // 4-component 8-bit signed format for the packed ijk offsets.
    // NOTE(review): uint8Desc is built from sizeof(OffsetCompType); this is
    // only correct if OffsetCompType is one byte — confirm its typedef.
    const hipChannelFormatDesc int8Desc = hipCreateChannelDesc(8 * sizeof(OffsetCompType), 0, 0, 0, hipChannelFormatKindSigned);
    const hipChannelFormatDesc uint8Desc = hipCreateChannelDesc(8 * sizeof(OffsetCompType), 0, 0, 0, hipChannelFormatKindUnsigned);
    const hipChannelFormatDesc char4Desc = hipCreateChannelDesc(8, 8, 8, 8, hipChannelFormatKindSigned);
    // Transformation-matrix entries -> matEntryTex.
    checkCudaErrors(hipMalloc(d_matEntryArr, sizeof(OffsetCompType) * matEntryArrSize));
    checkCudaErrors(hipMemcpy(*d_matEntryArr, h_matEntryArr, sizeof(OffsetCompType) * matEntryArrSize, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, matEntryTex, *d_matEntryArr, int8Desc, sizeof(OffsetCompType) * matEntryArrSize));
    // Essential-clique offsets -> ecOffsetTex.
    checkCudaErrors(hipMalloc(d_ecOffsetArr, sizeof(OffsetIjkType) * EC_ARR_SIZE));
    checkCudaErrors(hipMemcpy(*d_ecOffsetArr, h_ecOffsetArr, sizeof(OffsetIjkType) * EC_ARR_SIZE, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, ecOffsetTex, *d_ecOffsetArr, char4Desc, sizeof(OffsetIjkType) * EC_ARR_SIZE));
    // Core-clique index tuples -> coreCliqueTex.
    checkCudaErrors(hipMalloc(d_coreCliqueArr, sizeof(int8_t) * CORE_CLQ_ARR_SIZE));
    checkCudaErrors(hipMemcpy(*d_coreCliqueArr, h_coreCliqueArr, sizeof(int8_t) * CORE_CLQ_ARR_SIZE, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, coreCliqueTex, *d_coreCliqueArr, int8Desc, sizeof(int8_t) * CORE_CLQ_ARR_SIZE));
    // Neighborhood-offset indices -> nbOffsIndexTex.
    checkCudaErrors(hipMalloc(d_nbOffsIndexArr, sizeof(uint8_t) * NB_OFFS_IDX_ARR_SIZE));
    checkCudaErrors(hipMemcpy(*d_nbOffsIndexArr, h_nbOffsIndexArr, sizeof(uint8_t) * NB_OFFS_IDX_ARR_SIZE, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, nbOffsIndexTex, *d_nbOffsIndexArr, uint8Desc, sizeof(uint8_t) * NB_OFFS_IDX_ARR_SIZE));
}
// Unbinds the GPU texture references and frees the device memory.
void
_clearDeviceTex(OffsetCompType* d_matEntryArr, OffsetIjkType* d_ecOffsetArr, int8_t* d_coreCliqueArr, uint8_t* d_nbOffsIndexArr)
{
    // Unbind the texture references first so no binding is left pointing at
    // freed device memory — the original only freed the buffers, despite
    // its header comment promising to unbind.
    checkCudaErrors(hipUnbindTexture(matEntryTex));
    checkCudaErrors(hipUnbindTexture(ecOffsetTex));
    checkCudaErrors(hipUnbindTexture(coreCliqueTex));
    checkCudaErrors(hipUnbindTexture(nbOffsIndexTex));
    // Release the buffers allocated in _initDeviceTex().
    checkCudaErrors(hipFree(d_matEntryArr));
    checkCudaErrors(hipFree(d_ecOffsetArr));
    checkCudaErrors(hipFree(d_coreCliqueArr));
    checkCudaErrors(hipFree(d_nbOffsIndexArr));
}
// Texture-fetch helpers: each reads one element of the corresponding
// module texture bound in _initDeviceTex().
// Fetch an essential-clique voxel offset (packed ijk).
__device__ OffsetIjkType _fetchEcOffset(uint8_t ecOffsetIdx)
{
    return tex1Dfetch(ecOffsetTex, ecOffsetIdx);
}
// Fetch a core-clique entry (an ec index, or -1 as tuple terminator).
__device__ int8_t _fetchCoreClique(uint8_t coreCliqueIdx)
{
    return tex1Dfetch(coreCliqueTex, coreCliqueIdx);
}
// Fetch a neighborhood-offset index.
__device__ uint8_t _fetchNbOffsIndex(uint8_t nbOffsIndexIter)
{
    return tex1Dfetch(nbOffsIndexTex, nbOffsIndexIter);
}
// Fetch one transformation-matrix entry.
__device__ OffsetCompType _fetchMatEntry(uint8_t matEntryIdx)
{
    return tex1Dfetch(matEntryTex, matEntryIdx);
}
// Compute the linear transformation on @offs using the matrix whose whose
// entries in @matEntryTex starts from @matTexBegin. The computed result is
// stored in @result.
__device__ void _transform(uint8_t matTexBegin, const OffsetIjkType& offs, OffsetIjkType& result)
{
    // result = M * offs, where M is the row-major 3x3 matrix stored in
    // matEntryTex at [matTexBegin, matTexBegin + 8].
    result.x = _fetchMatEntry(matTexBegin + 0) * offs.x + _fetchMatEntry(matTexBegin + 1) * offs.y + _fetchMatEntry(matTexBegin + 2) * offs.z;
    result.y = _fetchMatEntry(matTexBegin + 3) * offs.x + _fetchMatEntry(matTexBegin + 4) * offs.y + _fetchMatEntry(matTexBegin + 5) * offs.z;
    result.z = _fetchMatEntry(matTexBegin + 6) * offs.x + _fetchMatEntry(matTexBegin + 7) * offs.y + _fetchMatEntry(matTexBegin + 8) * offs.z;
}
// Dim3CliquePolicy
// Dim-3 cliques are carried by single voxels: the essential-clique and
// core-clique ranges are empty, and only the identity orientation exists.
__device__ uint8_t Dim3CliquePolicy::numFaceTokens()
{
    return 1U;
}
// Number of voxels that make up a 3-clique (the voxel itself).
__host__ __device__ uint8_t Dim3CliquePolicy::numEcVoxels()
{
    return 1U;
}
// [begin, end) range of this dimension's essential-clique offsets (empty).
__device__ uint8_t Dim3CliquePolicy::ecOffsetArrBegin()
{
    return D3ecTexBegin;
}
__device__ uint8_t Dim3CliquePolicy::ecOffsetArrEnd()
{
    return D3ecTexBegin + D3ecArrSize;
}
// [begin, end) range of this dimension's core-clique entries (empty).
__device__ uint8_t Dim3CliquePolicy::coreCliqueArrBegin()
{
    return D3coreTexBegin;
}
__device__ uint8_t Dim3CliquePolicy::coreCliqueArrEnd()
{
    return D3coreTexBegin + D3coreArrSize;
}
// [begin, end) range of this dimension's neighborhood-offset indices.
__device__ uint8_t Dim3CliquePolicy::nbOffsIndexArrBegin()
{
    return D3nbOffsIndexTexBegin;
}
__device__ uint8_t Dim3CliquePolicy::nbOffsIndexArrEnd()
{
    return D3nbOffsIndexTexBegin + D3nbOffsIndexArrSize;
}
// Start index of the 3x3 transform matrix in matEntryTex for the given
// face token; 0xff marks an invalid token.
__device__ uint8_t Dim3CliquePolicy::matBeginByFaceToken(FaceTokenType faceToken)
{
    return faceToken == 0 ? 0 : 0xff;
}
// Dim2CliquePolicy
// Dim-2 (face) cliques: four face orientations are enumerated.
__device__ uint8_t Dim2CliquePolicy::numFaceTokens()
{
    // return 3U;
    return 4U;
}
// A 2-clique spans the two voxels sharing the face.
__host__ __device__ uint8_t Dim2CliquePolicy::numEcVoxels()
{
    return 2U;
}
// [begin, end) range of the dim-2 essential-clique offsets.
__device__ uint8_t Dim2CliquePolicy::ecOffsetArrBegin()
{
    return D2ecTexBegin;
}
__device__ uint8_t Dim2CliquePolicy::ecOffsetArrEnd()
{
    return D2ecTexBegin + D2ecArrSize;
}
// [begin, end) range of the dim-2 core-clique entries.
__device__ uint8_t Dim2CliquePolicy::coreCliqueArrBegin()
{
    return D2coreTexBegin;
}
__device__ uint8_t Dim2CliquePolicy::coreCliqueArrEnd()
{
    return D2coreTexBegin + D2coreArrSize;
}
// [begin, end) range of the dim-2 neighborhood-offset indices.
__device__ uint8_t Dim2CliquePolicy::nbOffsIndexArrBegin()
{
    return D2nbOffsIndexTexBegin;
}
__device__ uint8_t Dim2CliquePolicy::nbOffsIndexArrEnd()
{
    return D2nbOffsIndexTexBegin + D2nbOffsIndexArrSize;
}
// Map a face token to the start of its 3x3 transform in matEntryTex;
// 0xff marks an invalid token.
__device__ uint8_t Dim2CliquePolicy::matBeginByFaceToken(FaceTokenType faceToken)
{
    switch (faceToken)
    {
    case D2_FACE_X:
        // default case is Face_X
        return 0;
    case D2_FACE_Y:
        // rotate +90 degrees around Z axis
        return 9U;
    case D2_FACE_Z:
        // rotate -90 degrees around Y axis
        return 18U;
    case D2_FACE_Z_NEG:
        // rotate +90 degrees around Y axis
        return 63U;
    default:
        return 0xff;
    }
}
// Dim1CliquePolicy
// Dim-1 (edge) cliques: six edge orientations are enumerated.
__device__ uint8_t Dim1CliquePolicy::numFaceTokens()
{
    return 6U;
}
// A 1-clique spans the four voxels sharing the edge.
__host__ __device__ uint8_t Dim1CliquePolicy::numEcVoxels()
{
    return 4U;
}
// [begin, end) range of the dim-1 essential-clique offsets.
__device__ uint8_t Dim1CliquePolicy::ecOffsetArrBegin()
{
    return D1ecTexBegin;
}
__device__ uint8_t Dim1CliquePolicy::ecOffsetArrEnd()
{
    return D1ecTexBegin + D1ecArrSize;
}
// [begin, end) range of the dim-1 core-clique entries.
__device__ uint8_t Dim1CliquePolicy::coreCliqueArrBegin()
{
    return D1coreTexBegin;
}
__device__ uint8_t Dim1CliquePolicy::coreCliqueArrEnd()
{
    return D1coreTexBegin + D1coreArrSize;
}
// [begin, end) range of the dim-1 neighborhood-offset indices.
__device__ uint8_t Dim1CliquePolicy::nbOffsIndexArrBegin()
{
    return D1nbOffsIndexTexBegin;
}
__device__ uint8_t Dim1CliquePolicy::nbOffsIndexArrEnd()
{
    return D1nbOffsIndexTexBegin + D1nbOffsIndexArrSize;
}
// Map an edge token to the start of its 3x3 transform in matEntryTex;
// 0xff marks an invalid token.
__device__ uint8_t Dim1CliquePolicy::matBeginByFaceToken(FaceTokenType faceToken)
{
    switch (faceToken)
    {
    case D1_EDGE_1:
        // default case is edge 1
        return 0;
    case D1_EDGE_2:
        // rotate +90 degrees around Z axis
        return 9U;
    case D1_EDGE_5:
        // roate -90 degrees around X axis
        return 27U;
    case D1_EDGE_6:
        // rotate +90 degrees around X axis
        return 36U;
    case D1_EDGE_9:
        // rotate 180 degrees around X axis
        return 45U;
    case D1_EDGE_10:
        // rotate +90 degrees around Z axis first to obtain X'Y'Z',
        // then rotate 180 degrees around X' axis
        return 54U;
    default:
        return 0xff;
    }
}
// Dim0CliquePolicy
// Dim-0 (vertex) cliques: four vertex orientations are enumerated.
__device__ uint8_t Dim0CliquePolicy::numFaceTokens()
{
    return 4U;
}
// A 0-clique spans the eight voxels sharing the vertex.
__host__ __device__ uint8_t Dim0CliquePolicy::numEcVoxels()
{
    return 8U;
}
// [begin, end) range of the dim-0 essential-clique offsets.
__device__ uint8_t Dim0CliquePolicy::ecOffsetArrBegin()
{
    return D0ecTexBegin;
}
__device__ uint8_t Dim0CliquePolicy::ecOffsetArrEnd()
{
    return D0ecTexBegin + D0ecArrSize;
}
// [begin, end) range of the dim-0 core-clique entries.
__device__ uint8_t Dim0CliquePolicy::coreCliqueArrBegin()
{
    return D0coreTexBegin;
}
__device__ uint8_t Dim0CliquePolicy::coreCliqueArrEnd()
{
    return D0coreTexBegin + D0coreArrSize;
}
// [begin, end) range of the dim-0 neighborhood-offset indices (empty).
__device__ uint8_t Dim0CliquePolicy::nbOffsIndexArrBegin()
{
    return D0nbOffsIndexTexBegin;
}
__device__ uint8_t Dim0CliquePolicy::nbOffsIndexArrEnd()
{
    return D0nbOffsIndexTexBegin + D0nbOffsIndexArrSize;
}
// Map a vertex token to the start of its 3x3 transform in matEntryTex;
// 0xff marks an invalid token.
__device__ uint8_t Dim0CliquePolicy::matBeginByFaceToken(FaceTokenType faceToken)
{
    switch (faceToken)
    {
    case D0_VERTEX_1:
        // default case is vertex 1
        return 0;
    case D0_VERTEX_2:
        // rotate +90 degrees around X axis
        return 36U;
    case D0_VERTEX_5:
        // rotate -90 degrees around X axis
        return 27U;
    case D0_VERTEX_6:
        // rotate 180 degrees around X axis
        return 45U;
    default:
        return 0xff;
    }
}
// Binary search of the 3D discrete coordinate, @targetIjk, in @compactIjkArr.
// This is necessary due to few libraries on the device side.
//
// [precondition] @comapctIjkArr is sorted in ascending order.
__device__ ArrIndexType
_binSearch(const IjkType* compactIjkArr, const IjkType& targetIjk, ArrIndexType lo, ArrIndexType hi)
{
    // Binary search for targetIjk within compactIjkArr[lo, hi).
    // Precondition: the array is sorted ascending (per less()).
    // Returns the matching index, or INVALID_UINT when absent.
    while (lo < hi)
    {
        const ArrIndexType mid = lo + ((hi - lo) >> 1);
        if (isEqual(compactIjkArr[mid], targetIjk))
        {
            return mid;
        }
        if (less(compactIjkArr[mid], targetIjk))
        {
            lo = mid + 1;       // target lies in the upper half
        }
        else
        {
            hi = mid;           // target lies in the lower half
        }
    }
    return INVALID_UINT;
}
// Find the 3D discrete coordinate, @targetIjk, in @compactIjkArr, if the
// reference 3D coordinate, @refIjk, and its index in @compactIjkArr, @refIndex,
// are known.
//
// [precondition] @comapctIjkArr is sorted.
// [precondition] @compactIjkArr[@refIndex] == @refIjk
__device__ ArrIndexType
_findIndexOfIjk(const IjkType* compactIjkArr, const unsigned arrSize, const IjkType& targetIjk,
	const IjkType& refIjk, const ArrIndexType refIndex)
{
    // Locate targetIjk in the sorted compactIjkArr, using a known anchor:
    // compactIjkArr[refIndex] == refIjk. Only the half of the array on
    // targetIjk's side of the anchor needs searching.
    if (isEqual(targetIjk, refIjk))
    {
        return refIndex;
    }
    return less(targetIjk, refIjk)
        ? _binSearch(compactIjkArr, targetIjk, 0, refIndex)
        : _binSearch(compactIjkArr, targetIjk, refIndex + 1, arrSize);
}
/*
// Find the targetIjk 3D discrete coordinate in compactIjkArr, if the reference
// 3D coord refIjk and its index in compactIjkArr, refIndex, are Unknown. The
// function will have to search for the entire array.
// [precondition]: comapctIjkArr is sorted
__device__ ArrIndexType
_findIndexOfIjk(const IjkType* compactIjkArr, const unsigned arrSize, const IjkType targetIjk)
{
return _binSearch(compactIjkArr, targetIjk, 0, arrSize);
}
*/
// Find if the target 3D discrete coordinate, @refIjk + @offs, exists in
// @compactIjkArr.
//
// [precondition] comapctIjkArr is sorted
// [precondition] @compactIjkArr[@refIndex] == @refIjk
// [postcondition] If returns is true, then @compactIjkArr[@foundIndex] ==
// @refIjk + @offs.
__device__ bool
_find(ArrIndexType& foundIndex, const IjkType* compactIjkArr, const unsigned arrSize, const IjkType& refIjk,
	const ArrIndexType refIndex, const OffsetIjkType& offs, const IjkType& size3D)
{
    // Check whether the voxel at refIjk + offs exists in compactIjkArr.
    // On success, foundIndex holds its array index.
    IjkType targetIjk;
    if (tp::_isInBoundary(refIjk, offs, size3D, targetIjk))
    {
        foundIndex = _findIndexOfIjk(compactIjkArr, arrSize, targetIjk, refIjk, refIndex);
        return foundIndex != INVALID_UINT;
    }
    // Offset falls outside the volume: cannot exist.
    return false;
}
/*
__device__ bool
_find(ArrIndexType& foundIndex, const IjkType* compactIjkArr, const unsigned arrSize, const IjkType& refIjk,
const ArrIndexType refIndex, const IjkType& size3D)
{
return _find(foundIndex, compactIjkArr, arrSize, refIjk, refIndex, makeOffsetIjk(0, 0, 0), size3D);
}
*/
// Adds an additional check on top of _find(): even when @refIjk + @offs is
// present in @compactIjkArr, the @nthBit of the matching entry in
// @recBitsArr must also be set for this function to return true.
__device__ bool
_findInRecBitsArr(ArrIndexType& foundIndex, const IjkType* compactIjkArr, const RecBitsType* recBitsArr,
const uint8_t nthBit, const unsigned arrSize, const IjkType& refIjk, const ArrIndexType refIndex,
const OffsetIjkType& offs, const IjkType& size3D)
{
    // Short-circuit: the bit test only runs when the voxel was found, so
    // foundIndex is guaranteed valid at that point.
    return _find(foundIndex, compactIjkArr, arrSize, refIjk, refIndex, offs, size3D)
        && (tp::_readBit(recBitsArr[foundIndex], nthBit) == 1);
}
// Returns true when the voxel at @refIjk + @offs exists in @compactIjkArr
// and carries the X record bit (i.e. it still belongs to the set X).
__device__ bool
_findInX(ArrIndexType& foundIndex, const IjkType* compactIjkArr, const RecBitsType* recBitsArr,
const unsigned arrSize, const IjkType& refIjk, const ArrIndexType refIndex,
const OffsetIjkType& offs, const IjkType& size3D)
{
    using namespace details;
    return _findInRecBitsArr(foundIndex, compactIjkArr, recBitsArr, REC_BIT_X,
        arrSize, refIjk, refIndex, offs, size3D);
}
__device__ bool
// DevDataPack overload: additionally requires the found voxel to share the
// reference voxel's ID when per-voxel object IDs are enabled.
_findInX(ArrIndexType& foundIndex, const details::DevDataPack& thinData, const IjkType& refIjk,
const ArrIndexType refIndex, const OffsetIjkType& offs)
{
    if (!_findInX(foundIndex, thinData.compactIjkArr, thinData.recBitsArr, thinData.arrSize,
        refIjk, refIndex, offs, thinData.size3D()))
    {
        return false;
    }
    // With voxel IDs enabled, only a match inside the same object counts.
    return !thinData.useVoxelID()
        || (thinData.voxelIdArr[foundIndex] == thinData.voxelIdArr[refIndex]);
}
// Existence-only variant of _findInX(); the found index is discarded.
__device__ bool
_containsInX(const IjkType* compactIjkArr, const RecBitsType* recBitsArr,
const unsigned arrSize, const IjkType& refIjk, const ArrIndexType refIndex,
const OffsetIjkType& offs, const IjkType& size3D)
{
    ArrIndexType ignoredIndex;
    return _findInX(ignoredIndex, compactIjkArr, recBitsArr, arrSize,
        refIjk, refIndex, offs, size3D);
}
__device__ bool
// Existence-only variant of the DevDataPack _findInX() overload.
_containsInX(const details::DevDataPack& thinData, const IjkType& refIjk, const ArrIndexType refIndex, const OffsetIjkType& offs)
{
    ArrIndexType ignoredIndex;
    return _findInX(ignoredIndex, thinData, refIjk, refIndex, offs);
}
// Check whether @ijk + @offsIjk stays inside the 3x3x3 neighborhood, i.e.
// every component of the sum lies in [-1, 1]. On success @resultIjk holds the
// component-wise sum; an out-of-range component is written as the 0xff
// sentinel instead.
// NOTE(review): the && chain below short-circuits, so when an earlier
// component fails, the later components of @resultIjk are left unset —
// callers must only read @resultIjk when the function returns true.
__device__ bool _isInNbBoundary(const OffsetIjkType& ijk, const OffsetIjkType& offsIjk, OffsetIjkType& resultIjk)
{
auto checker = [](OffsetCompType coord, OffsetCompType offs, OffsetCompType& result)
{
result = coord + offs;
bool flag = (-1 <= result) && (result <= 1);
// Branchless select: keep the in-range sum, otherwise the 0xff sentinel.
result = flag * result + (1 - flag) * 0xff;
return flag;
};
return checker(ijk.x, offsIjk.x, resultIjk.x) &&
checker(ijk.y, offsIjk.y, resultIjk.y) &&
checker(ijk.z, offsIjk.z, resultIjk.z);
}
// Build a neighborhood mask relative to the @nthNb-th neighbor from a clique
// neighborhood mask: bit i of the result is set when the voxel at combined
// offset (offset of @nthNb) + (offset i) stays inside the 3x3x3 neighborhood
// and its bit is set in @cliqueNbMask.
__device__ nb::NbMaskType _genNbMaskFromCliqueNbMask(nb::NbMaskType cliqueNbMask, uint8_t nthNb)
{
nb::NbMaskType nbMask = 0;
// Offset of the neighbor the mask is being rebuilt around.
OffsetIjkType curIjk = nb::fetchNbOffset(nthNb);
for (uint8_t nbOffsetIdx = 0; nbOffsetIdx < NB_OFFS_ARR_SIZE; ++nbOffsetIdx)
{
OffsetIjkType offsIjk = nb::fetchNbOffset(nbOffsetIdx);
OffsetIjkType targetIjk;
// Skip combined offsets that leave the 3x3x3 neighborhood.
if (_isInNbBoundary(curIjk, offsIjk, targetIjk))
{
uint8_t targetNthBit = nb::fetchIndexOfNbOffset(targetIjk);
if (tp::_readBit(cliqueNbMask, targetNthBit))
{
tp::_setBit(nbMask, nbOffsetIdx);
}
}
}
return nbMask;
}
// For every entry of @recBitsArr, copy record bit SRC into record bit DST
// (DST := SRC). One thread per entry; launched on a 2D grid of 1D blocks.
__global__ void _assignKern(RecBitsType* recBitsArr, const unsigned arrSize, const uint8_t SRC, const uint8_t DST)
{
    ArrIndexType idx = blockIdx.y * gridDim.x + blockIdx.x;
    idx = idx * blockDim.x + threadIdx.x;
    if (idx >= arrSize)
    {
        return;
    }
    if (tp::_readBit(recBitsArr[idx], SRC))
    {
        tp::_setBit(recBitsArr[idx], DST);
    }
    else
    {
        tp::_clearBit(recBitsArr[idx], DST);
    }
}
// Set union into DST: entries whose SRC bit is set in @srcRecBitsArr get
// their DST bit set in @dstRecBitsArr; all other entries are left untouched
// (unlike _assignKern, nothing is ever cleared).
__global__ void _unionKern(RecBitsType* srcRecBitsArr, RecBitsType* dstRecBitsArr, const unsigned arrSize,
const uint8_t SRC, const uint8_t DST)
{
    ArrIndexType idx = blockIdx.y * gridDim.x + blockIdx.x;
    idx = idx * blockDim.x + threadIdx.x;
    if ((idx < arrSize) && tp::_readBit(srcRecBitsArr[idx], SRC))
    {
        tp::_setBit(dstRecBitsArr[idx], DST);
    }
}
// Clears record bit @BIT in every entry of @recBitsArr.
__global__ void _clearKern(RecBitsType* recBitsArr, const unsigned arrSize, const uint8_t BIT)
{
    ArrIndexType idx = blockIdx.y * gridDim.x + blockIdx.x;
    idx = idx * blockDim.x + threadIdx.x;
    if (idx < arrSize)
    {
        tp::_clearBit(recBitsArr[idx], BIT);
    }
}
// Unary predicate for thrust algorithms: true iff the stored record bit is
// set in the given bits value. Callable on both host and device.
class _BitPred
{
public:
    __host__ __device__ _BitPred(uint8_t bit) : m_nthBit(bit) { }

    __host__ __device__ bool operator()(const RecBitsType bits) const
    {
        return tp::_readBit(bits, m_nthBit) == 1;
    }

private:
    // Which record bit this predicate tests.
    uint8_t m_nthBit;
};
// Host-side count of the entries in the device array @d_recBitsArr whose
// record bit @BIT is set.
unsigned _countBit(RecBitsType* d_recBitsArr, const unsigned arrSize, const uint8_t BIT)
{
    return static_cast<unsigned>(
        thrust::count_if(thrust::device, d_recBitsArr, d_recBitsArr + arrSize, _BitPred(BIT)));
}
// flagArr[i] := 1 when voxel i is still active (present in X or in K),
// otherwise 0. Input to the stream-compaction scan.
__global__ void _flagActiveKern(ArrIndexType* flagArr, const RecBitsType* recBitsArr, const unsigned arrSize)
{
    using namespace details;
    ArrIndexType idx = blockIdx.y * gridDim.x + blockIdx.x;
    idx = idx * blockDim.x + threadIdx.x;
    if (idx >= arrSize)
    {
        return;
    }
    const RecBitsType bits = recBitsArr[idx];
    flagArr[idx] = tp::_readBit(bits, REC_BIT_X) || tp::_readBit(bits, REC_BIT_K);
}
// Scatter step of stream compaction: copy the flagged entries of @srcArr to
// their dense destination positions given by the exclusive scan @flagScanArr.
template <typename T>
__global__ void
_compactArrsKern(T* dstArr, const T* srcArr, const ArrIndexType* flagArr, const ArrIndexType* flagScanArr, const unsigned arrSize)
{
    ArrIndexType idx = blockIdx.y * gridDim.x + blockIdx.x;
    idx = idx * blockDim.x + threadIdx.x;
    // Only flagged (surviving) entries are copied.
    if ((idx < arrSize) && (flagArr[idx] != 0))
    {
        dstArr[flagScanArr[idx]] = srcArr[idx];
    }
}
// Stamp the iteration at which each voxel first enters set Z: when the Z bit
// is set and no birth iteration is recorded yet (0 means "not born"), store
// @iter.
__global__ void _updateBirthKern(unsigned* birthArr, const RecBitsType* recBitsArr, const unsigned arrSize, const unsigned iter)
{
    using namespace details;
    ArrIndexType idx = blockIdx.y * gridDim.x + blockIdx.x;
    idx = idx * blockDim.x + threadIdx.x;
    if (idx >= arrSize)
    {
        return;
    }
    const bool inZ = tp::_readBit(recBitsArr[idx], REC_BIT_Z) != 0;
    if (inZ && (birthArr[idx] == 0))
    {
        birthArr[idx] = iter;
    }
}
// Promote persistent voxels into K: a voxel with the Y bit set, a recorded
// birth, and an age of at least @p iterations (iter + 1 - birth >= p) gets
// its K bit set.
__global__ void _unionKsetByBirth(RecBitsType* recBitsArr, const unsigned* birthArr, const unsigned arrSize, const unsigned iter, const unsigned p)
{
    using namespace details;
    ArrIndexType idx = blockIdx.y * gridDim.x + blockIdx.x;
    idx = idx * blockDim.x + threadIdx.x;
    if (idx >= arrSize)
    {
        return;
    }
    const unsigned birth = birthArr[idx];
    if (tp::_readBit(recBitsArr[idx], REC_BIT_Y) && (birth != 0) && (iter + 1U - birth >= p))
    {
        tp::_setBit(recBitsArr[idx], REC_BIT_K);
    }
}
// Flag the voxels that remain active (in X or K) after one thinning
// iteration, exclusive-scan the flags into @d_flagScanArr, and return the
// number of surviving voxels, i.e. the size of the compacted arrays.
//
// [precondition] @d_flagArr and @d_flagScanArr hold at least @arrSize entries.
unsigned _flagVoxelsInXorK(ArrIndexType* d_flagArr, ArrIndexType* d_flagScanArr, const RecBitsType* d_recBitsArr,
const unsigned arrSize, const dim3& blocksDim, const dim3& threadsDim)
{
    // Guard the empty case: the tail reads below would otherwise access
    // d_flagArr[-1].
    if (arrSize == 0)
    {
        return 0;
    }
    // Find out the active voxels in X or K after one iteration of thinning
    hipLaunchKernelGGL(( _flagActiveKern), dim3(blocksDim), dim3(threadsDim), 0, 0, d_flagArr, d_recBitsArr, arrSize);
    hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
    // Scan the flags array
    thrust::exclusive_scan(thrust::device, d_flagArr, d_flagArr + arrSize, d_flagScanArr);
    // Copy sizeof(ArrIndexType) (the arrays' actual element type) rather than
    // the previous hard-coded sizeof(unsigned), so the reads stay correct if
    // ArrIndexType is ever retyped.
    ArrIndexType lastFlagArrElem, lastFlagScanArrElem;
    checkCudaErrors(hipMemcpy(&lastFlagArrElem, d_flagArr + arrSize - 1, sizeof(ArrIndexType), hipMemcpyDeviceToHost));
    checkCudaErrors(hipMemcpy(&lastFlagScanArrElem, d_flagScanArr + arrSize - 1, sizeof(ArrIndexType), hipMemcpyDeviceToHost));
    // New array size is flagArr[-1] + flagScanArr[-1], since flagScanArr is
    // an exclusive scan.
    return lastFlagArrElem + lastFlagScanArrElem;
}
// unsigned _shrinkArrs(ThinningData& thinData, const dim3& blocksDim, const dim3& threadsDim)
unsigned _shrinkArrs(details::DevDataPack& thinData, const dim3& blocksDim, const dim3& threadsDim)
{
ArrIndexType* d_flagArr;
checkCudaErrors(hipMalloc(&d_flagArr, sizeof(ArrIndexType) * thinData.arrSize));
checkCudaErrors(hipMemset(d_flagArr, 0, sizeof(ArrIndexType) * thinData.arrSize));
ArrIndexType* d_flagScanArr;
checkCudaErrors(hipMalloc(&d_flagScanArr, sizeof(ArrIndexType) * thinData.arrSize));
checkCudaErrors(hipMemset(d_flagScanArr, 0, sizeof(ArrIndexType) * thinData.arrSize));
unsigned newArrSize = _flagVoxelsInXorK(d_flagArr, d_flagScanArr, thinData.recBitsArr, thinData.arrSize, blocksDim, threadsDim);
// Create two new arrays to stgore the active voxels information by
// performing a scatter operation on the original two arrays.
IjkType* d_dstIjkArr;
checkCudaErrors(hipMalloc(&d_dstIjkArr, sizeof(IjkType) * newArrSize));
hipLaunchKernelGGL(( _compactArrsKern), dim3(blocksDim), dim3(threadsDim), 0, 0, d_dstIjkArr, thinData.compactIjkArr, d_flagArr, d_flagScanArr, thinData.arrSize);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// Free the unused device memory. Notice that the ORIGINAL voxel arrays
// are being freed!
checkCudaErrors(hipFree(thinData.compactIjkArr));
// Store the address of the device memory
thinData.compactIjkArr = d_dstIjkArr;
RecBitsType* d_dstRecBitsArr;
checkCudaErrors(hipMalloc(&d_dstRecBitsArr, sizeof(RecBitsType) * newArrSize));
hipLaunchKernelGGL(( _compactArrsKern), dim3(blocksDim), dim3(threadsDim), 0, 0, d_dstRecBitsArr, thinData.recBitsArr, d_flagArr, d_flagScanArr, thinData.arrSize);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(thinData.recBitsArr));
thinData.recBitsArr = d_dstRecBitsArr;
if (thinData.birthArr)
{
unsigned* d_dstBirthArr;
checkCudaErrors(hipMalloc(&d_dstBirthArr, sizeof(unsigned) * newArrSize));
hipLaunchKernelGGL(( _compactArrsKern), dim3(blocksDim), dim3(threadsDim), 0, 0, d_dstBirthArr, thinData.birthArr, d_flagArr, d_flagScanArr, thinData.arrSize);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(thinData.birthArr));
thinData.birthArr = d_dstBirthArr;
}
if (thinData.useVoxelID())
{
ObjIdType* d_dstVoxelIdArr;
checkCudaErrors(hipMalloc(&d_dstVoxelIdArr, sizeof(ObjIdType) * newArrSize));
hipLaunchKernelGGL(( _compactArrsKern), dim3(blocksDim), dim3(threadsDim), 0, 0, d_dstVoxelIdArr, thinData.voxelIdArr, d_flagArr, d_flagScanArr, thinData.arrSize);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(thinData.voxelIdArr));
thinData.voxelIdArr = d_dstVoxelIdArr;
}
checkCudaErrors(hipFree(d_flagArr));
checkCudaErrors(hipFree(d_flagScanArr));
return newArrSize;
}
}; // namespace thin::clique::_private;
// Allocates this module's lookup tables on the device and binds them to the
// texture references used by the clique-checking kernels. Call once before
// any thinning work; pair with shutdownDevice().
void initDevice()
{
cp::DevArrPtrs* ptrs = cp::DevArrPtrs::instance();
cp::_initDeviceTex(&(ptrs->d_matEntryArr), &(ptrs->d_ecOffsetArr), &(ptrs->d_coreCliqueArr), &(ptrs->d_nbOffsIndexArr));
}
// Releases the device lookup tables allocated by initDevice(). The pointers
// stored in the DevArrPtrs singleton are left dangling afterwards, so no
// thinning kernels may run after this call.
void shutdownDevice()
{
cp::DevArrPtrs* ptrs = cp::DevArrPtrs::instance();
cp::_clearDeviceTex(ptrs->d_matEntryArr, ptrs->d_ecOffsetArr, ptrs->d_coreCliqueArr, ptrs->d_nbOffsIndexArr);
}
// void crucialIsthmus(ThinningData& thinData, const dim3& blocksDim, const dim3& threadsDim)
void crucialIsthmus(details::DevDataPack& thinData, const dim3& blocksDim, const dim3& threadsDim)
{
using namespace details;
auto TIMER = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( cp::_assignKern), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.recBitsArr, thinData.arrSize, REC_BIT_K, REC_BIT_Y);
TIMER_END(">>>> crucialIsthmus::_assignKern()", TIMER);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
TIMER = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( cp::_clearKern), dim3(blocksDim), dim3(threadsDim), 0, 0, thinData.recBitsArr, thinData.arrSize, REC_BIT_Z);
TIMER_END(">>>> crucialIsthmus::_clearKern()", TIMER);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// clear A and B set
TIMER = std::chrono::high_resolution_clock::now();
checkCudaErrors(hipMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
TIMER_END(">>>> crucialIsthmus::hipMemset(clear A and B set)", TIMER);
// Find 3-cliques that are crucial for <X, K>
TIMER = std::chrono::high_resolution_clock::now();
dimCrucialIsthmus<D3CliqueChecker>(thinData, blocksDim, threadsDim);
TIMER_END(">>>> crucialIsthmus::D3CliqueChecker(find 3-cliques that are crucial for <X, K>)", TIMER);
// clear A and B set
TIMER = std::chrono::high_resolution_clock::now();
checkCudaErrors(hipMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
TIMER_END(">>>> crucialIsthmus::hipMemset(clear A and B set)", TIMER);
// Find 2-cliques that are crucial for <X, K>
TIMER = std::chrono::high_resolution_clock::now();
dimCrucialIsthmus<D2CliqueChecker>(thinData, blocksDim, threadsDim);
TIMER_END(">>>> crucialIsthmus::D2CliqueChecker(Find 2-cliques that are crucial for <X, K>)", TIMER);
// clear A and B set
TIMER = std::chrono::high_resolution_clock::now();
checkCudaErrors(hipMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
TIMER_END(">>>> crucialIsthmus::hipMemset(clear A and B set)", TIMER);
// Find 1-cliques that are crucial for <X, K>
TIMER = std::chrono::high_resolution_clock::now();
dimCrucialIsthmus<D1CliqueChecker>(thinData, blocksDim, threadsDim);
TIMER_END(">>>> crucialIsthmus::D1CliqueChecker(Find 1-cliques that are crucial for <X, K>)", TIMER);
// clear A and B set
TIMER = std::chrono::high_resolution_clock::now();
checkCudaErrors(hipMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
TIMER_END(">>>> crucialIsthmus::hipMemset(clear A and B set)", TIMER);
// Find 0-cliques that are crucial for <X, K>
TIMER = std::chrono::high_resolution_clock::now();
dimCrucialIsthmus<D0CliqueChecker>(thinData, blocksDim, threadsDim);
TIMER_END(">>>> crucialIsthmus::D0CliqueChecker(Find 0-cliques that are crucial for <X, K>)", TIMER);
// clear A and B set
TIMER = std::chrono::high_resolution_clock::now();
checkCudaErrors(hipMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
checkCudaErrors(hipMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
TIMER_END(">>>> crucialIsthmus::hipMemset(clear A and B set)", TIMER);
}
}; // namespace thin::clique;
}; // namespace thin;
| 01c57127de45f95a78d9b82e9709209d383b1361.cu | #include <memory> // std::unique_ptr
#include "cuda_texture_types.h" // texture
#include "texture_fetch_functions.h" // tex1Dfetch
#include "cuda_includes.h"
#include "clique.cuh"
// For profiling execution times
#include <chrono>
#ifndef TIMER_END
#define TIMER_END(str, start) std::cout << std::setw(6) << std::right << \
std::chrono::duration_cast<std::chrono::milliseconds>( \
std::chrono::high_resolution_clock::now()-start).count() << \
" ms " << str << std::endl;
#endif
namespace thin
{
namespace clique
{
namespace _private
{
// When we are checking if a clique can be formed at a certain d-face, we use
// the spatial mask template from [1] Section 10.
//
// For each dimension d, a default d-face is selected, with the offsets in the
// template set to work for this d-face. For the rest d-faces of the same
// dimension, we can apply a linear transformation on the default offsets to
// obtain the correct template. After enumerating all the cases, we will need 72
// transformation matrices, which are stored in a 1D texture of size 72 (8 x 9).
//
// Each row below is one 3x3 matrix stored row-major; the trailing number in
// each comment is that matrix's starting index inside the texture, which is
// what the policies' matBeginByFaceToken() functions return.
const unsigned matEntryArrSize = 72U;
const OffsetCompType h_matEntryArr[matEntryArrSize] =
{
// identity
// D3::m_mat, D2::m_mat_X, D1::m_mat_1, D0::m_mat_0
1, 0, 0, 0, 1, 0, 0, 0, 1, // 0
// rotate +90 degrees around Z axis
// D2::m_mat_Y, D1::m_mat_2
0, -1, 0, 1, 0, 0, 0, 0, 1, // 9
// rotate -90 degrees around Y axis
// D2::m_mat_Z,
0, 0, -1, 0, 1, 0, 1, 0, 0, // 18
// rotate -90 degrees around X axis
// D1::m_mat_5, D0::m_mat_5
1, 0, 0, 0, 0, 1, 0, -1, 0, // 27
// rotate +90 degrees around X axis
// D1::m_mat_6, D0::m_mat_2
1, 0, 0, 0, 0, -1, 0, 1, 0, // 36
// rotate 180 degrees around X axis
// D1::m_mat_9, D0::m_mat_6
1, 0, 0, 0, -1, 0, 0, 0, -1, // 45
// rotate +90 degrees around Z axis first to obtain X'Y'Z',
// then rotate 180 degrees around X' axis
// D1::m_mat_10
0, 1, 0, 1, 0, 0, 0, 0, -1, // 54
// rotate +90 degrees around Y axis
// D2::m_mat_Z_neg
0, 0, 1, 0, 1, 0, -1, 0, 0 // 63
};
// Per-dimension lookup-table geometry. For each clique dimension d, the
// essential-clique offsets, core-clique tuples and neighborhood-offset
// indices of all dimensions are concatenated into three shared textures;
// the *TexBegin constants give each dimension's starting offset in them.
//
// Dim 3, Voxel
//
// number of voxels to form the essential 3-clique
const uint8_t D3ecArrSize = 0;
// number of core voxels to form 3-clique
const uint8_t D3coreArrSize = 0;
// number of neighborhood offsets of 3-clique
const uint8_t D3nbOffsIndexArrSize = 26U;
// the beginning index in the device texture reference for essential clique
const uint8_t D3ecTexBegin = 0;
// the beginning index in the device texture reference for core voxels
const uint8_t D3coreTexBegin = 0;
// the beginning index in the device texture reference for the indices of the
// neighborhood offsets
const uint8_t D3nbOffsIndexTexBegin = 0;
// Dim 2, Face
//
// number of voxels to form the essential 2-clique
const uint8_t D2ecArrSize = 1U;
// number of core voxels to form 2-clique
const uint8_t D2coreArrSize = 2U;
// number of neighborhood offsets of 2-clique
const uint8_t D2nbOffsIndexArrSize = 16U;
// the beginning index in the device texture reference for essential clique
const uint8_t D2ecTexBegin = D3ecTexBegin + D3ecArrSize;
// the beginning index in the device texture reference for core voxels
const uint8_t D2coreTexBegin = D3coreTexBegin + D3coreArrSize;
// the beginning index in the device texture reference for the indices of the
// neighborhood offsets
const uint8_t D2nbOffsIndexTexBegin = D3nbOffsIndexTexBegin + D3nbOffsIndexArrSize;
// Dim 1, Edge
//
// number of voxels to form the essential 1-clique
const uint8_t D1ecArrSize = 3U;
// number of core voxels to form 1-clique
const uint8_t D1coreArrSize = 5U;
// number of neighborhood offsets of 1-clique
const uint8_t D1nbOffsIndexArrSize = 8U;
// the beginning index in the device texture reference for essential clique
const uint8_t D1ecTexBegin = D2ecTexBegin + D2ecArrSize;
// the beginning index in the device texture reference for core voxels
const uint8_t D1coreTexBegin = D2coreTexBegin + D2coreArrSize;
// the beginning index in the device texture reference for the indices of the
// neighborhood offsets
const uint8_t D1nbOffsIndexTexBegin = D2nbOffsIndexTexBegin + D2nbOffsIndexArrSize;
// Dim 0, Vertex
//
// number of voxels to form the essential 0-clique
const uint8_t D0ecArrSize = 7U;
// number of core voxels to form 0-clique
const uint8_t D0coreArrSize = 11U;
// number of neighborhood offsets of 0-clique
const uint8_t D0nbOffsIndexArrSize = 0;
// the beginning index in the device texture reference for essential clique
const uint8_t D0ecTexBegin = D1ecTexBegin + D1ecArrSize;
// the beginning index in the device texture reference for core voxels
const uint8_t D0coreTexBegin = D1coreTexBegin + D1coreArrSize;
// the beginning index in the device texture reference for the indices of the
// neighborhood offsets
const uint8_t D0nbOffsIndexTexBegin = D1nbOffsIndexTexBegin + D1nbOffsIndexArrSize;
// Essential Clique array size
const unsigned EC_ARR_SIZE = D3ecArrSize + D2ecArrSize + D1ecArrSize + D0ecArrSize;
// Core clique voxel array size
const unsigned CORE_CLQ_ARR_SIZE = D3coreArrSize + D2coreArrSize + D1coreArrSize + D0coreArrSize;
// Indices of the neighborhood offsets array size
const unsigned NB_OFFS_IDX_ARR_SIZE = D3nbOffsIndexArrSize + D2nbOffsIndexArrSize + D1nbOffsIndexArrSize + D0nbOffsIndexArrSize;
// number of neighbors in a full 3x3x3 neighborhood (26-adjacency)
const unsigned NB_OFFS_ARR_SIZE = 26U;
// Essential clique array: per-dimension voxel offsets (relative to the
// reference voxel) that make up the essential clique; concatenated across
// dimensions in the D3/D2/D1/D0 order given by the *ecTexBegin constants.
const OffsetIjkType h_ecOffsetArr[EC_ARR_SIZE] =
{
// Dim 3 does not have any ec
// Dim 2
makeOffsetIjk(1,0,0),
// Dim 1
makeOffsetIjk(1,0,0), makeOffsetIjk(1,0,-1), makeOffsetIjk(0,0,-1),
// Dim 0
makeOffsetIjk(0,-1,-1), makeOffsetIjk(0,0,-1), makeOffsetIjk(1,-1,-1), makeOffsetIjk(1,0,-1),
makeOffsetIjk(0,-1,0), makeOffsetIjk(1,-1,0), makeOffsetIjk(1,0,0)
};
// Core clique voxel array.
//
// For each dimension d, the associated entry is a list of tuples with variable
// length. For this to work on the device side, there is a -1 inserted between
// every two tuples, indicating the end of the previous one.
// Each non-negative value is an index into h_ecOffsetArr (offset by the
// dimension's *ecTexBegin).
const int8_t h_coreCliqueArr[CORE_CLQ_ARR_SIZE] =
{
// Dim 3 does not have any core clique index
// Dim 2
0 + D2ecTexBegin, -1,
// Dim 1
1 + D1ecTexBegin, -1,
0 + D1ecTexBegin, 2 + D1ecTexBegin, -1,
// Dim 0
2 + D0ecTexBegin, -1,
0 + D0ecTexBegin, 3 + D0ecTexBegin, -1,
0 + D0ecTexBegin, 5 + D0ecTexBegin, -1,
3 + D0ecTexBegin, 5 + D0ecTexBegin, -1
};
// Indices of neighborhood offsets array: for each dimension, the subset (and
// traversal order) of the 26 neighborhood offsets relevant to its cliques.
const uint8_t h_nbOffsIndexArr[NB_OFFS_IDX_ARR_SIZE] =
{
// Dim 3 nb offset indices
0, 11, 3, 8, 20, 10, 1, 9, 2, 12, 24, 15, 21, 23, 13, 22, 14, 4, 19, 7, 16, 25, 18, 5, 17, 6,
// Dim 2 nb offset indices
8, 20, 10, 1, 9, 2, 21, 23, 13, 14, 16, 25, 18, 5, 17, 6,
// Dim 1 nb offset indices
8, 10, 1, 2, 21, 23, 13, 14
};
// A singleton class that unions all the device pointers for texture reference.
// It only keeps the raw device pointers handed out by _initDeviceTex() so
// that shutdownDevice() can free them later; it performs no allocation
// itself.
// NOTE(review): instance() is not thread-safe — assumed to be called from a
// single host thread; confirm against callers.
class DevArrPtrs
{
public:
static DevArrPtrs* instance()
{
if (!m_instance)
{
m_instance = std::unique_ptr<DevArrPtrs>(new DevArrPtrs);
}
return m_instance.get();
}
// Device-side copies of the host lookup tables (freed in _clearDeviceTex).
OffsetCompType* d_matEntryArr;
OffsetIjkType* d_ecOffsetArr;
int8_t* d_coreCliqueArr;
uint8_t* d_nbOffsIndexArr;
// OffsetIjkType* d_nbOffsetArr;
// uint8_t* d_nbFlatIjkToIndexLut;
private:
static std::unique_ptr<DevArrPtrs> m_instance;
};
std::unique_ptr<DevArrPtrs> DevArrPtrs::m_instance = nullptr;
// Texture references read by the device-side _fetch* helpers below; bound in
// _initDeviceTex() and intended to be released in _clearDeviceTex().
tp::Int8TexType matEntryTex;
tp::OffsetIjkTexType ecOffsetTex;
tp::Int8TexType coreCliqueTex;
tp::Uint8TexType nbOffsIndexTex;
// Initialize the device texture references of this module: upload the four
// host lookup tables to the device and bind each to its texture reference.
// The device pointers are returned through the out-parameters so the caller
// can free them later via _clearDeviceTex().
// NOTE(review): the texture-reference API (cudaBindTexture) is deprecated in
// recent CUDA releases; migrating to texture objects would also touch the
// device-side tex1Dfetch helpers.
void
_initDeviceTex(OffsetCompType** d_matEntryArr, OffsetIjkType** d_ecOffsetArr, int8_t** d_coreCliqueArr, uint8_t** d_nbOffsIndexArr)
{
    const cudaChannelFormatDesc int8Desc = cudaCreateChannelDesc(8 * sizeof(OffsetCompType), 0, 0, 0, cudaChannelFormatKindSigned);
    // Fixed: this descriptor describes uint8_t channels, so size it from
    // uint8_t rather than OffsetCompType (same byte size, correct type).
    const cudaChannelFormatDesc uint8Desc = cudaCreateChannelDesc(8 * sizeof(uint8_t), 0, 0, 0, cudaChannelFormatKindUnsigned);
    const cudaChannelFormatDesc char4Desc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindSigned);
    // Transformation matrices.
    checkCudaErrors(cudaMalloc(d_matEntryArr, sizeof(OffsetCompType) * matEntryArrSize));
    checkCudaErrors(cudaMemcpy(*d_matEntryArr, h_matEntryArr, sizeof(OffsetCompType) * matEntryArrSize, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, matEntryTex, *d_matEntryArr, int8Desc, sizeof(OffsetCompType) * matEntryArrSize));
    // Essential-clique offsets.
    checkCudaErrors(cudaMalloc(d_ecOffsetArr, sizeof(OffsetIjkType) * EC_ARR_SIZE));
    checkCudaErrors(cudaMemcpy(*d_ecOffsetArr, h_ecOffsetArr, sizeof(OffsetIjkType) * EC_ARR_SIZE, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, ecOffsetTex, *d_ecOffsetArr, char4Desc, sizeof(OffsetIjkType) * EC_ARR_SIZE));
    // Core-clique tuples.
    checkCudaErrors(cudaMalloc(d_coreCliqueArr, sizeof(int8_t) * CORE_CLQ_ARR_SIZE));
    checkCudaErrors(cudaMemcpy(*d_coreCliqueArr, h_coreCliqueArr, sizeof(int8_t) * CORE_CLQ_ARR_SIZE, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, coreCliqueTex, *d_coreCliqueArr, int8Desc, sizeof(int8_t) * CORE_CLQ_ARR_SIZE));
    // Neighborhood-offset indices.
    checkCudaErrors(cudaMalloc(d_nbOffsIndexArr, sizeof(uint8_t) * NB_OFFS_IDX_ARR_SIZE));
    checkCudaErrors(cudaMemcpy(*d_nbOffsIndexArr, h_nbOffsIndexArr, sizeof(uint8_t) * NB_OFFS_IDX_ARR_SIZE, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, nbOffsIndexTex, *d_nbOffsIndexArr, uint8Desc, sizeof(uint8_t) * NB_OFFS_IDX_ARR_SIZE));
}
// Unbinds the GPU texture references and frees the device memory.
void
_clearDeviceTex(OffsetCompType* d_matEntryArr, OffsetIjkType* d_ecOffsetArr, int8_t* d_coreCliqueArr, uint8_t* d_nbOffsIndexArr)
{
    // Unbind before freeing so no texture reference is left pointing at a
    // freed allocation (the previous code freed without unbinding, despite
    // what its comment claimed).
    checkCudaErrors(cudaUnbindTexture(matEntryTex));
    checkCudaErrors(cudaUnbindTexture(ecOffsetTex));
    checkCudaErrors(cudaUnbindTexture(coreCliqueTex));
    checkCudaErrors(cudaUnbindTexture(nbOffsIndexTex));
    checkCudaErrors(cudaFree(d_matEntryArr));
    checkCudaErrors(cudaFree(d_ecOffsetArr));
    checkCudaErrors(cudaFree(d_coreCliqueArr));
    checkCudaErrors(cudaFree(d_nbOffsIndexArr));
}
// Thin device-side wrappers over tex1Dfetch, one per lookup-table texture.
//
// Fetch one essential-clique voxel offset.
__device__ OffsetIjkType _fetchEcOffset(uint8_t ecOffsetIdx)
{
return tex1Dfetch(ecOffsetTex, ecOffsetIdx);
}
// Fetch one core-clique table entry (-1 entries act as tuple separators).
__device__ int8_t _fetchCoreClique(uint8_t coreCliqueIdx)
{
return tex1Dfetch(coreCliqueTex, coreCliqueIdx);
}
// Fetch one neighborhood-offset index.
__device__ uint8_t _fetchNbOffsIndex(uint8_t nbOffsIndexIter)
{
return tex1Dfetch(nbOffsIndexTex, nbOffsIndexIter);
}
// Fetch one transformation-matrix entry.
__device__ OffsetCompType _fetchMatEntry(uint8_t matEntryIdx)
{
return tex1Dfetch(matEntryTex, matEntryIdx);
}
// Compute the linear transformation on @offs using the matrix whose
// entries in @matEntryTex start from @matTexBegin. The computed result is
// stored in @result.
// The matrix is stored row-major as 9 consecutive texture entries, so this
// is a plain 3x3 matrix-vector product.
__device__ void _transform(uint8_t matTexBegin, const OffsetIjkType& offs, OffsetIjkType& result)
{
result.x = _fetchMatEntry(matTexBegin + 0) * offs.x + _fetchMatEntry(matTexBegin + 1) * offs.y + _fetchMatEntry(matTexBegin + 2) * offs.z;
result.y = _fetchMatEntry(matTexBegin + 3) * offs.x + _fetchMatEntry(matTexBegin + 4) * offs.y + _fetchMatEntry(matTexBegin + 5) * offs.z;
result.z = _fetchMatEntry(matTexBegin + 6) * offs.x + _fetchMatEntry(matTexBegin + 7) * offs.y + _fetchMatEntry(matTexBegin + 8) * offs.z;
}
// Dim3CliquePolicy
// A 3-clique is a single voxel, so the essential-clique and core-clique
// ranges below are empty (begin == end) and its neighborhood spans all 26
// offsets.
__device__ uint8_t Dim3CliquePolicy::numFaceTokens()
{
return 1U;
}
__host__ __device__ uint8_t Dim3CliquePolicy::numEcVoxels()
{
return 1U;
}
__device__ uint8_t Dim3CliquePolicy::ecOffsetArrBegin()
{
return D3ecTexBegin;
}
__device__ uint8_t Dim3CliquePolicy::ecOffsetArrEnd()
{
return D3ecTexBegin + D3ecArrSize;
}
__device__ uint8_t Dim3CliquePolicy::coreCliqueArrBegin()
{
return D3coreTexBegin;
}
__device__ uint8_t Dim3CliquePolicy::coreCliqueArrEnd()
{
return D3coreTexBegin + D3coreArrSize;
}
__device__ uint8_t Dim3CliquePolicy::nbOffsIndexArrBegin()
{
return D3nbOffsIndexTexBegin;
}
__device__ uint8_t Dim3CliquePolicy::nbOffsIndexArrEnd()
{
return D3nbOffsIndexTexBegin + D3nbOffsIndexArrSize;
}
// Only the identity matrix (texture offset 0) applies to the single face
// token; any other token yields the 0xff "invalid" sentinel.
__device__ uint8_t Dim3CliquePolicy::matBeginByFaceToken(FaceTokenType faceToken)
{
return faceToken == 0 ? 0 : 0xff;
}
// Dim2CliquePolicy
// 2-cliques form at voxel faces; four face tokens are handled (X, Y, Z and
// the negative-Z direction).
__device__ uint8_t Dim2CliquePolicy::numFaceTokens()
{
// return 3U;
return 4U;
}
__host__ __device__ uint8_t Dim2CliquePolicy::numEcVoxels()
{
return 2U;
}
__device__ uint8_t Dim2CliquePolicy::ecOffsetArrBegin()
{
return D2ecTexBegin;
}
__device__ uint8_t Dim2CliquePolicy::ecOffsetArrEnd()
{
return D2ecTexBegin + D2ecArrSize;
}
__device__ uint8_t Dim2CliquePolicy::coreCliqueArrBegin()
{
return D2coreTexBegin;
}
__device__ uint8_t Dim2CliquePolicy::coreCliqueArrEnd()
{
return D2coreTexBegin + D2coreArrSize;
}
__device__ uint8_t Dim2CliquePolicy::nbOffsIndexArrBegin()
{
return D2nbOffsIndexTexBegin;
}
__device__ uint8_t Dim2CliquePolicy::nbOffsIndexArrEnd()
{
return D2nbOffsIndexTexBegin + D2nbOffsIndexArrSize;
}
// Maps a face token to the starting texture index of the transformation
// matrix that carries the default face's template onto that face (see
// h_matEntryArr); 0xff marks an invalid token.
__device__ uint8_t Dim2CliquePolicy::matBeginByFaceToken(FaceTokenType faceToken)
{
switch (faceToken)
{
case D2_FACE_X:
// default case is Face_X
return 0;
case D2_FACE_Y:
// rotate +90 degrees around Z axis
return 9U;
case D2_FACE_Z:
// rotate -90 degrees around Y axis
return 18U;
case D2_FACE_Z_NEG:
// rotate +90 degrees around Y axis
return 63U;
default:
return 0xff;
}
}
// Dim1CliquePolicy
// 1-cliques form at voxel edges; six edge tokens are handled.
__device__ uint8_t Dim1CliquePolicy::numFaceTokens()
{
return 6U;
}
__host__ __device__ uint8_t Dim1CliquePolicy::numEcVoxels()
{
return 4U;
}
__device__ uint8_t Dim1CliquePolicy::ecOffsetArrBegin()
{
return D1ecTexBegin;
}
__device__ uint8_t Dim1CliquePolicy::ecOffsetArrEnd()
{
return D1ecTexBegin + D1ecArrSize;
}
__device__ uint8_t Dim1CliquePolicy::coreCliqueArrBegin()
{
return D1coreTexBegin;
}
__device__ uint8_t Dim1CliquePolicy::coreCliqueArrEnd()
{
return D1coreTexBegin + D1coreArrSize;
}
__device__ uint8_t Dim1CliquePolicy::nbOffsIndexArrBegin()
{
return D1nbOffsIndexTexBegin;
}
__device__ uint8_t Dim1CliquePolicy::nbOffsIndexArrEnd()
{
return D1nbOffsIndexTexBegin + D1nbOffsIndexArrSize;
}
// Maps an edge token to the starting texture index of the transformation
// matrix that carries the default edge's template onto that edge (see
// h_matEntryArr); 0xff marks an invalid token.
__device__ uint8_t Dim1CliquePolicy::matBeginByFaceToken(FaceTokenType faceToken)
{
switch (faceToken)
{
case D1_EDGE_1:
// default case is edge 1
return 0;
case D1_EDGE_2:
// rotate +90 degrees around Z axis
return 9U;
case D1_EDGE_5:
// rotate -90 degrees around X axis
return 27U;
case D1_EDGE_6:
// rotate +90 degrees around X axis
return 36U;
case D1_EDGE_9:
// rotate 180 degrees around X axis
return 45U;
case D1_EDGE_10:
// rotate +90 degrees around Z axis first to obtain X'Y'Z',
// then rotate 180 degrees around X' axis
return 54U;
default:
return 0xff;
}
}
// Dim0CliquePolicy
// 0-cliques form at voxel vertices; four vertex tokens are handled.
__device__ uint8_t Dim0CliquePolicy::numFaceTokens()
{
return 4U;
}
__host__ __device__ uint8_t Dim0CliquePolicy::numEcVoxels()
{
return 8U;
}
__device__ uint8_t Dim0CliquePolicy::ecOffsetArrBegin()
{
return D0ecTexBegin;
}
__device__ uint8_t Dim0CliquePolicy::ecOffsetArrEnd()
{
return D0ecTexBegin + D0ecArrSize;
}
__device__ uint8_t Dim0CliquePolicy::coreCliqueArrBegin()
{
return D0coreTexBegin;
}
__device__ uint8_t Dim0CliquePolicy::coreCliqueArrEnd()
{
return D0coreTexBegin + D0coreArrSize;
}
__device__ uint8_t Dim0CliquePolicy::nbOffsIndexArrBegin()
{
return D0nbOffsIndexTexBegin;
}
__device__ uint8_t Dim0CliquePolicy::nbOffsIndexArrEnd()
{
return D0nbOffsIndexTexBegin + D0nbOffsIndexArrSize;
}
// Maps a vertex token to the starting texture index of the transformation
// matrix that carries the default vertex's template onto that vertex (see
// h_matEntryArr); 0xff marks an invalid token.
__device__ uint8_t Dim0CliquePolicy::matBeginByFaceToken(FaceTokenType faceToken)
{
switch (faceToken)
{
case D0_VERTEX_1:
// default case is vertex 1
return 0;
case D0_VERTEX_2:
// rotate +90 degrees around X axis
return 36U;
case D0_VERTEX_5:
// rotate -90 degrees around X axis
return 27U;
case D0_VERTEX_6:
// rotate 180 degrees around X axis
return 45U;
default:
return 0xff;
}
}
// Binary search of the 3D discrete coordinate, @targetIjk, over the
// half-open index range [@lo, @hi) of @compactIjkArr. Hand-rolled because
// few library facilities are available on the device side.
//
// [precondition] @compactIjkArr is sorted in ascending order.
__device__ ArrIndexType
_binSearch(const IjkType* compactIjkArr, const IjkType& targetIjk, ArrIndexType lo, ArrIndexType hi)
{
    while (lo < hi)
    {
        // Overflow-safe midpoint.
        const ArrIndexType mid = lo + ((hi - lo) >> 1);
        const IjkType& probe = compactIjkArr[mid];
        if (isEqual(probe, targetIjk))
        {
            return mid;
        }
        if (less(probe, targetIjk))
        {
            lo = mid + 1;
        }
        else
        {
            hi = mid;
        }
    }
    // Not found within the range.
    return INVALID_UINT;
}
// Locate the 3D discrete coordinate @targetIjk inside @compactIjkArr when a
// reference coordinate @refIjk and its index @refIndex are already known.
// The reference splits the array so only one half needs to be searched.
//
// [precondition] @compactIjkArr is sorted.
// [precondition] @compactIjkArr[@refIndex] == @refIjk
__device__ ArrIndexType
_findIndexOfIjk(const IjkType* compactIjkArr, const unsigned arrSize, const IjkType& targetIjk,
const IjkType& refIjk, const ArrIndexType refIndex)
{
    // Trivial case: the target is the reference voxel itself.
    if (isEqual(targetIjk, refIjk))
    {
        return refIndex;
    }
    if (less(targetIjk, refIjk))
    {
        // Target sorts before the reference: search [0, refIndex).
        return _binSearch(compactIjkArr, targetIjk, 0, refIndex);
    }
    // Target sorts after the reference: search (refIndex, arrSize).
    return _binSearch(compactIjkArr, targetIjk, refIndex + 1, arrSize);
}
/*
// Find the targetIjk 3D discrete coordinate in compactIjkArr, if the reference
// 3D coord refIjk and its index in compactIjkArr, refIndex, are Unknown. The
// function will have to search for the entire array.
// [precondition]: compactIjkArr is sorted
__device__ ArrIndexType
_findIndexOfIjk(const IjkType* compactIjkArr, const unsigned arrSize, const IjkType targetIjk)
{
return _binSearch(compactIjkArr, targetIjk, 0, arrSize);
}
*/
// Find if the target 3D discrete coordinate, @refIjk + @offs, exists in
// @compactIjkArr.
//
// [precondition] @compactIjkArr is sorted
// [precondition] @compactIjkArr[@refIndex] == @refIjk
// [postcondition] If returns is true, then @compactIjkArr[@foundIndex] ==
// @refIjk + @offs.
// NOTE(review): when the offset leaves the volume, @foundIndex is left
// unmodified — callers must only read it when the return value is true.
__device__ bool
_find(ArrIndexType& foundIndex, const IjkType* compactIjkArr, const unsigned arrSize, const IjkType& refIjk,
const ArrIndexType refIndex, const OffsetIjkType& offs, const IjkType& size3D)
{
IjkType targetIjk;
// Reject offsets that step outside the 3D volume; on success targetIjk
// receives refIjk + offs.
if (!tp::_isInBoundary(refIjk, offs, size3D, targetIjk))
{
return false;
}
foundIndex = _findIndexOfIjk(compactIjkArr, arrSize, targetIjk, refIjk, refIndex);
return foundIndex != INVALID_UINT;
}
/*
__device__ bool
_find(ArrIndexType& foundIndex, const IjkType* compactIjkArr, const unsigned arrSize, const IjkType& refIjk,
const ArrIndexType refIndex, const IjkType& size3D)
{
return _find(foundIndex, compactIjkArr, arrSize, refIjk, refIndex, makeOffsetIjk(0, 0, 0), size3D);
}
*/
// Like _find(), but with an extra filter: even when @refIjk + @offs exists
// in @compactIjkArr, the function only reports true if bit @nthBit of the
// matching entry in @recBitsArr is set.
__device__ bool
_findInRecBitsArr(ArrIndexType& foundIndex, const IjkType* compactIjkArr, const RecBitsType* recBitsArr,
    const uint8_t nthBit, const unsigned arrSize, const IjkType& refIjk, const ArrIndexType refIndex,
    const OffsetIjkType& offs, const IjkType& size3D)
{
    const bool located = _find(foundIndex, compactIjkArr, arrSize, refIjk, refIndex, offs, size3D);
    // Only a located voxel whose record bit is set counts as a hit.
    return located && (tp::_readBit(recBitsArr[foundIndex], nthBit) == 1);
}
// Convenience wrapper: search for @refIjk + @offs among the voxels whose
// "X" record bit (REC_BIT_X) is set.
__device__ bool
_findInX(ArrIndexType& foundIndex, const IjkType* compactIjkArr, const RecBitsType* recBitsArr,
    const unsigned arrSize, const IjkType& refIjk, const ArrIndexType refIndex,
    const OffsetIjkType& offs, const IjkType& size3D)
{
    using namespace details;
    return _findInRecBitsArr(foundIndex, compactIjkArr, recBitsArr, REC_BIT_X, arrSize,
        refIjk, refIndex, offs, size3D);
}
// DevDataPack overload of _findInX(). When per-voxel object IDs are in use,
// the located neighbor must additionally carry the same object ID as the
// reference voxel.
__device__ bool
_findInX(ArrIndexType& foundIndex, const details::DevDataPack& thinData, const IjkType& refIjk,
    const ArrIndexType refIndex, const OffsetIjkType& offs)
{
    if (!_findInX(foundIndex, thinData.compactIjkArr, thinData.recBitsArr, thinData.arrSize,
                  refIjk, refIndex, offs, thinData.size3D()))
    {
        return false;
    }
    // A coordinate match alone is insufficient when voxel IDs are enabled.
    return !thinData.useVoxelID()
        || (thinData.voxelIdArr[foundIndex] == thinData.voxelIdArr[refIndex]);
}
// Existence-only variant of _findInX(): the caller does not need the index
// of the neighbor, only whether it is present in X.
__device__ bool
_containsInX(const IjkType* compactIjkArr, const RecBitsType* recBitsArr,
    const unsigned arrSize, const IjkType& refIjk, const ArrIndexType refIndex,
    const OffsetIjkType& offs, const IjkType& size3D)
{
    ArrIndexType unusedIndex;
    return _findInX(unusedIndex, compactIjkArr, recBitsArr, arrSize, refIjk, refIndex, offs, size3D);
}
// DevDataPack overload of _containsInX(); discards the found index.
__device__ bool
_containsInX(const details::DevDataPack& thinData, const IjkType& refIjk, const ArrIndexType refIndex, const OffsetIjkType& offs)
{
    ArrIndexType unusedIndex;
    return _findInX(unusedIndex, thinData, refIjk, refIndex, offs);
}
// Offset @ijk by @offsIjk inside the 3x3x3 neighborhood. Each component of
// the sum must stay within [-1, 1]; a component that escapes is replaced by
// the 0xff sentinel in @resultIjk and the function returns false. Note the
// && below short-circuits, so later components may be left unwritten once
// an earlier one fails.
__device__ bool _isInNbBoundary(const OffsetIjkType& ijk, const OffsetIjkType& offsIjk, OffsetIjkType& resultIjk)
{
    auto checker = [](OffsetCompType coord, OffsetCompType offs, OffsetCompType& result)
    {
        result = coord + offs;
        const bool inRange = (-1 <= result) && (result <= 1);
        // Keep the sum when in range, otherwise store the 0xff sentinel.
        result = inRange ? result : static_cast<OffsetCompType>(0xff);
        return inRange;
    };
    return checker(ijk.x, offsIjk.x, resultIjk.x) &&
           checker(ijk.y, offsIjk.y, resultIjk.y) &&
           checker(ijk.z, offsIjk.z, resultIjk.z);
}
// Re-center a neighborhood mask: translate @cliqueNbMask, expressed relative
// to the clique center, into a mask expressed relative to the @nthNb-th
// neighbor. For every offset around that neighbor which still falls inside
// the center's 3x3x3 neighborhood, the corresponding bit of @cliqueNbMask is
// copied into the returned mask.
__device__ nb::NbMaskType _genNbMaskFromCliqueNbMask(nb::NbMaskType cliqueNbMask, uint8_t nthNb)
{
    nb::NbMaskType nbMask = 0;
    // 3D offset (from the center) of the neighbor we are re-centering on.
    OffsetIjkType curIjk = nb::fetchNbOffset(nthNb);
    for (uint8_t nbOffsetIdx = 0; nbOffsetIdx < NB_OFFS_ARR_SIZE; ++nbOffsetIdx)
    {
        OffsetIjkType offsIjk = nb::fetchNbOffset(nbOffsetIdx);
        OffsetIjkType targetIjk;
        // Skip offsets that would leave the center's 3x3x3 neighborhood.
        if (_isInNbBoundary(curIjk, offsIjk, targetIjk))
        {
            // Map the absolute neighbor position back to its bit index in
            // the center-relative mask.
            uint8_t targetNthBit = nb::fetchIndexOfNbOffset(targetIjk);
            if (tp::_readBit(cliqueNbMask, targetNthBit))
            {
                tp::_setBit(nbMask, nbOffsetIdx);
            }
        }
    }
    return nbMask;
}
// Kernel: for every voxel record, copy bit SRC into bit DST (DST := SRC).
// Launched over a possibly-2D grid of 1D blocks; threads past @arrSize
// return immediately.
__global__ void _assignKern(RecBitsType* recBitsArr, const unsigned arrSize, const uint8_t SRC, const uint8_t DST)
{
    const ArrIndexType flatBlock = blockIdx.y * gridDim.x + blockIdx.x;
    const ArrIndexType index = flatBlock * blockDim.x + threadIdx.x;
    if (index >= arrSize) return;
    // DST mirrors SRC: set it when SRC is set, clear it otherwise.
    if (tp::_readBit(recBitsArr[index], SRC))
    {
        tp::_setBit(recBitsArr[index], DST);
    }
    else
    {
        tp::_clearBit(recBitsArr[index], DST);
    }
}
// Kernel: bitwise union across two record arrays — bit DST of
// @dstRecBitsArr is set wherever bit SRC of @srcRecBitsArr is set;
// already-set DST bits are left untouched.
__global__ void _unionKern(RecBitsType* srcRecBitsArr, RecBitsType* dstRecBitsArr, const unsigned arrSize,
    const uint8_t SRC, const uint8_t DST)
{
    const ArrIndexType flatBlock = blockIdx.y * gridDim.x + blockIdx.x;
    const ArrIndexType index = flatBlock * blockDim.x + threadIdx.x;
    if (index >= arrSize) return;
    if (tp::_readBit(srcRecBitsArr[index], SRC))
    {
        tp::_setBit(dstRecBitsArr[index], DST);
    }
}
// Kernel: clear bit BIT in every record of @recBitsArr.
__global__ void _clearKern(RecBitsType* recBitsArr, const unsigned arrSize, const uint8_t BIT)
{
    const ArrIndexType flatBlock = blockIdx.y * gridDim.x + blockIdx.x;
    const ArrIndexType index = flatBlock * blockDim.x + threadIdx.x;
    if (index < arrSize)
    {
        tp::_clearBit(recBitsArr[index], BIT);
    }
}
// Unary predicate for thrust algorithms: true when a fixed bit of a record
// is set.
class _BitPred
{
public:
    __host__ __device__ _BitPred(uint8_t bit) : m_bit(bit) { }

    // Returns true iff bit @m_bit of @bits is set.
    __host__ __device__ bool operator()(const RecBitsType bits) const
    {
        return tp::_readBit(bits, m_bit) == 1;
    }

private:
    uint8_t m_bit; // which bit to test
};
// Count on the device how many records in @d_recBitsArr have bit BIT set.
unsigned _countBit(RecBitsType* d_recBitsArr, const unsigned arrSize, const uint8_t BIT)
{
    return thrust::count_if(thrust::device,
                            d_recBitsArr, d_recBitsArr + arrSize,
                            _BitPred(BIT));
}
// Kernel: write 1 into @flagArr for each voxel that belongs to X or K
// (REC_BIT_X or REC_BIT_K set), 0 otherwise.
__global__ void _flagActiveKern(ArrIndexType* flagArr, const RecBitsType* recBitsArr, const unsigned arrSize)
{
    using namespace details;
    ArrIndexType index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (index >= arrSize) return;
    const bool inX = tp::_readBit(recBitsArr[index], REC_BIT_X);
    const bool inK = tp::_readBit(recBitsArr[index], REC_BIT_K);
    flagArr[index] = inX || inK;
}
// Kernel: scatter the flagged entries of @srcArr into @dstArr. @flagArr
// marks the entries to keep and @flagScanArr (its exclusive scan) supplies
// each kept entry's destination slot in the compacted output.
template <typename T>
__global__ void
_compactArrsKern(T* dstArr, const T* srcArr, const ArrIndexType* flagArr, const ArrIndexType* flagScanArr, const unsigned arrSize)
{
    ArrIndexType index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (index >= arrSize) return;
    if (flagArr[index] != 0)
    {
        dstArr[flagScanArr[index]] = srcArr[index];
    }
}
// Kernel: record the first iteration at which a voxel enters Z. A voxel
// with REC_BIT_Z set whose birth is still unrecorded (0) is stamped with
// the current @iter; voxels born earlier keep their original stamp.
__global__ void _updateBirthKern(unsigned* birthArr, const RecBitsType* recBitsArr, const unsigned arrSize, const unsigned iter)
{
    using namespace details;
    ArrIndexType index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (index >= arrSize) return;
    const bool inZ = tp::_readBit(recBitsArr[index], REC_BIT_Z);
    if (inZ && (birthArr[index] == 0))
    {
        birthArr[index] = iter;
    }
}
// Kernel: promote into K (set REC_BIT_K) every voxel in Y whose recorded
// birth lies at least @p iterations in the past: iter + 1 - birth >= p.
// Voxels with no recorded birth (0) are never promoted.
__global__ void _unionKsetByBirth(RecBitsType* recBitsArr, const unsigned* birthArr, const unsigned arrSize, const unsigned iter, const unsigned p)
{
    using namespace details;
    ArrIndexType index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (index >= arrSize) return;
    const unsigned birth = birthArr[index];
    const bool inY = tp::_readBit(recBitsArr[index], REC_BIT_Y);
    if (inY && birth && (iter + 1U - birth >= p))
    {
        tp::_setBit(recBitsArr[index], REC_BIT_K);
    }
}
// Flag the voxels still active (in X or K) after a thinning iteration and
// compute the exclusive scan of those flags. @d_flagArr and @d_flagScanArr
// are device arrays of length @arrSize; the return value is the number of
// active voxels, i.e. the size of the compacted arrays.
unsigned _flagVoxelsInXorK(ArrIndexType* d_flagArr, ArrIndexType* d_flagScanArr, const RecBitsType* d_recBitsArr,
                    const unsigned arrSize, const dim3& blocksDim, const dim3& threadsDim)
{
    // Find out the active voxels in X or K after one iteration of thinning
    _flagActiveKern<<<blocksDim, threadsDim>>>(d_flagArr, d_recBitsArr, arrSize);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
    // Scan the flags array (exclusive: slot i holds the count of flags < i)
    thrust::exclusive_scan(thrust::device, d_flagArr, d_flagArr + arrSize, d_flagScanArr);
    unsigned lastFlagArrElem, lastFlagScanArrElem;
    // NOTE(review): these copies read sizeof(unsigned) bytes from arrays of
    // ArrIndexType — assumes ArrIndexType is the same size as unsigned;
    // confirm the typedef.
    checkCudaErrors(cudaMemcpy(&lastFlagArrElem, d_flagArr + arrSize - 1, sizeof(unsigned), cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy(&lastFlagScanArrElem, d_flagScanArr + arrSize - 1, sizeof(unsigned), cudaMemcpyDeviceToHost));
    // New array size will be flagArr[-1] + flagScanArr[-1], since flagScanArr is an exclusive scan.
    unsigned newArrSize = lastFlagArrElem + lastFlagScanArrElem;
    return newArrSize;
}
// unsigned _shrinkArrs(ThinningData& thinData, const dim3& blocksDim, const dim3& threadsDim)
// Compact every per-voxel array in @thinData so that only voxels still
// active (in X or K) remain. The original device arrays are freed and the
// pointers inside @thinData are redirected to the newly allocated, smaller
// arrays. Returns the new (compacted) array size.
unsigned _shrinkArrs(details::DevDataPack& thinData, const dim3& blocksDim, const dim3& threadsDim)
{
    // Per-voxel 0/1 activity flags.
    ArrIndexType* d_flagArr;
    checkCudaErrors(cudaMalloc(&d_flagArr, sizeof(ArrIndexType) * thinData.arrSize));
    checkCudaErrors(cudaMemset(d_flagArr, 0, sizeof(ArrIndexType) * thinData.arrSize));
    // Exclusive scan of the flags = destination slot of each kept voxel.
    ArrIndexType* d_flagScanArr;
    checkCudaErrors(cudaMalloc(&d_flagScanArr, sizeof(ArrIndexType) * thinData.arrSize));
    checkCudaErrors(cudaMemset(d_flagScanArr, 0, sizeof(ArrIndexType) * thinData.arrSize));
    unsigned newArrSize = _flagVoxelsInXorK(d_flagArr, d_flagScanArr, thinData.recBitsArr, thinData.arrSize, blocksDim, threadsDim);
    // Create new arrays to store the active voxels' information by
    // performing a scatter operation on the original arrays.
    IjkType* d_dstIjkArr;
    checkCudaErrors(cudaMalloc(&d_dstIjkArr, sizeof(IjkType) * newArrSize));
    _compactArrsKern<<<blocksDim, threadsDim>>>(d_dstIjkArr, thinData.compactIjkArr, d_flagArr, d_flagScanArr, thinData.arrSize);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
    // Free the unused device memory. Notice that the ORIGINAL voxel arrays
    // are being freed!
    checkCudaErrors(cudaFree(thinData.compactIjkArr));
    // Store the address of the device memory
    thinData.compactIjkArr = d_dstIjkArr;
    // Compact the record-bit array the same way.
    RecBitsType* d_dstRecBitsArr;
    checkCudaErrors(cudaMalloc(&d_dstRecBitsArr, sizeof(RecBitsType) * newArrSize));
    _compactArrsKern<<<blocksDim, threadsDim>>>(d_dstRecBitsArr, thinData.recBitsArr, d_flagArr, d_flagScanArr, thinData.arrSize);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaFree(thinData.recBitsArr));
    thinData.recBitsArr = d_dstRecBitsArr;
    // Optional arrays: only compacted when present/enabled.
    if (thinData.birthArr)
    {
        unsigned* d_dstBirthArr;
        checkCudaErrors(cudaMalloc(&d_dstBirthArr, sizeof(unsigned) * newArrSize));
        _compactArrsKern<<<blocksDim, threadsDim>>>(d_dstBirthArr, thinData.birthArr, d_flagArr, d_flagScanArr, thinData.arrSize);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());
        checkCudaErrors(cudaFree(thinData.birthArr));
        thinData.birthArr = d_dstBirthArr;
    }
    if (thinData.useVoxelID())
    {
        ObjIdType* d_dstVoxelIdArr;
        checkCudaErrors(cudaMalloc(&d_dstVoxelIdArr, sizeof(ObjIdType) * newArrSize));
        _compactArrsKern<<<blocksDim, threadsDim>>>(d_dstVoxelIdArr, thinData.voxelIdArr, d_flagArr, d_flagScanArr, thinData.arrSize);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());
        checkCudaErrors(cudaFree(thinData.voxelIdArr));
        thinData.voxelIdArr = d_dstVoxelIdArr;
    }
    // Release the temporary flag/scan arrays.
    checkCudaErrors(cudaFree(d_flagArr));
    checkCudaErrors(cudaFree(d_flagScanArr));
    return newArrSize;
}
}; // namespace thin::clique::_private;
// Upload the module's device-side lookup tables (material entries, EC
// offsets, core cliques, neighbor-offset indices) and stash the resulting
// device pointers in the DevArrPtrs singleton for later cleanup.
void initDevice()
{
    cp::DevArrPtrs* ptrs = cp::DevArrPtrs::instance();
    cp::_initDeviceTex(&(ptrs->d_matEntryArr), &(ptrs->d_ecOffsetArr), &(ptrs->d_coreCliqueArr), &(ptrs->d_nbOffsIndexArr));
}
// Release the device-side lookup tables previously uploaded by
// initDevice(), using the pointers recorded in the DevArrPtrs singleton.
void shutdownDevice()
{
    cp::DevArrPtrs* ptrs = cp::DevArrPtrs::instance();
    cp::_clearDeviceTex(ptrs->d_matEntryArr, ptrs->d_ecOffsetArr, ptrs->d_coreCliqueArr, ptrs->d_nbOffsIndexArr);
}
// Compute the cliques that are crucial for the pair <X, K>, scanning clique
// dimensions from 3 down to 0. First Y is set to K and Z is cleared; then,
// before each dimension pass and once at the end, the A and B scratch bit
// sets are zeroed. All steps are individually timed via TIMER_END.
void crucialIsthmus(details::DevDataPack& thinData, const dim3& blocksDim, const dim3& threadsDim)
{
    using namespace details;
    // The clear-A/B step is identical before/after every pass; factor it out.
    auto clearABSets = [&]()
    {
        auto TIMER = std::chrono::high_resolution_clock::now();
        checkCudaErrors(cudaMemset(thinData.A_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
        checkCudaErrors(cudaMemset(thinData.B_recBitsArr, 0, sizeof(RecBitsType) * thinData.arrSize));
        TIMER_END(">>>> crucialIsthmus::cudaMemset(clear A and B set)", TIMER);
    };
    // Y := K
    auto TIMER = std::chrono::high_resolution_clock::now();
    cp::_assignKern<<<blocksDim, threadsDim>>>(thinData.recBitsArr, thinData.arrSize, REC_BIT_K, REC_BIT_Y);
    TIMER_END(">>>> crucialIsthmus::_assignKern()", TIMER);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
    // Z := empty
    TIMER = std::chrono::high_resolution_clock::now();
    cp::_clearKern<<<blocksDim, threadsDim>>>(thinData.recBitsArr, thinData.arrSize, REC_BIT_Z);
    TIMER_END(">>>> crucialIsthmus::_clearKern()", TIMER);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
    clearABSets();
    // Find 3-cliques that are crucial for <X, K>
    TIMER = std::chrono::high_resolution_clock::now();
    dimCrucialIsthmus<D3CliqueChecker>(thinData, blocksDim, threadsDim);
    TIMER_END(">>>> crucialIsthmus::D3CliqueChecker(find 3-cliques that are crucial for <X, K>)", TIMER);
    clearABSets();
    // Find 2-cliques that are crucial for <X, K>
    TIMER = std::chrono::high_resolution_clock::now();
    dimCrucialIsthmus<D2CliqueChecker>(thinData, blocksDim, threadsDim);
    TIMER_END(">>>> crucialIsthmus::D2CliqueChecker(Find 2-cliques that are crucial for <X, K>)", TIMER);
    clearABSets();
    // Find 1-cliques that are crucial for <X, K>
    TIMER = std::chrono::high_resolution_clock::now();
    dimCrucialIsthmus<D1CliqueChecker>(thinData, blocksDim, threadsDim);
    TIMER_END(">>>> crucialIsthmus::D1CliqueChecker(Find 1-cliques that are crucial for <X, K>)", TIMER);
    clearABSets();
    // Find 0-cliques that are crucial for <X, K>
    TIMER = std::chrono::high_resolution_clock::now();
    dimCrucialIsthmus<D0CliqueChecker>(thinData, blocksDim, threadsDim);
    TIMER_END(">>>> crucialIsthmus::D0CliqueChecker(Find 0-cliques that are crucial for <X, K>)", TIMER);
    clearABSets();
}
}; // namespace thin::clique;
}; // namespace thin;
|
746fd0cf9e66eb7990b93c4e875bf6d2e5e89cec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file proposal_v2.cu
* \brief Proposal Operator for SNIP
* \author Shaoqing Ren, Jian Guo, Pengfei Chen, Yuntao Chen, Yanghao Li
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include "../tensor/sort_op.h"
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include <fstream>
#include <iterator>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./proposal_v2-inl.h"
// Ceiling integer division: number of n-sized chunks needed to cover m.
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Check a CUDA/HIP runtime call; on failure the glog CHECK aborts with the
// runtime's error string.
#define FRCNN_CUDA_CHECK(condition) \
  /* Code block avoids redefinition of hipError_t error */ \
  do { \
    hipError_t error = condition; \
    CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
  } while (0)
namespace mshadow {
namespace cuda {
namespace {
// Expand the per-anchor base proposals onto the feature-map grid.
// scores are (b, anchor, h, w); proposals are (h * w * anchor, 5).
// w defines "x" and h defines "y"; count = h * w * num_anchors.
// Note the first num_anchors rows of @proposals hold the base anchors and
// are read while the grid is written (in-place for index == a, h == w == 0).
template<typename Dtype>
__global__ void ProposalGridKernel(const int count,
                                   const int num_anchors,
                                   const int height,
                                   const int width,
                                   const int feature_stride,
                                   const Dtype* scores,
                                   Dtype* proposals) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x) {
    const int a = index % num_anchors;
    const int w = (index / num_anchors) % width;
    const int h = index / num_anchors / width;
    Dtype* out = proposals + index * 5;
    const Dtype* base = proposals + a * 5;
    // Shift the base anchor box to its (h, w) grid position.
    out[0] = base[0] + w * feature_stride;
    out[1] = base[1] + h * feature_stride;
    out[2] = base[2] + w * feature_stride;
    out[3] = base[3] + h * feature_stride;
    // Foreground-ness = 1 - background probability.
    out[4] = Dtype(1.0f) - scores[(a * height + h) * width + w];
  }
}
// Decode RPN bbox-regression deltas into absolute proposal boxes.
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
// Boxes on padded feature-map rows/cols (h >= real_height or
// w >= real_width) get score -1 so they are filtered downstream.
template<typename Dtype>
__global__ void BBoxPredKernel(const int count,
                               const int num_anchors,
                               const int feat_height,
                               const int feat_width,
                               const int real_height,
                               const int real_width,
                               const float im_height,
                               const float im_width,
                               const Dtype* boxes,
                               const Dtype* deltas,
                               Dtype* out_pred_boxes) {
  // Grid-stride loop over all anchors.
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x) {
    int a = index % num_anchors;
    int w = (index / num_anchors) % feat_width;
    int h = index / num_anchors / feat_width;
    // Anchor box in center/size form (+1 pixel-inclusive convention).
    float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
    float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
    float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
    float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
    // Regression deltas for this anchor at this grid cell.
    float dx = deltas[((a * 4) * feat_height + h) * feat_width + w];
    float dy = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
    float dw = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
    float dh = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
    // Standard R-CNN decoding: shift center by (dx, dy) * size, scale size
    // by exp(dw), exp(dh).
    float pred_ctr_x = dx * width + ctr_x;
    float pred_ctr_y = dy * height + ctr_y;
    float pred_w = exp(dw) * width;
    float pred_h = exp(dh) * height;
    float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
    float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
    float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
    float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
    // Clip to the image boundary.
    pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
    pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
    pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
    pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
    out_pred_boxes[index * 5 + 0] = pred_x1;
    out_pred_boxes[index * 5 + 1] = pred_y1;
    out_pred_boxes[index * 5 + 2] = pred_x2;
    out_pred_boxes[index * 5 + 3] = pred_y2;
    if (h >= real_height || w >= real_width) {
      out_pred_boxes[index * 5 + 4] = -1.0f;
    }
  }
}
// Decode corner-wise deltas (IoU-loss variant): each delta is an additive
// offset on the corresponding box corner rather than a center/size encoding.
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void IoUPredKernel(const int count,
                              const int num_anchors,
                              const int feat_height,
                              const int feat_width,
                              const int real_height,
                              const int real_width,
                              const float im_height,
                              const float im_width,
                              const Dtype* boxes,
                              const Dtype* deltas,
                              Dtype* out_pred_boxes) {
  // Grid-stride loop over all anchors.
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x) {
    int a = index % num_anchors;
    int w = (index / num_anchors) % feat_width;
    int h = index / num_anchors / feat_width;
    float x1 = boxes[index * 5 + 0];
    float y1 = boxes[index * 5 + 1];
    float x2 = boxes[index * 5 + 2];
    float y2 = boxes[index * 5 + 3];
    // Per-corner offsets for this anchor at this grid cell.
    float dx1 = deltas[((a * 4) * feat_height + h) * feat_width + w];
    float dy1 = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
    float dx2 = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
    float dy2 = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
    // Apply the offsets and clip to the image boundary.
    float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
    float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
    float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
    float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
    out_pred_boxes[index * 5 + 0] = pred_x1;
    out_pred_boxes[index * 5 + 1] = pred_y1;
    out_pred_boxes[index * 5 + 2] = pred_x2;
    out_pred_boxes[index * 5 + 3] = pred_y2;
    // Padded feature-map positions are marked invalid.
    if (h >= real_height || w >= real_width) {
      out_pred_boxes[index * 5 + 4] = -1.0f;
    }
  }
}
// Filter proposals by size and (optionally) by area range.
// filter: set score to -1 (rejected)
// dets (n, 5)
// A box whose width or height is below @min_size is inflated by min_size/2
// on each side and rejected; when @filter_scale is set, boxes whose area
// lies outside [@valid_min, @valid_max] are rejected too.
template<typename Dtype>
__global__ void FilterBoxKernel(const int count,
                                const float min_size,
                                Dtype* dets,
                                const bool filter_scale,
                                const float valid_min,
                                const float valid_max) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < count;
       index += blockDim.x * gridDim.x) {
    // +1 pixel-inclusive width/height convention.
    float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f;
    float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f;
    if (iw < min_size || ih < min_size) {
      // Inflate the too-small box and mark it rejected.
      dets[index * 5 + 0] -= min_size / 2;
      dets[index * 5 + 1] -= min_size / 2;
      dets[index * 5 + 2] += min_size / 2;
      dets[index * 5 + 3] += min_size / 2;
      dets[index * 5 + 4] = -1.0f;
    }else if (filter_scale && (iw * ih < valid_min || iw * ih > valid_max)) {
      // Area outside the valid range for this image scale.
      dets[index * 5 + 4] = -1.0f;
    }
  }
}
// Copy each detection's score into a dense array and initialize the index
// order as the identity permutation (to be sorted by score later).
// dets (n, 5); score (n, ); order (n, ); count should be n.
template<typename Dtype>
__global__ void CopyScoreKernel(const int count,
                                const Dtype* dets,
                                Dtype* score,
                                int* order) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += stride) {
    score[i] = dets[i * 5 + 4];
    order[i] = i;
  }
}
// Gather the top_n proposals according to the sorted index list.
// prev_dets (n, 5); order (n, ); dets (top_n, 5); count should be top_n.
template<typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
                                       const Dtype* prev_dets,
                                       const int* order,
                                       Dtype* dets) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += stride) {
    const Dtype* src = prev_dets + order[i] * 5;
    Dtype* dst = dets + i * 5;
    for (int j = 0; j < 5; ++j) {
      dst[j] = src[j];
    }
  }
}
// Intersection-over-union of two boxes stored as [x1, y1, x2, y2, ...],
// using the +1 pixel-inclusive width/height convention.
__device__ inline float devIoU(float const * const a, float const * const b) {
  const float ix1 = max(a[0], b[0]);
  const float iy1 = max(a[1], b[1]);
  const float ix2 = min(a[2], b[2]);
  const float iy2 = min(a[3], b[3]);
  const float iw = max(ix2 - ix1 + 1, 0.f);
  const float ih = max(iy2 - iy1 + 1, 0.f);
  const float inter = iw * ih;
  const float areaA = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  const float areaB = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return inter / (areaA + areaB - inter);
}
// Pairwise-suppression kernel for NMS. threadsPerBlock is 64 (the bit width
// of uint64_t); each block compares one 64-box "row" chunk against one
// 64-box "column" chunk. Thread t handles row box row_start*64 + t and
// writes a 64-bit mask into dev_mask whose bit i is set when column box i
// overlaps it by more than nms_overlap_thresh. dev_boxes is (n_boxes, 5);
// dev_mask is (n_boxes, DIVUP(n_boxes, 64)).
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
                           const float *dev_boxes, uint64_t *dev_mask) {
  const int threadsPerBlock = sizeof(uint64_t) * 8;
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  // if (row_start > col_start) return;
  // Chunk sizes, truncated at the tail of the box list.
  const int row_size =
        min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
        min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
  // Stage the column chunk's boxes in shared memory (one box per thread).
  __shared__ float block_boxes[threadsPerBlock * 5];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 5 + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
    block_boxes[threadIdx.x * 5 + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
    block_boxes[threadIdx.x * 5 + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
    block_boxes[threadIdx.x * 5 + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
    block_boxes[threadIdx.x * 5 + 4] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
  }
  // Barrier between the shared-memory write above and the reads below.
  __syncthreads();
  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 5;
    int i = 0;
    uint64_t t = 0;
    int start = 0;
    // On the diagonal chunk, only compare against later (lower-scored) boxes.
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Greedy non-maximum suppression: device kernel builds pairwise overlap
// bitmasks, then the host walks boxes in score order keeping each box not
// yet suppressed by a previously kept one.
// @param boxes               (pre_nms_top_n, 5) proposals, sorted by
//                            descending score.
// @param nms_overlap_thresh  IoU above which a lower-scored box is removed.
// @param keep                out: indices of the surviving boxes.
// @param num_out             out: number of surviving boxes.
// @param mask_dev            device scratch, boxes_num * col_blocks uint64s.
// @param mask_host           host scratch of the same size.
void _nms(const mshadow::Tensor<gpu, 2>& boxes,
          const float nms_overlap_thresh,
          int *keep,
          int *num_out,
          uint64_t *mask_dev,
          uint64_t *mask_host) {
  const int threadsPerBlock = sizeof(uint64_t) * 8;
  const int boxes_num = boxes.size(0);
  float* boxes_dev = boxes.dptr_;
  const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
  // One block per (row chunk, column chunk) pair of 64 boxes.
  dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
              DIVUP(boxes_num, threadsPerBlock));
  dim3 threads(threadsPerBlock);
  hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
                                  nms_overlap_thresh,
                                  boxes_dev,
                                  mask_dev);
  FRCNN_CUDA_CHECK(hipPeekAtLastError());
  // TODO: need to be rewritten
  FRCNN_CUDA_CHECK(hipMemcpy(mask_host,
                             mask_dev,
                             sizeof(uint64_t) * boxes_num * col_blocks,
                             hipMemcpyDeviceToHost));
  // remv accumulates, per 64-box chunk, the set of boxes suppressed so far.
  std::vector<uint64_t> remv(col_blocks, 0);
  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;
    if (!(remv[nblock] & (1ULL << inblock))) {
      // Box i survives; fold its suppression mask into the removal set.
      keep[num_to_keep++] = i;
      uint64_t *p = mask_host + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }
  *num_out = num_to_keep;
}
// Write the kept proposals to the output tensors, zero-padding the slots
// beyond the number of proposals that survived NMS.
// dets (top_n, 5); keep (top_n, ); out (count, 4); score (count, ).
// count should be top_n (total anchors or proposals).
template<typename Dtype>
__global__ void PrepareOutput(const int count,
                              const Dtype* dets,
                              const int* keep,
                              const int out_size,
                              Dtype* out,
                              Dtype* score) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += stride) {
    Dtype* box = out + i * 4;
    if (i < out_size) {
      const Dtype* det = dets + keep[i] * 5;
      for (int j = 0; j < 4; ++j) {
        box[j] = det[j];
      }
      score[i] = det[4];
    } else {
      // Pad beyond the kept proposals with zero boxes and zero scores.
      for (int j = 0; j < 4; ++j) {
        box[j] = 0.0f;
      }
      score[i] = 0;
    }
  }
}
} // namespace
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
template<typename xpu>
class ProposalGPUOp_v2 : public Operator{
public:
explicit ProposalGPUOp_v2(ProposalParam_v2 param) {
this->param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
CHECK_EQ(in_data.size(), 4);
CHECK_EQ(out_data.size(), 2);
CHECK_GT(req.size(), 1);
// CHECK_EQ(req[proposal_v2::kOut], kWriteTo);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> scores = in_data[proposal_v2::kClsProb].get<xpu, 4, float>(s); // batch_idx, anchor_idx, height_idx, width_idx
Tensor<xpu, 4> bbox_deltas = in_data[proposal_v2::kBBoxPred].get<xpu, 4, float>(s); // batch_idx, height_idx, width_idx, anchor_idx
Tensor<xpu, 2> im_info = in_data[proposal_v2::kImInfo].get<xpu, 2, float>(s); // batch_idx, 3(height, width, scale)
Tensor<xpu, 2> valid_ranges = in_data[proposal_v2::kValidRanges].get<xpu, 2, float>(s); //batch_idx, 2(min_scale, max_scale)
Tensor<xpu, 3> out = out_data[proposal_v2::kOut].get<xpu, 3, float>(s); // batch_idx, rois_idx, 4(x1, y1, x2, y2), batch_idx is needed after flatten
Tensor<xpu, 3> out_score = out_data[proposal_v2::kScore].get<xpu, 3, float>(s); // batch_idx, rois_idx, 1(score)
uint64_t WORKSPACE_LIMIT = 1024 * 1024 * param_.workspace; // 256 MB should be sufficient
Tensor<xpu, 1, uint8_t> workspace = ctx.requested[proposal_v2::kTempSpace].get_space_typed<xpu, 1, uint8_t>(Shape1(WORKSPACE_LIMIT), s);
uint64_t allocated_bytes = 0ULL;
uint64_t allocated_bytes_outside_loop = 0ULL;
int nbatch = scores.size(0);
int num_anchors = scores.size(1) / param_.num_class;
int height = scores.size(2);
int width = scores.size(3);
int count = num_anchors * height * width; // count of total anchors
// set to -1 for max
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count;
rpn_pre_nms_top_n = ::min(rpn_pre_nms_top_n, count);
int rpn_post_nms_top_n = ::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);
// Generate first anchors based on base anchor
std::vector<float> base_anchor(4);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
CHECK_EQ(num_anchors, param_.ratios.info.size() * param_.scales.info.size());
std::vector<float> anchors;
proposal_v2_utils::GenerateAnchors(base_anchor,
param_.ratios.info,
param_.scales.info,
&anchors);
// Copy generated anchors to GPU
Tensor<xpu, 3> proposals(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape3(nbatch, count, 5));
allocated_bytes += nbatch * count * 5 * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
// im_info is small, we want to copy them to cpu
std::vector<float> cpu_im_info(nbatch * 3);
FRCNN_CUDA_CHECK(hipMemcpy(cpu_im_info.data(),
im_info.dptr_,
sizeof(float) * cpu_im_info.size(),
hipMemcpyDeviceToHost)); // less than 64K
// copy valid_ranges to cpu
std::vector<float> cpu_valid_ranges(nbatch * 2);
FRCNN_CUDA_CHECK(hipMemcpy(cpu_valid_ranges.data(),
valid_ranges.dptr_,
sizeof(float) * cpu_valid_ranges.size(),
hipMemcpyDeviceToHost)); // less than 64K
allocated_bytes_outside_loop = allocated_bytes;
/* copy anchors for all images in batch */
for (int i = 0; i < nbatch; i++) {
// prevent padded predictions
int real_height = static_cast<int>(cpu_im_info[i*3 + 0] / param_.feature_stride);
int real_width = static_cast<int>(cpu_im_info[i*3 + 1] / param_.feature_stride);
CHECK_GE(height, real_height) << height << " " << real_height << std::endl;
CHECK_GE(width, real_width) << width << " " << real_width << std::endl;
float* batch_proposals = proposals.dptr_ + i * 5 * count;
FRCNN_CUDA_CHECK(hipMemcpy(batch_proposals,
&anchors[0],
sizeof(float) * anchors.size(),
hipMemcpyHostToDevice)); // less than 64K
/* get current batch background score */
float *bg_scores_ptr = reinterpret_cast<float *>(scores.dptr_) + i * param_.num_class * count;
/* copy proposals to a mesh grid */
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
hipLaunchKernelGGL(( ProposalGridKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, param_.feature_stride,
bg_scores_ptr, batch_proposals);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
/* transform anchors and bbox_deltas into bboxes */
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
hipLaunchKernelGGL(( IoUPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[i * 3 + 0], cpu_im_info[i * 3 + 1],
batch_proposals, bbox_deltas.dptr_ + i * 4 * count, batch_proposals);
} else {
hipLaunchKernelGGL(( BBoxPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[i * 3 + 0], cpu_im_info[i * 3 + 1],
batch_proposals, bbox_deltas.dptr_ + i * 4 * count, batch_proposals);
}
FRCNN_CUDA_CHECK(hipPeekAtLastError());
/* filter boxes with less than rpn_min_size */
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
hipLaunchKernelGGL(( FilterBoxKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, param_.rpn_min_size * cpu_im_info[i * 3 + 2], batch_proposals,
param_.filter_scales, cpu_valid_ranges[i * 2 + 0] * cpu_valid_ranges[i * 2 + 0],
cpu_valid_ranges[i * 2 + 1] * cpu_valid_ranges[i * 2 + 1]);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
/* copy score to a continuous memory */
Tensor<xpu, 1> score(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape1(count));
allocated_bytes += count * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
Tensor<xpu, 1, int> order(reinterpret_cast<int *>(workspace.dptr_ + allocated_bytes), Shape1(count));
allocated_bytes += count * sizeof(int);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
hipLaunchKernelGGL(( CopyScoreKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, batch_proposals, score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
/* argsort score, save order */
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<float>());
FRCNN_CUDA_CHECK(hipPeekAtLastError());
/* Reorder proposals according to order */
Tensor<xpu, 2> ordered_proposals(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape2(rpn_pre_nms_top_n, 5));
allocated_bytes += rpn_pre_nms_top_n * 5 * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
hipLaunchKernelGGL(( ReorderProposalsKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
rpn_pre_nms_top_n, batch_proposals, order.dptr_, ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
/* perform nms */
std::vector<int> _keep(rpn_pre_nms_top_n);
int out_size = 0;
const int boxes_num = rpn_pre_nms_top_n;
const int col_blocks = DIVUP(boxes_num, sizeof(uint64_t) * 8);
// take special care when allocate memory of 8-byte alignment.
allocated_bytes += allocated_bytes % sizeof(uint64_t);
Tensor<xpu, 1, uint64_t> mask_tensor(reinterpret_cast<uint64_t *>(workspace.dptr_ + allocated_bytes), Shape1(boxes_num * col_blocks));
allocated_bytes += boxes_num * col_blocks * sizeof(uint64_t);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
// the following line does not need change since it the only place where requires host workspace
Tensor<cpu, 1, uint64_t> mask_host_tensor = ctx.requested[proposal_v2::kTempSpace].get_host_space_typed<1, uint64_t>(Shape1(boxes_num * col_blocks));
uint64_t *mask_dev = mask_tensor.dptr_;
uint64_t *mask_host = mask_host_tensor.dptr_;
_nms(ordered_proposals,
param_.threshold,
&_keep[0],
&out_size,
mask_dev,
mask_host);
/* copy nms result to gpu */
Tensor<xpu, 1, int> keep(reinterpret_cast<int *>(workspace.dptr_ + allocated_bytes), Shape1(_keep.size()));
allocated_bytes += _keep.size() * sizeof(int);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
FRCNN_CUDA_CHECK(hipMemcpy(keep.dptr_,
&_keep[0],
sizeof(int) * _keep.size(),
hipMemcpyHostToDevice)); // less than 64K
/* copy results after nms */
dimGrid.x = (rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
hipLaunchKernelGGL(( PrepareOutput), dim3(dimGrid), dim3(dimBlock), 0, 0,
rpn_post_nms_top_n, ordered_proposals.dptr_, keep.dptr_, out_size,
out.dptr_ + i * 4 * rpn_post_nms_top_n,
out_score.dptr_ + i * rpn_post_nms_top_n);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// recycle all bytes allocated within loop
allocated_bytes = allocated_bytes_outside_loop;
}
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 4);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[proposal_v2::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[proposal_v2::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[proposal_v2::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> granges = in_grad[proposal_v2::kValidRanges].get<xpu, 2, real_t>(s);
// can not assume the grad would be zero
Assign(gscores, req[proposal_v2::kClsProb], 0);
Assign(gbbox, req[proposal_v2::kBBoxPred], 0);
Assign(ginfo, req[proposal_v2::kImInfo], 0);
Assign(granges, req[proposal_v2::kValidRanges], 0);
}
private:
ProposalParam_v2 param_;
}; // class ProposalGPUOp_v2
template<>
Operator* CreateOp<gpu>(ProposalParam_v2 param) {
return new ProposalGPUOp_v2<gpu>(param);
}
} // namespace op
} // namespace mxnet
| 746fd0cf9e66eb7990b93c4e875bf6d2e5e89cec.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file proposal_v2.cu
* \brief Proposal Operator for SNIP
* \author Shaoqing Ren, Jian Guo, Pengfei Chen, Yuntao Chen, Yanghao Li
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include "../tensor/sort_op.h"
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include <fstream>
#include <iterator>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./proposal_v2-inl.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
namespace {
// scores are (b, anchor, h, w)
// proposals are (h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be total anchors numbers, h * w * anchors
template<typename Dtype>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int height,
const int width,
const int feature_stride,
const Dtype* scores,
Dtype* proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = index / num_anchors / width;
proposals[index * 5 + 0] = proposals[a * 5 + 0] + w * feature_stride;
proposals[index * 5 + 1] = proposals[a * 5 + 1] + h * feature_stride;
proposals[index * 5 + 2] = proposals[a * 5 + 2] + w * feature_stride;
proposals[index * 5 + 3] = proposals[a * 5 + 3] + h * feature_stride;
proposals[index * 5 + 4] = Dtype(1.0f) - scores[(a * height + h) * width + w];
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
float dx = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dw = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dh = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float x1 = boxes[index * 5 + 0];
float y1 = boxes[index * 5 + 1];
float x2 = boxes[index * 5 + 2];
float y2 = boxes[index * 5 + 3];
float dx1 = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy1 = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dx2 = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dy2 = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// filter box with stride less than rpn_min_size
// filter: set score to zero
// dets (n, 5)
template<typename Dtype>
__global__ void FilterBoxKernel(const int count,
const float min_size,
Dtype* dets,
const bool filter_scale,
const float valid_min,
const float valid_max) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f;
float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f;
if (iw < min_size || ih < min_size) {
dets[index * 5 + 0] -= min_size / 2;
dets[index * 5 + 1] -= min_size / 2;
dets[index * 5 + 2] += min_size / 2;
dets[index * 5 + 3] += min_size / 2;
dets[index * 5 + 4] = -1.0f;
}else if (filter_scale && (iw * ih < valid_min || iw * ih > valid_max)) {
dets[index * 5 + 4] = -1.0f;
}
}
}
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template<typename Dtype>
__global__ void CopyScoreKernel(const int count,
const Dtype* dets,
Dtype* score,
int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be output anchor numbers (top_n)
template<typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
const Dtype* prev_dets,
const int* order,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j ++) {
dets[index * 5 + j] = prev_dets[order_i * 5 + j];
}
}
}
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, uint64_t *dev_mask) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _nms(const mshadow::Tensor<gpu, 2>& boxes,
const float nms_overlap_thresh,
int *keep,
int *num_out,
uint64_t *mask_dev,
uint64_t *mask_host) {
/*
@input boxes: (pre_nms_top_n, 5)
@return keep
@return num_out
@tmp mask_dev
@tmp mask_host
*/
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int boxes_num = boxes.size(0);
const int boxes_dim = boxes.size(1);
float* boxes_dev = boxes.dptr_;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// TODO: need to be rewritten
FRCNN_CUDA_CHECK(cudaMemcpy(mask_host,
mask_dev,
sizeof(uint64_t) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep[num_to_keep++] = i;
uint64_t *p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
}
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, )
// count should be top_n (total anchors or proposals)
template<typename Dtype>
__global__ void PrepareOutput(const int count,
const Dtype* dets,
const int* keep,
const int out_size,
Dtype* out,
Dtype* score) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
if (index < out_size) {
int keep_i = keep[index];
for (int j = 0; j < 4; ++j) {
out[index * 4 + j] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
} else {
//int keep_i = keep[index % out_size];
for (int j = 0; j < 4; ++j) {
out[index * 4 + j] = 0.0f;
}
score[index] = 0;
}
}
}
} // namespace
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
template<typename xpu>
class ProposalGPUOp_v2 : public Operator{
public:
explicit ProposalGPUOp_v2(ProposalParam_v2 param) {
this->param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
CHECK_EQ(in_data.size(), 4);
CHECK_EQ(out_data.size(), 2);
CHECK_GT(req.size(), 1);
// CHECK_EQ(req[proposal_v2::kOut], kWriteTo);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> scores = in_data[proposal_v2::kClsProb].get<xpu, 4, float>(s); // batch_idx, anchor_idx, height_idx, width_idx
Tensor<xpu, 4> bbox_deltas = in_data[proposal_v2::kBBoxPred].get<xpu, 4, float>(s); // batch_idx, height_idx, width_idx, anchor_idx
Tensor<xpu, 2> im_info = in_data[proposal_v2::kImInfo].get<xpu, 2, float>(s); // batch_idx, 3(height, width, scale)
Tensor<xpu, 2> valid_ranges = in_data[proposal_v2::kValidRanges].get<xpu, 2, float>(s); //batch_idx, 2(min_scale, max_scale)
Tensor<xpu, 3> out = out_data[proposal_v2::kOut].get<xpu, 3, float>(s); // batch_idx, rois_idx, 4(x1, y1, x2, y2), batch_idx is needed after flatten
Tensor<xpu, 3> out_score = out_data[proposal_v2::kScore].get<xpu, 3, float>(s); // batch_idx, rois_idx, 1(score)
uint64_t WORKSPACE_LIMIT = 1024 * 1024 * param_.workspace; // 256 MB should be sufficient
Tensor<xpu, 1, uint8_t> workspace = ctx.requested[proposal_v2::kTempSpace].get_space_typed<xpu, 1, uint8_t>(Shape1(WORKSPACE_LIMIT), s);
uint64_t allocated_bytes = 0ULL;
uint64_t allocated_bytes_outside_loop = 0ULL;
int nbatch = scores.size(0);
int num_anchors = scores.size(1) / param_.num_class;
int height = scores.size(2);
int width = scores.size(3);
int count = num_anchors * height * width; // count of total anchors
// set to -1 for max
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count;
rpn_pre_nms_top_n = std::min(rpn_pre_nms_top_n, count);
int rpn_post_nms_top_n = std::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);
// Generate first anchors based on base anchor
std::vector<float> base_anchor(4);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
CHECK_EQ(num_anchors, param_.ratios.info.size() * param_.scales.info.size());
std::vector<float> anchors;
proposal_v2_utils::GenerateAnchors(base_anchor,
param_.ratios.info,
param_.scales.info,
&anchors);
// Copy generated anchors to GPU
Tensor<xpu, 3> proposals(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape3(nbatch, count, 5));
allocated_bytes += nbatch * count * 5 * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
// im_info is small, we want to copy them to cpu
std::vector<float> cpu_im_info(nbatch * 3);
FRCNN_CUDA_CHECK(cudaMemcpy(cpu_im_info.data(),
im_info.dptr_,
sizeof(float) * cpu_im_info.size(),
cudaMemcpyDeviceToHost)); // less than 64K
// copy valid_ranges to cpu
std::vector<float> cpu_valid_ranges(nbatch * 2);
FRCNN_CUDA_CHECK(cudaMemcpy(cpu_valid_ranges.data(),
valid_ranges.dptr_,
sizeof(float) * cpu_valid_ranges.size(),
cudaMemcpyDeviceToHost)); // less than 64K
allocated_bytes_outside_loop = allocated_bytes;
/* copy anchors for all images in batch */
for (int i = 0; i < nbatch; i++) {
// prevent padded predictions
int real_height = static_cast<int>(cpu_im_info[i*3 + 0] / param_.feature_stride);
int real_width = static_cast<int>(cpu_im_info[i*3 + 1] / param_.feature_stride);
CHECK_GE(height, real_height) << height << " " << real_height << std::endl;
CHECK_GE(width, real_width) << width << " " << real_width << std::endl;
float* batch_proposals = proposals.dptr_ + i * 5 * count;
FRCNN_CUDA_CHECK(cudaMemcpy(batch_proposals,
&anchors[0],
sizeof(float) * anchors.size(),
cudaMemcpyHostToDevice)); // less than 64K
/* get current batch background score */
float *bg_scores_ptr = reinterpret_cast<float *>(scores.dptr_) + i * param_.num_class * count;
/* copy proposals to a mesh grid */
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
ProposalGridKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, param_.feature_stride,
bg_scores_ptr, batch_proposals);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
/* transform anchors and bbox_deltas into bboxes */
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
IoUPredKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[i * 3 + 0], cpu_im_info[i * 3 + 1],
batch_proposals, bbox_deltas.dptr_ + i * 4 * count, batch_proposals);
} else {
BBoxPredKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[i * 3 + 0], cpu_im_info[i * 3 + 1],
batch_proposals, bbox_deltas.dptr_ + i * 4 * count, batch_proposals);
}
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
/* filter boxes with less than rpn_min_size */
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
FilterBoxKernel<<<dimGrid, dimBlock>>>(
count, param_.rpn_min_size * cpu_im_info[i * 3 + 2], batch_proposals,
param_.filter_scales, cpu_valid_ranges[i * 2 + 0] * cpu_valid_ranges[i * 2 + 0],
cpu_valid_ranges[i * 2 + 1] * cpu_valid_ranges[i * 2 + 1]);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
/* copy score to a continuous memory */
Tensor<xpu, 1> score(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape1(count));
allocated_bytes += count * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
Tensor<xpu, 1, int> order(reinterpret_cast<int *>(workspace.dptr_ + allocated_bytes), Shape1(count));
allocated_bytes += count * sizeof(int);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
CopyScoreKernel<<<dimGrid, dimBlock>>>(
count, batch_proposals, score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
/* argsort score, save order */
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<float>());
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
/* Reorder proposals according to order */
Tensor<xpu, 2> ordered_proposals(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape2(rpn_pre_nms_top_n, 5));
allocated_bytes += rpn_pre_nms_top_n * 5 * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
ReorderProposalsKernel<<<dimGrid, dimBlock>>>(
rpn_pre_nms_top_n, batch_proposals, order.dptr_, ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
/* perform nms */
std::vector<int> _keep(rpn_pre_nms_top_n);
int out_size = 0;
const int boxes_num = rpn_pre_nms_top_n;
const int col_blocks = DIVUP(boxes_num, sizeof(uint64_t) * 8);
// take special care when allocate memory of 8-byte alignment.
allocated_bytes += allocated_bytes % sizeof(uint64_t);
Tensor<xpu, 1, uint64_t> mask_tensor(reinterpret_cast<uint64_t *>(workspace.dptr_ + allocated_bytes), Shape1(boxes_num * col_blocks));
allocated_bytes += boxes_num * col_blocks * sizeof(uint64_t);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
// the following line does not need change since it the only place where requires host workspace
Tensor<cpu, 1, uint64_t> mask_host_tensor = ctx.requested[proposal_v2::kTempSpace].get_host_space_typed<1, uint64_t>(Shape1(boxes_num * col_blocks));
uint64_t *mask_dev = mask_tensor.dptr_;
uint64_t *mask_host = mask_host_tensor.dptr_;
_nms(ordered_proposals,
param_.threshold,
&_keep[0],
&out_size,
mask_dev,
mask_host);
/* copy nms result to gpu */
Tensor<xpu, 1, int> keep(reinterpret_cast<int *>(workspace.dptr_ + allocated_bytes), Shape1(_keep.size()));
allocated_bytes += _keep.size() * sizeof(int);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
FRCNN_CUDA_CHECK(cudaMemcpy(keep.dptr_,
&_keep[0],
sizeof(int) * _keep.size(),
cudaMemcpyHostToDevice)); // less than 64K
/* copy results after nms */
dimGrid.x = (rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
PrepareOutput<<<dimGrid, dimBlock>>>(
rpn_post_nms_top_n, ordered_proposals.dptr_, keep.dptr_, out_size,
out.dptr_ + i * 4 * rpn_post_nms_top_n,
out_score.dptr_ + i * rpn_post_nms_top_n);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// recycle all bytes allocated within loop
allocated_bytes = allocated_bytes_outside_loop;
}
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 4);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[proposal_v2::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[proposal_v2::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[proposal_v2::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> granges = in_grad[proposal_v2::kValidRanges].get<xpu, 2, real_t>(s);
// can not assume the grad would be zero
Assign(gscores, req[proposal_v2::kClsProb], 0);
Assign(gbbox, req[proposal_v2::kBBoxPred], 0);
Assign(ginfo, req[proposal_v2::kImInfo], 0);
Assign(granges, req[proposal_v2::kValidRanges], 0);
}
private:
ProposalParam_v2 param_;
}; // class ProposalGPUOp_v2
template<>
Operator* CreateOp<gpu>(ProposalParam_v2 param) {
return new ProposalGPUOp_v2<gpu>(param);
}
} // namespace op
} // namespace mxnet
|
9a127a47df129579c5908b955ed4efafb16c546d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <limits>
#include "saber/funcs/impl/cuda/saber_softmax.h"
namespace anakin{
namespace saber{
//! general kernel for softmax
template <typename dtype>
__global__ void softmax_max_kernel(int total_size, const dtype* in_data, dtype* out_data, \
dtype min_data, int inner_num, int outer_num, int axis_size){
//! compute data index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total_size) {
int idx_inner = idx % inner_num;
int idx_outer = (idx / inner_num) * axis_size;
int real_index = idx_outer * inner_num + idx_inner;
//! get maximum data across softmax axis
dtype max_data = min_data;
for (int i = 0; i < axis_size; ++i) {
max_data = in_data[real_index] > max_data? in_data[real_index] : max_data;
real_index += inner_num;
}
out_data[idx] = max_data;
}
}
template <typename dtype>
__global__ void softmax_max_roi_kernel(int total_size, const dtype* in_data, \
dtype* out_data, dtype min_data, \
const int* input_stride_real, const int* output_stride_real, const int* shape_valid, \
int softmax_axis, int axis_size, int dims){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total_size) {
//! compute real data index
int input_real_index = 0;
for (int i = dims - 1; i >= 0; i--) {
if (i == softmax_axis) {
continue;
} else {
int x = idx % shape_valid[i];
input_real_index += x * input_stride_real[i];
idx = idx / shape_valid[i];
}
}
//! get maximum data across softmax axis
dtype max_data = min_data;
for (int i = 0; i < axis_size; ++i) {
max_data = in_data[input_real_index] > max_data? \
in_data[input_real_index] : max_data;
input_real_index += i * input_stride_real[softmax_axis];
}
out_data[idx] = max_data;
}
}
template <typename dtype>
__global__ void softmax_sub_exp_sum_kernel(int total_size, const dtype* in_data, \
dtype* out_data, const dtype* max_data, dtype* sum_data, \
int inner_num, int outer_num, int axis_size){
//! compute data index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total_size) {
int idx_inner = idx % inner_num;
int idx_outer = (idx / inner_num) * axis_size;
dtype max_data_cur = max_data[idx];
//dtype *sum_data_cur = &sum_data[idx];
dtype sum_data_cur = 0;
int real_index = idx_outer * inner_num + idx_inner;
//! compute exp and summarize across the softmax axis
for (int i = 0; i < axis_size; ++i) {
dtype sub_data = in_data[real_index] - max_data_cur;
sub_data = expf(sub_data);
sum_data_cur += sub_data;
out_data[real_index] = sub_data;
real_index += inner_num;
}
sum_data[idx] = sum_data_cur;
}
}
//! Step 2 of the 3-pass softmax for non-contiguous (ROI) tensors: decompose
//! the linear thread id into per-dimension coordinates (skipping the softmax
//! axis), then compute exp(x - max) along the axis and the per-channel sum.
//! NOTE(review): the original code reads in_data with the OUTPUT strides;
//! this is preserved — verify input/output strides always match for ROI.
template <typename dtype>
__global__ void softmax_sub_exp_sum_roi_kernel(int total_size, \
    const dtype* in_data, dtype* out_data, \
    const dtype* max_data, dtype* sum_data, \
    const int* input_stride_real, const int* output_stride_real, const int* shape_valid, \
    int softmax_axis, int axis_size, int dims){
    //! compute data index
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < total_size) {
        //! compute real data index; idx is consumed digit-by-digit below
        int idx = tid;
        int output_real_index = 0;
        for (int i = dims - 1; i >= 0; i--) {
            if (i == softmax_axis) {
                continue;
            } else {
                int x = idx % shape_valid[i];
                output_real_index += x * output_stride_real[i];
                idx = idx / shape_valid[i];
            }
        }
        //! BUGFIX: index the per-channel max/sum buffers with the original
        //! thread id; the old code used idx, which the loop above has already
        //! reduced to 0 (total_size is the product of the non-axis dims), so
        //! every thread read max_data[0] and raced writing sum_data[0].
        dtype max_data_cur = max_data[tid];
        dtype sum_data_cur = 0;
        //! compute exp and summarize across the softmax axis
        for (int i = 0; i < axis_size; ++i) {
            dtype sub_data = in_data[output_real_index] - max_data_cur;
            sub_data = expf(sub_data);
            sum_data_cur += sub_data;
            out_data[output_real_index] = sub_data;
            output_real_index += output_stride_real[softmax_axis];
        }
        sum_data[tid] = sum_data_cur;
    }
}
//! Step 3 of the 3-pass softmax (contiguous layout): divide each intermediate
//! exp value (stored in io_data by the step-2 kernel) by its channel sum.
//! One thread walks one softmax channel, striding by inner_num.
template <typename dtype>
__global__ void softmax_divid_output_kernel(int total_size, dtype* io_data, \
    const dtype* sum_data, int inner_num, int outer_num, int axis_size){
    //! compute data index
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < total_size) {
        int idx_inner = idx % inner_num;
        int idx_outer = (idx / inner_num) * axis_size;
        dtype sum_data_cur = sum_data[idx];
        int real_index = idx_outer * inner_num + idx_inner;
        //! compute final result
        for (int i = 0; i < axis_size; ++i) {
            io_data[real_index] = io_data[real_index] / sum_data_cur;
            real_index += inner_num;
        }
    }
}
//! Step 3 of the 3-pass ROI softmax: divide every intermediate exp value
//! (already in io_data) by its channel sum, following the real output
//! strides. One thread per non-axis position.
template <typename dtype>
__global__ void softmax_divid_output_roi_kernel(int total_size, \
    dtype* io_data, const dtype* sum_data, \
    const int* input_stride_real, const int* output_stride_real, const int* shape_valid, \
    int softmax_axis, int axis_size, int dims){
    //! compute data index
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < total_size) {
        //! compute real data index; idx is consumed digit-by-digit below
        int idx = tid;
        int output_real_index = 0;
        for (int i = dims - 1; i >= 0; i--) {
            if (i == softmax_axis) {
                continue;
            } else {
                int x = idx % shape_valid[i];
                output_real_index += x * output_stride_real[i];
                idx = idx / shape_valid[i];
            }
        }
        //! BUGFIX: read the channel sum with the original thread id; the old
        //! code used idx, which the loop above has already reduced to 0, so
        //! every thread divided by sum_data[0].
        dtype sum_data_cur = sum_data[tid];
        //! compute final result
        for (int i = 0; i < axis_size; ++i) {
            io_data[output_real_index] = io_data[output_real_index] / sum_data_cur;
            output_real_index += output_stride_real[softmax_axis];
        }
    }
}
extern __shared__ char tile[];
//! Fused single-pass softmax (contiguous layout) for channels that fit in
//! shared memory. Each thread stages its whole channel into the dynamic
//! shared buffer `tile` (launch must pass axis_size * blockDim.x *
//! sizeof(dtype) bytes), then does max / exp-sum / divide locally.
//! Thread t owns the strided column tile[t + i * blockDim.x]; no
//! cross-thread sharing, hence no __syncthreads is needed.
template <typename dtype>
__global__ void sharemem_softmax_kernel(int total_size, \
    const dtype* in_data, dtype* out_data, \
    int inner_num, int outer_num, int axis_size){
    //__shared__ dtype data[MAX_AXIS_SIZE][CUDA_NUM_THREADS];
    dtype* data = (dtype*)tile + threadIdx.x;
    //! compute thread index and real data index
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < total_size) {
        int idx_inner = idx % inner_num;
        int idx_outer = (idx / inner_num) * axis_size;
        int blocksize = blockDim.x;
        int real_index = idx_outer * inner_num + idx_inner;
        int loop_idx = real_index;
        //! read all data to sharemem in softmax channel
        #pragma unroll
        for (int i = 0; i < axis_size; ++i) {
            data[i * blocksize] = in_data[loop_idx];
            loop_idx += inner_num;
        }
        //! get maximum value in softmax channel
        dtype max_data = data[0];
        #pragma unroll
        for (int i = 1; i < axis_size; ++i) {
            dtype dt = data[i * blocksize];
            if (max_data < dt){
                max_data = dt;
            }
        }
        //! subtract then summarize
        dtype sum = 0;
        #pragma unroll
        for (int i = 0; i < axis_size; ++i) {
            //dtype *dt = &data[i][thread_idx];
            dtype *dt = data + i * blocksize;
            *dt = expf(*dt - max_data);
            sum += *dt;
        }
        //! write back result
        loop_idx = real_index;
        #pragma unroll
        for (int i = 0; i < axis_size; ++i) {
            out_data[loop_idx] = data[i * blocksize] / sum;
            loop_idx += inner_num;
        }
    }
}
//! Fused single-pass softmax for non-contiguous (ROI) tensors, used when the
//! channel fits in shared memory. The linear thread id (saved in idx1) is
//! decomposed into per-dimension coordinates (skipping the softmax axis) to
//! obtain the strided input/output base offsets; the channel is then staged
//! into the dynamic shared buffer `tile` and reduced per-thread as in the
//! contiguous variant (thread t owns tile[t + i * blockDim.x]).
template <typename dtype>
__global__ void sharemem_softmax_roi_kernel(int total_size, \
    const dtype* in_data, dtype* out_data, \
    const int* input_stride_real, const int* output_stride_real, const int* shape_valid, \
    int softmax_axis, int axis_size, int dims){
    //__shared__ dtype data[MAX_AXIS_SIZE][CUDA_NUM_THREADS];
    dtype* data = (dtype*)tile + threadIdx.x;
    //! compute thread index and real data index
    int idx1 = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = idx1;
    if (idx < total_size) {
        int blocksize = blockDim.x;
        //! compute real data index (idx is consumed by the divisions below)
        int input_real_index = 0;
        int output_real_index = 0;
        for (int i = dims - 1; i >= 0; i--) {
            if (i == softmax_axis) {
                continue;
            } else {
                int x = idx % shape_valid[i];
                input_real_index += x * input_stride_real[i];
                output_real_index += x * output_stride_real[i];
                idx = idx / shape_valid[i];
            }
        }
        //! read all data to sharemem in softmax channel
        #pragma unroll
        for (int i = 0; i < axis_size; ++i) {
            data[i * blocksize] = in_data[input_real_index];
            input_real_index += input_stride_real[softmax_axis];
        }
        //! get maximum value in softmax channel
        dtype max_data = data[0];
        #pragma unroll
        for (int i = 1; i < axis_size; ++i) {
            dtype dt = data[i * blocksize];
            if (max_data < dt){
                max_data = dt;
            }
        }
        //! subtract then summarize
        dtype sum = 0;
        #pragma unroll
        for (int i = 0; i < axis_size; ++i) {
            //dtype *dt = &data[i][thread_idx];
            dtype *dt = data + i * blocksize;
            *dt = expf(*dt - max_data);
            sum += *dt;
        }
        //! write back result
        #pragma unroll
        for (int i = 0; i < axis_size; ++i) {
            out_data[output_real_index] = data[i * blocksize] / sum;
            output_real_index += output_stride_real[softmax_axis];
        }
    }
}
//! Cache the softmax geometry: _outer_num/_inner_num/_axis_size split the
//! shape around param.axis; _max_dimsize is how many per-thread floats fit
//! in one block's shared memory; for the non-contiguous (ROI) case the valid
//! shape and real input/output strides are uploaded to device buffers.
template <>
SaberStatus SaberSoftmax<NV, AK_FLOAT>::create(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    SoftmaxParam<NV>& param, Context<NV>& ctx) {
    //! compute size
    Shape shape_in = inputs[0]->valid_shape();
    Shape shape_out = outputs[0]->valid_shape();
    CHECK_EQ(shape_in == shape_out, true) << "valid shapes must be the same";
    _outer_num = inputs[0]->count_valid(0, param.axis);
    _inner_num = inputs[0]->count_valid(param.axis + 1, inputs[0]->dims());
    _axis_size = shape_in[param.axis];
    hipDeviceProp_t deviceProp;
    // NOTE(review): the return status of hipGetDeviceProperties is ignored.
    hipGetDeviceProperties(&deviceProp, API::get_device_id());
    size_t sharedmem_size = deviceProp.sharedMemPerBlock;
    //! max channel length the fused shared-memory kernel can handle
    _max_dimsize = sharedmem_size / sizeof(float) / CUDA_NUM_THREADS;
    Shape sh_tmp({1, 1, 1, _outer_num * _inner_num});
    if (_axis_size > _max_dimsize){
        //! re_alloc device memory (scratch for the 3-pass fallback path)
        _max_data.reshape(sh_tmp);
        _sum_data.reshape(sh_tmp);
    }
    //! CHECK whether the input or output tensor is with continuous buffer or not
    _is_continue_buf = outputs[0]->is_continue_mem() && inputs[0]->is_continue_mem();
    _dims = shape_in.size();
    if (!_is_continue_buf) {
        Shape sh_input_real_stride = inputs[0]->get_stride();
        Shape sh_output_real_stride = outputs[0]->get_stride();
        //! re_alloc device memory and upload shape/stride tables for the ROI kernels
        Shape sh({1, 1, 1, _dims});
        _valid_shape.reshape(sh);
        _input_stride.reshape(sh);
        _output_stride.reshape(sh);
        CUDA_CHECK(hipMemcpy(_valid_shape.mutable_data(), inputs[0]->valid_shape().data(), \
            sizeof(int) * _dims, hipMemcpyHostToDevice));
        CUDA_CHECK(hipMemcpy(_input_stride.mutable_data(), sh_input_real_stride.data(), \
            sizeof(int) * _dims, hipMemcpyHostToDevice));
        CUDA_CHECK(hipMemcpy(_output_stride.mutable_data(), sh_output_real_stride.data(), \
            sizeof(int) * _dims, hipMemcpyHostToDevice));
    }
    return SaberSuccess;
}
//! One-time initialisation: remember the execution context, then delegate
//! all sizing/allocation work to create() (which is also re-run on reshape).
template <>
SaberStatus SaberSoftmax<NV, AK_FLOAT>::init(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    SoftmaxParam<NV>& param, Context<NV>& ctx) {
    this->_ctx = &ctx;
    return this->create(inputs, outputs, param, ctx);
}
//! Forward softmax. Chooses one of four paths:
//!  - contiguous buffers, channel fits in shared memory: fused kernel
//!  - contiguous buffers, channel too large: 3-pass max / exp-sum / divide
//!  - non-contiguous (ROI) buffers: strided variants of the same two choices
template <>
SaberStatus SaberSoftmax<NV, AK_FLOAT>::dispatch(\
    const std::vector<Tensor<NV> *>& inputs, \
    std::vector<Tensor<NV> *>& outputs, \
    SoftmaxParam<NV>& param) {
    hipStream_t stream = this->_ctx->get_compute_stream();
    //! inputs only has one tensor; one thread per softmax channel
    int total_threads = this->_inner_num * this->_outer_num;
    const float* data_in = (const float* )inputs[0]->data();
    float* data_out = (float*)outputs[0]->mutable_data();
    float* max_data = (float*)this->_max_data.mutable_data();
    float* sum_data = (float*)this->_sum_data.mutable_data();
    const int* valid_shape = (const int*)_valid_shape.data();
    const int* input_stride = (const int*)_input_stride.data();
    const int* output_stride = (const int*)_output_stride.data();
    if (_is_continue_buf) {
        //! softmax kernel without roi
        if (this->_axis_size <= _max_dimsize){
            int sharemem_size = this->_axis_size * CUDA_NUM_THREADS * sizeof(float);
            hipLaunchKernelGGL(( sharemem_softmax_kernel<float>)\
                , dim3(CUDA_GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS), sharemem_size, stream,
                    total_threads, data_in, data_out,
                    this->_inner_num, this->_outer_num, this->_axis_size);
        } else {
            //! firstly, get maximum data
            //! BUGFIX: seed the running maximum with lowest() (most negative
            //! float); min() is the smallest POSITIVE normal float and gives
            //! a wrong maximum when all inputs are negative.
            float min_data = std::numeric_limits<float>::lowest();
            hipLaunchKernelGGL(( softmax_max_kernel<float>)\
                , dim3(CUDA_GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS), 0, stream,
                    total_threads, data_in, max_data, min_data, \
                    this->_inner_num, this->_outer_num, this->_axis_size);
            //! then, compute exp and sum data
            hipLaunchKernelGGL(( softmax_sub_exp_sum_kernel<float>)
                , dim3(CUDA_GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS), 0, stream,
                    total_threads, data_in, data_out, max_data, sum_data, \
                    this->_inner_num, this->_outer_num, this->_axis_size);
            //! lastly, compute divided output
            hipLaunchKernelGGL(( softmax_divid_output_kernel<float>)\
                , dim3(CUDA_GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS), 0, stream,
                    total_threads, data_out, sum_data, \
                    this->_inner_num, this->_outer_num, this->_axis_size);
        }
    } else {
        //! softmax kernel with roi
        if (this->_axis_size <= _max_dimsize){
            int sharemem_size = this->_axis_size * CUDA_NUM_THREADS * sizeof(float);
            hipLaunchKernelGGL(( sharemem_softmax_roi_kernel<float>)\
                , dim3(CUDA_GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS), sharemem_size, stream,
                    total_threads, data_in, data_out,
                    input_stride, output_stride, valid_shape, \
                    param.axis, _axis_size, _dims);
        } else {
            //! firstly, get maximum data
            //! BUGFIX: same min() -> lowest() fix as the contiguous path.
            float min_data = std::numeric_limits<float>::lowest();
            hipLaunchKernelGGL(( softmax_max_roi_kernel<float>)\
                , dim3(CUDA_GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS), 0, stream,
                    total_threads, data_in, max_data, min_data, \
                    input_stride, output_stride, valid_shape, \
                    param.axis, _axis_size, _dims);
            //! then, compute exp and sum data
            hipLaunchKernelGGL(( softmax_sub_exp_sum_roi_kernel<float>)
                , dim3(CUDA_GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS), 0, stream,
                    total_threads, data_in, data_out, max_data, sum_data, \
                    input_stride, output_stride, valid_shape, \
                    param.axis, _axis_size, _dims);
            //! lastly, compute divided output
            hipLaunchKernelGGL(( softmax_divid_output_roi_kernel<float>)\
                , dim3(CUDA_GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS), 0, stream,
                    total_threads, data_out, sum_data, \
                    input_stride, output_stride, valid_shape, \
                    param.axis, _axis_size, _dims);
        }
    }
    return SaberSuccess;
}
// ============================================= int8
//! INT8 specializations are placeholders: create() allocates nothing and
//! dispatch() is a no-op that reports success. Kept so the template
//! instantiations below link.
template <>
SaberStatus SaberSoftmax<NV, AK_INT8>::create(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    SoftmaxParam<NV>& param, Context<NV>& ctx) {
    return SaberSuccess;
}
template <>
SaberStatus SaberSoftmax<NV, AK_INT8>::init(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    SoftmaxParam<NV>& param, Context<NV>& ctx) {
    this->_ctx = &ctx;
    return create(inputs, outputs, param, ctx);
}
template <>
SaberStatus SaberSoftmax<NV, AK_INT8>::dispatch(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    SoftmaxParam<NV>& param) {
    return SaberSuccess;
}
template class SaberSoftmax<NV, AK_FLOAT>;
template class SaberSoftmax<NV, AK_INT8>;
DEFINE_OP_TEMPLATE(SaberSoftmax, SoftmaxParam, NV, AK_HALF);
} //namespace saber
} //namespace anakin
| 9a127a47df129579c5908b955ed4efafb16c546d.cu | #include <limits>
#include "saber/funcs/impl/cuda/saber_softmax.h"
namespace anakin{
namespace saber{
//! general kernel for softmax
template <typename dtype>
__global__ void softmax_max_kernel(int total_size, const dtype* in_data, dtype* out_data, \
dtype min_data, int inner_num, int outer_num, int axis_size){
//! compute data index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total_size) {
int idx_inner = idx % inner_num;
int idx_outer = (idx / inner_num) * axis_size;
int real_index = idx_outer * inner_num + idx_inner;
//! get maximum data across softmax axis
dtype max_data = min_data;
for (int i = 0; i < axis_size; ++i) {
max_data = in_data[real_index] > max_data? in_data[real_index] : max_data;
real_index += inner_num;
}
out_data[idx] = max_data;
}
}
//! Step 1 of the 3-pass ROI softmax: per non-axis position, scan the softmax
//! channel (stride input_stride_real[softmax_axis]) and write its maximum to
//! out_data. min_data seeds the running maximum.
template <typename dtype>
__global__ void softmax_max_roi_kernel(int total_size, const dtype* in_data, \
    dtype* out_data, dtype min_data, \
    const int* input_stride_real, const int* output_stride_real, const int* shape_valid, \
    int softmax_axis, int axis_size, int dims){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < total_size) {
        //! compute real data index; idx is consumed digit-by-digit below
        int idx = tid;
        int input_real_index = 0;
        for (int i = dims - 1; i >= 0; i--) {
            if (i == softmax_axis) {
                continue;
            } else {
                int x = idx % shape_valid[i];
                input_real_index += x * input_stride_real[i];
                idx = idx / shape_valid[i];
            }
        }
        //! get maximum data across softmax axis
        dtype max_data = min_data;
        for (int i = 0; i < axis_size; ++i) {
            max_data = in_data[input_real_index] > max_data? \
                in_data[input_real_index] : max_data;
            //! BUGFIX: advance exactly one channel stride per iteration; the
            //! old code added i * stride, skipping and duplicating elements
            //! (cf. the matching roi exp-sum kernel, which advances by one
            //! stride per step).
            input_real_index += input_stride_real[softmax_axis];
        }
        //! BUGFIX: write with the original thread id; the decomposition loop
        //! has reduced idx to 0, so all threads raced writing out_data[0].
        out_data[tid] = max_data;
    }
}
//! Step 2 of the 3-pass softmax (contiguous layout): for each of the
//! outer_num*inner_num softmax channels, subtract the precomputed channel
//! maximum, exponentiate, store the intermediate exp values in out_data and
//! accumulate their sum into sum_data[idx]. One thread walks one channel of
//! length axis_size, striding by inner_num between consecutive elements.
template <typename dtype>
__global__ void softmax_sub_exp_sum_kernel(int total_size, const dtype* in_data, \
    dtype* out_data, const dtype* max_data, dtype* sum_data, \
    int inner_num, int outer_num, int axis_size){
    //! compute data index
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < total_size) {
        int idx_inner = idx % inner_num;
        int idx_outer = (idx / inner_num) * axis_size;
        dtype max_data_cur = max_data[idx];
        //dtype *sum_data_cur = &sum_data[idx];
        dtype sum_data_cur = 0;
        int real_index = idx_outer * inner_num + idx_inner;
        //! compute exp and summarize across the softmax axis
        for (int i = 0; i < axis_size; ++i) {
            // NOTE(review): expf is the float intrinsic; for dtype=double this
            // would lose precision — only instantiated with float in this file.
            dtype sub_data = in_data[real_index] - max_data_cur;
            sub_data = expf(sub_data);
            sum_data_cur += sub_data;
            out_data[real_index] = sub_data;
            real_index += inner_num;
        }
        sum_data[idx] = sum_data_cur;
    }
}
//! Step 2 of the 3-pass softmax for non-contiguous (ROI) tensors: decompose
//! the linear thread id into per-dimension coordinates (skipping the softmax
//! axis), then compute exp(x - max) along the axis and the per-channel sum.
//! NOTE(review): the original code reads in_data with the OUTPUT strides;
//! this is preserved — verify input/output strides always match for ROI.
template <typename dtype>
__global__ void softmax_sub_exp_sum_roi_kernel(int total_size, \
    const dtype* in_data, dtype* out_data, \
    const dtype* max_data, dtype* sum_data, \
    const int* input_stride_real, const int* output_stride_real, const int* shape_valid, \
    int softmax_axis, int axis_size, int dims){
    //! compute data index
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < total_size) {
        //! compute real data index; idx is consumed digit-by-digit below
        int idx = tid;
        int output_real_index = 0;
        for (int i = dims - 1; i >= 0; i--) {
            if (i == softmax_axis) {
                continue;
            } else {
                int x = idx % shape_valid[i];
                output_real_index += x * output_stride_real[i];
                idx = idx / shape_valid[i];
            }
        }
        //! BUGFIX: index the per-channel max/sum buffers with the original
        //! thread id; the old code used idx, which the loop above has already
        //! reduced to 0 (total_size is the product of the non-axis dims), so
        //! every thread read max_data[0] and raced writing sum_data[0].
        dtype max_data_cur = max_data[tid];
        dtype sum_data_cur = 0;
        //! compute exp and summarize across the softmax axis
        for (int i = 0; i < axis_size; ++i) {
            dtype sub_data = in_data[output_real_index] - max_data_cur;
            sub_data = expf(sub_data);
            sum_data_cur += sub_data;
            out_data[output_real_index] = sub_data;
            output_real_index += output_stride_real[softmax_axis];
        }
        sum_data[tid] = sum_data_cur;
    }
}
//! Step 3 of the 3-pass softmax (contiguous layout): divide each intermediate
//! exp value (stored in io_data by the step-2 kernel) by its channel sum.
//! One thread walks one softmax channel, striding by inner_num.
template <typename dtype>
__global__ void softmax_divid_output_kernel(int total_size, dtype* io_data, \
    const dtype* sum_data, int inner_num, int outer_num, int axis_size){
    //! compute data index
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < total_size) {
        int idx_inner = idx % inner_num;
        int idx_outer = (idx / inner_num) * axis_size;
        dtype sum_data_cur = sum_data[idx];
        int real_index = idx_outer * inner_num + idx_inner;
        //! compute final result
        for (int i = 0; i < axis_size; ++i) {
            io_data[real_index] = io_data[real_index] / sum_data_cur;
            real_index += inner_num;
        }
    }
}
//! Step 3 of the 3-pass ROI softmax: divide every intermediate exp value
//! (already in io_data) by its channel sum, following the real output
//! strides. One thread per non-axis position.
template <typename dtype>
__global__ void softmax_divid_output_roi_kernel(int total_size, \
    dtype* io_data, const dtype* sum_data, \
    const int* input_stride_real, const int* output_stride_real, const int* shape_valid, \
    int softmax_axis, int axis_size, int dims){
    //! compute data index
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < total_size) {
        //! compute real data index; idx is consumed digit-by-digit below
        int idx = tid;
        int output_real_index = 0;
        for (int i = dims - 1; i >= 0; i--) {
            if (i == softmax_axis) {
                continue;
            } else {
                int x = idx % shape_valid[i];
                output_real_index += x * output_stride_real[i];
                idx = idx / shape_valid[i];
            }
        }
        //! BUGFIX: read the channel sum with the original thread id; the old
        //! code used idx, which the loop above has already reduced to 0, so
        //! every thread divided by sum_data[0].
        dtype sum_data_cur = sum_data[tid];
        //! compute final result
        for (int i = 0; i < axis_size; ++i) {
            io_data[output_real_index] = io_data[output_real_index] / sum_data_cur;
            output_real_index += output_stride_real[softmax_axis];
        }
    }
}
extern __shared__ char tile[];
//! Fused single-pass softmax (contiguous layout) for channels that fit in
//! shared memory. Each thread stages its whole channel into the dynamic
//! shared buffer `tile` (launch must pass axis_size * blockDim.x *
//! sizeof(dtype) bytes), then does max / exp-sum / divide locally.
//! Thread t owns the strided column tile[t + i * blockDim.x]; no
//! cross-thread sharing, hence no __syncthreads is needed.
template <typename dtype>
__global__ void sharemem_softmax_kernel(int total_size, \
    const dtype* in_data, dtype* out_data, \
    int inner_num, int outer_num, int axis_size){
    //__shared__ dtype data[MAX_AXIS_SIZE][CUDA_NUM_THREADS];
    dtype* data = (dtype*)tile + threadIdx.x;
    //! compute thread index and real data index
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < total_size) {
        int idx_inner = idx % inner_num;
        int idx_outer = (idx / inner_num) * axis_size;
        int blocksize = blockDim.x;
        int real_index = idx_outer * inner_num + idx_inner;
        int loop_idx = real_index;
        //! read all data to sharemem in softmax channel
        #pragma unroll
        for (int i = 0; i < axis_size; ++i) {
            data[i * blocksize] = in_data[loop_idx];
            loop_idx += inner_num;
        }
        //! get maximum value in softmax channel
        dtype max_data = data[0];
        #pragma unroll
        for (int i = 1; i < axis_size; ++i) {
            dtype dt = data[i * blocksize];
            if (max_data < dt){
                max_data = dt;
            }
        }
        //! subtract then summarize
        dtype sum = 0;
        #pragma unroll
        for (int i = 0; i < axis_size; ++i) {
            //dtype *dt = &data[i][thread_idx];
            dtype *dt = data + i * blocksize;
            *dt = expf(*dt - max_data);
            sum += *dt;
        }
        //! write back result
        loop_idx = real_index;
        #pragma unroll
        for (int i = 0; i < axis_size; ++i) {
            out_data[loop_idx] = data[i * blocksize] / sum;
            loop_idx += inner_num;
        }
    }
}
//! Fused single-pass softmax for non-contiguous (ROI) tensors, used when the
//! channel fits in shared memory. The linear thread id (saved in idx1) is
//! decomposed into per-dimension coordinates (skipping the softmax axis) to
//! obtain the strided input/output base offsets; the channel is then staged
//! into the dynamic shared buffer `tile` and reduced per-thread as in the
//! contiguous variant (thread t owns tile[t + i * blockDim.x]).
template <typename dtype>
__global__ void sharemem_softmax_roi_kernel(int total_size, \
    const dtype* in_data, dtype* out_data, \
    const int* input_stride_real, const int* output_stride_real, const int* shape_valid, \
    int softmax_axis, int axis_size, int dims){
    //__shared__ dtype data[MAX_AXIS_SIZE][CUDA_NUM_THREADS];
    dtype* data = (dtype*)tile + threadIdx.x;
    //! compute thread index and real data index
    int idx1 = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = idx1;
    if (idx < total_size) {
        int blocksize = blockDim.x;
        //! compute real data index (idx is consumed by the divisions below)
        int input_real_index = 0;
        int output_real_index = 0;
        for (int i = dims - 1; i >= 0; i--) {
            if (i == softmax_axis) {
                continue;
            } else {
                int x = idx % shape_valid[i];
                input_real_index += x * input_stride_real[i];
                output_real_index += x * output_stride_real[i];
                idx = idx / shape_valid[i];
            }
        }
        //! read all data to sharemem in softmax channel
        #pragma unroll
        for (int i = 0; i < axis_size; ++i) {
            data[i * blocksize] = in_data[input_real_index];
            input_real_index += input_stride_real[softmax_axis];
        }
        //! get maximum value in softmax channel
        dtype max_data = data[0];
        #pragma unroll
        for (int i = 1; i < axis_size; ++i) {
            dtype dt = data[i * blocksize];
            if (max_data < dt){
                max_data = dt;
            }
        }
        //! subtract then summarize
        dtype sum = 0;
        #pragma unroll
        for (int i = 0; i < axis_size; ++i) {
            //dtype *dt = &data[i][thread_idx];
            dtype *dt = data + i * blocksize;
            *dt = expf(*dt - max_data);
            sum += *dt;
        }
        //! write back result
        #pragma unroll
        for (int i = 0; i < axis_size; ++i) {
            out_data[output_real_index] = data[i * blocksize] / sum;
            output_real_index += output_stride_real[softmax_axis];
        }
    }
}
//! Cache the softmax geometry: _outer_num/_inner_num/_axis_size split the
//! shape around param.axis; _max_dimsize is how many per-thread floats fit
//! in one block's shared memory; for the non-contiguous (ROI) case the valid
//! shape and real input/output strides are uploaded to device buffers.
template <>
SaberStatus SaberSoftmax<NV, AK_FLOAT>::create(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    SoftmaxParam<NV>& param, Context<NV>& ctx) {
    //! compute size
    Shape shape_in = inputs[0]->valid_shape();
    Shape shape_out = outputs[0]->valid_shape();
    CHECK_EQ(shape_in == shape_out, true) << "valid shapes must be the same";
    _outer_num = inputs[0]->count_valid(0, param.axis);
    _inner_num = inputs[0]->count_valid(param.axis + 1, inputs[0]->dims());
    _axis_size = shape_in[param.axis];
    cudaDeviceProp deviceProp;
    // NOTE(review): the return status of cudaGetDeviceProperties is ignored.
    cudaGetDeviceProperties(&deviceProp, API::get_device_id());
    size_t sharedmem_size = deviceProp.sharedMemPerBlock;
    //! max channel length the fused shared-memory kernel can handle
    _max_dimsize = sharedmem_size / sizeof(float) / CUDA_NUM_THREADS;
    Shape sh_tmp({1, 1, 1, _outer_num * _inner_num});
    if (_axis_size > _max_dimsize){
        //! re_alloc device memory (scratch for the 3-pass fallback path)
        _max_data.reshape(sh_tmp);
        _sum_data.reshape(sh_tmp);
    }
    //! CHECK whether the input or output tensor is with continuous buffer or not
    _is_continue_buf = outputs[0]->is_continue_mem() && inputs[0]->is_continue_mem();
    _dims = shape_in.size();
    if (!_is_continue_buf) {
        Shape sh_input_real_stride = inputs[0]->get_stride();
        Shape sh_output_real_stride = outputs[0]->get_stride();
        //! re_alloc device memory and upload shape/stride tables for the ROI kernels
        Shape sh({1, 1, 1, _dims});
        _valid_shape.reshape(sh);
        _input_stride.reshape(sh);
        _output_stride.reshape(sh);
        CUDA_CHECK(cudaMemcpy(_valid_shape.mutable_data(), inputs[0]->valid_shape().data(), \
            sizeof(int) * _dims, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(_input_stride.mutable_data(), sh_input_real_stride.data(), \
            sizeof(int) * _dims, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(_output_stride.mutable_data(), sh_output_real_stride.data(), \
            sizeof(int) * _dims, cudaMemcpyHostToDevice));
    }
    return SaberSuccess;
}
//! One-time initialisation: remember the execution context, then delegate
//! all sizing/allocation work to create() (which is also re-run on reshape).
template <>
SaberStatus SaberSoftmax<NV, AK_FLOAT>::init(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    SoftmaxParam<NV>& param, Context<NV>& ctx) {
    this->_ctx = &ctx;
    return this->create(inputs, outputs, param, ctx);
}
//! Forward softmax. Chooses one of four paths:
//!  - contiguous buffers, channel fits in shared memory: fused kernel
//!  - contiguous buffers, channel too large: 3-pass max / exp-sum / divide
//!  - non-contiguous (ROI) buffers: strided variants of the same two choices
template <>
SaberStatus SaberSoftmax<NV, AK_FLOAT>::dispatch(\
    const std::vector<Tensor<NV> *>& inputs, \
    std::vector<Tensor<NV> *>& outputs, \
    SoftmaxParam<NV>& param) {
    cudaStream_t stream = this->_ctx->get_compute_stream();
    //! inputs only has one tensor; one thread per softmax channel
    int total_threads = this->_inner_num * this->_outer_num;
    const float* data_in = (const float* )inputs[0]->data();
    float* data_out = (float*)outputs[0]->mutable_data();
    float* max_data = (float*)this->_max_data.mutable_data();
    float* sum_data = (float*)this->_sum_data.mutable_data();
    const int* valid_shape = (const int*)_valid_shape.data();
    const int* input_stride = (const int*)_input_stride.data();
    const int* output_stride = (const int*)_output_stride.data();
    if (_is_continue_buf) {
        //! softmax kernel without roi
        if (this->_axis_size <= _max_dimsize){
            int sharemem_size = this->_axis_size * CUDA_NUM_THREADS * sizeof(float);
            sharemem_softmax_kernel<float>\
                    <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, sharemem_size, stream>>>(
                    total_threads, data_in, data_out,
                    this->_inner_num, this->_outer_num, this->_axis_size);
        } else {
            //! firstly, get maximum data
            //! BUGFIX: seed the running maximum with lowest() (most negative
            //! float); min() is the smallest POSITIVE normal float and gives
            //! a wrong maximum when all inputs are negative.
            float min_data = std::numeric_limits<float>::lowest();
            softmax_max_kernel<float>\
                    <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>(
                    total_threads, data_in, max_data, min_data, \
                    this->_inner_num, this->_outer_num, this->_axis_size);
            //! then, compute exp and sum data
            softmax_sub_exp_sum_kernel<float>
                    <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>(
                    total_threads, data_in, data_out, max_data, sum_data, \
                    this->_inner_num, this->_outer_num, this->_axis_size);
            //! lastly, compute divided output
            softmax_divid_output_kernel<float>\
                    <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>(
                    total_threads, data_out, sum_data, \
                    this->_inner_num, this->_outer_num, this->_axis_size);
        }
    } else {
        //! softmax kernel with roi
        if (this->_axis_size <= _max_dimsize){
            int sharemem_size = this->_axis_size * CUDA_NUM_THREADS * sizeof(float);
            sharemem_softmax_roi_kernel<float>\
                    <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, sharemem_size, stream>>>(
                    total_threads, data_in, data_out,
                    input_stride, output_stride, valid_shape, \
                    param.axis, _axis_size, _dims);
        } else {
            //! firstly, get maximum data
            //! BUGFIX: same min() -> lowest() fix as the contiguous path.
            float min_data = std::numeric_limits<float>::lowest();
            softmax_max_roi_kernel<float>\
                    <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>(
                    total_threads, data_in, max_data, min_data, \
                    input_stride, output_stride, valid_shape, \
                    param.axis, _axis_size, _dims);
            //! then, compute exp and sum data
            softmax_sub_exp_sum_roi_kernel<float>
                    <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>(
                    total_threads, data_in, data_out, max_data, sum_data, \
                    input_stride, output_stride, valid_shape, \
                    param.axis, _axis_size, _dims);
            //! lastly, compute divided output
            softmax_divid_output_roi_kernel<float>\
                    <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>(
                    total_threads, data_out, sum_data, \
                    input_stride, output_stride, valid_shape, \
                    param.axis, _axis_size, _dims);
        }
    }
    return SaberSuccess;
}
// ============================================= int8
//! INT8 specializations are placeholders: create() allocates nothing and
//! dispatch() is a no-op that reports success. Kept so the template
//! instantiations below link.
template <>
SaberStatus SaberSoftmax<NV, AK_INT8>::create(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    SoftmaxParam<NV>& param, Context<NV>& ctx) {
    return SaberSuccess;
}
template <>
SaberStatus SaberSoftmax<NV, AK_INT8>::init(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    SoftmaxParam<NV>& param, Context<NV>& ctx) {
    this->_ctx = &ctx;
    return create(inputs, outputs, param, ctx);
}
template <>
SaberStatus SaberSoftmax<NV, AK_INT8>::dispatch(
    const std::vector<Tensor<NV> *>& inputs,
    std::vector<Tensor<NV> *>& outputs,
    SoftmaxParam<NV>& param) {
    return SaberSuccess;
}
template class SaberSoftmax<NV, AK_FLOAT>;
template class SaberSoftmax<NV, AK_INT8>;
DEFINE_OP_TEMPLATE(SaberSoftmax, SoftmaxParam, NV, AK_HALF);
} //namespace anakin
} //namespace saber
|
963fe67f0487de56e43f6bc99946ecebb2fcc0fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "CropOp.h"
namespace paddle {
//! Copy a cropped NCHW sub-volume: each of the nthreads threads decodes its
//! flat output index into (n, c, h, w) and reads the input at the same
//! coordinate shifted by the crop corner (cropC, cropH, cropW).
//! The batch dimension is not cropped here.
__global__ void KeCrop(real* outputs, const real* inputs,
                       int inC, int inH, int inW,
                       int cropC, int cropH, int cropW,
                       int outC, int outH, int outW, int nthreads) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < nthreads) {
    const int w = idx % outW;
    const int h = (idx / outW) % outH;
    const int c = (idx / outW / outH) % outC;
    const int n = idx / outW / outH / outC;
    //! linear offset of the shifted coordinate in the (larger) input tensor
    const int off = ((n * inC + c + cropC) * inH + h + cropH) * inW + cropW + w;
    outputs[idx] = inputs[off];
  }
}
//! Host wrapper for the GPU crop forward pass. crop_corner is the per-axis
//! crop offset in NCHW order; element 0 (batch) is not used by the kernel —
//! presumably batch cropping is unsupported; verify against the CPU version.
template <>
void Crop<DEVICE_TYPE_GPU>(real* outputs,
                           const real* inputs,
                           const TensorShape inShape,
                           const TensorShape outShape,
                           const FuncConfig& conf) {
  std::vector<uint32_t> crop_corner = conf.get<std::vector<uint32_t>>("crop_corner");
  int cropC = crop_corner[1];
  int cropH = crop_corner[2];
  int cropW = crop_corner[3];
  int num = inShape[0];
  int inC = inShape[1];
  int inH = inShape[2];
  int inW = inShape[3];
  int outC = outShape[1];
  int outH = outShape[2];
  int outW = outShape[3];
  //! one thread per output element, ceil-div launch
  size_t nth = num * outC * outH * outW;
  int blockSize = 1024;
  int gridSize = (nth + blockSize - 1) / blockSize;
  hipLaunchKernelGGL(( KeCrop), dim3(gridSize), dim3(blockSize), 0, STREAM_DEFAULT,
      outputs, inputs, inC, inH, inW, cropC, cropH, cropW,
      outC, outH, outW, nth);
  CHECK_SYNC("Crop");
}
//! Backward of crop: scatter each element of the (smaller) cropped gradient
//! inGrad back into the full-size gradient outGrad at the position shifted
//! by the crop corner. Each idx maps to a unique off, so the unguarded `+=`
//! is race-free; it accumulates into outGrad — assumes outGrad is
//! zero-initialised or accumulation is intended (TODO confirm at call site).
__global__ void KeCropDiff(const real* inGrad, real* outGrad,
                           int inC, int inH, int inW,
                           int cropC, int cropH, int cropW,
                           int outC, int outH, int outW, int nthreads) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < nthreads) {
    const int w = idx % inW;
    const int h = (idx / inW) % inH;
    const int c = (idx / inW / inH) % inC;
    const int n = idx / inW / inH / inC;
    const int off = ((n * outC + c + cropC) * outH + h + cropH) * outW + cropW + w;
    outGrad[off] += inGrad[idx];
  }
}
//! Host wrapper for the GPU crop backward pass. Here "in" is the cropped
//! gradient (smaller tensor) and "out" the full-size gradient; one thread
//! per cropped-gradient element.
template <>
void CropGrad<DEVICE_TYPE_GPU>(const real* inGrad,
                               real* outGrad,
                               const TensorShape inShape,
                               const TensorShape outShape,
                               const FuncConfig& conf) {
  std::vector<uint32_t> crop_corner = conf.get<std::vector<uint32_t>>("crop_corner");
  int cropC = crop_corner[1];
  int cropH = crop_corner[2];
  int cropW = crop_corner[3];
  int num = outShape[0];
  int outC = outShape[1];
  int outH = outShape[2];
  int outW = outShape[3];
  int inC = inShape[1];
  int inH = inShape[2];
  int inW = inShape[3];
  //! one thread per element of the cropped gradient, ceil-div launch
  size_t nth = num * inC * inH * inW;
  int blockSize = 1024;
  int gridSize = (nth + blockSize - 1) / blockSize;
  hipLaunchKernelGGL(( KeCropDiff) , dim3(gridSize), dim3(blockSize), 0, STREAM_DEFAULT,
      inGrad, outGrad, inC, inH, inW, cropC, cropH, cropW,
      outC, outH, outW, nth);
  CHECK_SYNC("CropGrad");
}
} // namespace paddle
| 963fe67f0487de56e43f6bc99946ecebb2fcc0fd.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "CropOp.h"
namespace paddle {
//! Copy a cropped NCHW sub-volume: each of the nthreads threads decodes its
//! flat output index into (n, c, h, w) and reads the input at the same
//! coordinate shifted by the crop corner (cropC, cropH, cropW).
//! The batch dimension is not cropped here.
__global__ void KeCrop(real* outputs, const real* inputs,
                       int inC, int inH, int inW,
                       int cropC, int cropH, int cropW,
                       int outC, int outH, int outW, int nthreads) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < nthreads) {
    const int w = idx % outW;
    const int h = (idx / outW) % outH;
    const int c = (idx / outW / outH) % outC;
    const int n = idx / outW / outH / outC;
    //! linear offset of the shifted coordinate in the (larger) input tensor
    const int off = ((n * inC + c + cropC) * inH + h + cropH) * inW + cropW + w;
    outputs[idx] = inputs[off];
  }
}
//! Host wrapper for the GPU crop forward pass. crop_corner is the per-axis
//! crop offset in NCHW order; element 0 (batch) is not used by the kernel —
//! presumably batch cropping is unsupported; verify against the CPU version.
template <>
void Crop<DEVICE_TYPE_GPU>(real* outputs,
                           const real* inputs,
                           const TensorShape inShape,
                           const TensorShape outShape,
                           const FuncConfig& conf) {
  std::vector<uint32_t> crop_corner = conf.get<std::vector<uint32_t>>("crop_corner");
  int cropC = crop_corner[1];
  int cropH = crop_corner[2];
  int cropW = crop_corner[3];
  int num = inShape[0];
  int inC = inShape[1];
  int inH = inShape[2];
  int inW = inShape[3];
  int outC = outShape[1];
  int outH = outShape[2];
  int outW = outShape[3];
  //! one thread per output element, ceil-div launch
  size_t nth = num * outC * outH * outW;
  int blockSize = 1024;
  int gridSize = (nth + blockSize - 1) / blockSize;
  KeCrop<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>
    (outputs, inputs, inC, inH, inW, cropC, cropH, cropW,
     outC, outH, outW, nth);
  CHECK_SYNC("Crop");
}
//! Backward of crop: scatter each element of the (smaller) cropped gradient
//! inGrad back into the full-size gradient outGrad at the position shifted
//! by the crop corner. Each idx maps to a unique off, so the unguarded `+=`
//! is race-free; it accumulates into outGrad — assumes outGrad is
//! zero-initialised or accumulation is intended (TODO confirm at call site).
__global__ void KeCropDiff(const real* inGrad, real* outGrad,
                           int inC, int inH, int inW,
                           int cropC, int cropH, int cropW,
                           int outC, int outH, int outW, int nthreads) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < nthreads) {
    const int w = idx % inW;
    const int h = (idx / inW) % inH;
    const int c = (idx / inW / inH) % inC;
    const int n = idx / inW / inH / inC;
    const int off = ((n * outC + c + cropC) * outH + h + cropH) * outW + cropW + w;
    outGrad[off] += inGrad[idx];
  }
}
//! Host wrapper for the GPU crop backward pass. Here "in" is the cropped
//! gradient (smaller tensor) and "out" the full-size gradient; one thread
//! per cropped-gradient element.
template <>
void CropGrad<DEVICE_TYPE_GPU>(const real* inGrad,
                               real* outGrad,
                               const TensorShape inShape,
                               const TensorShape outShape,
                               const FuncConfig& conf) {
  std::vector<uint32_t> crop_corner = conf.get<std::vector<uint32_t>>("crop_corner");
  int cropC = crop_corner[1];
  int cropH = crop_corner[2];
  int cropW = crop_corner[3];
  int num = outShape[0];
  int outC = outShape[1];
  int outH = outShape[2];
  int outW = outShape[3];
  int inC = inShape[1];
  int inH = inShape[2];
  int inW = inShape[3];
  //! one thread per element of the cropped gradient, ceil-div launch
  size_t nth = num * inC * inH * inW;
  int blockSize = 1024;
  int gridSize = (nth + blockSize - 1) / blockSize;
  KeCropDiff <<<gridSize, blockSize, 0, STREAM_DEFAULT>>>
    (inGrad, outGrad, inC, inH, inW, cropC, cropH, cropW,
     outC, outH, outW, nth);
  CHECK_SYNC("CropGrad");
}
} // namespace paddle
|
f537e2259b04f0fc60d498e2d06eb44e8611ce3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kmean_kernel.h"
#include <stdio.h>
#include <stdlib.h>
//! k-means E-step: assign every object to its nearest cluster centre.
//! Layouts are dimension-major: dimObjects is [numDims x numObjs], clusters
//! is [numDims x numClusters]. sharedSize == 1 signals that the centres
//! should be read straight from global memory; otherwise the block first
//! stages all centres into the dynamically sized shared buffer (launch must
//! pass numDims * numClusters * sizeof(float) bytes — TODO confirm caller).
__global__ void cuda_find_nearest_cluster(int sharedSize, float * dimObjects, float *clusters, int numObjs, int numDims, int numClusters, int *memberShip) {
    extern __shared__ float shareMemory[];
    float *shared_clusters = shareMemory;
    if(sharedSize == 1) {
        //! fall back to the global-memory copy of the centres
        shared_clusters = clusters;
    }
    else {
        /// Copy Clusters into shared memory
        for(int idx = threadIdx.x; idx < numClusters; idx += blockDim.x) {
            for(int idy = 0; idy < numDims; idy ++) {
                shared_clusters[idy * numClusters + idx] = clusters[idy * numClusters + idx];
            }
        }
    }
    //! barrier sits outside the divergent branch, so all threads reach it
    __syncthreads();
    int ObjsIdx = blockDim.x * blockIdx.x + threadIdx.x;
    if(ObjsIdx < numObjs) {
        int index = 0;
        float dist, minDist;
        minDist = calculateDist(dimObjects, shared_clusters, numObjs, numClusters, numDims, ObjsIdx, 0);
        //! linear scan over the remaining centres, keeping the argmin
        for(int clusterIdx = 1; clusterIdx < numClusters; clusterIdx ++) {
            dist = calculateDist(dimObjects, shared_clusters, numObjs, numClusters, numDims, ObjsIdx, clusterIdx);
            if(minDist > dist) {
                index = clusterIdx;
                minDist = dist;
            }
        }
        memberShip[ObjsIdx] = index;
    }
}
// Squared Euclidean distance between object ObjsIdx and centroid clusterIdx.
// Arrays are dimension-major ([dim * count + item]), so consecutive threads
// reading the same dimension touch consecutive addresses (coalesced loads).
inline __device__ float calculateDist(float * dimObjects, float * shared_clusters, int numObjs, int numClusters, int numDims, int ObjsIdx, int clusterIdx) {
	float acc = 0.0f;
	for(int d = 0; d < numDims; d++) {
		const float diff = dimObjects[d * numObjs + ObjsIdx]
		                 - shared_clusters[d * numClusters + clusterIdx];
		acc += diff * diff;
	}
	return acc;
}
// k-means update step (accumulation): one thread per object.  Each thread
// bumps the population counter of its assigned cluster and adds its own
// coordinates into that cluster's (dimension-major) accumulator.  Atomics
// are required because many objects map onto the same cluster.
__global__ void cuda_accumulate_clusters(float * dimObjects, int *memberShip, int numObjs, int numDims, int numClusters, int *clusterSize, float *clusters) {
	int ObjsIdx = blockDim.x * blockIdx.x + threadIdx.x;
	float val;
	if(ObjsIdx < numObjs) {
		int index = memberShip[ObjsIdx];
		atomicAdd(&clusterSize[index], 1);
		for(int idx = 0; idx < numDims; idx ++) {
			val = dimObjects[idx * numObjs + ObjsIdx];
			atomicAdd(&(clusters[idx * numClusters + index]), val);
		}
	}
}
// Divides every accumulated centroid coordinate by its cluster's population.
// Indexing implies the launch uses blockDim.x == numClusters so threadIdx.x
// names the cluster and blockIdx.x the dimension — confirm at the call site.
__global__ void cuda_average_clusters(int * clusterSize, float * clusters) {
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	clusters[idx] /= clusterSize[threadIdx.x];
}
// Turns accumulated squared deviations into inverse per-dimension variances
// and folds log-determinant terms into device_dMats.  Launch layout (from
// the indexing): blockIdx.x = dimension, threadIdx.x = cluster, and
// blockDim.x = numClusters, i.e. one thread per (dimension, cluster) pair.
__global__ void cuda_average_diagcovs(int * device_clusterSize, float * device_diagCovs, float * device_dMats) {
	int clusterIdx = threadIdx.x, dimIdx = blockIdx.x, numClusters = blockDim.x;
	int idx = numClusters * dimIdx + clusterIdx;
	// Average the squared deviations, then invert: the buffer ends up
	// holding 1/variance for each (dimension, cluster) entry.
	float val = device_diagCovs[idx] / device_clusterSize[clusterIdx];
	val = 1.0 / val;
	device_diagCovs[idx] = val;
	// dMats[c] accumulates sum over dimensions of log(1/variance).
	atomicAdd(&device_dMats[clusterIdx], log(val));
}
// Accumulates squared deviations of each object from its assigned centroid
// into the per-cluster diagonal-covariance accumulator.  One thread per
// object; atomics because clusters are shared across threads.
__global__ void cuda_accumulate_diagcovs(float *device_dimObjects, int * device_memberShip, float * device_dimClusters, int numObjs, int numDims, int numClusters, float * device_diagCovs) {
	int ObjsIdx = blockDim.x * blockIdx.x + threadIdx.x;
	float val;
	int idy;
	if(ObjsIdx < numObjs) {
		int index = device_memberShip[ObjsIdx];
		for(int idx = 0; idx < numDims; idx ++) {
			// idy indexes the (dimension, cluster) cell in dimension-major layout.
			idy = idx * numClusters + index;
			val = device_dimObjects[idx * numObjs + ObjsIdx] - device_dimClusters[idy];
			atomicAdd(& device_diagCovs[idy], val * val);
		}
	}
}
| f537e2259b04f0fc60d498e2d06eb44e8611ce3c.cu | #include "kmean_kernel.h"
#include <stdio.h>
#include <stdlib.h>
// k-means assignment step: one thread per object; each thread scans all
// centroids and records the index of the nearest one in memberShip.
// When sharedSize != 1 the centroids are first staged into dynamic shared
// memory (the launch must supply numClusters*numDims*sizeof(float) bytes);
// sharedSize == 1 reads centroids straight from global memory instead.
// Data layout is dimension-major: dim d of object o is at [d*numObjs + o],
// dim d of cluster c is at [d*numClusters + c].
__global__ void cuda_find_nearest_cluster(int sharedSize, float * dimObjects, float *clusters, int numObjs, int numDims, int numClusters, int *memberShip) {
	extern __shared__ float shareMemory[];
	float *shared_clusters = shareMemory;
	if(sharedSize == 1) {
		// Shared-memory staging disabled: alias the global centroid array.
		shared_clusters = clusters;
	}
	else {
		/// Copy Clusters into shared memory
		// Threads cooperatively copy all centroids, striding clusters by blockDim.x.
		for(int idx = threadIdx.x; idx < numClusters; idx += blockDim.x) {
			for(int idy = 0; idy < numDims; idy ++) {
				shared_clusters[idy * numClusters + idx] = clusters[idy * numClusters + idx];
			}
		}
	}
	// Barrier: every thread must see the fully staged centroids before reading.
	__syncthreads();
	int ObjsIdx = blockDim.x * blockIdx.x + threadIdx.x;
	if(ObjsIdx < numObjs) {
		int index = 0;
		float dist, minDist;
		// Linear scan over all centroids, tracking the closest one.
		minDist = calculateDist(dimObjects, shared_clusters, numObjs, numClusters, numDims, ObjsIdx, 0);
		for(int clusterIdx = 1; clusterIdx < numClusters; clusterIdx ++) {
			dist = calculateDist(dimObjects, shared_clusters, numObjs, numClusters, numDims, ObjsIdx, clusterIdx);
			if(minDist > dist) {
				index = clusterIdx;
				minDist = dist;
			}
		}
		memberShip[ObjsIdx] = index;
	}
}
// Squared Euclidean distance between object ObjsIdx and centroid clusterIdx.
// Arrays are dimension-major ([dim * count + item]), so consecutive threads
// reading the same dimension touch consecutive addresses (coalesced loads).
inline __device__ float calculateDist(float * dimObjects, float * shared_clusters, int numObjs, int numClusters, int numDims, int ObjsIdx, int clusterIdx) {
	float acc = 0.0f;
	for(int d = 0; d < numDims; d++) {
		const float diff = dimObjects[d * numObjs + ObjsIdx]
		                 - shared_clusters[d * numClusters + clusterIdx];
		acc += diff * diff;
	}
	return acc;
}
// k-means update step (accumulation): one thread per object.  Each thread
// bumps the population counter of its assigned cluster and adds its own
// coordinates into that cluster's (dimension-major) accumulator.  Atomics
// are required because many objects map onto the same cluster.
__global__ void cuda_accumulate_clusters(float * dimObjects, int *memberShip, int numObjs, int numDims, int numClusters, int *clusterSize, float *clusters) {
	int ObjsIdx = blockDim.x * blockIdx.x + threadIdx.x;
	float val;
	if(ObjsIdx < numObjs) {
		int index = memberShip[ObjsIdx];
		atomicAdd(&clusterSize[index], 1);
		for(int idx = 0; idx < numDims; idx ++) {
			val = dimObjects[idx * numObjs + ObjsIdx];
			atomicAdd(&(clusters[idx * numClusters + index]), val);
		}
	}
}
// Divides every accumulated centroid coordinate by its cluster's population.
// Indexing implies the launch uses blockDim.x == numClusters so threadIdx.x
// names the cluster and blockIdx.x the dimension — confirm at the call site.
__global__ void cuda_average_clusters(int * clusterSize, float * clusters) {
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	clusters[idx] /= clusterSize[threadIdx.x];
}
// Turns accumulated squared deviations into inverse per-dimension variances
// and folds log-determinant terms into device_dMats.  Launch layout (from
// the indexing): blockIdx.x = dimension, threadIdx.x = cluster, and
// blockDim.x = numClusters, i.e. one thread per (dimension, cluster) pair.
__global__ void cuda_average_diagcovs(int * device_clusterSize, float * device_diagCovs, float * device_dMats) {
	int clusterIdx = threadIdx.x, dimIdx = blockIdx.x, numClusters = blockDim.x;
	int idx = numClusters * dimIdx + clusterIdx;
	// Average the squared deviations, then invert: the buffer ends up
	// holding 1/variance for each (dimension, cluster) entry.
	float val = device_diagCovs[idx] / device_clusterSize[clusterIdx];
	val = 1.0 / val;
	device_diagCovs[idx] = val;
	// dMats[c] accumulates sum over dimensions of log(1/variance).
	atomicAdd(&device_dMats[clusterIdx], log(val));
}
// Accumulates squared deviations of each object from its assigned centroid
// into the per-cluster diagonal-covariance accumulator.  One thread per
// object; atomics because clusters are shared across threads.
__global__ void cuda_accumulate_diagcovs(float *device_dimObjects, int * device_memberShip, float * device_dimClusters, int numObjs, int numDims, int numClusters, float * device_diagCovs) {
	int ObjsIdx = blockDim.x * blockIdx.x + threadIdx.x;
	float val;
	int idy;
	if(ObjsIdx < numObjs) {
		int index = device_memberShip[ObjsIdx];
		for(int idx = 0; idx < numDims; idx ++) {
			// idy indexes the (dimension, cluster) cell in dimension-major layout.
			idy = idx * numClusters + index;
			val = device_dimObjects[idx * numObjs + ObjsIdx] - device_dimClusters[idy];
			atomicAdd(& device_diagCovs[idy], val * val);
		}
	}
}
|
7f2537a337c1f271e0d66f2d07ea53a2823ecfef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <float.h>
namespace oneflow {
namespace {
// NOTE(Liang Depeng): refer to
// https://stackoverflow.com/questions/17371275/implementing-max-reduce-in-cuda
// Whole-tensor ("per-layer") max/min reduction.
// Convention: min_ptr receives max(-x) — i.e. the NEGATED minimum — and the
// consumers (CalScaleZeroPoint*) negate it back.  Requirements: dynamic
// shared memory of 2*blockDim.x*sizeof(T); blockDim.x a power of two (the
// halving tree reduction assumes it); max_ptr/min_ptr pre-seeded with
// -FLT_MAX (see InitMaxMin).
template<typename T>
__global__ void ReduceMaxMinPerLayer(const T *input_ptr, const int64_t elements, T *max_ptr,
                                     T *min_ptr) {
  extern __shared__ unsigned char shared_max_min_memory[];
  T *shared_max = reinterpret_cast<T *>(shared_max_min_memory);
  T *shared_min = shared_max + blockDim.x;
  int64_t tid = threadIdx.x;
  int64_t gid = (blockDim.x * blockIdx.x) + tid;
  // Seed with the reduction identity so tail threads don't perturb results.
  shared_max[tid] = -FLT_MAX;
  shared_min[tid] = -FLT_MAX;
  // Grid-stride accumulation into each thread's private shared slot.
  while (gid < elements) {
    shared_max[tid] = max(shared_max[tid], input_ptr[gid]);
    shared_min[tid] = max(shared_min[tid], -input_ptr[gid]);
    gid += gridDim.x * blockDim.x;
  }
  __syncthreads();
  gid = (blockDim.x * blockIdx.x) + tid;
  // Shared-memory tree reduction, halving the active half each pass.
  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s && gid < elements) {
      shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
      shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
    }
    __syncthreads();
  }
  // One atomic per block folds this block's result into the global answer.
  if (tid == 0) {
    cuda::atomic::Max(max_ptr, shared_max[0]);
    cuda::atomic::Max(min_ptr, shared_min[0]);
  }
}
// Per-channel max/min reduction: each block handles one channel at a time
// (striding over channels by gridDim.x) and reduces that channel's
// contiguous panel of panel_size elements.  As in the per-layer variant,
// min_ptr stores the NEGATED minimum.  Requirements: dynamic shared memory
// of 2*blockDim.x*sizeof(T); blockDim.x a power of two; outputs pre-seeded
// with -FLT_MAX.
template<typename T>
__global__ void ReduceMaxMinPerChannel(const T *input_ptr, const int64_t elements,
                                       const int64_t num_channels, const int64_t panel_size,
                                       T *max_ptr, T *min_ptr) {
  extern __shared__ unsigned char shared_max_min_memory[];
  T *shared_max = reinterpret_cast<T *>(shared_max_min_memory);
  T *shared_min = shared_max + blockDim.x;
  int64_t cur_channel = blockIdx.x;
  int64_t tid = threadIdx.x;
  while (cur_channel < num_channels) {
    // Reset to the reduction identity for each newly adopted channel.
    shared_max[tid] = -FLT_MAX;
    shared_min[tid] = -FLT_MAX;
    int64_t index = (panel_size * cur_channel) + tid;
    int64_t end = panel_size * (cur_channel + 1);
    while (index < end && index < elements) {
      shared_max[tid] = max(shared_max[tid], input_ptr[index]);
      shared_min[tid] = max(shared_min[tid], -input_ptr[index]);
      index += blockDim.x;
    }
    __syncthreads();
    // Tree reduction over the block's shared slots.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
      if (tid < s) {
        shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
        shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
      }
      __syncthreads();
    }
    if (tid == 0) {
      cuda::atomic::Max(&max_ptr[cur_channel], shared_max[0]);
      cuda::atomic::Max(&min_ptr[cur_channel], shared_min[0]);
    }
    // __syncthreads();
    // NOTE(review): the missing barrier above looks safe because only tid 0
    // reads/writes slot 0 before the next iteration resets per-thread slots,
    // but it deserves a race-check (compute-sanitizer racecheck) to confirm.
    cur_channel += gridDim.x;
  }
}
// Seeds both output buffers with the reduction identity (-FLT_MAX): the
// reduce kernels take max() into max_ptr and max() of NEGATED values into
// min_ptr, so both sides start from the smallest sentinel.
template<typename T>
__global__ void InitMaxMin(const int64_t elements, T *max_ptr, T *min_ptr) {
  const int64_t stride = static_cast<int64_t>(gridDim.x) * blockDim.x;
  for (int64_t i = (int64_t)(blockDim.x * blockIdx.x) + threadIdx.x; i < elements; i += stride) {
    max_ptr[i] = -FLT_MAX;
    min_ptr[i] = -FLT_MAX;
  }
}
// Symmetric ("google") quantization parameters: scale = max|x| / (2^(b-1)-1),
// zero_point = 0.  min_ptr holds the negated minimum, so fabs() of either
// entry bounds |x| over the layer/channel.
template<typename T>
__global__ void CalScaleZeroPointSymmetric(const T *max_ptr, const T *min_ptr,
                                           const int64_t elements, const double quantization_bit,
                                           T *scale, T *zero_point) {
  const int64_t step = static_cast<int64_t>(gridDim.x) * blockDim.x;
  for (int64_t i = (int64_t)(blockDim.x * blockIdx.x) + threadIdx.x; i < elements; i += step) {
    const T weight_max = max(fabs(max_ptr[i]), fabs(min_ptr[i]));
    const T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
    scale[i] = weight_max / denominator;
    zero_point[i] = 0;
  }
}
// Affine ("google") quantization parameters: scale = (max-min)/(2^b - 1),
// zero_point = -min/scale.  min_ptr holds the negated minimum, hence the
// re-negation below.
template<typename T>
__global__ void CalScaleZeroPointAffine(const T *max_ptr, const T *min_ptr, const int64_t elements,
                                        const double quantization_bit, T *scale, T *zero_point) {
  const int64_t step = static_cast<int64_t>(gridDim.x) * blockDim.x;
  for (int64_t i = (int64_t)(blockDim.x * blockIdx.x) + threadIdx.x; i < elements; i += step) {
    const T denominator = static_cast<T>(pow(2.0, quantization_bit)) - 1;
    const T min = -min_ptr[i];  // undo the negated-min storage convention
    const T s = (max_ptr[i] - min) / denominator;
    scale[i] = s;
    zero_point[i] = -min / s;
  }
}
// Cambricon quantization parameters: the "scale" is a power-of-two exponent,
// floor(log2(max|x|)) - (b - 2); zero_point is always 0.
template<typename T>
__global__ void CalScaleZeroPointCambricon(const T *max_ptr, const T *min_ptr,
                                           const int64_t elements, const double quantization_bit,
                                           T *scale, T *zero_point) {
  const int64_t step = static_cast<int64_t>(gridDim.x) * blockDim.x;
  for (int64_t i = (int64_t)(blockDim.x * blockIdx.x) + threadIdx.x; i < elements; i += step) {
    const T weight_max = max(fabs(max_ptr[i]), fabs(min_ptr[i]));
    // T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
    scale[i] = floor(log2(weight_max)) - (quantization_bit - 2);
    zero_point[i] = 0;
  }
}
} // namespace
#define LAUNCH_CUDA_KERNEL(func, device_ctx_ptr, thread_num, shared_mem_size, ...) \
hipLaunchKernelGGL(( func), dim3(SMBlocksNum4ThreadsNum(thread_num)), dim3(kCudaThreadsNumPerBlock), shared_mem_size, \
(device_ctx_ptr)->cuda_stream(), __VA_ARGS__)
// GPU kernel for the min_max_observer op: computes quantization parameters
// (scale, zero_point) either per-tensor ("per-layer") or per-channel.
// tmp_buffer layout: `channel` running maxima followed by `channel` NEGATED
// minima (see the reduction kernels above).
template<typename T>
class GpuMinMaxObserverKernel final : public user_op::OpKernel {
 public:
  GpuMinMaxObserverKernel() = default;
  ~GpuMinMaxObserverKernel() = default;
 private:
  void Compute(user_op::KernelComputeContext *ctx) const override {
    const user_op::Tensor *in = ctx->Tensor4ArgNameAndIndex("in", 0);
    user_op::Tensor *scale = ctx->Tensor4ArgNameAndIndex("scale", 0);
    user_op::Tensor *zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0);
    user_op::Tensor *tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
    const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme");
    const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit");
    const bool per_layer_quantization = ctx->Attr<bool>("per_layer_quantization");
    const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula");
    const int64_t elements = in->shape().elem_cnt();
    // Number of output entries; 1 when quantizing the whole layer at once.
    const int64_t channel = scale->shape().At(0);
    const int64_t panel_size = elements / channel;
    // tmp_buffer = [channel maxima | channel negated minima].
    T *max_ptr = tmp_buffer->mut_dptr<T>();
    T *min_ptr = max_ptr + channel;
    // Seed both halves with the reduction identity (-FLT_MAX).
    LAUNCH_CUDA_KERNEL((InitMaxMin<T>), ctx->device_ctx(), channel, 0, channel, max_ptr, min_ptr);
    if (per_layer_quantization) {
      LAUNCH_CUDA_KERNEL((ReduceMaxMinPerLayer<T>), ctx->device_ctx(), elements,
                         kCudaThreadsNumPerBlock * 2 * sizeof(T), in->dptr<T>(), elements, max_ptr,
                         min_ptr);
    } else {  // per-channel quantization
      // NOTE(Liang Depeng): each block of threads will be responsible for
      // computing the max and min values of the whole channel.
      LAUNCH_CUDA_KERNEL((ReduceMaxMinPerChannel<T>), ctx->device_ctx(),
                         channel * kCudaThreadsNumPerBlock, kCudaThreadsNumPerBlock * 2 * sizeof(T),
                         in->dptr<T>(), elements, channel, panel_size, max_ptr, min_ptr);
    }
    // Convert the (max, -min) pairs into (scale, zero_point) per formula.
    if (quantization_formula == "google") {
      if (quantization_scheme == "symmetric") {
        LAUNCH_CUDA_KERNEL((CalScaleZeroPointSymmetric<T>), ctx->device_ctx(), channel, 0, max_ptr,
                           min_ptr, channel, static_cast<double>(quantization_bit),
                           scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
      } else {  // quantization_scheme == "affine"
        LAUNCH_CUDA_KERNEL((CalScaleZeroPointAffine<T>), ctx->device_ctx(), channel, 0, max_ptr,
                           min_ptr, channel, static_cast<double>(quantization_bit),
                           scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
      }
    } else if (quantization_formula == "cambricon") {
      LAUNCH_CUDA_KERNEL((CalScaleZeroPointCambricon<T>), ctx->device_ctx(), channel, 0, max_ptr,
                         min_ptr, channel, static_cast<double>(quantization_bit),
                         scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
    } else {
      UNIMPLEMENTED();
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MIN_MAX_OBSERVER_KERNEL(dtype) \
REGISTER_USER_KERNEL("min_max_observer") \
.SetCreateFn<GpuMinMaxObserverKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext *ctx) -> size_t { \
size_t tmp_buffer_size = 1; \
if (ctx->Attr<bool>("per_layer_quantization") == false) { \
const Shape *in_shape = ctx->Shape4ArgNameAndIndex("in", 0); \
tmp_buffer_size = in_shape->At(0); \
} \
return 2 * tmp_buffer_size * sizeof(dtype); \
})
REGISTER_MIN_MAX_OBSERVER_KERNEL(float);
REGISTER_MIN_MAX_OBSERVER_KERNEL(double);
} // namespace oneflow
| 7f2537a337c1f271e0d66f2d07ea53a2823ecfef.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <float.h>
namespace oneflow {
namespace {
// NOTE(Liang Depeng): refer to
// https://stackoverflow.com/questions/17371275/implementing-max-reduce-in-cuda
// Whole-tensor ("per-layer") max/min reduction.
// Convention: min_ptr receives max(-x) — i.e. the NEGATED minimum — and the
// consumers (CalScaleZeroPoint*) negate it back.  Requirements: dynamic
// shared memory of 2*blockDim.x*sizeof(T); blockDim.x a power of two (the
// halving tree reduction assumes it); max_ptr/min_ptr pre-seeded with
// -FLT_MAX (see InitMaxMin).
template<typename T>
__global__ void ReduceMaxMinPerLayer(const T *input_ptr, const int64_t elements, T *max_ptr,
                                     T *min_ptr) {
  extern __shared__ unsigned char shared_max_min_memory[];
  T *shared_max = reinterpret_cast<T *>(shared_max_min_memory);
  T *shared_min = shared_max + blockDim.x;
  int64_t tid = threadIdx.x;
  int64_t gid = (blockDim.x * blockIdx.x) + tid;
  // Seed with the reduction identity so tail threads don't perturb results.
  shared_max[tid] = -FLT_MAX;
  shared_min[tid] = -FLT_MAX;
  // Grid-stride accumulation into each thread's private shared slot.
  while (gid < elements) {
    shared_max[tid] = max(shared_max[tid], input_ptr[gid]);
    shared_min[tid] = max(shared_min[tid], -input_ptr[gid]);
    gid += gridDim.x * blockDim.x;
  }
  __syncthreads();
  gid = (blockDim.x * blockIdx.x) + tid;
  // Shared-memory tree reduction, halving the active half each pass.
  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s && gid < elements) {
      shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
      shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
    }
    __syncthreads();
  }
  // One atomic per block folds this block's result into the global answer.
  if (tid == 0) {
    cuda::atomic::Max(max_ptr, shared_max[0]);
    cuda::atomic::Max(min_ptr, shared_min[0]);
  }
}
// Per-channel max/min reduction: each block handles one channel at a time
// (striding over channels by gridDim.x) and reduces that channel's
// contiguous panel of panel_size elements.  As in the per-layer variant,
// min_ptr stores the NEGATED minimum.  Requirements: dynamic shared memory
// of 2*blockDim.x*sizeof(T); blockDim.x a power of two; outputs pre-seeded
// with -FLT_MAX.
template<typename T>
__global__ void ReduceMaxMinPerChannel(const T *input_ptr, const int64_t elements,
                                       const int64_t num_channels, const int64_t panel_size,
                                       T *max_ptr, T *min_ptr) {
  extern __shared__ unsigned char shared_max_min_memory[];
  T *shared_max = reinterpret_cast<T *>(shared_max_min_memory);
  T *shared_min = shared_max + blockDim.x;
  int64_t cur_channel = blockIdx.x;
  int64_t tid = threadIdx.x;
  while (cur_channel < num_channels) {
    // Reset to the reduction identity for each newly adopted channel.
    shared_max[tid] = -FLT_MAX;
    shared_min[tid] = -FLT_MAX;
    int64_t index = (panel_size * cur_channel) + tid;
    int64_t end = panel_size * (cur_channel + 1);
    while (index < end && index < elements) {
      shared_max[tid] = max(shared_max[tid], input_ptr[index]);
      shared_min[tid] = max(shared_min[tid], -input_ptr[index]);
      index += blockDim.x;
    }
    __syncthreads();
    // Tree reduction over the block's shared slots.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
      if (tid < s) {
        shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
        shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
      }
      __syncthreads();
    }
    if (tid == 0) {
      cuda::atomic::Max(&max_ptr[cur_channel], shared_max[0]);
      cuda::atomic::Max(&min_ptr[cur_channel], shared_min[0]);
    }
    // __syncthreads();
    // NOTE(review): the missing barrier above looks safe because only tid 0
    // reads/writes slot 0 before the next iteration resets per-thread slots,
    // but it deserves a race-check (compute-sanitizer racecheck) to confirm.
    cur_channel += gridDim.x;
  }
}
// Seeds both output buffers with the reduction identity (-FLT_MAX): the
// reduce kernels take max() into max_ptr and max() of NEGATED values into
// min_ptr, so both sides start from the smallest sentinel.
template<typename T>
__global__ void InitMaxMin(const int64_t elements, T *max_ptr, T *min_ptr) {
  const int64_t stride = static_cast<int64_t>(gridDim.x) * blockDim.x;
  for (int64_t i = (int64_t)(blockDim.x * blockIdx.x) + threadIdx.x; i < elements; i += stride) {
    max_ptr[i] = -FLT_MAX;
    min_ptr[i] = -FLT_MAX;
  }
}
// Symmetric ("google") quantization parameters: scale = max|x| / (2^(b-1)-1),
// zero_point = 0.  min_ptr holds the negated minimum, so fabs() of either
// entry bounds |x| over the layer/channel.
template<typename T>
__global__ void CalScaleZeroPointSymmetric(const T *max_ptr, const T *min_ptr,
                                           const int64_t elements, const double quantization_bit,
                                           T *scale, T *zero_point) {
  const int64_t step = static_cast<int64_t>(gridDim.x) * blockDim.x;
  for (int64_t i = (int64_t)(blockDim.x * blockIdx.x) + threadIdx.x; i < elements; i += step) {
    const T weight_max = max(fabs(max_ptr[i]), fabs(min_ptr[i]));
    const T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
    scale[i] = weight_max / denominator;
    zero_point[i] = 0;
  }
}
// Affine ("google") quantization parameters: scale = (max-min)/(2^b - 1),
// zero_point = -min/scale.  min_ptr holds the negated minimum, hence the
// re-negation below.
template<typename T>
__global__ void CalScaleZeroPointAffine(const T *max_ptr, const T *min_ptr, const int64_t elements,
                                        const double quantization_bit, T *scale, T *zero_point) {
  const int64_t step = static_cast<int64_t>(gridDim.x) * blockDim.x;
  for (int64_t i = (int64_t)(blockDim.x * blockIdx.x) + threadIdx.x; i < elements; i += step) {
    const T denominator = static_cast<T>(pow(2.0, quantization_bit)) - 1;
    const T min = -min_ptr[i];  // undo the negated-min storage convention
    const T s = (max_ptr[i] - min) / denominator;
    scale[i] = s;
    zero_point[i] = -min / s;
  }
}
// Cambricon quantization parameters: the "scale" is a power-of-two exponent,
// floor(log2(max|x|)) - (b - 2); zero_point is always 0.
template<typename T>
__global__ void CalScaleZeroPointCambricon(const T *max_ptr, const T *min_ptr,
                                           const int64_t elements, const double quantization_bit,
                                           T *scale, T *zero_point) {
  const int64_t step = static_cast<int64_t>(gridDim.x) * blockDim.x;
  for (int64_t i = (int64_t)(blockDim.x * blockIdx.x) + threadIdx.x; i < elements; i += step) {
    const T weight_max = max(fabs(max_ptr[i]), fabs(min_ptr[i]));
    // T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
    scale[i] = floor(log2(weight_max)) - (quantization_bit - 2);
    zero_point[i] = 0;
  }
}
} // namespace
#define LAUNCH_CUDA_KERNEL(func, device_ctx_ptr, thread_num, shared_mem_size, ...) \
func<<<SMBlocksNum4ThreadsNum(thread_num), kCudaThreadsNumPerBlock, shared_mem_size, \
(device_ctx_ptr)->cuda_stream()>>>(__VA_ARGS__)
// GPU kernel for the min_max_observer op: computes quantization parameters
// (scale, zero_point) either per-tensor ("per-layer") or per-channel.
// tmp_buffer layout: `channel` running maxima followed by `channel` NEGATED
// minima (see the reduction kernels above).
template<typename T>
class GpuMinMaxObserverKernel final : public user_op::OpKernel {
 public:
  GpuMinMaxObserverKernel() = default;
  ~GpuMinMaxObserverKernel() = default;
 private:
  void Compute(user_op::KernelComputeContext *ctx) const override {
    const user_op::Tensor *in = ctx->Tensor4ArgNameAndIndex("in", 0);
    user_op::Tensor *scale = ctx->Tensor4ArgNameAndIndex("scale", 0);
    user_op::Tensor *zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0);
    user_op::Tensor *tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
    const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme");
    const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit");
    const bool per_layer_quantization = ctx->Attr<bool>("per_layer_quantization");
    const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula");
    const int64_t elements = in->shape().elem_cnt();
    // Number of output entries; 1 when quantizing the whole layer at once.
    const int64_t channel = scale->shape().At(0);
    const int64_t panel_size = elements / channel;
    // tmp_buffer = [channel maxima | channel negated minima].
    T *max_ptr = tmp_buffer->mut_dptr<T>();
    T *min_ptr = max_ptr + channel;
    // Seed both halves with the reduction identity (-FLT_MAX).
    LAUNCH_CUDA_KERNEL((InitMaxMin<T>), ctx->device_ctx(), channel, 0, channel, max_ptr, min_ptr);
    if (per_layer_quantization) {
      LAUNCH_CUDA_KERNEL((ReduceMaxMinPerLayer<T>), ctx->device_ctx(), elements,
                         kCudaThreadsNumPerBlock * 2 * sizeof(T), in->dptr<T>(), elements, max_ptr,
                         min_ptr);
    } else {  // per-channel quantization
      // NOTE(Liang Depeng): each block of threads will be responsible for
      // computing the max and min values of the whole channel.
      LAUNCH_CUDA_KERNEL((ReduceMaxMinPerChannel<T>), ctx->device_ctx(),
                         channel * kCudaThreadsNumPerBlock, kCudaThreadsNumPerBlock * 2 * sizeof(T),
                         in->dptr<T>(), elements, channel, panel_size, max_ptr, min_ptr);
    }
    // Convert the (max, -min) pairs into (scale, zero_point) per formula.
    if (quantization_formula == "google") {
      if (quantization_scheme == "symmetric") {
        LAUNCH_CUDA_KERNEL((CalScaleZeroPointSymmetric<T>), ctx->device_ctx(), channel, 0, max_ptr,
                           min_ptr, channel, static_cast<double>(quantization_bit),
                           scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
      } else {  // quantization_scheme == "affine"
        LAUNCH_CUDA_KERNEL((CalScaleZeroPointAffine<T>), ctx->device_ctx(), channel, 0, max_ptr,
                           min_ptr, channel, static_cast<double>(quantization_bit),
                           scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
      }
    } else if (quantization_formula == "cambricon") {
      LAUNCH_CUDA_KERNEL((CalScaleZeroPointCambricon<T>), ctx->device_ctx(), channel, 0, max_ptr,
                         min_ptr, channel, static_cast<double>(quantization_bit),
                         scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
    } else {
      UNIMPLEMENTED();
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MIN_MAX_OBSERVER_KERNEL(dtype) \
REGISTER_USER_KERNEL("min_max_observer") \
.SetCreateFn<GpuMinMaxObserverKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext *ctx) -> size_t { \
size_t tmp_buffer_size = 1; \
if (ctx->Attr<bool>("per_layer_quantization") == false) { \
const Shape *in_shape = ctx->Shape4ArgNameAndIndex("in", 0); \
tmp_buffer_size = in_shape->At(0); \
} \
return 2 * tmp_buffer_size * sizeof(dtype); \
})
REGISTER_MIN_MAX_OBSERVER_KERNEL(float);
REGISTER_MIN_MAX_OBSERVER_KERNEL(double);
} // namespace oneflow
|
b44c331e55cb2814959bbda4e5c98efcc1a95c60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "RayTracer.h"
// Aborts the process when a HIP/CUDA API call reports failure.  Invoked via
// the checkCudaErrors macro so the call-site expression, file and line are
// available for the diagnostic.
void check_cuda(hipError_t result, char const *const func, const char *const file, int const line){
    if(!result) return;
    std::cerr << "CUDA Error = " << static_cast<unsigned int>(result)
              << " at " << file << ":" << line << " '" << func << "' \n";
    // Reset the device so buffered trace/profiling data is flushed before exit.
    hipDeviceReset();
    exit(99);
}
//This function had to be changed alot to reduce the depth of recursion -- otherwise the stack on the graphics card would blow up
// Traces a ray through the scene iteratively (recursion would exhaust the
// device stack) and returns its accumulated color.  Each scatter multiplies
// the running attenuation by the material's attenuation; absorbed rays
// return black, escaping rays return the sky gradient times the attenuation
// gathered so far.
// Fix: honor the max_bounces parameter — it was previously accepted but
// ignored in favor of a hard-coded bounce limit of 50 (the only caller
// passes 50, so existing behavior is unchanged).
__device__ vec3 color(const Ray& r, hitable** world, int max_bounces, hiprandState_t *local_rand_state){
    Ray current_ray = r;
    // Attenuation accumulates multiplicatively; it darkens with every bounce.
    vec3 current_attenuation = vec3(1.0,1.0,1.0);
    for(int bounce = 0; bounce < max_bounces; bounce++){
        HitRecord rec;
        if ((*world)->hit(current_ray, 0.001f, FLT_MAX, rec)){
            Ray scattered;
            vec3 attenuation;
            float x = hiprand_uniform(local_rand_state);
            if (rec.mat_p->scatter(current_ray, rec, attenuation, scattered,local_rand_state,x)) {
                current_attenuation = current_attenuation*attenuation;
                current_ray = scattered;
            }
            else{
                // Ray absorbed by the material.
                return vec3(0.0,0.0,0.0);
            }
        }
        else{
            // Missed everything: blend white->blue on ray direction (the sky).
            vec3 dir = unitize(current_ray.direction());
            float t = 0.5f*(dir.y() + 1.0f);
            vec3 c = (1.0f - t)*vec3(1., 1., 1.) + t * vec3(.5, .7, 1.);
            return current_attenuation*c;
        }
    }
    // Exceeded the bounce budget without terminating: treat as absorbed.
    return vec3(0.0,0.0,0.0);
}
// Seeds the single RNG state used for world construction; exactly one
// thread in the grid performs the initialization.
__global__ void rand_init(hiprandState_t *rand_state) {
    const bool first_thread = (threadIdx.x == 0) && (blockIdx.x == 0);
    if (first_thread) {
        hiprand_init(1984, 0, 0, rand_state);
    }
}
// Gives every pixel its own RNG state: same seed, distinct sequence number,
// zero offset — so each pixel draws an independent random stream.
__global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state) {
    const int px = threadIdx.x + blockIdx.x * blockDim.x;
    const int py = threadIdx.y + blockIdx.y * blockDim.y;
    if (px >= max_x || py >= max_y) return;
    const int pixel_index = py * max_x + px;
    hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
// Renders one pixel per thread: shoots ns jittered rays through the pixel,
// averages the traced colors, applies gamma-2 correction (sqrt per channel),
// and stores the result in the frame buffer.
__global__ void render(vec3* frame_buffer, int max_x, int max_y, int ns, Camera **cam, hitable **world, hiprandState_t *rand_state){
    //START rendering the image on the GPU
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    //if I am outside the image, stop.
    if((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j*max_x + i;
    //get the random state of this pixel
    hiprandState_t local_rand_state = rand_state[pixel_index];
    vec3 c(0,0,0);
    for(int s=0; s < ns; s++) {
        // Jitter the sample position inside the pixel for antialiasing.
        float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x);
        float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y);
        //when j = ny and i = 0 lower_left + u*horizontal + v*verticle = upper_right
        //First ray is shot at upper right corner going across then down.
        Ray ray = (*cam)->GetRay(u, v, &local_rand_state);
        //vec3 p = ray[2.0f];
        c = c + color(ray, world, 50, &local_rand_state);
    }
    // Persist the advanced RNG state so a later launch continues the stream.
    rand_state[pixel_index] = local_rand_state;
    c = c / float(ns);
    // Gamma correction with gamma = 2: store sqrt of each linear channel.
    c = vec3(sqrt(c[0]), sqrt(c[1]), sqrt(c[2]));
    //vec3 icolor(c[0] * 255.99, c[1] * 255.99, c[2] * 255.99);
    frame_buffer[pixel_index] = c;
    //DONE rendering the image on the gpu
}
#define RND (hiprand_uniform(&local_rand_state))
// Builds the scene on the device: a large ground sphere plus a 6x6 grid of
// randomly-placed small objects (Sphubes here; Sphere/Cube variants are left
// commented out), and the camera.  Runs on a single thread (<<<1,1>>>).
// NOTE: the RND call order determines the scene — do not reorder statements.
// The object count must match num_hitables (6*6 + 1) set by the host.
__global__ void create_world(hitable **d_list, hitable **d_world, Camera **d_camera, int nx, int ny, hiprandState_t *rand_state, int num_hitables) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        //this kernel function will construct the objects in my scene on the GPU (several Spheres and a camera)
        hiprandState_t local_rand_state = *rand_state;
        // Ground: a huge lambertian sphere the scene rests on.
        d_list[0] = new Sphere(vec3(0,-1000.0,-1), 1000,
                               new Lambertian(vec3(0.5, 0.5, 0.5)));
        int i = 1;
        float x = RND;
        for(int a = -3; a < 3; a++) {
            for(int b = -3; b < 3; b++) {
                // choose_mat picks the material: <0.8 diffuse, <0.95 metal, else glass.
                float choose_mat = RND;
                vec3 center(a+RND,0.2,b+RND);
                vec3 min(center.x() - .2,center.y() - .2,center.z() - .2);
                vec3 max(center.x() + .2,center.y() + .2,center.z() + .2);
                if(choose_mat < 0.8f) {
                    float r1 = RND;
                    //Cube *s = new Cube(min, max, new Lambertian(vec3(RND*RND, RND*RND, RND*RND)));
                    Sphube *s = new Sphube(center, 0.2f, .85f, new Lambertian(vec3(RND*RND, RND*RND, RND*RND)));
                    //Sphere *s = new Sphere(center,0.2f,new Lambertian(vec3(RND*RND, RND*RND, RND*RND)));
                    d_list[i++] = s;
                }
                else if(choose_mat < 0.95f) {
                    //Cube *s = new Cube(min, max, new Metal(vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)),0.5f*RND));
                    Sphube *s = new Sphube(center, 0.2f, .85f, new Metal(vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)),0.5f*RND));
                    //Sphere *s = new Sphere(center,0.2f,new Metal(vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)),0.5f*RND));
                    d_list[i++] = s;
                }
                else {
                    //Cube *s = new Cube(min, max, new Dielectric(1.5));
                    Sphube *s = new Sphube(center, 0.2f, .85f, new Dielectric(1.5));
                    //Sphere *s = new Sphere(center,0.2f,new Dielectric(1.5));
                    d_list[i++] = s;
                }
            }
        }
        //d_list[i++] = new Cube(vec3(0, -2,-1) , vec3(1, 2,1), new Dielectric(1.5));
        //d_list[i++] = new Sphere(vec3(-4,1,0), 1.0f,new Lambertian(vec3(0.1, 0.2, 0.5)));
        //d_list[i++] = new Sphere(vec3(0,1,0), 1.0f, new Metal(vec3(0.8, 0.6, 0.2), 0.0));
        //d_list[i++] = new Sphere(vec3(4,1,0), 1.0f, new Dielectric(1.5));
        //d_list[i++] = new Sphube(vec3(-4, 1, 0), 1.0f, 1.0f, new Metal(vec3(0.7, 0.6, 0.5),0.0f));
        //d_list[i++] = new Sphube(vec3(0, 1, 0) , 1.0f, 0.85f, new Dielectric(1.5));
        //d_list[i++] = new Sphube(vec3(4, 1, 0) , 2.0f, 0.85f, new Dielectric(1.5));
        //d_list[i++] = new Cube(vec3(3.8 -.8, 1-.8, 0-.8),vec3(3.8+.8, 1+.8, 0+.8), new Lambertian(vec3(0.4, 0.2, 0.1)));
        //d_list[i++] = new Cube(vec3(-4-1,1-1,0-1), vec3(-4+1, 1 +1, 0+1),new Metal(vec3(0.7, 0.6, 0.5),0.0f));
        // d_list[0] = new Sphere(vec3(0,0,-1), 0.5,
        //                        new Lambertian(vec3(0.1, 0.2, 0.5)));
        // d_list[1] = new Sphere(vec3(0,-100.5,-1), 100,
        //                        new Lambertian(vec3(0.8, 0.8, 0.0)));
        // d_list[2] = new Sphere(vec3(1,0,-1), 0.5,
        //                        new Metal(vec3(0.8, 0.6, 0.2), 0.0));
        // d_list[3] = new Sphere(vec3(-1,0,-1), 0.5,
        //                        new Dielectric(1.5));
        // d_list[4] = new Sphere(vec3(-1,0,-1), -0.45,
        //                        new Dielectric(1.5));
        //*rand_state = local_rand_state;
        *d_world = new hitable_list(d_list, num_hitables);
        // Camera placed at (13,2,3) looking at the origin with a tiny aperture.
        vec3 lookfrom(13,2,3);
        vec3 lookat(0,0,0);
        float dist_to_focus = (lookfrom - vec3(0,0,0)).length(); //(lookfrom-lookat).length();
        float aperture = 0.01;
        *d_camera = new Camera(lookfrom,lookat,vec3(0,1,0),20.0,float(nx)/float(ny),aperture,10.0);
    }
}
// Deletes the device-side scene objects, the world container and the
// camera.  Launched with <<<1,1>>>, so no thread guard is needed.
// NOTE(review): every element is cast to Sphere* to reach its material, but
// create_world stores Sphube objects — this relies on all hitable subclasses
// placing `mat` identically; confirm, or expose the material via the base.
__global__ void free_world(hitable **d_list, hitable **d_world, Camera **d_camera, int num_hitables) {
    for(int i=0; i < num_hitables; i++) {
        delete ((Sphere *)d_list[i])->mat;
        delete d_list[i];
    }
    delete *d_world;
    delete *d_camera;
}
// Sets up all device state for an (x × y)-pixel render with s samples per
// pixel and (x_block_size × y_block_size) thread blocks: the managed frame
// buffer, per-pixel RNG states, the scene object list, the world container
// and the camera — then builds the scene on the device.
RayTracer::RayTracer(int x, int y, int s, int x_block_size, int y_block_size){
    nx = x;
    ny = y;
    ns = s;
    tx = x_block_size;
    ty = y_block_size;
    num_pixels = nx*ny;
    frame_buffer_size = num_pixels*sizeof(vec3);
    //allocate frame buffer on device
    // Managed memory: write_image() reads it directly from the host.
    checkCudaErrors(hipMallocManaged((void**)&frame_buffer,frame_buffer_size));
    //allocate random state on device
    // d_rand_state: one state per pixel; d_rand_state2: single state for world setup.
    checkCudaErrors(hipMalloc((void **)&d_rand_state, num_pixels*sizeof(hiprandState_t)));
    checkCudaErrors(hipMalloc((void **)&d_rand_state2, 1*sizeof(hiprandState_t)));
    // Must match what create_world actually constructs: 6x6 grid + ground.
    num_hitables = 6*6 + 1; //4;//22*22+1+3;
    //22*22+1+3;
    //initialize random states on device
    hipLaunchKernelGGL(( rand_init), dim3(1),dim3(1), 0, 0, d_rand_state2);
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
    //allocate hitables on device
    checkCudaErrors(hipMalloc((void **)&d_list, num_hitables*sizeof(hitable *)));
    //allocate world on device
    checkCudaErrors(hipMalloc((void **)&d_world, sizeof(hitable *)));
    //allocate camera on device
    checkCudaErrors(hipMalloc((void **)&d_camera, sizeof(Camera *)));
    //initialize the world on the device
    hipLaunchKernelGGL(( create_world), dim3(1),dim3(1), 0, 0, d_list, d_world, d_camera, nx, ny, d_rand_state2,num_hitables);
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
}
// Writes the rendered frame buffer to "image.ppm" as a plain-text PPM (P3).
// Fix: the file is now truncated instead of opened with ios_base::app —
// appending concatenated a complete second PPM document onto the first on
// every rerun, producing a file image viewers reject.
__host__ void RayTracer::write_image(){
    std::string filepath = "image.ppm";
    std::ofstream of;
    // std::ofstream implies ios_base::out; trunc discards any previous image.
    of.open(filepath.c_str(), std::ios_base::trunc);
    of << "P3\n" << nx << " " << ny << "\n255\n";
    // PPM rows run top-to-bottom while the buffer is bottom-to-top, so walk
    // the rows in reverse.
    for(int j = ny-1; j>= 0; j--){
        for(int i = 0; i < nx; i++){
            unsigned int pixel_index = j*nx + i;
            vec3 c = frame_buffer[pixel_index];
            // Map [0,1) channels to 0..255 integers.
            int ir = int(255.99f*c.r());
            int ig = int(255.99f*c.g());
            int ib = int(255.99f*c.b());
            of << ir << " " << ig << " " << ib << "\n";
        }
    }
    of.close();
    // clean up
    checkCudaErrors(hipDeviceSynchronize());
}
// Releases all device resources: deletes the device-side scene objects
// first (free_world), then frees every allocation made in the constructor.
// Fix: d_rand_state2 is allocated in the constructor but was never freed
// here — a device memory leak.
__host__ RayTracer::~RayTracer(){
    hipLaunchKernelGGL(( free_world), dim3(1),dim3(1), 0, 0, d_list,d_world,d_camera,num_hitables);
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipFree(d_camera));
    checkCudaErrors(hipFree(d_world));
    checkCudaErrors(hipFree(d_list));
    checkCudaErrors(hipFree(d_rand_state));
    checkCudaErrors(hipFree(d_rand_state2));  // was leaked before this fix
    checkCudaErrors(hipFree(frame_buffer));
    hipDeviceReset();
}
// Launches the render kernel with grid n and block m, then blocks until it
// finishes.  Assumes render_init_kernel has already seeded the per-pixel
// RNG states.  (The commented-out event code was ad-hoc timing.)
__host__ void RayTracer::render_kernel(dim3 n, dim3 m){
    //hipEvent_t start1, stop1;
    //float t_elapsed;
    //checkCudaErrors(hipEventCreate(&start1));
    //checkCudaErrors(hipEventCreate(&stop1));
    //checkCudaErrors(hipEventRecord(start1));
    hipLaunchKernelGGL(( render), dim3(n), dim3(m), 0, 0, frame_buffer, nx, ny, ns, d_camera, d_world, d_rand_state);
    //checkCudaErrors(hipEventRecord(stop1));
    //checkCudaErrors(hipGetLastError());
    // Catch launch-config errors, then execution errors at the sync.
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
    //checkCudaErrors(hipEventSynchronize(stop1));
    //checkCudaErrors(hipEventSynchronize(stop1));
    //checkCudaErrors(hipEventElapsedTime(&t_elapsed,start1,stop1));
    //checkCudaErrors(hipEventDestroy(start1));
    //checkCudaErrors(hipEventDestroy(stop1));
    //elapsedTime += t_elapsed;
}
__host__ void RayTracer::render_init_kernel(dim3 n, dim3 m){
hipEvent_t start, stop;
float t_elapsed2;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( render_init), dim3(n), dim3(m), 0, 0, nx, ny, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&t_elapsed2,start,stop));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
elapsedTime += t_elapsed2;
}
__host__ void RayTracer::render_image(){
vec3 lookfrom(13, 2, 3);
vec3 lookat(0, 0, 0);
double dist_to_focus = 10.0;
double aperture = 0.1;
Camera cam(lookfrom, lookat, vec3(0, 1, 0), 30.0f, float(nx) / float(ny), aperture, dist_to_focus);
dim3 blocks(nx/tx+1,ny/ty +1);
dim3 threads(tx,ty);
//start and stop used to measure performance
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
//RENDER THE SCENE HERE
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( render_init), dim3(blocks), dim3(threads), 0, 0, nx, ny, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, frame_buffer, nx, ny, ns, d_camera, d_world, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipEventElapsedTime(&elapsedTime,start,stop));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
//DONE RENDERING THE SCENE HERE
elapsedTime /= 10000; //convert ms to s.
printf(" Time Elapsed: %10.2f\n",elapsedTime);
printf(" Pixels/Second: %10.2f\n",num_pixels/elapsedTime);
printf(" Rays/Second: %10.2f\n",num_pixels*ns/elapsedTime);
//checkCudaErrors(hipGetLastError());
//checkCudaErrors(hipDeviceSynchronize());
}
| b44c331e55cb2814959bbda4e5c98efcc1a95c60.cu | #include "RayTracer.h"
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line){
if(result){
std::cerr << "CUDA Error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n";
//Make sure we call CUDA Device Reset before exiting
cudaDeviceReset();
exit(99);
}
}
//This function had to be changed alot to reduce the depth of recursion -- otherwise the stack on the graphics card would blow up
__device__ vec3 color(const Ray& r, hitable** world, int max_bounces, curandState *local_rand_state){
Ray current_ray = r;
//attenuation causes the color to get closer to black every time there is a bounce
vec3 current_attenuation = vec3(1.0,1.0,1.0);
for(int i = 0; i<50; i++){
HitRecord rec;
if ((*world)->hit(current_ray, 0.001f, FLT_MAX, rec)){
Ray scattered;
vec3 attenuation;
float x = curand_uniform(local_rand_state);
if (rec.mat_p->scatter(current_ray, rec, attenuation, scattered,local_rand_state,x)) {
current_attenuation = current_attenuation*attenuation;
current_ray = scattered;
}
else{
return vec3(0.0,0.0,0.0);
}
}
else{
vec3 dir = unitize(current_ray.direction());
float t = 0.5f*(dir.y() + 1.0f);
vec3 c = (1.0f - t)*vec3(1., 1., 1.) + t * vec3(.5, .7, 1.);
return current_attenuation*c;
}
}
//exceeded max number of bounces
return vec3(0.0,0.0,0.0);
}
__global__ void rand_init(curandState *rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
curand_init(1984, 0, 0, rand_state);
}
}
__global__ void render_init(int max_x, int max_y, curandState *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
//Each thread gets same seed, a different sequence number, no offset
//each pixel is associated with a different radom state.
curand_init(1984, pixel_index,0, &rand_state[pixel_index]);
}
__global__ void render(vec3* frame_buffer, int max_x, int max_y, int ns, Camera **cam, hitable **world, curandState *rand_state){
//START rendering the image on the GPU
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
//if I am outside the image, stop.
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
//get the random state of this pixel
curandState local_rand_state = rand_state[pixel_index];
vec3 c(0,0,0);
for(int s=0; s < ns; s++) {
float u = float(i + curand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + curand_uniform(&local_rand_state)) / float(max_y);
//when j = ny and i = 0 lower_left + u*horizontal + v*verticle = upper_right
//First ray is shot at upper right corner going across then down.
Ray ray = (*cam)->GetRay(u, v, &local_rand_state);
//vec3 p = ray[2.0f];
c = c + color(ray, world, 50, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
c = c / float(ns);
c = vec3(sqrt(c[0]), sqrt(c[1]), sqrt(c[2]));
//vec3 icolor(c[0] * 255.99, c[1] * 255.99, c[2] * 255.99);
frame_buffer[pixel_index] = c;
//DONE rendering the image on the gpu
}
#define RND (curand_uniform(&local_rand_state))
__global__ void create_world(hitable **d_list, hitable **d_world, Camera **d_camera, int nx, int ny, curandState *rand_state, int num_hitables) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
//this kernel function will construct the objects in my scene on the GPU (several Spheres and a camera)
curandState local_rand_state = *rand_state;
d_list[0] = new Sphere(vec3(0,-1000.0,-1), 1000,
new Lambertian(vec3(0.5, 0.5, 0.5)));
int i = 1;
float x = RND;
for(int a = -3; a < 3; a++) {
for(int b = -3; b < 3; b++) {
float choose_mat = RND;
vec3 center(a+RND,0.2,b+RND);
vec3 min(center.x() - .2,center.y() - .2,center.z() - .2);
vec3 max(center.x() + .2,center.y() + .2,center.z() + .2);
if(choose_mat < 0.8f) {
float r1 = RND;
//Cube *s = new Cube(min, max, new Lambertian(vec3(RND*RND, RND*RND, RND*RND)));
Sphube *s = new Sphube(center, 0.2f, .85f, new Lambertian(vec3(RND*RND, RND*RND, RND*RND)));
//Sphere *s = new Sphere(center,0.2f,new Lambertian(vec3(RND*RND, RND*RND, RND*RND)));
d_list[i++] = s;
}
else if(choose_mat < 0.95f) {
//Cube *s = new Cube(min, max, new Metal(vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)),0.5f*RND));
Sphube *s = new Sphube(center, 0.2f, .85f, new Metal(vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)),0.5f*RND));
//Sphere *s = new Sphere(center,0.2f,new Metal(vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)),0.5f*RND));
d_list[i++] = s;
}
else {
//Cube *s = new Cube(min, max, new Dielectric(1.5));
Sphube *s = new Sphube(center, 0.2f, .85f, new Dielectric(1.5));
//Sphere *s = new Sphere(center,0.2f,new Dielectric(1.5));
d_list[i++] = s;
}
}
}
//d_list[i++] = new Cube(vec3(0, -2,-1) , vec3(1, 2,1), new Dielectric(1.5));
//d_list[i++] = new Sphere(vec3(-4,1,0), 1.0f,new Lambertian(vec3(0.1, 0.2, 0.5)));
//d_list[i++] = new Sphere(vec3(0,1,0), 1.0f, new Metal(vec3(0.8, 0.6, 0.2), 0.0));
//d_list[i++] = new Sphere(vec3(4,1,0), 1.0f, new Dielectric(1.5));
//d_list[i++] = new Sphube(vec3(-4, 1, 0), 1.0f, 1.0f, new Metal(vec3(0.7, 0.6, 0.5),0.0f));
//d_list[i++] = new Sphube(vec3(0, 1, 0) , 1.0f, 0.85f, new Dielectric(1.5));
//d_list[i++] = new Sphube(vec3(4, 1, 0) , 2.0f, 0.85f, new Dielectric(1.5));
//d_list[i++] = new Cube(vec3(3.8 -.8, 1-.8, 0-.8),vec3(3.8+.8, 1+.8, 0+.8), new Lambertian(vec3(0.4, 0.2, 0.1)));
//d_list[i++] = new Cube(vec3(-4-1,1-1,0-1), vec3(-4+1, 1 +1, 0+1),new Metal(vec3(0.7, 0.6, 0.5),0.0f));
// d_list[0] = new Sphere(vec3(0,0,-1), 0.5,
// new Lambertian(vec3(0.1, 0.2, 0.5)));
// d_list[1] = new Sphere(vec3(0,-100.5,-1), 100,
// new Lambertian(vec3(0.8, 0.8, 0.0)));
// d_list[2] = new Sphere(vec3(1,0,-1), 0.5,
// new Metal(vec3(0.8, 0.6, 0.2), 0.0));
// d_list[3] = new Sphere(vec3(-1,0,-1), 0.5,
// new Dielectric(1.5));
// d_list[4] = new Sphere(vec3(-1,0,-1), -0.45,
// new Dielectric(1.5));
//*rand_state = local_rand_state;
*d_world = new hitable_list(d_list, num_hitables);
vec3 lookfrom(13,2,3);
vec3 lookat(0,0,0);
float dist_to_focus = (lookfrom - vec3(0,0,0)).length(); //(lookfrom-lookat).length();
float aperture = 0.01;
*d_camera = new Camera(lookfrom,lookat,vec3(0,1,0),20.0,float(nx)/float(ny),aperture,10.0);
}
}
__global__ void free_world(hitable **d_list, hitable **d_world, Camera **d_camera, int num_hitables) {
for(int i=0; i < num_hitables; i++) {
delete ((Sphere *)d_list[i])->mat;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
RayTracer::RayTracer(int x, int y, int s, int x_block_size, int y_block_size){
nx = x;
ny = y;
ns = s;
tx = x_block_size;
ty = y_block_size;
num_pixels = nx*ny;
frame_buffer_size = num_pixels*sizeof(vec3);
//allocate frame buffer on device
checkCudaErrors(cudaMallocManaged((void**)&frame_buffer,frame_buffer_size));
//allocate random state on device
checkCudaErrors(cudaMalloc((void **)&d_rand_state, num_pixels*sizeof(curandState)));
checkCudaErrors(cudaMalloc((void **)&d_rand_state2, 1*sizeof(curandState)));
num_hitables = 6*6 + 1; //4;//22*22+1+3;
//22*22+1+3;
//initialize random states on device
rand_init<<<1,1>>>(d_rand_state2);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//allocate hitables on device
checkCudaErrors(cudaMalloc((void **)&d_list, num_hitables*sizeof(hitable *)));
//allocate world on device
checkCudaErrors(cudaMalloc((void **)&d_world, sizeof(hitable *)));
//allocate camera on device
checkCudaErrors(cudaMalloc((void **)&d_camera, sizeof(Camera *)));
//initialize the world on the device
create_world<<<1,1>>>(d_list, d_world, d_camera, nx, ny, d_rand_state2,num_hitables);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
}
__host__ void RayTracer::write_image(){
std::string filepath = "image.ppm";
std::ofstream of;
of.open(filepath.c_str(), std::ios_base::app);
of << "P3\n" << nx << " " << ny << "\n255\n";
for(int j = ny-1; j>= 0; j--){
for(int i = 0; i < nx; i++){
unsigned int pixel_index = j*nx + i;
vec3 c = frame_buffer[pixel_index];
int ir = int(255.99f*c.r());
int ig = int(255.99f*c.g());
int ib = int(255.99f*c.b());
of << ir << " " << ig << " " << ib << "\n";
}
}
of.close();
// clean up
checkCudaErrors(cudaDeviceSynchronize());
}
__host__ RayTracer::~RayTracer(){
free_world<<<1,1>>>(d_list,d_world,d_camera,num_hitables);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_camera));
checkCudaErrors(cudaFree(d_world));
checkCudaErrors(cudaFree(d_list));
checkCudaErrors(cudaFree(d_rand_state));
checkCudaErrors(cudaFree(frame_buffer));
cudaDeviceReset();
}
__host__ void RayTracer::render_kernel(dim3 n, dim3 m){
//cudaEvent_t start1, stop1;
//float t_elapsed;
//checkCudaErrors(cudaEventCreate(&start1));
//checkCudaErrors(cudaEventCreate(&stop1));
//checkCudaErrors(cudaEventRecord(start1));
render<<<n, m>>>(frame_buffer, nx, ny, ns, d_camera, d_world, d_rand_state);
//checkCudaErrors(cudaEventRecord(stop1));
//checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//checkCudaErrors(cudaEventSynchronize(stop1));
//checkCudaErrors(cudaEventSynchronize(stop1));
//checkCudaErrors(cudaEventElapsedTime(&t_elapsed,start1,stop1));
//checkCudaErrors(cudaEventDestroy(start1));
//checkCudaErrors(cudaEventDestroy(stop1));
//elapsedTime += t_elapsed;
}
__host__ void RayTracer::render_init_kernel(dim3 n, dim3 m){
cudaEvent_t start, stop;
float t_elapsed2;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start));
render_init<<<n, m>>>(nx, ny, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&t_elapsed2,start,stop));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
elapsedTime += t_elapsed2;
}
__host__ void RayTracer::render_image(){
vec3 lookfrom(13, 2, 3);
vec3 lookat(0, 0, 0);
double dist_to_focus = 10.0;
double aperture = 0.1;
Camera cam(lookfrom, lookat, vec3(0, 1, 0), 30.0f, float(nx) / float(ny), aperture, dist_to_focus);
dim3 blocks(nx/tx+1,ny/ty +1);
dim3 threads(tx,ty);
//start and stop used to measure performance
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//RENDER THE SCENE HERE
checkCudaErrors(cudaEventRecord(start));
render_init<<<blocks, threads>>>(nx, ny, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
render<<<blocks, threads>>>(frame_buffer, nx, ny, ns, d_camera, d_world, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaEventElapsedTime(&elapsedTime,start,stop));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
//DONE RENDERING THE SCENE HERE
elapsedTime /= 10000; //convert ms to s.
printf(" Time Elapsed: %10.2f\n",elapsedTime);
printf(" Pixels/Second: %10.2f\n",num_pixels/elapsedTime);
printf(" Rays/Second: %10.2f\n",num_pixels*ns/elapsedTime);
//checkCudaErrors(cudaGetLastError());
//checkCudaErrors(cudaDeviceSynchronize());
}
|
f9dc1ccde7cfed35284d817222bfaeb625011f03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| f9dc1ccde7cfed35284d817222bfaeb625011f03.cu | #include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
add<<<1, 1>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
d7fb8df7274e227a34abaf97317bc1bab6c6ee6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "add.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define RECT_SIZE 32
__device__ bool isPixelLighterThanCentre(int y, int x, unsigned char centerVal, unsigned char* inImg, int width, int height) {
if (x < 0 || y < 0 || x > width - 1 || y > height - 1) {
return false;
}
int idx = x + y * gridDim.x;
return inImg[idx] >= centerVal;
}
__device__ unsigned char getLBPVal(int x, int y, unsigned char* inImg, int width, int height) {
unsigned char result = 0;
int idx = x + y * gridDim.x;
unsigned char center = inImg[idx];
result |= ((unsigned char) isPixelLighterThanCentre(y-1, x-1, center, inImg, width, height)) << 7;
result |= ((unsigned char) isPixelLighterThanCentre(y-1, x, center, inImg, width, height)) << 6;
result |= ((unsigned char) isPixelLighterThanCentre(y-1, x+1, center, inImg, width, height)) << 5;
result |= ((unsigned char) isPixelLighterThanCentre(y, x+1, center, inImg, width, height)) << 4;
result |= ((unsigned char) isPixelLighterThanCentre(y+1, x+1, center, inImg, width, height)) << 3;
result |= ((unsigned char) isPixelLighterThanCentre(y+1, x, center, inImg, width, height)) << 2;
result |= ((unsigned char) isPixelLighterThanCentre(y+1, x-1, center, inImg, width, height)) << 1;
result |= ((unsigned char) isPixelLighterThanCentre(y, x-1, center, inImg, width, height)) << 0;
return result;
}
__global__ void lbpCUDA(unsigned char* inImg, unsigned char* outImg, int width, int height) {
int x = blockIdx.x;
int y = blockIdx.y;
int idx = x + y * gridDim.x;
outImg[idx] = getLBPVal(x, y, inImg, width, height);
}
__device__ bool getRectCoords(int &xFrom, int&yFrom, int startX, int startY, int width, int height) {
int areaX, areaY;
areaX = startX + blockIdx.x;
areaY = startY;
while (areaX > width - 256 * 3) {
areaY++;
areaX -= 256 * 3;
if (areaY > height - 256 * 3) {
xFrom = -1;
yFrom = -1;
return false;
}
}
switch (threadIdx.x) {
case 0:
xFrom = areaX;
yFrom = areaY;
break;
case 1:
xFrom = areaX + 256;
yFrom = areaY;
break;
case 2:
xFrom = areaX + 2 * 256;
yFrom = areaY;
break;
case 3:
xFrom = areaX;
yFrom = areaY + 256;
break;
case 4:
xFrom = areaX + 256;
yFrom = areaY + 256;
break;
case 5:
xFrom = areaX + 2 * 256;
yFrom = areaY + 256;
break;
case 6:
xFrom = areaX;
yFrom = areaY + 2 * 256;
break;
case 7:
xFrom = areaX + 256;
yFrom = areaY + 2 * 256;
break;
case 8:
xFrom = areaX + 2 * 256;
yFrom = areaY + 2 * 256;
break;
}
return true;
}
__global__ void calculateHistograms(unsigned char* lbpImg, int* histogram, int startX, int startY, int width, int height) {
int histStart = 9 * 256 * blockIdx.x + 256 * threadIdx.x;
int xFrom, yFrom;
getRectCoords(xFrom, yFrom, startX, startY, width, height);
for (int i = 0; i < 256; i++) {
histogram[histStart + i] = 0;
}
for (int x = 0; x < RECT_SIZE; x++) {
for (int y = 0; y < RECT_SIZE; y++) {
int value = lbpImg[x+y*width];
histogram[value + histStart]++;
}
}
}
void convertImageToLBP(unsigned char* imputImg, int width, int height, int* histograms) {
unsigned char* Dev_InImg = nullptr;
unsigned char* Dev_OutImg = nullptr;
hipMalloc((void**)&Dev_InImg, height*width);
hipMalloc((void**)&Dev_OutImg, height*width);
hipMemcpy(Dev_InImg, imputImg, width * height, hipMemcpyHostToDevice);
dim3 gridImg(width, height);
hipLaunchKernelGGL(( lbpCUDA), dim3(gridImg), dim3(1), 0, 0, Dev_InImg, Dev_OutImg, width, height);
hipMemcpy(imputImg, Dev_OutImg, width * height, hipMemcpyDeviceToHost);
hipFree(Dev_InImg);
int histSize = 9*256;
int histCount = 1;
int* Dev_histograms = nullptr;
hipMalloc((void**)&Dev_histograms, histSize * histCount * sizeof(int));
dim3 gridHist(histCount, 1);
dim3 blockHist(9, 1,1);
hipLaunchKernelGGL(( calculateHistograms), dim3(gridHist), dim3(blockHist), 0, 0, Dev_OutImg, Dev_histograms, 0, 0, width, height);
hipMemcpy(histograms, Dev_histograms, histSize * histCount * sizeof(int), hipMemcpyDeviceToHost);
hipFree(Dev_histograms);
hipFree(Dev_OutImg);
}
| d7fb8df7274e227a34abaf97317bc1bab6c6ee6e.cu | #include "add.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define RECT_SIZE 32
__device__ bool isPixelLighterThanCentre(int y, int x, unsigned char centerVal, unsigned char* inImg, int width, int height) {
if (x < 0 || y < 0 || x > width - 1 || y > height - 1) {
return false;
}
int idx = x + y * gridDim.x;
return inImg[idx] >= centerVal;
}
__device__ unsigned char getLBPVal(int x, int y, unsigned char* inImg, int width, int height) {
unsigned char result = 0;
int idx = x + y * gridDim.x;
unsigned char center = inImg[idx];
result |= ((unsigned char) isPixelLighterThanCentre(y-1, x-1, center, inImg, width, height)) << 7;
result |= ((unsigned char) isPixelLighterThanCentre(y-1, x, center, inImg, width, height)) << 6;
result |= ((unsigned char) isPixelLighterThanCentre(y-1, x+1, center, inImg, width, height)) << 5;
result |= ((unsigned char) isPixelLighterThanCentre(y, x+1, center, inImg, width, height)) << 4;
result |= ((unsigned char) isPixelLighterThanCentre(y+1, x+1, center, inImg, width, height)) << 3;
result |= ((unsigned char) isPixelLighterThanCentre(y+1, x, center, inImg, width, height)) << 2;
result |= ((unsigned char) isPixelLighterThanCentre(y+1, x-1, center, inImg, width, height)) << 1;
result |= ((unsigned char) isPixelLighterThanCentre(y, x-1, center, inImg, width, height)) << 0;
return result;
}
__global__ void lbpCUDA(unsigned char* inImg, unsigned char* outImg, int width, int height) {
int x = blockIdx.x;
int y = blockIdx.y;
int idx = x + y * gridDim.x;
outImg[idx] = getLBPVal(x, y, inImg, width, height);
}
__device__ bool getRectCoords(int &xFrom, int&yFrom, int startX, int startY, int width, int height) {
int areaX, areaY;
areaX = startX + blockIdx.x;
areaY = startY;
while (areaX > width - 256 * 3) {
areaY++;
areaX -= 256 * 3;
if (areaY > height - 256 * 3) {
xFrom = -1;
yFrom = -1;
return false;
}
}
switch (threadIdx.x) {
case 0:
xFrom = areaX;
yFrom = areaY;
break;
case 1:
xFrom = areaX + 256;
yFrom = areaY;
break;
case 2:
xFrom = areaX + 2 * 256;
yFrom = areaY;
break;
case 3:
xFrom = areaX;
yFrom = areaY + 256;
break;
case 4:
xFrom = areaX + 256;
yFrom = areaY + 256;
break;
case 5:
xFrom = areaX + 2 * 256;
yFrom = areaY + 256;
break;
case 6:
xFrom = areaX;
yFrom = areaY + 2 * 256;
break;
case 7:
xFrom = areaX + 256;
yFrom = areaY + 2 * 256;
break;
case 8:
xFrom = areaX + 2 * 256;
yFrom = areaY + 2 * 256;
break;
}
return true;
}
__global__ void calculateHistograms(unsigned char* lbpImg, int* histogram, int startX, int startY, int width, int height) {
int histStart = 9 * 256 * blockIdx.x + 256 * threadIdx.x;
int xFrom, yFrom;
getRectCoords(xFrom, yFrom, startX, startY, width, height);
for (int i = 0; i < 256; i++) {
histogram[histStart + i] = 0;
}
for (int x = 0; x < RECT_SIZE; x++) {
for (int y = 0; y < RECT_SIZE; y++) {
int value = lbpImg[x+y*width];
histogram[value + histStart]++;
}
}
}
void convertImageToLBP(unsigned char* imputImg, int width, int height, int* histograms) {
unsigned char* Dev_InImg = nullptr;
unsigned char* Dev_OutImg = nullptr;
cudaMalloc((void**)&Dev_InImg, height*width);
cudaMalloc((void**)&Dev_OutImg, height*width);
cudaMemcpy(Dev_InImg, imputImg, width * height, cudaMemcpyHostToDevice);
dim3 gridImg(width, height);
lbpCUDA<<<gridImg, 1>>>(Dev_InImg, Dev_OutImg, width, height);
cudaMemcpy(imputImg, Dev_OutImg, width * height, cudaMemcpyDeviceToHost);
cudaFree(Dev_InImg);
int histSize = 9*256;
int histCount = 1;
int* Dev_histograms = nullptr;
cudaMalloc((void**)&Dev_histograms, histSize * histCount * sizeof(int));
dim3 gridHist(histCount, 1);
dim3 blockHist(9, 1,1);
calculateHistograms<<<gridHist, blockHist>>>(Dev_OutImg, Dev_histograms, 0, 0, width, height);
cudaMemcpy(histograms, Dev_histograms, histSize * histCount * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(Dev_histograms);
cudaFree(Dev_OutImg);
}
|
e999660ffeff5052a269bb373413b6308f630af4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <GpuTimerLib.h>
#include <stdio.h>
#include <Windows.h>
#define NUM_THREADS 10000000
#define ARRAY_SIZE 1000
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(&g[i], 1);
}
int main(int argc, char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
hipMalloc((void **)&d_array, ARRAY_BYTES);
hipMemset((void *)d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
// Instructions: This program is needed for the next quiz
// uncomment increment_naive to measure speed and accuracy
// of non-atomic increments or uncomment increment_atomic to
// measure speed and accuracy of atomic icrements
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
timer.startTimer();
increment_atomic << <NUM_THREADS / BLOCK_WIDTH, BLOCK_WIDTH >> >(d_array);
timer.stopTimer();
// copy back the array of sums from GPU and print
hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
timer.printTime();
// free GPU memory allocation and exit
hipFree(d_array);
system("pause");
return 0;
} | e999660ffeff5052a269bb373413b6308f630af4.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <GpuTimerLib.h>
#include <stdio.h>
#include <Windows.h>
#define NUM_THREADS 10000000
#define ARRAY_SIZE 1000
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// each thread to increment consecutive elements, wrapping at ARRAY_SIZE
i = i % ARRAY_SIZE;
atomicAdd(&g[i], 1);
}
int main(int argc, char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
cudaMalloc((void **)&d_array, ARRAY_BYTES);
cudaMemset((void *)d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
// Instructions: This program is needed for the next quiz
// uncomment increment_naive to measure speed and accuracy
// of non-atomic increments or uncomment increment_atomic to
// measure speed and accuracy of atomic icrements
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
timer.startTimer();
increment_atomic << <NUM_THREADS / BLOCK_WIDTH, BLOCK_WIDTH >> >(d_array);
timer.stopTimer();
// copy back the array of sums from GPU and print
cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
timer.printTime();
// free GPU memory allocation and exit
cudaFree(d_array);
system("pause");
return 0;
} |
fc264c7a945e5d067257d9e22a1e0153a73f91ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include "stoper.h"
/**
* This is slow. Should not be done by CPU>GPU>CPU. Just do it on CPU.
*/
const int TSIZE = 16; //matrices divided into 32 x 32 tiles; CPU & GPU
const int MSIZE = 16; //assume square matrices of this size
const int MSSIZE = MSIZE * MSIZE;
// Single kernel for (tiled, SMEM) matrix multiplication.
__global__ void Multiply(const int *A, const int *B, int *C);
// Straightforward matrix multiplication kernel. Doesn't use shared memory SMEM.
__global__ void SlowTranspose(const int *A, int *T);
__global__ void funkcja() {
printf("test\n");
}
int main(void) {
// Allocation
int *hA, *hT; //allocated CPU
hA = new int[MSSIZE];
hT = new int[MSSIZE];
int *A, *T; //allocated on GPU
hipMalloc(&A, MSSIZE * 4); //size in Bytes
hipMalloc(&T, MSSIZE * 4); //size in Bytes
printf("Matrix transpose; width=height=%i\n", MSIZE);
// Filling
for(int i=0; i<MSSIZE; ++i)
hA[i] = i; //small mixed-size numbers
// Copy to GPU
PosixStoper xx;
hipMemcpy(A, hA, MSSIZE * 4, hipMemcpyHostToDevice);
// Transpose on GPU, and bring results back
dim3 blocks(MSIZE / TSIZE, MSIZE / TSIZE);
dim3 threads(TSIZE, TSIZE);
//SlowTranspose<<<1, 1>>>(A, T);
hipLaunchKernelGGL(( funkcja), dim3(1),dim3(1), 0, 0, );
hipDeviceSynchronize();
hipMemcpy(hT, T, MSSIZE * 4, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
xx.Snap(); printf("GPUtime=%3.4f[msec]\n",xx.LastDt()/1000);
// Test result on CPU
int *htest = new int[MSSIZE]; //tested on CPU
bzero(htest, MSSIZE * 4); //clean it up
PosixStoper yy;
int tmp=MSIZE;
for(int x=0; x<tmp; ++x)
for(int y=0; y<tmp; ++y)
htest[x + y *MSIZE] = hA[y + x * MSIZE];
yy.Snap(); printf("CPUtime=%3.4f[msec]\n",yy.LastDt()/1000);
//Comparison
int z=0;
// for(int x=0; x<kMatrixWidth; ++x)
// for(int y=0; y<kMatrixWidth; ++y)
// if (htest[x + y * kMatrixWidth] != hT[x + y * kMatrixWidth]) {
// ++z;
// printf("CPU:%i GPU:%i\n", htest[x + y * kMatrixWidth],
// hT[x + y * kMatrixWidth]);
// }
printf("Err:%i\n",z);
delete[] htest;
hipFree(A); hipFree(T);
delete[] hA; delete[] hT;
}
/**
* Each block computes a tile (bx=column,by=row) of C.
* It must loop over a few tiles of A and B, and sum results.
*/
__global__ void Multiply(const int *A, const int *B, int *C) {
// Tiles held in matrices sA, sB (SMEM), loaded by threads first.
int bx = blockIdx.x; //block-column in C (column in B)
int by = blockIdx.y; //block-row in C (row in A)
int tx = threadIdx.x;
int ty = threadIdx.y;
int bk; //index for loop over block of _tiles_ in A (row) and B (column).
int Csub = 0; //Store locally data in loop; write to GMEM only once at end.
__shared__ float sA[TSIZE][TSIZE]; //"tile" matrices
__shared__ float sB[TSIZE][TSIZE];
// Loop over tiles, for each block in C seleceted by (bx,by)
for(bk=0; bk < MSIZE / TSIZE; ++bk) {
//load matrices into SMEM
sA[ty][tx] = A[(by * TSIZE + ty) * MSIZE + (bk * TSIZE) +tx];
sB[ty][tx] = B[(bk * TSIZE + ty) * MSIZE + (bx * TSIZE) +tx];
__syncthreads();
// Multiple the tiles A * B --store--> C
for(int k=0; k<TSIZE; ++k)
Csub += sA[ty][k] * sB[k][tx];
__syncthreads();
}
C[(by * TSIZE + ty) * MSIZE + (bx * TSIZE + tx)] = Csub;
}
/**
*/
__global__ void SlowTranspose(const int *A, int *T) {
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int posx = bx * TSIZE + tx;
int posy = by * TSIZE + ty;
T[posy * MSIZE + posx] = A[posx * MSIZE + posy];
printf("%i",T[posy * MSIZE + posx]);
}
| fc264c7a945e5d067257d9e22a1e0153a73f91ac.cu | #include <cstdio>
#include "stoper.h"
/**
* This is slow. Should not be done by CPU>GPU>CPU. Just do it on CPU.
*/
const int TSIZE = 16; //matrices divided into 32 x 32 tiles; CPU & GPU
const int MSIZE = 16; //assume square matrices of this size
const int MSSIZE = MSIZE * MSIZE;
// Single kernel for (tiled, SMEM) matrix multiplication.
__global__ void Multiply(const int *A, const int *B, int *C);
// Straightforward matrix multiplication kernel. Doesn't use shared memory SMEM.
__global__ void SlowTranspose(const int *A, int *T);
__global__ void funkcja() {
printf("test\n");
}
int main(void) {
// Allocation
int *hA, *hT; //allocated CPU
hA = new int[MSSIZE];
hT = new int[MSSIZE];
int *A, *T; //allocated on GPU
cudaMalloc(&A, MSSIZE * 4); //size in Bytes
cudaMalloc(&T, MSSIZE * 4); //size in Bytes
printf("Matrix transpose; width=height=%i\n", MSIZE);
// Filling
for(int i=0; i<MSSIZE; ++i)
hA[i] = i; //small mixed-size numbers
// Copy to GPU
PosixStoper xx;
cudaMemcpy(A, hA, MSSIZE * 4, cudaMemcpyHostToDevice);
// Transpose on GPU, and bring results back
dim3 blocks(MSIZE / TSIZE, MSIZE / TSIZE);
dim3 threads(TSIZE, TSIZE);
//SlowTranspose<<<1, 1>>>(A, T);
funkcja<<<1,1>>>();
cudaDeviceSynchronize();
cudaMemcpy(hT, T, MSSIZE * 4, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
xx.Snap(); printf("GPUtime=%3.4f[msec]\n",xx.LastDt()/1000);
// Test result on CPU
int *htest = new int[MSSIZE]; //tested on CPU
bzero(htest, MSSIZE * 4); //clean it up
PosixStoper yy;
int tmp=MSIZE;
for(int x=0; x<tmp; ++x)
for(int y=0; y<tmp; ++y)
htest[x + y *MSIZE] = hA[y + x * MSIZE];
yy.Snap(); printf("CPUtime=%3.4f[msec]\n",yy.LastDt()/1000);
//Comparison
int z=0;
// for(int x=0; x<kMatrixWidth; ++x)
// for(int y=0; y<kMatrixWidth; ++y)
// if (htest[x + y * kMatrixWidth] != hT[x + y * kMatrixWidth]) {
// ++z;
// printf("CPU:%i GPU:%i\n", htest[x + y * kMatrixWidth],
// hT[x + y * kMatrixWidth]);
// }
printf("Err:%i\n",z);
delete[] htest;
cudaFree(A); cudaFree(T);
delete[] hA; delete[] hT;
}
/**
* Each block computes a tile (bx=column,by=row) of C.
* It must loop over a few tiles of A and B, and sum results.
*/
__global__ void Multiply(const int *A, const int *B, int *C) {
// Tiles held in matrices sA, sB (SMEM), loaded by threads first.
int bx = blockIdx.x; //block-column in C (column in B)
int by = blockIdx.y; //block-row in C (row in A)
int tx = threadIdx.x;
int ty = threadIdx.y;
int bk; //index for loop over block of _tiles_ in A (row) and B (column).
int Csub = 0; //Store locally data in loop; write to GMEM only once at end.
__shared__ float sA[TSIZE][TSIZE]; //"tile" matrices
__shared__ float sB[TSIZE][TSIZE];
// Loop over tiles, for each block in C seleceted by (bx,by)
for(bk=0; bk < MSIZE / TSIZE; ++bk) {
//load matrices into SMEM
sA[ty][tx] = A[(by * TSIZE + ty) * MSIZE + (bk * TSIZE) +tx];
sB[ty][tx] = B[(bk * TSIZE + ty) * MSIZE + (bx * TSIZE) +tx];
__syncthreads();
// Multiple the tiles A * B --store--> C
for(int k=0; k<TSIZE; ++k)
Csub += sA[ty][k] * sB[k][tx];
__syncthreads();
}
C[(by * TSIZE + ty) * MSIZE + (bx * TSIZE + tx)] = Csub;
}
/**
*/
__global__ void SlowTranspose(const int *A, int *T) {
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int posx = bx * TSIZE + tx;
int posy = by * TSIZE + ty;
T[posy * MSIZE + posx] = A[posx * MSIZE + posy];
printf("%i",T[posy * MSIZE + posx]);
}
|
d4b629d1df9e5f81321605132218de7af1e62013.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <functional>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "dnn.hpp"
//Define the parameters if not defined externally
#ifndef Nn
#define Nn 128 // Number of Output Layers
#define Ni 224 // Number of Input Layers
#endif
#ifndef Tii
// Tiling Sizes
#define Tnn 32
#define Tii 32
#define Tn 16
#define Ti 16
#endif
VTYPE (*synapse)[Nn][Ni];
VTYPE (*neuron_i)[Ni];
VTYPE (*neuron_n)[Nn];
VTYPE (*neuron_n2)[Nn];
VTYPE (*neuron_n3)[Nn];
void classifier(const VTYPE synapse[Nn][Ni],
const VTYPE neuron_i[Ni],
VTYPE neuron_n[Nn]) {
for (int n = 0; n < Nn; n++) {
VTYPE temp = 0;
for (int i = 0; i < Ni; i++) {
temp += synapse[n][i] * neuron_i[i];
}
neuron_n[n] = transfer(temp);
}
}
void classifier_tiled(const VTYPE synapse[Nn][Ni],
const VTYPE neuron_i[Ni],
VTYPE neuron_n[Nn]) {
VTYPE sum[Nn] = {};
for (int outer_n = 0; outer_n < Nn; outer_n += Tnn) { // tiling for output neurons;
for (int outer_i = 0; outer_i < Ni; outer_i += Tii) { // tiling for input neurons;
for (int inner_n = outer_n; inner_n < outer_n + Tnn; inner_n += Tn) {
for (int inner_i = outer_i; inner_i < outer_i + Tii; inner_i += Ti) {
// Original code
for (int n = inner_n; n < inner_n + Tn; n++) {
VTYPE sum_sc = 0;
for (int i = inner_i; i < inner_i + Ti; i++) {
sum_sc += synapse[n][i] * neuron_i[i];
}
sum[n] += sum_sc;
}
}
}
}
for (int n = outer_n; n < outer_n + Tnn; n++) {
neuron_n[n] = transfer(sum[n]);
}
}
}
__global__ void GPU_classifier(const hipPitchedPtr synapse,
const hipPitchedPtr neuron_i,
const hipPitchedPtr neuron_n,
size_t batch_begin,
int mn) {
/* x => n, y => batch */
int b = blockIdx.y + batch_begin;
int n_begin = (blockIdx.x * blockDim.x + threadIdx.x) * mn;
VTYPE * const neuron_i_row = (VTYPE *)((char *)neuron_i.ptr + b * neuron_i.pitch);
VTYPE * const neuron_n_row = (VTYPE *)((char *)neuron_n.ptr + b * neuron_n.pitch);
for (int n = n_begin; n < n_begin + mn; n++) {
VTYPE sum = 0;
VTYPE * const synapse_row = (VTYPE *)((char *)synapse.ptr + (b * Nn + n) * synapse.pitch);
for (int i = 0; i < Ni; i++) {
sum += synapse_row[i] * neuron_i_row[i];
}
neuron_n_row[n] = GPU_transfer(sum);
}
}
__global__ void GPU_classifier_tiled(const hipPitchedPtr synapse,
const hipPitchedPtr neuron_i,
const hipPitchedPtr neuron_n,
size_t batch_begin,
int mn, int tiling_size) {
/* x => n, y => batch */
int b = blockIdx.y + batch_begin;
int n_begin = (blockIdx.x * blockDim.x + threadIdx.x) * mn;
VTYPE * const neuron_i_row = (VTYPE *)((char *)neuron_i.ptr + b * neuron_i.pitch);
VTYPE * const neuron_n_row = (VTYPE *)((char *)neuron_n.ptr + b * neuron_n.pitch);
for (int n = n_begin; n < n_begin + mn; n++) {
VTYPE sum = 0;
VTYPE * const synapse_row = (VTYPE *)((char *)synapse.ptr + (b * Nn + n) * synapse.pitch);
for (int i = 0; i < Ni; i += tiling_size) {
for(int ii = i; ii < i + tiling_size; ii++) {
sum += synapse_row[ii] * neuron_i_row[ii];
}
}
neuron_n_row[n] = GPU_transfer(sum);
}
}
int main(int argc, char **argv) {
if (argc < 3) {
std::cerr << "Usage : " << argv[0] << " BATCH_SIZE BATCH_IN_PARALLEL" << std::endl;
exit(0);
}
const int batch = strtol(argv[1], nullptr, 10);
const int batch_in_parallel = strtol(argv[2], nullptr, 10);
if (batch_in_parallel > batch) {
std::cerr << "BATCH_IN_PARALLEL must smaller than BATCH" << std::endl;
exit(0);
} else if (batch % batch_in_parallel) {
std::cerr << "BATCH must be a multiple of BATCH_IN_PARALLEL" << std::endl;
exit(0);
}
std::cout << "Initializing ..." << std::endl;
synapse = (VTYPE (*)[Nn][Ni]) aligned_alloc(64, batch * Nn * Ni * sizeof(VTYPE));
neuron_i = (VTYPE (*)[Ni]) aligned_alloc(64, batch * Ni * sizeof(VTYPE));
neuron_n = (VTYPE (*)[Nn]) aligned_alloc(64, batch * Nn * sizeof(VTYPE));
neuron_n2 = (VTYPE (*)[Nn]) aligned_alloc(64, batch * Nn * sizeof(VTYPE));
neuron_n3 = (VTYPE (*)[Nn]) aligned_alloc(64, batch * Nn * sizeof(VTYPE));
fill_random((VTYPE *) synapse, Nn * Ni * batch);
fill_random((VTYPE *) neuron_i, Ni * batch);
memset(neuron_n, 0, Nn * batch * sizeof(VTYPE));
memset(neuron_n2, 0, Nn * batch * sizeof(VTYPE));
memset(neuron_n3, 0, Nn * batch * sizeof(VTYPE));
std::cout << "CPU Simple version: \t\t\t\t\t";
timeit([&]() {
for(int b = 0; b < batch; ++b) {
classifier(synapse[b], neuron_i[b], neuron_n[b]);
}
});
std::cout << "CPU Tiled version: \t\t\t\t\t";
timeit([&]() {
for(int b = 0; b < batch; ++b) {
classifier_tiled(synapse[b], neuron_i[b], neuron_n2[b]);
}
});
compare((VTYPE *)neuron_n, (VTYPE *)neuron_n2, Nn * batch);
hipExtent extent_synapse = make_hipExtent(Ni * sizeof(VTYPE), Nn, batch);
hipPitchedPtr d_synapse;
MallocAndCpy3D(d_synapse, synapse, extent_synapse);
size_t pitch_neuron_i, pitch_neuron_n;
VTYPE (*_d_neuron_i)[Ni], (*_d_neuron_n)[Nn];
hipMallocPitch((void **)&_d_neuron_i, &pitch_neuron_i, Ni * sizeof(VTYPE), batch);
hipMallocPitch((void **)&_d_neuron_n, &pitch_neuron_n, Nn * sizeof(VTYPE), batch);
hipPitchedPtr d_neuron_i = make_hipPitchedPtr(_d_neuron_i, pitch_neuron_i, Ni, batch);
hipPitchedPtr d_neuron_n = make_hipPitchedPtr(_d_neuron_n, pitch_neuron_n, Nn, batch);
hipMemcpy2D(d_neuron_i.ptr, d_neuron_i.pitch, neuron_i, Ni * sizeof(VTYPE), Ni * sizeof(VTYPE), batch, hipMemcpyHostToDevice);
std::cout << "GPU version:\n";
int mn = 1;
int threads_total = Nn / mn;
for(int block_num = 1; block_num < threads_total; block_num *= 2) {
int block_num = 64;
dim3 block_size(threads_total / block_num);
dim3 grid_size(block_num, batch_in_parallel);
if (threads_total / block_num > 1024 || threads_total/block_num == 0)
continue;
hipMemset(d_neuron_n.ptr, 0, batch * Nn * sizeof(VTYPE));
printf("Grid: (%4d, %4d), Block: (%4d, %4d), Mn=%4d\t", grid_size.x, grid_size.y, block_size.x, block_size.y, mn);
CUDA_timeit([&]() {
for(int b = 0; b < batch; b += batch_in_parallel) {
hipLaunchKernelGGL(( GPU_classifier), dim3(grid_size), dim3(block_size), 0, 0, d_synapse, d_neuron_i, d_neuron_n, b, mn);
}
});
hipMemcpy2D(neuron_n3, Nn * sizeof(VTYPE), d_neuron_n.ptr, d_neuron_n.pitch, Nn * sizeof(VTYPE), batch, hipMemcpyDeviceToHost);
cuda_check_error();
compare((VTYPE *)neuron_n, (VTYPE *)neuron_n3, Nn * batch);
}
}
| d4b629d1df9e5f81321605132218de7af1e62013.cu | #include <iostream>
#include <functional>
#include <cuda.h>
#include <cuda_runtime.h>
#include "dnn.hpp"
//Define the parameters if not defined externally
#ifndef Nn
#define Nn 128 // Number of Output Layers
#define Ni 224 // Number of Input Layers
#endif
#ifndef Tii
// Tiling Sizes
#define Tnn 32
#define Tii 32
#define Tn 16
#define Ti 16
#endif
VTYPE (*synapse)[Nn][Ni];
VTYPE (*neuron_i)[Ni];
VTYPE (*neuron_n)[Nn];
VTYPE (*neuron_n2)[Nn];
VTYPE (*neuron_n3)[Nn];
void classifier(const VTYPE synapse[Nn][Ni],
const VTYPE neuron_i[Ni],
VTYPE neuron_n[Nn]) {
for (int n = 0; n < Nn; n++) {
VTYPE temp = 0;
for (int i = 0; i < Ni; i++) {
temp += synapse[n][i] * neuron_i[i];
}
neuron_n[n] = transfer(temp);
}
}
void classifier_tiled(const VTYPE synapse[Nn][Ni],
const VTYPE neuron_i[Ni],
VTYPE neuron_n[Nn]) {
VTYPE sum[Nn] = {};
for (int outer_n = 0; outer_n < Nn; outer_n += Tnn) { // tiling for output neurons;
for (int outer_i = 0; outer_i < Ni; outer_i += Tii) { // tiling for input neurons;
for (int inner_n = outer_n; inner_n < outer_n + Tnn; inner_n += Tn) {
for (int inner_i = outer_i; inner_i < outer_i + Tii; inner_i += Ti) {
// Original code
for (int n = inner_n; n < inner_n + Tn; n++) {
VTYPE sum_sc = 0;
for (int i = inner_i; i < inner_i + Ti; i++) {
sum_sc += synapse[n][i] * neuron_i[i];
}
sum[n] += sum_sc;
}
}
}
}
for (int n = outer_n; n < outer_n + Tnn; n++) {
neuron_n[n] = transfer(sum[n]);
}
}
}
__global__ void GPU_classifier(const cudaPitchedPtr synapse,
const cudaPitchedPtr neuron_i,
const cudaPitchedPtr neuron_n,
size_t batch_begin,
int mn) {
/* x => n, y => batch */
int b = blockIdx.y + batch_begin;
int n_begin = (blockIdx.x * blockDim.x + threadIdx.x) * mn;
VTYPE * const neuron_i_row = (VTYPE *)((char *)neuron_i.ptr + b * neuron_i.pitch);
VTYPE * const neuron_n_row = (VTYPE *)((char *)neuron_n.ptr + b * neuron_n.pitch);
for (int n = n_begin; n < n_begin + mn; n++) {
VTYPE sum = 0;
VTYPE * const synapse_row = (VTYPE *)((char *)synapse.ptr + (b * Nn + n) * synapse.pitch);
for (int i = 0; i < Ni; i++) {
sum += synapse_row[i] * neuron_i_row[i];
}
neuron_n_row[n] = GPU_transfer(sum);
}
}
__global__ void GPU_classifier_tiled(const cudaPitchedPtr synapse,
const cudaPitchedPtr neuron_i,
const cudaPitchedPtr neuron_n,
size_t batch_begin,
int mn, int tiling_size) {
/* x => n, y => batch */
int b = blockIdx.y + batch_begin;
int n_begin = (blockIdx.x * blockDim.x + threadIdx.x) * mn;
VTYPE * const neuron_i_row = (VTYPE *)((char *)neuron_i.ptr + b * neuron_i.pitch);
VTYPE * const neuron_n_row = (VTYPE *)((char *)neuron_n.ptr + b * neuron_n.pitch);
for (int n = n_begin; n < n_begin + mn; n++) {
VTYPE sum = 0;
VTYPE * const synapse_row = (VTYPE *)((char *)synapse.ptr + (b * Nn + n) * synapse.pitch);
for (int i = 0; i < Ni; i += tiling_size) {
for(int ii = i; ii < i + tiling_size; ii++) {
sum += synapse_row[ii] * neuron_i_row[ii];
}
}
neuron_n_row[n] = GPU_transfer(sum);
}
}
int main(int argc, char **argv) {
if (argc < 3) {
std::cerr << "Usage : " << argv[0] << " BATCH_SIZE BATCH_IN_PARALLEL" << std::endl;
exit(0);
}
const int batch = strtol(argv[1], nullptr, 10);
const int batch_in_parallel = strtol(argv[2], nullptr, 10);
if (batch_in_parallel > batch) {
std::cerr << "BATCH_IN_PARALLEL must smaller than BATCH" << std::endl;
exit(0);
} else if (batch % batch_in_parallel) {
std::cerr << "BATCH must be a multiple of BATCH_IN_PARALLEL" << std::endl;
exit(0);
}
std::cout << "Initializing ..." << std::endl;
synapse = (VTYPE (*)[Nn][Ni]) aligned_alloc(64, batch * Nn * Ni * sizeof(VTYPE));
neuron_i = (VTYPE (*)[Ni]) aligned_alloc(64, batch * Ni * sizeof(VTYPE));
neuron_n = (VTYPE (*)[Nn]) aligned_alloc(64, batch * Nn * sizeof(VTYPE));
neuron_n2 = (VTYPE (*)[Nn]) aligned_alloc(64, batch * Nn * sizeof(VTYPE));
neuron_n3 = (VTYPE (*)[Nn]) aligned_alloc(64, batch * Nn * sizeof(VTYPE));
fill_random((VTYPE *) synapse, Nn * Ni * batch);
fill_random((VTYPE *) neuron_i, Ni * batch);
memset(neuron_n, 0, Nn * batch * sizeof(VTYPE));
memset(neuron_n2, 0, Nn * batch * sizeof(VTYPE));
memset(neuron_n3, 0, Nn * batch * sizeof(VTYPE));
std::cout << "CPU Simple version: \t\t\t\t\t";
timeit([&]() {
for(int b = 0; b < batch; ++b) {
classifier(synapse[b], neuron_i[b], neuron_n[b]);
}
});
std::cout << "CPU Tiled version: \t\t\t\t\t";
timeit([&]() {
for(int b = 0; b < batch; ++b) {
classifier_tiled(synapse[b], neuron_i[b], neuron_n2[b]);
}
});
compare((VTYPE *)neuron_n, (VTYPE *)neuron_n2, Nn * batch);
cudaExtent extent_synapse = make_cudaExtent(Ni * sizeof(VTYPE), Nn, batch);
cudaPitchedPtr d_synapse;
MallocAndCpy3D(d_synapse, synapse, extent_synapse);
size_t pitch_neuron_i, pitch_neuron_n;
VTYPE (*_d_neuron_i)[Ni], (*_d_neuron_n)[Nn];
cudaMallocPitch((void **)&_d_neuron_i, &pitch_neuron_i, Ni * sizeof(VTYPE), batch);
cudaMallocPitch((void **)&_d_neuron_n, &pitch_neuron_n, Nn * sizeof(VTYPE), batch);
cudaPitchedPtr d_neuron_i = make_cudaPitchedPtr(_d_neuron_i, pitch_neuron_i, Ni, batch);
cudaPitchedPtr d_neuron_n = make_cudaPitchedPtr(_d_neuron_n, pitch_neuron_n, Nn, batch);
cudaMemcpy2D(d_neuron_i.ptr, d_neuron_i.pitch, neuron_i, Ni * sizeof(VTYPE), Ni * sizeof(VTYPE), batch, cudaMemcpyHostToDevice);
std::cout << "GPU version:\n";
int mn = 1;
int threads_total = Nn / mn;
for(int block_num = 1; block_num < threads_total; block_num *= 2) {
int block_num = 64;
dim3 block_size(threads_total / block_num);
dim3 grid_size(block_num, batch_in_parallel);
if (threads_total / block_num > 1024 || threads_total/block_num == 0)
continue;
cudaMemset(d_neuron_n.ptr, 0, batch * Nn * sizeof(VTYPE));
printf("Grid: (%4d, %4d), Block: (%4d, %4d), Mn=%4d\t", grid_size.x, grid_size.y, block_size.x, block_size.y, mn);
CUDA_timeit([&]() {
for(int b = 0; b < batch; b += batch_in_parallel) {
GPU_classifier<<<grid_size, block_size>>>(d_synapse, d_neuron_i, d_neuron_n, b, mn);
}
});
cudaMemcpy2D(neuron_n3, Nn * sizeof(VTYPE), d_neuron_n.ptr, d_neuron_n.pitch, Nn * sizeof(VTYPE), batch, cudaMemcpyDeviceToHost);
cuda_check_error();
compare((VTYPE *)neuron_n, (VTYPE *)neuron_n3, Nn * batch);
}
}
|
8c4aebdfcab3fb6fd6d9d837152f1d014a473795.hip | // !!! This is a file automatically generated by hipify!!!
/*
ECL-GC code: ECL-GC is a graph-coloring algorithm with shortcutting. The CUDA
implementation thereof is quite fast. It operates on graphs stored in binary
CSR format.
Copyright (c) 2020, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Ghadeer Alabandi, Evan Powers, and Martin Burtscher
URL: The latest version of this code is available at
https://userweb.cs.txstate.edu/~burtscher/research/ECL-GC/.
Publication: This work is described in detail in the following paper.
Ghadeer Alabandi, Evan Powers, and Martin Burtscher. Increasing the Parallelism
of Graph Coloring via Shortcutting. Proceedings of the 2020 ACM SIGPLAN
Symposium on Principles and Practice of Parallel Programming, pp. 262-275.
February 2020.
*/
#include <cstdio>
#include <cstdlib>
#include <algorithm>
#include <chrono>
#include <hip/hip_runtime.h>
#include "graph.h"
static const int ThreadsPerBlock = 256;
static const unsigned int Warp = 0xffffffff;
static const int WS = 32; // warp size and bits per int
static const int MSB = 1 << (WS - 1);
static const int Mask = (1 << (WS / 2)) - 1;
static __device__ int wlsize = 0;
// https://stackoverflow.com/questions/664014/what-integer-hash-function-are-good-that-accepts-an-integer-hash-key
static __device__ unsigned int hash(unsigned int val)
{
val = ((val >> 16) ^ val) * 0x45d9f3b;
val = ((val >> 16) ^ val) * 0x45d9f3b;
return (val >> 16) ^ val;
}
__global__
void init(const int nodes,
const int edges,
const int* const __restrict__ nidx,
const int* const __restrict__ nlist,
int* const __restrict__ nlist2,
int* const __restrict__ posscol,
int* const __restrict__ posscol2,
int* const __restrict__ color,
int* const __restrict__ wl)
{
const int lane = threadIdx.x % WS;
const int thread = threadIdx.x + blockIdx.x * ThreadsPerBlock;
const int threads = gridDim.x * ThreadsPerBlock;
int maxrange = -1;
for (int v = thread; __any_sync(Warp, v < nodes); v += threads) {
bool cond = false;
int beg, end, pos, degv, active;
if (v < nodes) {
beg = nidx[v];
end = nidx[v + 1];
degv = end - beg;
cond = (degv >= WS);
if (cond) {
wl[atomicAdd(&wlsize, 1)] = v;
} else {
active = 0;
pos = beg;
for (int i = beg; i < end; i++) {
const int nei = nlist[i];
const int degn = nidx[nei + 1] - nidx[nei];
if ((degv < degn) || ((degv == degn) && (hash(v) < hash(nei))) || ((degv == degn) && (hash(v) == hash(nei)) && (v < nei))) {
active |= (unsigned int)MSB >> (i - beg);
pos++;
}
}
}
}
int bal = __ballot_sync(Warp, cond);
while (bal != 0) {
const int who = __ffs(bal) - 1;
bal &= bal - 1;
const int wv = __shfl_sync(Warp, v, who);
const int wbeg = __shfl_sync(Warp, beg, who);
const int wend = __shfl_sync(Warp, end, who);
const int wdegv = wend - wbeg;
int wpos = wbeg;
for (int i = wbeg + lane; __any_sync(Warp, i < wend); i += WS) {
int wnei;
bool prio = false;
if (i < wend) {
wnei = nlist[i];
const int wdegn = nidx[wnei + 1] - nidx[wnei];
prio = ((wdegv < wdegn) || ((wdegv == wdegn) && (hash(wv) < hash(wnei))) || ((wdegv == wdegn) && (hash(wv) == hash(wnei)) && (wv < wnei)));
}
const int b = __ballot_sync(Warp, prio);
const int offs = __popc(b & ((1 << lane) - 1));
if (prio) nlist2[wpos + offs] = wnei;
wpos += __popc(b);
}
if (who == lane) pos = wpos;
}
if (v < nodes) {
const int range = pos - beg;
maxrange = max(maxrange, range);
color[v] = (cond || (range == 0)) ? (range << (WS / 2)) : active;
posscol[v] = (range >= WS) ? -1 : (MSB >> range);
}
}
if (maxrange >= Mask) printf("too many active neighbors\n");
for (int i = thread; i < edges / WS + 1; i += threads) posscol2[i] = -1;
}
__global__
void runLarge(const int nodes,
const int* const __restrict__ nidx,
const int* const __restrict__ nlist,
int* const __restrict__ posscol,
int* const __restrict__ posscol2,
volatile int* const __restrict__ color,
const int* const __restrict__ wl)
{
const int stop = wlsize;
if (stop != 0) {
const int lane = threadIdx.x % WS;
const int thread = threadIdx.x + blockIdx.x * ThreadsPerBlock;
const int threads = gridDim.x * ThreadsPerBlock;
bool again;
do {
again = false;
for (int w = thread; __any_sync(Warp, w < stop); w += threads) {
bool shortcut, done, cond = false;
int v, data, range, beg, pcol;
if (w < stop) {
v = wl[w];
data = color[v];
range = data >> (WS / 2);
if (range > 0) {
beg = nidx[v];
pcol = posscol[v];
cond = true;
}
}
int bal = __ballot_sync(Warp, cond);
while (bal != 0) {
const int who = __ffs(bal) - 1;
bal &= bal - 1;
const int wdata = __shfl_sync(Warp, data, who);
const int wrange = wdata >> (WS / 2);
const int wbeg = __shfl_sync(Warp, beg, who);
const int wmincol = wdata & Mask;
const int wmaxcol = wmincol + wrange;
const int wend = wbeg + wmaxcol;
const int woffs = wbeg / WS;
int wpcol = __shfl_sync(Warp, pcol, who);
bool wshortcut = true;
bool wdone = true;
for (int i = wbeg + lane; __any_sync(Warp, i < wend); i += WS) {
int nei, neidata, neirange;
if (i < wend) {
nei = nlist[i];
neidata = color[nei];
neirange = neidata >> (WS / 2);
const bool neidone = (neirange == 0);
wdone &= neidone; //consolidated below
if (neidone) {
const int neicol = neidata;
if (neicol < WS) {
wpcol &= ~((unsigned int)MSB >> neicol); //consolidated below
} else {
if ((wmincol <= neicol) && (neicol < wmaxcol) && ((posscol2[woffs + neicol / WS] << (neicol % WS)) < 0)) {
atomicAnd((int*)&posscol2[woffs + neicol / WS], ~((unsigned int)MSB >> (neicol % WS)));
}
}
} else {
const int neimincol = neidata & Mask;
const int neimaxcol = neimincol + neirange;
if ((neimincol <= wmincol) && (neimaxcol >= wmincol)) wshortcut = false; //consolidated below
}
}
}
wshortcut = __all_sync(Warp, wshortcut);
wdone = __all_sync(Warp, wdone);
wpcol &= __shfl_xor_sync(Warp, wpcol, 1);
wpcol &= __shfl_xor_sync(Warp, wpcol, 2);
wpcol &= __shfl_xor_sync(Warp, wpcol, 4);
wpcol &= __shfl_xor_sync(Warp, wpcol, 8);
wpcol &= __shfl_xor_sync(Warp, wpcol, 16);
if (who == lane) pcol = wpcol;
if (who == lane) done = wdone;
if (who == lane) shortcut = wshortcut;
}
if (w < stop) {
if (range > 0) {
const int mincol = data & Mask;
int val = pcol, mc = 0;
if (pcol == 0) {
const int offs = beg / WS;
mc = max(1, mincol / WS);
while ((val = posscol2[offs + mc]) == 0) mc++;
}
int newmincol = mc * WS + __clz(val);
if (mincol != newmincol) shortcut = false;
if (shortcut || done) {
pcol = (newmincol < WS) ? ((unsigned int)MSB >> newmincol) : 0;
} else {
const int maxcol = mincol + range;
const int range = maxcol - newmincol;
newmincol = (range << (WS / 2)) | newmincol;
again = true;
}
posscol[v] = pcol;
color[v] = newmincol;
}
}
}
} while (__any_sync(Warp, again));
}
}
__global__
void runSmall(const int nodes,
const int* const __restrict__ nidx,
const int* const __restrict__ nlist,
volatile int* const __restrict__ posscol,
int* const __restrict__ color)
{
const int thread = threadIdx.x + blockIdx.x * ThreadsPerBlock;
const int threads = gridDim.x * ThreadsPerBlock;
if (thread == 0) wlsize = 0;
bool again;
do {
again = false;
for (int v = thread; v < nodes; v += threads) {
int pcol = posscol[v];
if (__popc(pcol) > 1) {
const int beg = nidx[v];
int active = color[v];
int allnei = 0;
int keep = active;
do {
const int old = active;
active &= active - 1;
const int curr = old ^ active;
const int i = beg + __clz(curr);
const int nei = nlist[i];
const int neipcol = posscol[nei];
allnei |= neipcol;
if ((pcol & neipcol) == 0) {
pcol &= pcol - 1;
keep ^= curr;
} else if (__popc(neipcol) == 1) {
pcol ^= neipcol;
keep ^= curr;
}
} while (active != 0);
if (keep != 0) {
const int best = (unsigned int)MSB >> __clz(pcol);
if ((best & ~allnei) != 0) {
pcol = best;
keep = 0;
}
}
again |= keep;
if (keep == 0) keep = __clz(pcol);
color[v] = keep;
posscol[v] = pcol;
}
}
} while (again);
}
int main(int argc, char* argv[])
{
printf("ECL-GC v1.2 (%s)\n", __FILE__);
printf("Copyright 2020 Texas State University\n\n");
if (argc != 2) {printf("USAGE: %s input_file_name\n\n", argv[0]); exit(-1);}
if (WS != 32) {printf("ERROR: warp size must be 32\n\n"); exit(-1);}
if (WS != sizeof(int) * 8) {printf("ERROR: bits per word must match warp size\n\n"); exit(-1);}
if ((ThreadsPerBlock < WS) || ((ThreadsPerBlock % WS) != 0)) {
printf("ERROR: threads per block must be a multiple of the warp size\n\n");
exit(-1);
}
if ((ThreadsPerBlock & (ThreadsPerBlock - 1)) != 0) {
printf("ERROR: threads per block must be a power of two\n\n");
exit(-1);
}
ECLgraph g = readECLgraph(argv[1]);
printf("input: %s\n", argv[1]);
printf("nodes: %d\n", g.nodes);
printf("edges: %d\n", g.edges);
printf("avg degree: %.2f\n", 1.0 * g.edges / g.nodes);
int* const color = new int [g.nodes];
int *nidx_d, *nlist_d, *nlist2_d, *posscol_d, *posscol2_d, *color_d, *wl_d;
if (hipSuccess != hipMalloc((void **)&nidx_d, (g.nodes + 1) * sizeof(int)))
printf("ERROR: could not allocate nidx_d\n\n");
if (hipSuccess != hipMalloc((void **)&nlist_d, g.edges * sizeof(int)))
printf("ERROR: could not allocate nlist_d\n\n");
if (hipSuccess != hipMalloc((void **)&nlist2_d, g.edges * sizeof(int)))
printf("ERROR: could not allocate nlist2_d\n\n");
if (hipSuccess != hipMalloc((void **)&posscol_d, g.nodes * sizeof(int)))
printf("ERROR: could not allocate posscol_d\n\n");
if (hipSuccess != hipMalloc((void **)&posscol2_d, (g.edges / WS + 1) * sizeof(int)))
printf("ERROR: could not allocate posscol2_d\n\n");
if (hipSuccess != hipMalloc((void **)&color_d, g.nodes * sizeof(int)))
printf("ERROR: could not allocate color_d\n\n");
if (hipSuccess != hipMalloc((void **)&wl_d, g.nodes * sizeof(int)))
printf("ERROR: could not allocate wl_d\n\n");
if (hipSuccess != hipMemcpy(nidx_d, g.nindex, (g.nodes + 1) * sizeof(int), hipMemcpyHostToDevice))
printf("ERROR: copying nidx to device failed\n\n");
if (hipSuccess != hipMemcpy(nlist_d, g.nlist, g.edges * sizeof(int), hipMemcpyHostToDevice))
printf("ERROR: copying nlist to device failed\n\n");
const int blocks = 24;
hipDeviceSynchronize();
auto start = std::chrono::high_resolution_clock::now();
for (int n = 0; n < 100; n++) {
hipLaunchKernelGGL(( init), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, g.edges, nidx_d, nlist_d, nlist2_d, posscol_d, posscol2_d, color_d, wl_d);
hipLaunchKernelGGL(( runLarge), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, nidx_d, nlist2_d, posscol_d, posscol2_d, color_d, wl_d);
hipLaunchKernelGGL(( runSmall), dim3(blocks), dim3(ThreadsPerBlock), 0, 0, g.nodes, nidx_d, nlist_d, posscol_d, color_d);
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_seconds = end - start;
float runtime = (float)elapsed_seconds.count() / 100;
printf("runtime: %.6f s\n", runtime);
printf("throughput: %.6f Mnodes/s\n", g.nodes * 0.000001 / runtime);
printf("throughput: %.6f Medges/s\n", g.edges * 0.000001 / runtime);
if (hipSuccess != hipMemcpy(color, color_d, g.nodes * sizeof(int), hipMemcpyDeviceToHost))
printf("ERROR: copying color from device failed\n\n");
hipFree(wl_d);
hipFree(color_d);
hipFree(posscol2_d);
hipFree(posscol_d);
hipFree(nlist2_d);
hipFree(nlist_d);
hipFree(nidx_d);
for (int v = 0; v < g.nodes; v++) {
if (color[v] < 0)
printf("ERROR: found unprocessed node in graph (node %d with deg %d)\n\n",
v, g.nindex[v + 1] - g.nindex[v]);
for (int i = g.nindex[v]; i < g.nindex[v + 1]; i++) {
if (color[g.nlist[i]] == color[v])
printf("ERROR: found adjacent nodes with same color %d (%d %d)\n\n",
color[v], v, g.nlist[i]);
}
}
printf("Passed\n");
#ifdef DEBUG
const int vals = 16;
int c[vals];
for (int i = 0; i < vals; i++) c[i] = 0;
int cols = -1;
for (int v = 0; v < g.nodes; v++) {
cols = ::max(cols, color[v]);
if (color[v] < vals) c[color[v]]++;
}
cols++;
printf("colors used: %d\n", cols);
int sum = 0;
for (int i = 0; i < ::min(vals, cols); i++) {
sum += c[i];
printf("col %2d: %10d (%5.1f%%)\n", i, c[i], 100.0 * sum / g.nodes);
}
#endif
delete [] color;
freeECLgraph(g);
return 0;
}
| 8c4aebdfcab3fb6fd6d9d837152f1d014a473795.cu | /*
ECL-GC code: ECL-GC is a graph-coloring algorithm with shortcutting. The CUDA
implementation thereof is quite fast. It operates on graphs stored in binary
CSR format.
Copyright (c) 2020, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Ghadeer Alabandi, Evan Powers, and Martin Burtscher
URL: The latest version of this code is available at
https://userweb.cs.txstate.edu/~burtscher/research/ECL-GC/.
Publication: This work is described in detail in the following paper.
Ghadeer Alabandi, Evan Powers, and Martin Burtscher. Increasing the Parallelism
of Graph Coloring via Shortcutting. Proceedings of the 2020 ACM SIGPLAN
Symposium on Principles and Practice of Parallel Programming, pp. 262-275.
February 2020.
*/
#include <cstdio>
#include <cstdlib>
#include <algorithm>
#include <chrono>
#include <cuda.h>
#include "graph.h"
static const int ThreadsPerBlock = 256;
static const unsigned int Warp = 0xffffffff;
static const int WS = 32; // warp size and bits per int
static const int MSB = 1 << (WS - 1);
static const int Mask = (1 << (WS / 2)) - 1;
static __device__ int wlsize = 0;
// https://stackoverflow.com/questions/664014/what-integer-hash-function-are-good-that-accepts-an-integer-hash-key
static __device__ unsigned int hash(unsigned int val)
{
val = ((val >> 16) ^ val) * 0x45d9f3b;
val = ((val >> 16) ^ val) * 0x45d9f3b;
return (val >> 16) ^ val;
}
// Initialization pass of the ECL-GC coloring.
// For each vertex v it determines the set of higher-priority neighbors
// (priority order: larger degree first, ties broken by hash(), then by
// vertex id). Low-degree vertices (deg < WS) record the positions of those
// neighbors as a bitmask in color[]; high-degree vertices are appended to
// the worklist wl, their higher-priority neighbors are compacted into
// nlist2 by a whole warp cooperating, and their count is packed into
// color[]. posscol/posscol2 are initialized to "all colors possible".
__global__
void init(const int nodes,
          const int edges,
          const int* const __restrict__ nidx,     // CSR row offsets (nodes + 1 entries)
          const int* const __restrict__ nlist,    // CSR adjacency list
          int* const __restrict__ nlist2,         // out: compacted higher-priority neighbors
          int* const __restrict__ posscol,        // out: per-vertex candidate-color bitmap
          int* const __restrict__ posscol2,       // out: overflow candidate bitmap (colors >= WS)
          int* const __restrict__ color,          // out: packed state (see header comment)
          int* const __restrict__ wl)             // out: worklist of high-degree vertices
{
  const int lane = threadIdx.x % WS;
  const int thread = threadIdx.x + blockIdx.x * ThreadsPerBlock;
  const int threads = gridDim.x * ThreadsPerBlock;
  int maxrange = -1;
  // grid-stride loop; the __any_sync keeps whole warps in the loop as long
  // as any lane still has a valid vertex (needed for the warp phase below)
  for (int v = thread; __any_sync(Warp, v < nodes); v += threads) {
    bool cond = false;
    int beg, end, pos, degv, active;
    if (v < nodes) {
      beg = nidx[v];
      end = nidx[v + 1];
      degv = end - beg;
      cond = (degv >= WS);  // high-degree vertices are deferred to the warp phase
      if (cond) {
        wl[atomicAdd(&wlsize, 1)] = v;
      } else {
        // low-degree vertex: mark each higher-priority neighbor with one bit
        // (MSB-first by position) and count them in pos
        active = 0;
        pos = beg;
        for (int i = beg; i < end; i++) {
          const int nei = nlist[i];
          const int degn = nidx[nei + 1] - nidx[nei];
          if ((degv < degn) || ((degv == degn) && (hash(v) < hash(nei))) || ((degv == degn) && (hash(v) == hash(nei)) && (v < nei))) {
            active |= (unsigned int)MSB >> (i - beg);
            pos++;
          }
        }
      }
    }
    // warp phase: process each lane's high-degree vertex with the full warp
    int bal = __ballot_sync(Warp, cond);
    while (bal != 0) {
      const int who = __ffs(bal) - 1;  // next lane whose vertex we handle
      bal &= bal - 1;
      const int wv = __shfl_sync(Warp, v, who);
      const int wbeg = __shfl_sync(Warp, beg, who);
      const int wend = __shfl_sync(Warp, end, who);
      const int wdegv = wend - wbeg;
      int wpos = wbeg;
      for (int i = wbeg + lane; __any_sync(Warp, i < wend); i += WS) {
        int wnei;
        bool prio = false;
        if (i < wend) {
          wnei = nlist[i];
          const int wdegn = nidx[wnei + 1] - nidx[wnei];
          prio = ((wdegv < wdegn) || ((wdegv == wdegn) && (hash(wv) < hash(wnei))) || ((wdegv == wdegn) && (hash(wv) == hash(wnei)) && (wv < wnei)));
        }
        // warp-wide stream compaction of the higher-priority neighbors
        const int b = __ballot_sync(Warp, prio);
        const int offs = __popc(b & ((1 << lane) - 1));
        if (prio) nlist2[wpos + offs] = wnei;
        wpos += __popc(b);
      }
      if (who == lane) pos = wpos;  // hand the count back to the owning lane
    }
    if (v < nodes) {
      const int range = pos - beg;  // number of higher-priority neighbors
      maxrange = max(maxrange, range);
      // high-degree (or unconstrained) vertices: pack (range << 16 | mincol=0);
      // low-degree vertices: store the active-neighbor bitmask for runSmall
      color[v] = (cond || (range == 0)) ? (range << (WS / 2)) : active;
      // -1 = all WS colors possible; otherwise the arithmetic shift of MSB
      // sets the top range+1 bits, i.e. colors 0..range remain candidates
      posscol[v] = (range >= WS) ? -1 : (MSB >> range);
    }
  }
  // range must fit in the lower half of an int (packed with mincol)
  if (maxrange >= Mask) printf("too many active neighbors\n");
  // initialize the overflow bitmap: every color still possible
  for (int i = thread; i < edges / WS + 1; i += threads) posscol2[i] = -1;
}
// Iteratively colors the high-degree vertices collected in wl by init().
// A whole warp cooperates on one vertex at a time: it scans the vertex's
// higher-priority neighbors (compacted in nlist2), removes the colors of
// already-colored neighbors from the candidate bitmaps (posscol for colors
// < WS, posscol2 for larger colors), and narrows the packed
// (range << 16 | mincol) state in color[] until the vertex is final
// (range == 0). color is volatile because other blocks update it
// concurrently while this kernel spins.
__global__
void runLarge(const int nodes,
              const int* const __restrict__ nidx,
              const int* const __restrict__ nlist,   // here: nlist2 from init (higher-priority neighbors)
              int* const __restrict__ posscol,
              int* const __restrict__ posscol2,
              volatile int* const __restrict__ color,
              const int* const __restrict__ wl)
{
  const int stop = wlsize;  // number of high-degree vertices to process
  if (stop != 0) {
    const int lane = threadIdx.x % WS;
    const int thread = threadIdx.x + blockIdx.x * ThreadsPerBlock;
    const int threads = gridDim.x * ThreadsPerBlock;
    bool again;
    do {
      again = false;
      for (int w = thread; __any_sync(Warp, w < stop); w += threads) {
        bool shortcut, done, cond = false;
        int v, data, range, beg, pcol;
        if (w < stop) {
          v = wl[w];
          data = color[v];
          range = data >> (WS / 2);  // number of still-viable colors; 0 = final
          if (range > 0) {
            beg = nidx[v];
            pcol = posscol[v];
            cond = true;
          }
        }
        // warp phase: handle each lane's pending vertex with the full warp
        int bal = __ballot_sync(Warp, cond);
        while (bal != 0) {
          const int who = __ffs(bal) - 1;
          bal &= bal - 1;
          const int wdata = __shfl_sync(Warp, data, who);
          const int wrange = wdata >> (WS / 2);
          const int wbeg = __shfl_sync(Warp, beg, who);
          const int wmincol = wdata & Mask;       // lowest still-possible color
          const int wmaxcol = wmincol + wrange;   // one past the highest candidate
          const int wend = wbeg + wmaxcol;
          const int woffs = wbeg / WS;            // base word of this vertex in posscol2
          int wpcol = __shfl_sync(Warp, pcol, who);
          bool wshortcut = true;
          bool wdone = true;
          for (int i = wbeg + lane; __any_sync(Warp, i < wend); i += WS) {
            int nei, neidata, neirange;
            if (i < wend) {
              nei = nlist[i];
              neidata = color[nei];
              neirange = neidata >> (WS / 2);
              const bool neidone = (neirange == 0);
              wdone &= neidone; //consolidated below
              if (neidone) {
                // neighbor is final: its color is no longer available to us
                const int neicol = neidata;
                if (neicol < WS) {
                  wpcol &= ~((unsigned int)MSB >> neicol); //consolidated below
                } else {
                  // color lives in the overflow bitmap; clear its bit if still set
                  if ((wmincol <= neicol) && (neicol < wmaxcol) && ((posscol2[woffs + neicol / WS] << (neicol % WS)) < 0)) {
                    atomicAnd((int*)&posscol2[woffs + neicol / WS], ~((unsigned int)MSB >> (neicol % WS)));
                  }
                }
              } else {
                // an undecided neighbor whose candidate range covers our
                // minimum color blocks the shortcut
                const int neimincol = neidata & Mask;
                const int neimaxcol = neimincol + neirange;
                if ((neimincol <= wmincol) && (neimaxcol >= wmincol)) wshortcut = false; //consolidated below
              }
            }
          }
          // consolidate the per-lane partial results across the warp
          wshortcut = __all_sync(Warp, wshortcut);
          wdone = __all_sync(Warp, wdone);
          wpcol &= __shfl_xor_sync(Warp, wpcol, 1);
          wpcol &= __shfl_xor_sync(Warp, wpcol, 2);
          wpcol &= __shfl_xor_sync(Warp, wpcol, 4);
          wpcol &= __shfl_xor_sync(Warp, wpcol, 8);
          wpcol &= __shfl_xor_sync(Warp, wpcol, 16);
          if (who == lane) pcol = wpcol;
          if (who == lane) done = wdone;
          if (who == lane) shortcut = wshortcut;
        }
        if (w < stop) {
          if (range > 0) {
            const int mincol = data & Mask;
            int val = pcol, mc = 0;
            if (pcol == 0) {
              // no candidate below WS left: scan the overflow bitmap for the
              // first word with a surviving color
              const int offs = beg / WS;
              mc = max(1, mincol / WS);
              while ((val = posscol2[offs + mc]) == 0) mc++;
            }
            int newmincol = mc * WS + __clz(val);  // lowest surviving color
            if (mincol != newmincol) shortcut = false;
            if (shortcut || done) {
              // finalize: keep only the chosen color (one-hot, or 0 if >= WS)
              pcol = (newmincol < WS) ? ((unsigned int)MSB >> newmincol) : 0;
            } else {
              // still undecided: repack (shrunken range, new mincol) and iterate
              const int maxcol = mincol + range;
              const int range = maxcol - newmincol;
              newmincol = (range << (WS / 2)) | newmincol;
              again = true;
            }
            posscol[v] = pcol;
            color[v] = newmincol;
          }
        }
      }
    } while (__any_sync(Warp, again));
  }
}
// Colors the remaining low-degree vertices (fewer than WS higher-priority
// neighbors). For these, color[v] initially holds the bitmask of
// higher-priority neighbor positions (set by init) and posscol[v] the
// bitmask of still-possible colors. Each vertex repeatedly prunes its
// candidate set against its neighbors' bitmaps until one color remains;
// the final color written to color[v] is the leading-zero count of the
// resulting one-hot bitmap. posscol is volatile: neighbors' entries are
// updated concurrently by other threads/blocks.
__global__
void runSmall(const int nodes,
              const int* const __restrict__ nidx,
              const int* const __restrict__ nlist,
              volatile int* const __restrict__ posscol,
              int* const __restrict__ color)
{
  const int thread = threadIdx.x + blockIdx.x * ThreadsPerBlock;
  const int threads = gridDim.x * ThreadsPerBlock;
  if (thread == 0) wlsize = 0;  // reset worklist counter for the next run
  bool again;
  do {
    again = false;
    for (int v = thread; v < nodes; v += threads) {
      int pcol = posscol[v];
      if (__popc(pcol) > 1) {  // more than one candidate color: not final yet
        const int beg = nidx[v];
        int active = color[v];  // bitmask of higher-priority neighbor positions
        int allnei = 0;         // union of the neighbors' candidate sets
        int keep = active;      // neighbors that still constrain this vertex
        do {
          // peel off one set bit of 'active'; its position indexes a neighbor
          const int old = active;
          active &= active - 1;
          const int curr = old ^ active;
          const int i = beg + __clz(curr);
          const int nei = nlist[i];
          const int neipcol = posscol[nei];
          allnei |= neipcol;
          if ((pcol & neipcol) == 0) {
            pcol &= pcol - 1;
            keep ^= curr;  // neighbor resolved: drop it from the constraint set
          } else if (__popc(neipcol) == 1) {
            pcol ^= neipcol;  // neighbor is final: its color is unavailable
            keep ^= curr;
          }
        } while (active != 0);
        if (keep != 0) {
          // shortcut: if our best remaining color cannot be taken by any
          // neighbor, commit to it immediately
          const int best = (unsigned int)MSB >> __clz(pcol);
          if ((best & ~allnei) != 0) {
            pcol = best;
            keep = 0;
          }
        }
        again |= keep;  // unresolved constraints remain: iterate again
        // resolved: encode the chosen color as its bit position
        if (keep == 0) keep = __clz(pcol);
        color[v] = keep;
        posscol[v] = pcol;
      }
    }
  } while (again);
}
// Driver: reads a CSR graph, runs the three-kernel coloring 100 times to get
// an average runtime, then verifies on the host that the result is a valid
// coloring (no node uncolored, no adjacent nodes share a color).
int main(int argc, char* argv[])
{
  printf("ECL-GC v1.2 (%s)\n", __FILE__);
  printf("Copyright 2020 Texas State University\n\n");

  // command-line and compile-time configuration checks
  if (argc != 2) {printf("USAGE: %s input_file_name\n\n", argv[0]); exit(-1);}
  if (WS != 32) {printf("ERROR: warp size must be 32\n\n"); exit(-1);}
  if (WS != sizeof(int) * 8) {printf("ERROR: bits per word must match warp size\n\n"); exit(-1);}
  if ((ThreadsPerBlock < WS) || ((ThreadsPerBlock % WS) != 0)) {
    printf("ERROR: threads per block must be a multiple of the warp size\n\n");
    exit(-1);
  }
  if ((ThreadsPerBlock & (ThreadsPerBlock - 1)) != 0) {
    printf("ERROR: threads per block must be a power of two\n\n");
    exit(-1);
  }

  // read input graph (CSR: g.nindex offsets + g.nlist adjacency)
  ECLgraph g = readECLgraph(argv[1]);
  printf("input: %s\n", argv[1]);
  printf("nodes: %d\n", g.nodes);
  printf("edges: %d\n", g.edges);
  printf("avg degree: %.2f\n", 1.0 * g.edges / g.nodes);

  int* const color = new int [g.nodes];
  int *nidx_d, *nlist_d, *nlist2_d, *posscol_d, *posscol2_d, *color_d, *wl_d;

  // allocate device buffers
  // BUGFIX: allocation/copy failures previously only printed a message and
  // then continued with invalid device pointers; abort instead
  if (cudaSuccess != cudaMalloc((void **)&nidx_d, (g.nodes + 1) * sizeof(int)))
    {printf("ERROR: could not allocate nidx_d\n\n"); exit(-1);}
  if (cudaSuccess != cudaMalloc((void **)&nlist_d, g.edges * sizeof(int)))
    {printf("ERROR: could not allocate nlist_d\n\n"); exit(-1);}
  if (cudaSuccess != cudaMalloc((void **)&nlist2_d, g.edges * sizeof(int)))
    {printf("ERROR: could not allocate nlist2_d\n\n"); exit(-1);}
  if (cudaSuccess != cudaMalloc((void **)&posscol_d, g.nodes * sizeof(int)))
    {printf("ERROR: could not allocate posscol_d\n\n"); exit(-1);}
  if (cudaSuccess != cudaMalloc((void **)&posscol2_d, (g.edges / WS + 1) * sizeof(int)))
    {printf("ERROR: could not allocate posscol2_d\n\n"); exit(-1);}
  if (cudaSuccess != cudaMalloc((void **)&color_d, g.nodes * sizeof(int)))
    {printf("ERROR: could not allocate color_d\n\n"); exit(-1);}
  if (cudaSuccess != cudaMalloc((void **)&wl_d, g.nodes * sizeof(int)))
    {printf("ERROR: could not allocate wl_d\n\n"); exit(-1);}

  // upload the graph
  if (cudaSuccess != cudaMemcpy(nidx_d, g.nindex, (g.nodes + 1) * sizeof(int), cudaMemcpyHostToDevice))
    {printf("ERROR: copying nidx to device failed\n\n"); exit(-1);}
  if (cudaSuccess != cudaMemcpy(nlist_d, g.nlist, g.edges * sizeof(int), cudaMemcpyHostToDevice))
    {printf("ERROR: copying nlist to device failed\n\n"); exit(-1);}

  // NOTE(review): hard-coded block count -- presumably tuned for a specific
  // GPU; consider deriving it from the device's SM count
  const int blocks = 24;

  // time 100 complete colorings and report the average
  cudaDeviceSynchronize();
  auto start = std::chrono::high_resolution_clock::now();
  for (int n = 0; n < 100; n++) {
    init<<<blocks, ThreadsPerBlock>>>(g.nodes, g.edges, nidx_d, nlist_d, nlist2_d, posscol_d, posscol2_d, color_d, wl_d);
    runLarge<<<blocks, ThreadsPerBlock>>>(g.nodes, nidx_d, nlist2_d, posscol_d, posscol2_d, color_d, wl_d);
    runSmall<<<blocks, ThreadsPerBlock>>>(g.nodes, nidx_d, nlist_d, posscol_d, color_d);
  }
  cudaDeviceSynchronize();
  auto end = std::chrono::high_resolution_clock::now();
  std::chrono::duration<double> elapsed_seconds = end - start;
  float runtime = (float)elapsed_seconds.count() / 100;
  printf("runtime: %.6f s\n", runtime);
  printf("throughput: %.6f Mnodes/s\n", g.nodes * 0.000001 / runtime);
  printf("throughput: %.6f Medges/s\n", g.edges * 0.000001 / runtime);

  if (cudaSuccess != cudaMemcpy(color, color_d, g.nodes * sizeof(int), cudaMemcpyDeviceToHost))
    {printf("ERROR: copying color from device failed\n\n"); exit(-1);}

  cudaFree(wl_d);
  cudaFree(color_d);
  cudaFree(posscol2_d);
  cudaFree(posscol_d);
  cudaFree(nlist2_d);
  cudaFree(nlist_d);
  cudaFree(nidx_d);

  // host-side verification of the coloring
  // BUGFIX: "Passed" used to be printed unconditionally, even when the
  // checks below had reported violations; track success explicitly
  bool ok = true;
  for (int v = 0; v < g.nodes; v++) {
    if (color[v] < 0) {
      ok = false;
      printf("ERROR: found unprocessed node in graph (node %d with deg %d)\n\n",
             v, g.nindex[v + 1] - g.nindex[v]);
    }
    for (int i = g.nindex[v]; i < g.nindex[v + 1]; i++) {
      if (color[g.nlist[i]] == color[v]) {
        ok = false;
        printf("ERROR: found adjacent nodes with same color %d (%d %d)\n\n",
               color[v], v, g.nlist[i]);
      }
    }
  }
  if (ok) printf("Passed\n");

#ifdef DEBUG
  // color histogram (first 'vals' colors) and total color count
  const int vals = 16;
  int c[vals];
  for (int i = 0; i < vals; i++) c[i] = 0;
  int cols = -1;
  for (int v = 0; v < g.nodes; v++) {
    cols = std::max(cols, color[v]);
    if (color[v] < vals) c[color[v]]++;
  }
  cols++;
  printf("colors used: %d\n", cols);
  int sum = 0;
  for (int i = 0; i < std::min(vals, cols); i++) {
    sum += c[i];
    printf("col %2d: %10d (%5.1f%%)\n", i, c[i], 100.0 * sum / g.nodes);
  }
#endif

  delete [] color;
  freeECLgraph(g);
  return 0;
}
|
523e7fcadcc9fe8e74e0dde611207ab07e15193d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <cfloat>
// TODO(jamesreed): I would use <cmath> here but std::isnan
// and std::isinf are declared constexpr there and the nvidia
// compiler throws an error because of it
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/unique.h>
#include "caffe2/core/context_gpu.h"
#include "utility_ops.h"
namespace caffe2 {
CAFFE_KNOWN_TYPE(const float*);
// Sets result[0] = true if any of the N elements of X is NaN or +/-Inf.
// Each thread OR-accumulates over its grid-strided elements; threads that
// saw a bad value all write the same 'true', so the racy write is benign.
// result[0] must be initialized to false by the caller.
__global__ void NanCheckKernel(int N, const float* X, bool* result) {
  bool has_nan = false;
  CUDA_1D_KERNEL_LOOP(i, N) {
    // Note: we have no need to do early return, since only if this fails
    // will we not need to inspect all elements. No need to optimize the
    // case that will fail.
    has_nan = has_nan || isnan(X[i]) || isinf(X[i]);
  }
  // NOTE(review): no shared memory is used, so this barrier appears
  // unnecessary -- presumably left over from an earlier version; harmless
  // because every thread reaches it.
  __syncthreads();
  if (has_nan) {
    result[0] = true;
  }
}
// Checks Input(0) for NaN/Inf on the GPU. On success acts as identity
// (copies X to Y unless in-place) and returns true; on failure dumps all
// inputs to stderr and returns false.
template <>
bool NanCheckOp<CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto* Y = Output(0);
  const size_t N = X.size();
  // one-element device flag, cleared before the scan
  scratch_.Resize(1);
  math::Set<bool, CUDAContext>(
      1, false, scratch_.mutable_data<bool>(), &context_);
  hipLaunchKernelGGL(( NanCheckKernel),
      dim3(CAFFE_GET_BLOCKS(N)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      N, X.data<float>(), scratch_.mutable_data<bool>());
  bool result = false;
  {
    std::lock_guard<std::mutex> lock(CUDAContext::mutex());
    // was a hard-coded "1"; sizeof(bool) states the intent explicitly
    CUDA_ENFORCE(hipMemcpyAsync(
        &result,
        scratch_.raw_data(),
        sizeof(bool),
        hipMemcpyDefault,
        context_.cuda_stream()));
  }
  // Note: we must synchronize here so we can inspect the result
  context_.FinishDeviceComputation();

  // Print out diagnostic info if we have a NaN or inf
  if (result) {
    std::cerr << "Tensor contained NaN or inf: " << this->def().input(0)
              << std::endl;
    for (int j = 0; j < InputSize(); j++) {
      TensorCPU cpu_X;
      cpu_X.ResizeLike(Input(j));
      // Hack to cause allocaiton happen here, so it won't happen
      // when we do CopyFrom. We need the mutex then because host->gpu
      // copies seem to possibly lock with NCCL.
      cpu_X.mutable_data<float>();
      {
        std::lock_guard<std::mutex> lock(CUDAContext::mutex());
        cpu_X.CopyFrom(Input(j), &context_);
      }
      context_.FinishDeviceComputation();
      std::cerr << "Input tensor: " << j << ": [" << def().input(j) << "]"
                << std::endl;
      tensorPrinter_.Print<float>(cpu_X);
      if (j == 0) {
        std::cerr << "NaN idxs:" << std::endl;
        auto* cpu_X_data = cpu_X.data<float>();
        for (size_t i = 0; i < cpu_X.size(); ++i) {
          if (isnan(cpu_X_data[i]) || isinf(cpu_X_data[i])) {
            std::cerr << i << " ";
          }
        }
      }
      std::cerr << std::endl;
    }
    return false;
  }
  // This op should act as an identity matrix if we don't find any NaNs/infs.
  // Copy over the data if we are not doing this in-place.
  if (&X != Y) {
    Y->CopyFrom(X, &context_);
  }
  return true;
}
REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>);
// Element-wise maximum: maxout[idx] = max(X[idx], Y[idx]) for idx in [0, N).
__global__ void
ElwiseMaxKernel(const float* X, const float* Y, float* maxout, const int N) {
  CUDA_1D_KERNEL_LOOP(idx, N) {
    const float a = X[idx];
    const float b = Y[idx];
    maxout[idx] = max(a, b);
  }
}
// Computes the element-wise maximum over all inputs as a chain of pairwise
// maxes accumulated into Output(0).
template <>
bool MaxOp<float, CUDAContext>::Compute() {
  float* output_data = Output(0)->mutable_data<float>();
  const int N = Input(0).size();
  // Run pairwise-maxes.
  // BUGFIX: the loop starts at i = 1, so the old "(i == 0 ? ...)" selector
  // never fired and the first iteration read Output(0) before anything had
  // been written to it (uninitialized unless the op ran in-place). The
  // first pairwise max must read Input(0); when in-place the two pointers
  // coincide, so behavior there is unchanged.
  for (int i = 1; i < InputSize(); ++i) {
    hipLaunchKernelGGL(( ElwiseMaxKernel),
        dim3(CAFFE_GET_BLOCKS(N)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context_.cuda_stream(),
        (i == 1 ? Input(0).data<float>() : Output(0)->data<float>()),
        Input(i).data<float>(),
        output_data,
        N);
  }
  // NOTE(review): with a single input the loop body never runs and the
  // output is never written -- presumably handled by the caller; confirm.
  return true;
}
REGISTER_CUDA_OPERATOR(Max, MaxOp<float, CUDAContext>);
// Copies N rows of block_size floats: Y[row] = X[indices[row]].
// One CUDA block per gathered row; threads stride over the row's elements.
template<typename T_INDEX>
__global__ void
GatherKernel(const float* X, float* Y, const T_INDEX* indices, const int N, const int block_size) {
  for (int row = blockIdx.x; row < N; row += gridDim.x) {
    const float* src = X + indices[row] * block_size;
    float* dst = Y + row * block_size;
    for (int col = threadIdx.x; col < block_size; col += blockDim.x) {
      dst[col] = src[col];
    }
  }
}
// Dispatches to DoRunWithType<Index> based on the dtype of the INDICES
// tensor (int32 or int64).
template <>
bool GatherOp<CUDAContext>::RunOnDevice() {
  return DispatchHelper<TensorTypes<int32_t,int64_t>>::call(
      this, OperatorBase::Input<TensorCUDA>(INDICES));
}
// Gathers rows of DATA at the positions given by INDICES:
// output[i, ...] = data[indices[i], ...].
template <>
template <typename Index>
bool GatherOp<CUDAContext>::DoRunWithType() {
  auto& data = Input(DATA);
  auto& indices = Input(INDICES);
  auto* output = Output(0);
  CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D");
  // output shape = indices shape followed by the trailing dims of data
  auto shape = indices.dims();
  shape.insert(shape.end(), data.dims().begin() + 1, data.dims().end());
  output->Resize(shape);
  // number of elements in one row of data
  int block_size = data.size() / data.dim(0);
  auto block_bytesize = data.size_from_dim(1) * data.meta().itemsize();
  CAFFE_ENFORCE(
      block_bytesize == data.nbytes() / data.dim(0),
      "block_bytesize should be consistent with data dim");
  int N = indices.size();
  // NOTE(review): raw data is reinterpreted as float*; presumably only
  // float-sized element types are expected here -- confirm.
  auto src_base = static_cast<const float*>(data.raw_data());
  const Index* idxs = indices.template data<Index>();
  auto out = static_cast<float*>(output->raw_mutable_data(data.meta()));
  // one CUDA block per gathered row, capped at CAFFE_MAXIMUM_NUM_BLOCKS
  hipLaunchKernelGGL(( GatherKernel),
      dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      src_base, out, idxs, N, block_size
  );
  return true;
}
REGISTER_CUDA_OPERATOR(Gather, GatherOp<CUDAContext>);
/**
 * @brief Update slices of Y in-place with a batch of weighted X's.
 * Y[idx] = alpha[b] * X[b][i] + Y[idx]
 * i=0,...,N-1
 * b=0,...,B-1
 * idx=Indices[i]
 *
 * One CUDA block handles one index slot i; threads in the block stride over
 * the slice's elements. Updates use atomicAdd because several i's may map
 * to the same idx. alpha holds device pointers to the per-batch weights.
 */
template <typename T_INDEX>
__global__ void AxpySliceKernel(
    const float* weight0,
    const TIndex N,
    const TIndex B,
    const TIndex slice_size,
    const float** alpha,
    const float** X,
    const T_INDEX* Indices,
    float* Y,
    const TIndex M) {
  // This implementation requires that the first weight is 1.0
  CUDA_KERNEL_ASSERT(weight0[0] == 1.0);
  for (int i = blockIdx.x; i < N; i += gridDim.x) {
    T_INDEX idx = Indices[i];
    float* y_offset = Y + (idx * slice_size);
    for (int b = 0; b < B; b++) {
      float a = *alpha[b];  // dereference the device-side weight scalar
      const float* x_offset = X[b] + (i * slice_size);
      for (int j = threadIdx.x; j < slice_size; j += blockDim.x) {
        atomicAdd(&y_offset[j], a * x_offset[j]);
      }
    }
  }
}
// Dispatches to DoRunWithType<Index> based on the dtype of the indices
// tensor (Input(2)): int32 or int64.
template <>
bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() {
  return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2));
}
// In-place scatter: for each index slot i and each (X_b, weight_b) pair in
// the inputs, output[indices[i]] += weight_b * X_b[i]. Inputs are laid out
// as (X0, weight0, indices, X1, w1, X2, w2, ...); weight0 must be 1.0
// (asserted inside the kernel) and X0 must alias the output.
template <>
template <typename Index>
bool ScatterWeightedSumOp<float,CUDAContext>::DoRunWithType() {
  DCHECK_EQ(InputSize() % 2, 1);
  auto& X0 = Input(0);
  auto& weight0 = Input(1);
  auto& indices = Input(2);
  auto* output = Output(0);
  CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required");
  DCHECK_GT(X0.size(), 0);
  DCHECK_GT(X0.ndim(), 0) << "X0 has to be at least the vector";
  DCHECK_EQ(weight0.size(), 1);
  TIndex M = X0.size();        // total number of elements in X0
  TIndex N = X0.dim(0);        // number of slices in X0
  TIndex K = indices.size();   // number of index slots to update
  TIndex block_size = M / N;   // elements per slice
  T* data = output->template mutable_data<T>();
  // In order to have all device pointers of x_i (and weight_i similarly)
  // consecutively in device memory, copy pointers to a host vector and then
  // copy back into a device array.
  const TIndex B = (InputSize() - 3) / 2;  // number of (X, weight) pairs
  x_data_host_.Resize(B);
  weights_host_.Resize(B);
  x_data_device_.Resize(B);
  weights_device_.Resize(B);
  const float** x_data_host = x_data_host_.mutable_data<const float*>();
  const float** weights_host = weights_host_.mutable_data<const float*>();
  const float** x_data_device = x_data_device_.mutable_data<const float*>();
  const float** weights_device = weights_device_.mutable_data<const float*>();
  for (int inp = 3; inp < InputSize(); inp += 2) {
    int idx = (inp - 3) / 2;
    x_data_host[idx] = static_cast<const float*>(Input(inp).raw_data());
    weights_host[idx] = static_cast<const float*>(Input(inp + 1).raw_data());
  }
  context_.Copy<const float*, CPUContext, CUDAContext>(
      B, x_data_host, x_data_device);
  context_.Copy<const float*, CPUContext, CUDAContext>(
      B, weights_host, weights_device);
  // one CUDA block per index slot, capped at CAFFE_MAXIMUM_NUM_BLOCKS
  hipLaunchKernelGGL(( AxpySliceKernel),
      dim3(std::min<TIndex>(K, CAFFE_MAXIMUM_NUM_BLOCKS)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      weight0.template data<float>(),
      K,
      B,
      block_size,
      weights_device,
      x_data_device,
      indices.template data<Index>(),
      data,
      M);
  return true;
}
REGISTER_CUDA_OPERATOR(
    ScatterWeightedSum,
    ScatterWeightedSumOp<float, CUDAContext>);
#if THRUST_VERSION >= 100800
// Builds the inverse mapping from original element positions to unique-value
// ids. Thread i owns the i-th unique value: starting at its first sorted
// position (second_order[i]) it writes id i for the whole run of sorted
// positions belonging to that value, translating each sorted position back
// to its original position via 'order'.
__global__ void remap_kernel(
    thrust::device_ptr<int> second_order,
    thrust::device_ptr<int> order,
    int* output,
    int N,
    int K) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i >= K)
    return;
  int idx = second_order[i];
  output[order[idx]] = i;
  // fill the remainder of this value's run (until the next unique value
  // begins, or the end of the array for the last unique value)
  // Maybe cuda 1D kernel?
  for (idx++; idx < N && (i == K - 1 || idx != second_order[i + 1]); idx++) {
    output[order[idx]] = i;
  }
  return;
}
// Computes the unique values of a 1-D input tensor on the GPU (sorted order)
// and, optionally, a remapping tensor that maps each original position to
// the index of its value in the unique output. Implemented with thrust
// sort_by_key + unique_by_key plus a custom remap kernel.
template <>
template <typename T>
void UniqueOp<CUDAContext>::DoRun() {
  auto& inputTensor = Input(0);
  // use dim32 to enforce that it's fine to have remapping of type int
  int N = inputTensor.dim32(0);
  CAFFE_ENFORCE_EQ(inputTensor.ndim(), 1, "Input should be a vector");
  auto* uniqueTensor = Output(UNIQUE);
  int* remapping = nullptr;
  if (REMAPPING < OutputSize()) {
    auto* remappingTensor = Output(REMAPPING);
    remappingTensor->ResizeLike(inputTensor);
    remapping = remappingTensor->template mutable_data<int>();
  }
  const T* input = inputTensor.template data<T>();
  // work on a scratch copy so the input itself is not reordered
  thrust_unique_buffer_.Resize(N);
  auto* buffer = thrust_unique_buffer_.template mutable_data<T>();
  context_.template CopyItems<CUDAContext, CUDAContext>(
      inputTensor.meta(), N, input, buffer);
  // Create two vector of {0, 1, ..., N-1} on CUDA device
  thrust::device_vector<int> order1(N), order2(N);
  thrust::sequence(
      thrust::hip::par.on(context_.cuda_stream()),
      order1.begin(),
      order1.end());
  thrust::sequence(
      thrust::hip::par.on(context_.cuda_stream()),
      order2.begin(),
      order2.end());
  // Sort the input along with order vector. So now we know where each element
  // is permutated to. For example:
  // input1 = 1,3,5,1,5,7,9
  // order1 = 0,1,2,3,4,5,6
  // Now we have:
  // output = 1,1,3,5,5,7,9
  // order1 = 0,3,1,2,4,5,6
  thrust::sort_by_key(
      thrust::hip::par.on(context_.cuda_stream()),
      buffer,
      buffer + N,
      order1.begin());
  // Use consequent unique op to get another order_buffer
  // input2 = 1,1,3,5,5,7,9
  // order2 = 0,1,2,3,4,5,6
  // Now we have:
  // output = 1,3,5,7,9
  // order2 = 0,2,3,5,6
  auto new_last = thrust::unique_by_key(
      thrust::hip::par.on(context_.cuda_stream()),
      buffer,
      buffer + N,
      order2.begin());
  int K = new_last.first - buffer;  // number of unique values
  uniqueTensor->Resize(K);
  T* unique = uniqueTensor->template mutable_data<T>();
  context_.template CopyItems<CUDAContext, CUDAContext>(
      thrust_unique_buffer_.meta(), K, buffer, unique);
  // Compute the remapping. For example, for the number 1, if we look at
  // order2[0] and order2[1], we know that input2[0:2) are all 1. They are all
  // remapped to 0 in final input. And from order1, we know where they come
  // from. The rest is easy.
  if (remapping != nullptr) {
    // record remap
    hipLaunchKernelGGL(( remap_kernel),
        dim3(CAFFE_GET_BLOCKS(K)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context_.cuda_stream(),
        order2.data(), order1.data(), remapping, N, K);
  }
}
namespace {
REGISTER_CUDA_OPERATOR(Unique, UniqueOp<CUDAContext>);
} // namespace
#endif // THRUST_VERSION >= 100800
} // namespace caffe2
| 523e7fcadcc9fe8e74e0dde611207ab07e15193d.cu | #include <math.h>
#include <cfloat>
// TODO(jamesreed): I would use <cmath> here but std::isnan
// and std::isinf are declared constexpr there and the nvidia
// compiler throws an error because of it
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/unique.h>
#include "caffe2/core/context_gpu.h"
#include "utility_ops.h"
namespace caffe2 {
CAFFE_KNOWN_TYPE(const float*);
// Sets result[0] = true if any of the N elements of X is NaN or +/-Inf.
// result[0] must be initialized to false by the caller; the racy write of
// 'true' from multiple threads is benign (same value).
__global__ void NanCheckKernel(int N, const float* X, bool* result) {
  bool has_nan = false;
  CUDA_1D_KERNEL_LOOP(i, N) {
    // Note: we have no need to do early return, since only if this fails
    // will we not need to inspect all elements. No need to optimize the
    // case that will fail.
    has_nan = has_nan || isnan(X[i]) || isinf(X[i]);
  }
  // NOTE(review): no shared memory is used, so this barrier appears
  // unnecessary -- presumably a leftover; harmless as all threads reach it.
  __syncthreads();
  if (has_nan) {
    result[0] = true;
  }
}
// Checks Input(0) for NaN/Inf on the GPU. On success acts as identity
// (copies X to Y unless in-place) and returns true; on failure dumps all
// inputs to stderr and returns false.
template <>
bool NanCheckOp<CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto* Y = Output(0);
  const size_t N = X.size();
  // one-element device flag, cleared before the scan
  scratch_.Resize(1);
  math::Set<bool, CUDAContext>(
      1, false, scratch_.mutable_data<bool>(), &context_);
  NanCheckKernel<<<
      CAFFE_GET_BLOCKS(N),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      N, X.data<float>(), scratch_.mutable_data<bool>());
  bool result = false;
  {
    std::lock_guard<std::mutex> lock(CUDAContext::mutex());
    // was a hard-coded "1"; sizeof(bool) states the intent explicitly
    CUDA_ENFORCE(cudaMemcpyAsync(
        &result,
        scratch_.raw_data(),
        sizeof(bool),
        cudaMemcpyDefault,
        context_.cuda_stream()));
  }
  // Note: we must synchronize here so we can inspect the result
  context_.FinishDeviceComputation();

  // Print out diagnostic info if we have a NaN or inf
  if (result) {
    std::cerr << "Tensor contained NaN or inf: " << this->def().input(0)
              << std::endl;
    for (int j = 0; j < InputSize(); j++) {
      TensorCPU cpu_X;
      cpu_X.ResizeLike(Input(j));
      // Hack to cause allocaiton happen here, so it won't happen
      // when we do CopyFrom. We need the mutex then because host->gpu
      // copies seem to possibly lock with NCCL.
      cpu_X.mutable_data<float>();
      {
        std::lock_guard<std::mutex> lock(CUDAContext::mutex());
        cpu_X.CopyFrom(Input(j), &context_);
      }
      context_.FinishDeviceComputation();
      std::cerr << "Input tensor: " << j << ": [" << def().input(j) << "]"
                << std::endl;
      tensorPrinter_.Print<float>(cpu_X);
      if (j == 0) {
        std::cerr << "NaN idxs:" << std::endl;
        auto* cpu_X_data = cpu_X.data<float>();
        for (size_t i = 0; i < cpu_X.size(); ++i) {
          if (isnan(cpu_X_data[i]) || isinf(cpu_X_data[i])) {
            std::cerr << i << " ";
          }
        }
      }
      std::cerr << std::endl;
    }
    return false;
  }
  // This op should act as an identity matrix if we don't find any NaNs/infs.
  // Copy over the data if we are not doing this in-place.
  if (&X != Y) {
    Y->CopyFrom(X, &context_);
  }
  return true;
}
REGISTER_CUDA_OPERATOR(NanCheck, NanCheckOp<CUDAContext>);
// Element-wise maximum: maxout[idx] = max(X[idx], Y[idx]) for idx in [0, N).
__global__ void
ElwiseMaxKernel(const float* X, const float* Y, float* maxout, const int N) {
  CUDA_1D_KERNEL_LOOP(idx, N) {
    const float a = X[idx];
    const float b = Y[idx];
    maxout[idx] = max(a, b);
  }
}
// Computes the element-wise maximum over all inputs as a chain of pairwise
// maxes accumulated into Output(0).
template <>
bool MaxOp<float, CUDAContext>::Compute() {
  float* output_data = Output(0)->mutable_data<float>();
  const int N = Input(0).size();
  // Run pairwise-maxes.
  // BUGFIX: the loop starts at i = 1, so the old "(i == 0 ? ...)" selector
  // never fired and the first iteration read Output(0) before anything had
  // been written to it (uninitialized unless the op ran in-place). The
  // first pairwise max must read Input(0); when in-place the two pointers
  // coincide, so behavior there is unchanged.
  for (int i = 1; i < InputSize(); ++i) {
    ElwiseMaxKernel<<<
        CAFFE_GET_BLOCKS(N),
        CAFFE_CUDA_NUM_THREADS,
        0,
        context_.cuda_stream()>>>(
        (i == 1 ? Input(0).data<float>() : Output(0)->data<float>()),
        Input(i).data<float>(),
        output_data,
        N);
  }
  // NOTE(review): with a single input the loop body never runs and the
  // output is never written -- presumably handled by the caller; confirm.
  return true;
}
REGISTER_CUDA_OPERATOR(Max, MaxOp<float, CUDAContext>);
// Copies N rows of block_size floats: Y[row] = X[indices[row]].
// One CUDA block per gathered row; threads stride over the row's elements.
template<typename T_INDEX>
__global__ void
GatherKernel(const float* X, float* Y, const T_INDEX* indices, const int N, const int block_size) {
  for (int row = blockIdx.x; row < N; row += gridDim.x) {
    const float* src = X + indices[row] * block_size;
    float* dst = Y + row * block_size;
    for (int col = threadIdx.x; col < block_size; col += blockDim.x) {
      dst[col] = src[col];
    }
  }
}
// Dispatches to DoRunWithType<Index> based on the dtype of the INDICES
// tensor (int32 or int64).
template <>
bool GatherOp<CUDAContext>::RunOnDevice() {
  return DispatchHelper<TensorTypes<int32_t,int64_t>>::call(
      this, OperatorBase::Input<TensorCUDA>(INDICES));
}
// Gathers rows of DATA at the positions given by INDICES:
// output[i, ...] = data[indices[i], ...].
template <>
template <typename Index>
bool GatherOp<CUDAContext>::DoRunWithType() {
  auto& data = Input(DATA);
  auto& indices = Input(INDICES);
  auto* output = Output(0);
  CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D");
  // output shape = indices shape followed by the trailing dims of data
  auto shape = indices.dims();
  shape.insert(shape.end(), data.dims().begin() + 1, data.dims().end());
  output->Resize(shape);
  // number of elements in one row of data
  int block_size = data.size() / data.dim(0);
  auto block_bytesize = data.size_from_dim(1) * data.meta().itemsize();
  CAFFE_ENFORCE(
      block_bytesize == data.nbytes() / data.dim(0),
      "block_bytesize should be consistent with data dim");
  int N = indices.size();
  // NOTE(review): raw data is reinterpreted as float*; presumably only
  // float-sized element types are expected here -- confirm.
  auto src_base = static_cast<const float*>(data.raw_data());
  const Index* idxs = indices.template data<Index>();
  auto out = static_cast<float*>(output->raw_mutable_data(data.meta()));
  // one CUDA block per gathered row, capped at CAFFE_MAXIMUM_NUM_BLOCKS
  GatherKernel<<<
      std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      src_base, out, idxs, N, block_size
  );
  return true;
}
REGISTER_CUDA_OPERATOR(Gather, GatherOp<CUDAContext>);
/**
 * @brief Update slices of Y in-place with a batch of weighted X's.
 * Y[idx] = alpha[b] * X[b][i] + Y[idx]
 * i=0,...,N-1
 * b=0,...,B-1
 * idx=Indices[i]
 *
 * One CUDA block handles one index slot i; threads in the block stride over
 * the slice's elements. Updates use atomicAdd because several i's may map
 * to the same idx. alpha holds device pointers to the per-batch weights.
 */
template <typename T_INDEX>
__global__ void AxpySliceKernel(
    const float* weight0,
    const TIndex N,
    const TIndex B,
    const TIndex slice_size,
    const float** alpha,
    const float** X,
    const T_INDEX* Indices,
    float* Y,
    const TIndex M) {
  // This implementation requires that the first weight is 1.0
  CUDA_KERNEL_ASSERT(weight0[0] == 1.0);
  for (int i = blockIdx.x; i < N; i += gridDim.x) {
    T_INDEX idx = Indices[i];
    float* y_offset = Y + (idx * slice_size);
    for (int b = 0; b < B; b++) {
      float a = *alpha[b];  // dereference the device-side weight scalar
      const float* x_offset = X[b] + (i * slice_size);
      for (int j = threadIdx.x; j < slice_size; j += blockDim.x) {
        atomicAdd(&y_offset[j], a * x_offset[j]);
      }
    }
  }
}
// Dispatches to DoRunWithType<Index> based on the dtype of the indices
// tensor (Input(2)): int32 or int64.
template <>
bool ScatterWeightedSumOp<float, CUDAContext>::RunOnDevice() {
  return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2));
}
// In-place scatter: for each index slot i and each (X_b, weight_b) pair in
// the inputs, output[indices[i]] += weight_b * X_b[i]. Inputs are laid out
// as (X0, weight0, indices, X1, w1, X2, w2, ...); weight0 must be 1.0
// (asserted inside the kernel) and X0 must alias the output.
template <>
template <typename Index>
bool ScatterWeightedSumOp<float,CUDAContext>::DoRunWithType() {
  DCHECK_EQ(InputSize() % 2, 1);
  auto& X0 = Input(0);
  auto& weight0 = Input(1);
  auto& indices = Input(2);
  auto* output = Output(0);
  CAFFE_ENFORCE_EQ(&X0, output, "In place operation is required");
  DCHECK_GT(X0.size(), 0);
  DCHECK_GT(X0.ndim(), 0) << "X0 has to be at least the vector";
  DCHECK_EQ(weight0.size(), 1);
  TIndex M = X0.size();        // total number of elements in X0
  TIndex N = X0.dim(0);        // number of slices in X0
  TIndex K = indices.size();   // number of index slots to update
  TIndex block_size = M / N;   // elements per slice
  T* data = output->template mutable_data<T>();
  // In order to have all device pointers of x_i (and weight_i similarly)
  // consecutively in device memory, copy pointers to a host vector and then
  // copy back into a device array.
  const TIndex B = (InputSize() - 3) / 2;  // number of (X, weight) pairs
  x_data_host_.Resize(B);
  weights_host_.Resize(B);
  x_data_device_.Resize(B);
  weights_device_.Resize(B);
  const float** x_data_host = x_data_host_.mutable_data<const float*>();
  const float** weights_host = weights_host_.mutable_data<const float*>();
  const float** x_data_device = x_data_device_.mutable_data<const float*>();
  const float** weights_device = weights_device_.mutable_data<const float*>();
  for (int inp = 3; inp < InputSize(); inp += 2) {
    int idx = (inp - 3) / 2;
    x_data_host[idx] = static_cast<const float*>(Input(inp).raw_data());
    weights_host[idx] = static_cast<const float*>(Input(inp + 1).raw_data());
  }
  context_.Copy<const float*, CPUContext, CUDAContext>(
      B, x_data_host, x_data_device);
  context_.Copy<const float*, CPUContext, CUDAContext>(
      B, weights_host, weights_device);
  // one CUDA block per index slot, capped at CAFFE_MAXIMUM_NUM_BLOCKS
  AxpySliceKernel<<<
      std::min<TIndex>(K, CAFFE_MAXIMUM_NUM_BLOCKS),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      weight0.template data<float>(),
      K,
      B,
      block_size,
      weights_device,
      x_data_device,
      indices.template data<Index>(),
      data,
      M);
  return true;
}
REGISTER_CUDA_OPERATOR(
    ScatterWeightedSum,
    ScatterWeightedSumOp<float, CUDAContext>);
#if THRUST_VERSION >= 100800
// Builds the inverse mapping from original element positions to unique-value
// ids. Thread i owns the i-th unique value: starting at its first sorted
// position (second_order[i]) it writes id i for the whole run of sorted
// positions belonging to that value, translating each sorted position back
// to its original position via 'order'.
__global__ void remap_kernel(
    thrust::device_ptr<int> second_order,
    thrust::device_ptr<int> order,
    int* output,
    int N,
    int K) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i >= K)
    return;
  int idx = second_order[i];
  output[order[idx]] = i;
  // fill the remainder of this value's run (until the next unique value
  // begins, or the end of the array for the last unique value)
  // Maybe cuda 1D kernel?
  for (idx++; idx < N && (i == K - 1 || idx != second_order[i + 1]); idx++) {
    output[order[idx]] = i;
  }
  return;
}
// Computes the unique values of a 1-D input tensor on the GPU (sorted order)
// and, optionally, a remapping tensor that maps each original position to
// the index of its value in the unique output. Implemented with thrust
// sort_by_key + unique_by_key plus a custom remap kernel.
template <>
template <typename T>
void UniqueOp<CUDAContext>::DoRun() {
  auto& inputTensor = Input(0);
  // use dim32 to enforce that it's fine to have remapping of type int
  int N = inputTensor.dim32(0);
  CAFFE_ENFORCE_EQ(inputTensor.ndim(), 1, "Input should be a vector");
  auto* uniqueTensor = Output(UNIQUE);
  int* remapping = nullptr;
  if (REMAPPING < OutputSize()) {
    auto* remappingTensor = Output(REMAPPING);
    remappingTensor->ResizeLike(inputTensor);
    remapping = remappingTensor->template mutable_data<int>();
  }
  const T* input = inputTensor.template data<T>();
  // work on a scratch copy so the input itself is not reordered
  thrust_unique_buffer_.Resize(N);
  auto* buffer = thrust_unique_buffer_.template mutable_data<T>();
  context_.template CopyItems<CUDAContext, CUDAContext>(
      inputTensor.meta(), N, input, buffer);
  // Create two vector of {0, 1, ..., N-1} on CUDA device
  thrust::device_vector<int> order1(N), order2(N);
  thrust::sequence(
      thrust::cuda::par.on(context_.cuda_stream()),
      order1.begin(),
      order1.end());
  thrust::sequence(
      thrust::cuda::par.on(context_.cuda_stream()),
      order2.begin(),
      order2.end());
  // Sort the input along with order vector. So now we know where each element
  // is permutated to. For example:
  // input1 = 1,3,5,1,5,7,9
  // order1 = 0,1,2,3,4,5,6
  // Now we have:
  // output = 1,1,3,5,5,7,9
  // order1 = 0,3,1,2,4,5,6
  thrust::sort_by_key(
      thrust::cuda::par.on(context_.cuda_stream()),
      buffer,
      buffer + N,
      order1.begin());
  // Use consequent unique op to get another order_buffer
  // input2 = 1,1,3,5,5,7,9
  // order2 = 0,1,2,3,4,5,6
  // Now we have:
  // output = 1,3,5,7,9
  // order2 = 0,2,3,5,6
  auto new_last = thrust::unique_by_key(
      thrust::cuda::par.on(context_.cuda_stream()),
      buffer,
      buffer + N,
      order2.begin());
  int K = new_last.first - buffer;  // number of unique values
  uniqueTensor->Resize(K);
  T* unique = uniqueTensor->template mutable_data<T>();
  context_.template CopyItems<CUDAContext, CUDAContext>(
      thrust_unique_buffer_.meta(), K, buffer, unique);
  // Compute the remapping. For example, for the number 1, if we look at
  // order2[0] and order2[1], we know that input2[0:2) are all 1. They are all
  // remapped to 0 in final input. And from order1, we know where they come
  // from. The rest is easy.
  if (remapping != nullptr) {
    // record remap
    remap_kernel<<<
        CAFFE_GET_BLOCKS(K),
        CAFFE_CUDA_NUM_THREADS,
        0,
        context_.cuda_stream()>>>(
        order2.data(), order1.data(), remapping, N, K);
  }
}
namespace {
REGISTER_CUDA_OPERATOR(Unique, UniqueOp<CUDAContext>);
} // namespace
#endif // THRUST_VERSION >= 100800
} // namespace caffe2
|
a27580e32132419110e30d5ca15d619368e98ab2.hip | // !!! This is a file automatically generated by hipify!!!
#include "CUAPI.h"
#include "CUFLU.h"
#ifdef GPU
extern real (*d_Flu_Array_F_In )[FLU_NIN ][ FLU_NXT*FLU_NXT*FLU_NXT ];
extern real (*d_Flu_Array_F_Out)[FLU_NOUT][ PS2*PS2*PS2 ];
extern real (*d_Flux_Array)[9][NFLUX_TOTAL][ PS2*PS2 ];
extern double (*d_Corner_Array_F)[3];
#ifdef DUAL_ENERGY
extern char (*d_DE_Array_F_Out)[ PS2*PS2*PS2 ];
#endif
// global memory arrays in different models
#if ( MODEL == HYDRO )
#if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
extern real (*d_PriVar) [NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ];
extern real (*d_Slope_PPM_x)[NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM ];
extern real (*d_Slope_PPM_y)[NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM ];
extern real (*d_Slope_PPM_z)[NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM ];
extern real (*d_FC_Var_xL) [NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ];
extern real (*d_FC_Var_xR) [NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ];
extern real (*d_FC_Var_yL) [NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ];
extern real (*d_FC_Var_yR) [NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ];
extern real (*d_FC_Var_zL) [NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ];
extern real (*d_FC_Var_zR) [NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ];
extern real (*d_FC_Flux_x) [NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ];
extern real (*d_FC_Flux_y) [NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ];
extern real (*d_FC_Flux_z) [NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ];
#endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
#elif ( MODEL == MHD )
#warning : WAIT MHD !!!
#elif ( MODEL != ELBDM )
#warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
#endif // MODEL
extern hipStream_t *Stream;
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_MemFree_Fluid
// Description : Free the GPU and CPU memory previously allocated by CUAPI_MemAllocate_Fluid()
//
// Parameter : GPU_NStream : Number of CUDA streams for the asynchronous memory copy
//-------------------------------------------------------------------------------------------------------
void CUAPI_MemFree_Fluid( const int GPU_NStream )
{
// local helpers: release a buffer only if it was actually allocated, then
// reset the pointer to NULL so a later allocate/free cycle behaves correctly
#  define FREE_DEV_PTR( Ptr ) { if ( (Ptr) != NULL ) { CUDA_CHECK_ERROR( hipFree( Ptr ) ); (Ptr) = NULL; } }
#  define FREE_HOST_PTR( Ptr ) { if ( (Ptr) != NULL ) { CUDA_CHECK_ERROR( hipHostFree( Ptr ) ); (Ptr) = NULL; } }
// free the device memory (in all models)
FREE_DEV_PTR( d_Flu_Array_F_In );
FREE_DEV_PTR( d_Flu_Array_F_Out );
FREE_DEV_PTR( d_Flux_Array );
FREE_DEV_PTR( d_Corner_Array_F );
#  ifdef DUAL_ENERGY
FREE_DEV_PTR( d_DE_Array_F_Out );
#  endif
// free the device memory (in different models)
#  if ( MODEL == HYDRO )
#  if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
FREE_DEV_PTR( d_PriVar );
FREE_DEV_PTR( d_Slope_PPM_x );
FREE_DEV_PTR( d_Slope_PPM_y );
FREE_DEV_PTR( d_Slope_PPM_z );
FREE_DEV_PTR( d_FC_Var_xL );
FREE_DEV_PTR( d_FC_Var_xR );
FREE_DEV_PTR( d_FC_Var_yL );
FREE_DEV_PTR( d_FC_Var_yR );
FREE_DEV_PTR( d_FC_Var_zL );
FREE_DEV_PTR( d_FC_Var_zR );
FREE_DEV_PTR( d_FC_Flux_x );
FREE_DEV_PTR( d_FC_Flux_y );
FREE_DEV_PTR( d_FC_Flux_z );
#  endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
#  elif ( MODEL == MHD )
#  warning : WAIT MHD !!!
#  elif ( MODEL != ELBDM )
#  warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
#  endif // MODEL
// free the page-locked host memory allocated by CUDA
// (two buffers per array; presumably for double buffering — TODO confirm)
for (int t=0; t<2; t++)
{
FREE_HOST_PTR( h_Flu_Array_F_In [t] );
FREE_HOST_PTR( h_Flu_Array_F_Out[t] );
FREE_HOST_PTR( h_Flux_Array [t] );
FREE_HOST_PTR( h_Corner_Array_F [t] );
#  ifdef DUAL_ENERGY
FREE_HOST_PTR( h_DE_Array_F_Out [t] );
#  endif
} // for (int t=0; t<2; t++)
// destroy all CUDA streams and the array holding them
if ( Stream != NULL )
{
for (int s=0; s<GPU_NStream; s++)
CUDA_CHECK_ERROR( hipStreamDestroy( Stream[s] ) );
delete [] Stream;
Stream = NULL;
}
#  undef FREE_DEV_PTR
#  undef FREE_HOST_PTR
} // FUNCTION : CUAPI_MemFree_Fluid
#endif // #ifdef GPU
| a27580e32132419110e30d5ca15d619368e98ab2.cu | #include "CUAPI.h"
#include "CUFLU.h"
#ifdef GPU
extern real (*d_Flu_Array_F_In )[FLU_NIN ][ FLU_NXT*FLU_NXT*FLU_NXT ];
extern real (*d_Flu_Array_F_Out)[FLU_NOUT][ PS2*PS2*PS2 ];
extern real (*d_Flux_Array)[9][NFLUX_TOTAL][ PS2*PS2 ];
extern double (*d_Corner_Array_F)[3];
#ifdef DUAL_ENERGY
extern char (*d_DE_Array_F_Out)[ PS2*PS2*PS2 ];
#endif
// global memory arrays in different models
#if ( MODEL == HYDRO )
#if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
extern real (*d_PriVar) [NCOMP_TOTAL][ FLU_NXT*FLU_NXT*FLU_NXT ];
extern real (*d_Slope_PPM_x)[NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM ];
extern real (*d_Slope_PPM_y)[NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM ];
extern real (*d_Slope_PPM_z)[NCOMP_TOTAL][ N_SLOPE_PPM*N_SLOPE_PPM*N_SLOPE_PPM ];
extern real (*d_FC_Var_xL) [NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ];
extern real (*d_FC_Var_xR) [NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ];
extern real (*d_FC_Var_yL) [NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ];
extern real (*d_FC_Var_yR) [NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ];
extern real (*d_FC_Var_zL) [NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ];
extern real (*d_FC_Var_zR) [NCOMP_TOTAL][ N_FC_VAR*N_FC_VAR*N_FC_VAR ];
extern real (*d_FC_Flux_x) [NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ];
extern real (*d_FC_Flux_y) [NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ];
extern real (*d_FC_Flux_z) [NCOMP_TOTAL][ N_FC_FLUX*N_FC_FLUX*N_FC_FLUX ];
#endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
#elif ( MODEL == MHD )
#warning : WAIT MHD !!!
#elif ( MODEL != ELBDM )
#warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
#endif // MODEL
extern cudaStream_t *Stream;
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_MemFree_Fluid
// Description : Free the GPU and CPU memory previously allocated by CUAPI_MemAllocate_Fluid()
//
// Parameter : GPU_NStream : Number of CUDA streams for the asynchronous memory copy
//-------------------------------------------------------------------------------------------------------
void CUAPI_MemFree_Fluid( const int GPU_NStream )
{
// local helpers: release a buffer only if it was actually allocated, then
// reset the pointer to NULL so a later allocate/free cycle behaves correctly
#  define FREE_DEV_PTR( Ptr ) { if ( (Ptr) != NULL ) { CUDA_CHECK_ERROR( cudaFree( Ptr ) ); (Ptr) = NULL; } }
#  define FREE_HOST_PTR( Ptr ) { if ( (Ptr) != NULL ) { CUDA_CHECK_ERROR( cudaFreeHost( Ptr ) ); (Ptr) = NULL; } }
// free the device memory (in all models)
FREE_DEV_PTR( d_Flu_Array_F_In );
FREE_DEV_PTR( d_Flu_Array_F_Out );
FREE_DEV_PTR( d_Flux_Array );
FREE_DEV_PTR( d_Corner_Array_F );
#  ifdef DUAL_ENERGY
FREE_DEV_PTR( d_DE_Array_F_Out );
#  endif
// free the device memory (in different models)
#  if ( MODEL == HYDRO )
#  if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
FREE_DEV_PTR( d_PriVar );
FREE_DEV_PTR( d_Slope_PPM_x );
FREE_DEV_PTR( d_Slope_PPM_y );
FREE_DEV_PTR( d_Slope_PPM_z );
FREE_DEV_PTR( d_FC_Var_xL );
FREE_DEV_PTR( d_FC_Var_xR );
FREE_DEV_PTR( d_FC_Var_yL );
FREE_DEV_PTR( d_FC_Var_yR );
FREE_DEV_PTR( d_FC_Var_zL );
FREE_DEV_PTR( d_FC_Var_zR );
FREE_DEV_PTR( d_FC_Flux_x );
FREE_DEV_PTR( d_FC_Flux_y );
FREE_DEV_PTR( d_FC_Flux_z );
#  endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
#  elif ( MODEL == MHD )
#  warning : WAIT MHD !!!
#  elif ( MODEL != ELBDM )
#  warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
#  endif // MODEL
// free the page-locked host memory allocated by CUDA
// (two buffers per array; presumably for double buffering — TODO confirm)
for (int t=0; t<2; t++)
{
FREE_HOST_PTR( h_Flu_Array_F_In [t] );
FREE_HOST_PTR( h_Flu_Array_F_Out[t] );
FREE_HOST_PTR( h_Flux_Array [t] );
FREE_HOST_PTR( h_Corner_Array_F [t] );
#  ifdef DUAL_ENERGY
FREE_HOST_PTR( h_DE_Array_F_Out [t] );
#  endif
} // for (int t=0; t<2; t++)
// destroy all CUDA streams and the array holding them
if ( Stream != NULL )
{
for (int s=0; s<GPU_NStream; s++)
CUDA_CHECK_ERROR( cudaStreamDestroy( Stream[s] ) );
delete [] Stream;
Stream = NULL;
}
#  undef FREE_DEV_PTR
#  undef FREE_HOST_PTR
} // FUNCTION : CUAPI_MemFree_Fluid
#endif // #ifdef GPU
|
19ef837fcecbaa33b201c96c432719ed3de8fba6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <math.h>
#include <hipcub/hipcub.hpp>
#include "open3d/core/nns/FixedRadiusSearch.h"
#include "open3d/core/nns/MemoryAllocation.h"
#include "open3d/core/nns/NeighborSearchCommon.h"
#include "open3d/utility/Helper.h"
#include "open3d/utility/MiniVec.h"
namespace open3d {
namespace core {
namespace nns {
namespace {
template <class T>
using Vec3 = utility::MiniVec<T, 3>;
/// Computes the distance of two points and tests if the distance is below a
/// threshold.
///
/// \tparam METRIC The distance metric. One of L1, L2, Linf.
/// \tparam T Floating point type for the distances.
///
/// \param p1 A 3D point
/// \param p2 Another 3D point
/// \param dist Output parameter for the distance.
/// \param threshold The scalar threshold.
///
/// \return Returns true if the distance is <= threshold.
///
template <int METRIC = L2, class T>
inline __device__ bool NeighborTest(const Vec3<T>& p1,
                                    const Vec3<T>& p2,
                                    T* dist,
                                    T threshold) {
    // Compute the distance under the compile-time metric. Note that for L2
    // the SQUARED distance is produced, so the caller must pass a squared
    // threshold in that case.
    if (METRIC == Linf) {
        const Vec3<T> d = (p1 - p2).abs();
        // max of the three absolute component differences
        T m = d[0];
        if (d[1] > m) m = d[1];
        if (d[2] > m) m = d[2];
        *dist = m;
    } else if (METRIC == L1) {
        const Vec3<T> d = (p1 - p2).abs();
        *dist = d[0] + d[1] + d[2];
    } else {
        // default: L2 (squared)
        const Vec3<T> d = p1 - p2;
        *dist = d.dot(d);
    }
    // neighbor iff distance does not exceed the threshold
    return *dist <= threshold;
}
/// Kernel for CountHashTableEntries
/// One thread per point: hash the point's voxel and atomically bump the
/// corresponding entry of the count table.
template <class T>
__global__ void CountHashTableEntriesKernel(uint32_t* count_table,
size_t hash_table_size,
T inv_voxel_size,
const T* const __restrict__ points,
size_t num_points) {
// flat global thread id; surplus threads of the last block exit early
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= num_points) return;
// points are stored as consecutive xyz triplets
Vec3<T> pos(&points[idx * 3]);
Vec3<int> voxel_index = ComputeVoxelIndex(pos, inv_voxel_size);
size_t hash = SpatialHash(voxel_index) % hash_table_size;
// +1 offset: count_table[0] is deliberately left untouched so that a later
// scan over the table yields row-splits (see CountHashTableEntries docs).
atomicAdd(&count_table[hash + 1], 1);
}
/// Counts for each hash entry the number of points that map to this entry.
///
/// \param count_table Pointer to the table for counting.
/// The first element will not be used, i.e. the
/// number of points for the first hash entry is in count_table[1].
/// This array must be initialized before calling this function.
///
/// \param count_table_size This is the size of the hash table + 1.
///
/// \param inv_voxel_size Reciproval of the voxel size
///
/// \param points Array with the 3D point positions.
///
/// \param num_points The number of points.
///
template <class T>
void CountHashTableEntries(const hipStream_t& stream,
                           uint32_t* count_table,
                           size_t count_table_size,
                           T inv_voxel_size,
                           const T* points,
                           size_t num_points) {
    // Launch configuration: one thread per point, 64 threads per block.
    const dim3 block(64, 1, 1);
    dim3 grid(1, 1, 1);
    grid.x = utility::DivUp(num_points, block.x);
    // Nothing to count for an empty point set; avoid a zero-sized launch.
    if (grid.x == 0) return;
    // count_table_size is hash_table_size + 1; pass the true hash table size.
    hipLaunchKernelGGL((CountHashTableEntriesKernel<T>), grid, block, 0,
                       stream, count_table, count_table_size - 1,
                       inv_voxel_size, points, num_points);
}
/// Kernel for ComputePointIndexTable
/// One thread per point of the batch [points_start_idx, points_end_idx):
/// writes the point's global index into its hash cell's slot range, using
/// count_tmp as a per-cell atomic write cursor (must be zeroed beforehand).
template <class T>
__global__ void ComputePointIndexTableKernel(
int64_t* __restrict__ point_index_table,
uint32_t* __restrict__ count_tmp,
const int64_t* const __restrict__ hash_table_cell_splits,
size_t hash_table_size,
T inv_voxel_size,
const T* const __restrict__ points,
const size_t points_start_idx,
const size_t points_end_idx) {
// global point index, offset by the batch start
const int idx = blockDim.x * blockIdx.x + threadIdx.x + points_start_idx;
if (idx >= points_end_idx) return;
Vec3<T> pos(&points[idx * 3]);
Vec3<int> voxel_index = ComputeVoxelIndex(pos, inv_voxel_size);
size_t hash = SpatialHash(voxel_index[0], voxel_index[1], voxel_index[2]) %
hash_table_size;
// cell start from the row splits + atomic cursor gives a unique slot;
// order of points within a cell is therefore nondeterministic
point_index_table[hash_table_cell_splits[hash] +
atomicAdd(&count_tmp[hash], 1)] = idx;
}
/// Writes the index of the points to the hash cells.
///
/// \param point_index_table The output array storing the point indices for
/// all cells. Start and end of each cell is defined by
/// \p hash_table_prefix_sum
///
/// \param count_tmp Temporary memory of size \p hash_table_cell_splits_size
/// .
///
/// \param hash_table_cell_splits The row splits array describing the start
/// and end of each cell.
///
/// \param hash_table_cell_splits_size The size of the hash table.
///
/// \param inv_voxel_size Reciproval of the voxel size
///
/// \param points Array with the 3D point positions.
///
/// \param num_points The number of points.
///
template <class T>
void ComputePointIndexTable(
        const hipStream_t& stream,
        int64_t* __restrict__ point_index_table,
        uint32_t* __restrict__ count_tmp,
        const int64_t* const __restrict__ hash_table_cell_splits,
        size_t hash_table_cell_splits_size,
        T inv_voxel_size,
        const T* const __restrict__ points,
        size_t points_start_idx,
        size_t points_end_idx) {
    // Reset the per-cell write cursors used by the kernel's atomicAdd.
    hipMemsetAsync(count_tmp, 0,
                   sizeof(uint32_t) * hash_table_cell_splits_size, stream);
    const size_t num_points = points_end_idx - points_start_idx;
    // One thread per point of this batch, 64 threads per block.
    const dim3 block(64, 1, 1);
    dim3 grid(1, 1, 1);
    grid.x = utility::DivUp(num_points, block.x);
    // Empty batch: only the cursor reset above was required.
    if (grid.x == 0) return;
    hipLaunchKernelGGL((ComputePointIndexTableKernel<T>), grid, block, 0,
                       stream, point_index_table, count_tmp,
                       hash_table_cell_splits,
                       hash_table_cell_splits_size - 1, inv_voxel_size,
                       points, points_start_idx, points_end_idx);
}
/// Kernel for CountNeighbors
/// One thread per query point: counts the neighbors within the search radius
/// by scanning the (at most 8) hash bins that the radius-neighborhood of the
/// query can touch.
template <int METRIC, class T>
__global__ void CountNeighborsKernel(
int64_t* __restrict__ neighbors_count,
const int64_t* const __restrict__ point_index_table,
const int64_t* const __restrict__ hash_table_cell_splits,
size_t hash_table_size,
const T* const __restrict__ query_points,
size_t num_queries,
const T* const __restrict__ points,
const T inv_voxel_size,
const T radius,
const T threshold) {
int query_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (query_idx >= num_queries) return;
int count = 0; // counts the number of neighbors for this query point
Vec3<T> query_pos(query_points[query_idx * 3 + 0],
query_points[query_idx * 3 + 1],
query_points[query_idx * 3 + 2]);
// bin of the query point itself
Vec3<int> voxel_index = ComputeVoxelIndex(query_pos, inv_voxel_size);
int hash = SpatialHash(voxel_index[0], voxel_index[1], voxel_index[2]) %
hash_table_size;
// Distinct hash bins to scan; -1 marks unused slots. The 8 corners of the
// axis-aligned cube of half-edge 'radius' around the query cover all voxels
// that may contain neighbors (presumably voxel size == 2*radius as set up in
// BuildSpatialHashTableCUDA — TODO confirm for all callers).
int bins_to_visit[8] = {hash, -1, -1, -1, -1, -1, -1, -1};
for (int dz = -1; dz <= 1; dz += 2)
for (int dy = -1; dy <= 1; dy += 2)
for (int dx = -1; dx <= 1; dx += 2) {
Vec3<T> p = query_pos + radius * Vec3<T>(T(dx), T(dy), T(dz));
voxel_index = ComputeVoxelIndex(p, inv_voxel_size);
hash = SpatialHash(voxel_index[0], voxel_index[1],
voxel_index[2]) %
hash_table_size;
// insert without duplicates
for (int i = 0; i < 8; ++i) {
if (bins_to_visit[i] == hash) {
break;
} else if (bins_to_visit[i] == -1) {
bins_to_visit[i] = hash;
break;
}
}
}
// test every point stored in the collected bins against the query
for (int bin_i = 0; bin_i < 8; ++bin_i) {
int bin = bins_to_visit[bin_i];
if (bin == -1) break;
// [begin_idx, end_idx) is this bin's slice of point_index_table
size_t begin_idx = hash_table_cell_splits[bin];
size_t end_idx = hash_table_cell_splits[bin + 1];
for (size_t j = begin_idx; j < end_idx; ++j) {
int64_t idx = point_index_table[j];
Vec3<T> p(&points[idx * 3 + 0]);
T dist;
// NeighborTest compares against 'threshold' (squared radius for L2)
if (NeighborTest<METRIC>(p, query_pos, &dist, threshold)) ++count;
}
}
neighbors_count[query_idx] = count;
}
/// Count the number of neighbors for each query point
///
/// \param neighbors_count Output array for counting the number of neighbors.
/// The size of the array is \p num_queries.
///
/// \param point_index_table The array storing the point indices for all
/// cells. Start and end of each cell is defined by \p
/// hash_table_cell_splits
///
/// \param hash_table_cell_splits The row splits array describing the start
/// and end of each cell.
///
/// \param hash_table_cell_splits_size This is the length of the
/// hash_table_cell_splits array.
///
/// \param query_points Array with the 3D query positions. This may be the
/// same array as \p points.
///
/// \param num_queries The number of query points.
///
/// \param points Array with the 3D point positions.
///
/// \param num_points The number of points.
///
/// \param inv_voxel_size Reciproval of the voxel size
///
/// \param radius The search radius.
///
/// \param metric One of L1, L2, Linf. Defines the distance metric for the
/// search.
///
/// \param ignore_query_point If true then points with the same position as
/// the query point will be ignored.
///
template <class T>
void CountNeighbors(const hipStream_t& stream,
int64_t* neighbors_count,
const int64_t* const point_index_table,
const int64_t* const hash_table_cell_splits,
size_t hash_table_cell_splits_size,
const T* const query_points,
size_t num_queries,
const T* const points,
const T inv_voxel_size,
const T radius,
const Metric metric) {
// const bool ignore_query_point) {
// For L2 the kernel compares squared distances, so square the radius once.
const T threshold = (metric == L2 ? radius * radius : radius);
// one thread per query point, 64 threads per block
const int BLOCKSIZE = 64;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(0, 1, 1);
grid.x = utility::DivUp(num_queries, block.x);
// skip the launch entirely when there are no queries
if (grid.x) {
// The macros instantiate the kernel once per metric and dispatch on the
// runtime 'metric' value; exactly one branch launches.
#define FN_PARAMETERS \
neighbors_count, point_index_table, hash_table_cell_splits, \
hash_table_cell_splits_size - 1, query_points, num_queries, \
points, inv_voxel_size, radius, threshold
#define CALL_TEMPLATE(METRIC) \
if (METRIC == metric) { \
hipLaunchKernelGGL(( CountNeighborsKernel<METRIC, T>) \
, dim3(grid), dim3(block), 0, stream, FN_PARAMETERS); \
}
CALL_TEMPLATE(L1)
CALL_TEMPLATE(L2)
CALL_TEMPLATE(Linf)
#undef CALL_TEMPLATE
#undef FN_PARAMETERS
}
}
/// Kernel for WriteNeighborsIndicesAndDistances
/// One thread per query point: re-runs the same bin scan as
/// CountNeighborsKernel and writes each neighbor's index (and optionally its
/// distance) into the slice of the output given by neighbors_row_splits.
template <class T, int METRIC, bool RETURN_DISTANCES>
__global__ void WriteNeighborsIndicesAndDistancesKernel(
int64_t* __restrict__ indices,
T* __restrict__ distances,
const int64_t* const __restrict__ neighbors_row_splits,
const int64_t* const __restrict__ point_index_table,
const int64_t* const __restrict__ hash_table_cell_splits,
size_t hash_table_size,
const T* const __restrict__ query_points,
size_t num_queries,
const T* const __restrict__ points,
const T inv_voxel_size,
const T radius,
const T threshold) {
int query_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (query_idx >= num_queries) return;
int count = 0; // counts the number of neighbors for this query point
// start of this query's output slice (from the earlier counting pass)
size_t indices_offset = neighbors_row_splits[query_idx];
Vec3<T> query_pos(query_points[query_idx * 3 + 0],
query_points[query_idx * 3 + 1],
query_points[query_idx * 3 + 2]);
Vec3<int> voxel_index = ComputeVoxelIndex(query_pos, inv_voxel_size);
int hash = SpatialHash(voxel_index) % hash_table_size;
// distinct hash bins reachable within 'radius'; -1 marks unused slots
int bins_to_visit[8] = {hash, -1, -1, -1, -1, -1, -1, -1};
for (int dz = -1; dz <= 1; dz += 2) {
for (int dy = -1; dy <= 1; dy += 2) {
for (int dx = -1; dx <= 1; dx += 2) {
Vec3<T> p = query_pos + radius * Vec3<T>(T(dx), T(dy), T(dz));
voxel_index = ComputeVoxelIndex(p, inv_voxel_size);
hash = SpatialHash(voxel_index) % hash_table_size;
// insert without duplicates
for (int i = 0; i < 8; ++i) {
if (bins_to_visit[i] == hash) {
break;
} else if (bins_to_visit[i] == -1) {
bins_to_visit[i] = hash;
break;
}
}
}
}
}
// emit every point of the collected bins that passes the distance test
for (int bin_i = 0; bin_i < 8; ++bin_i) {
int bin = bins_to_visit[bin_i];
if (bin == -1) break;
size_t begin_idx = hash_table_cell_splits[bin];
size_t end_idx = hash_table_cell_splits[bin + 1];
for (size_t j = begin_idx; j < end_idx; ++j) {
int64_t idx = point_index_table[j];
Vec3<T> p(&points[idx * 3 + 0]);
T dist;
if (NeighborTest<METRIC>(p, query_pos, &dist, threshold)) {
indices[indices_offset + count] = idx;
// distances are only touched when requested at compile time,
// so 'distances' may be null otherwise
if (RETURN_DISTANCES) {
distances[indices_offset + count] = dist;
}
++count;
}
}
}
}
/// Write indices and distances of neighbors for each query point
///
/// \param indices Output array with the neighbors indices.
///
/// \param distances Output array with the neighbors distances. May be null
/// if return_distances is false.
///
/// \param neighbors_row_splits This is the prefix sum which describes
/// start and end of the neighbors and distances for each query point.
///
/// \param point_index_table The array storing the point indices for all
/// cells. Start and end of each cell is defined by \p
/// hash_table_cell_splits
///
/// \param hash_table_cell_splits The row splits array describing the start
/// and end of each cell.
///
/// \param hash_table_cell_splits_size This is the length of the
/// hash_table_cell_splits array.
///
/// \param query_points Array with the 3D query positions. This may be the
/// same array as \p points.
///
/// \param num_queries The number of query points.
///
/// \param points Array with the 3D point positions.
///
/// \param num_points The number of points.
///
/// \param inv_voxel_size Reciproval of the voxel size
///
/// \param radius The search radius.
///
/// \param metric One of L1, L2, Linf. Defines the distance metric for the
/// search.
///
/// \param ignore_query_point If true then points with the same position as
/// the query point will be ignored.
///
/// \param return_distances If true then this function will return the
/// distances for each neighbor to its query point in the same format
/// as the indices.
/// Note that for the L2 metric the squared distances will be returned!!
template <class T>
void WriteNeighborsIndicesAndDistances(
const hipStream_t& stream,
int64_t* indices,
T* distances,
const int64_t* const neighbors_row_splits,
const int64_t* const point_index_table,
const int64_t* const hash_table_cell_splits,
size_t hash_table_cell_splits_size,
const T* const query_points,
size_t num_queries,
const T* const points,
const T inv_voxel_size,
const T radius,
const Metric metric,
const bool return_distances) {
// For L2 the kernel compares squared distances, so square the radius once.
const T threshold = (metric == L2 ? radius * radius : radius);
// one thread per query point, 64 threads per block
const int BLOCKSIZE = 64;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(0, 1, 1);
grid.x = utility::DivUp(num_queries, block.x);
// skip the launch entirely when there are no queries
if (grid.x) {
// The macros instantiate the kernel for every (metric, return_distances)
// combination and dispatch on the runtime values; exactly one launches.
#define FN_PARAMETERS \
indices, distances, neighbors_row_splits, point_index_table, \
hash_table_cell_splits, hash_table_cell_splits_size - 1, \
query_points, num_queries, points, inv_voxel_size, radius, \
threshold
#define CALL_TEMPLATE(METRIC, RETURN_DISTANCES) \
if (METRIC == metric && RETURN_DISTANCES == return_distances) { \
hipLaunchKernelGGL(( WriteNeighborsIndicesAndDistancesKernel<T, METRIC, RETURN_DISTANCES>) \
, dim3(grid), dim3(block), 0, stream, FN_PARAMETERS); \
}
#define CALL_TEMPLATE2(METRIC) \
CALL_TEMPLATE(METRIC, true) \
CALL_TEMPLATE(METRIC, false)
#define CALL_TEMPLATE3 \
CALL_TEMPLATE2(L1) \
CALL_TEMPLATE2(L2) \
CALL_TEMPLATE2(Linf)
CALL_TEMPLATE3
#undef CALL_TEMPLATE
#undef CALL_TEMPLATE2
#undef CALL_TEMPLATE3
#undef FN_PARAMETERS
}
}
/// Kernel for WriteNeighborsHybrid
/// One thread per query point, hybrid (radius + k-NN) search: keeps at most
/// max_knn nearest neighbors within the radius by tracking the current
/// farthest kept neighbor, then sorts the kept neighbors by distance.
/// NOTE(review): 'distances' is written unconditionally (it is used as
/// working storage for the selection), so the caller must pass a valid
/// buffer even when RETURN_DISTANCES is false — confirm against callers.
template <class T, int METRIC, bool RETURN_DISTANCES>
__global__ void WriteNeighborsHybridKernel(
int64_t* __restrict__ indices,
T* __restrict__ distances,
const int64_t* const __restrict__ point_index_table,
const int64_t* const __restrict__ hash_table_cell_splits,
size_t hash_table_size,
const T* const __restrict__ query_points,
size_t num_queries,
const T* const __restrict__ points,
const T inv_voxel_size,
const T radius,
const T threshold,
const int max_knn) {
int query_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (query_idx >= num_queries) return;
int count = 0; // counts the number of neighbors for this query point
// fixed-size output slice: max_knn slots per query
size_t indices_offset = max_knn * query_idx;
Vec3<T> query_pos(query_points[query_idx * 3 + 0],
query_points[query_idx * 3 + 1],
query_points[query_idx * 3 + 2]);
Vec3<int> voxel_index = ComputeVoxelIndex(query_pos, inv_voxel_size);
int hash = SpatialHash(voxel_index) % hash_table_size;
// distinct hash bins reachable within 'radius'; -1 marks unused slots
int bins_to_visit[8] = {hash, -1, -1, -1, -1, -1, -1, -1};
for (int dz = -1; dz <= 1; dz += 2) {
for (int dy = -1; dy <= 1; dy += 2) {
for (int dx = -1; dx <= 1; dx += 2) {
Vec3<T> p = query_pos + radius * Vec3<T>(T(dx), T(dy), T(dz));
voxel_index = ComputeVoxelIndex(p, inv_voxel_size);
hash = SpatialHash(voxel_index) % hash_table_size;
// insert without duplicates
for (int i = 0; i < 8; ++i) {
if (bins_to_visit[i] == hash) {
break;
} else if (bins_to_visit[i] == -1) {
bins_to_visit[i] = hash;
break;
}
}
}
}
}
// position and value of the farthest neighbor currently kept; only valid
// once count > 0 (both are set on the first accepted neighbor)
int max_index;
T max_value;
for (int bin_i = 0; bin_i < 8; ++bin_i) {
int bin = bins_to_visit[bin_i];
if (bin == -1) break;
size_t begin_idx = hash_table_cell_splits[bin];
size_t end_idx = hash_table_cell_splits[bin + 1];
for (size_t j = begin_idx; j < end_idx; ++j) {
int64_t idx = point_index_table[j];
Vec3<T> p(&points[idx * 3 + 0]);
T dist;
if (NeighborTest<METRIC>(p, query_pos, &dist, threshold)) {
// If count is less than max_knn, record idx and dist.
if (count < max_knn) {
indices[indices_offset + count] = idx;
distances[indices_offset + count] = dist;
// Update max_index and max_value.
if (count == 0 || max_value < dist) {
max_index = count;
max_value = dist;
}
// Increase count
++count;
} else {
// Slots are full: keep this neighbor only if it is closer
// than the current farthest one.
if (max_value > dist) {
// Replace idx and dist at current max_index.
indices[indices_offset + max_index] = idx;
distances[indices_offset + max_index] = dist;
// Update max_value
max_value = dist;
// Rescan the kept slots to find the new farthest entry.
for (auto k = 0; k < max_knn; ++k) {
if (distances[indices_offset + k] > max_value) {
max_index = k;
max_value = distances[indices_offset + k];
}
}
}
}
}
}
}
// bubble sort the kept neighbors by ascending distance (count <= max_knn,
// which is small, so the quadratic sort is acceptable here)
for (int i = 0; i < count - 1; ++i) {
for (int j = 0; j < count - i - 1; ++j) {
if (distances[indices_offset + j] >
distances[indices_offset + j + 1]) {
T dist_tmp = distances[indices_offset + j];
int64_t ind_tmp = indices[indices_offset + j];
distances[indices_offset + j] =
distances[indices_offset + j + 1];
indices[indices_offset + j] = indices[indices_offset + j + 1];
distances[indices_offset + j + 1] = dist_tmp;
indices[indices_offset + j + 1] = ind_tmp;
}
}
}
}
/// Write indices and distances for each query point in hybrid search mode.
///
/// \param indices Output array with the neighbors indices.
///
/// \param distances Output array with the neighbors distances. May be null
/// if return_distances is false.
///
/// \param point_index_table The array storing the point indices for all
/// cells. Start and end of each cell is defined by \p
/// hash_table_cell_splits
///
/// \param hash_table_cell_splits The row splits array describing the start
/// and end of each cell.
///
/// \param hash_table_cell_splits_size This is the length of the
/// hash_table_cell_splits array.
///
/// \param query_points Array with the 3D query positions. This may be the
/// same array as \p points.
///
/// \param num_queries The number of query points.
///
/// \param points Array with the 3D point positions.
///
/// \param num_points The number of points.
///
/// \param inv_voxel_size Reciproval of the voxel size
///
/// \param radius The search radius.
///
/// \param metric One of L1, L2, Linf. Defines the distance metric for the
/// search.
///
/// \param ignore_query_point If true then points with the same position as
/// the query point will be ignored.
///
/// \param return_distances If true then this function will return the
/// distances for each neighbor to its query point in the same format
/// as the indices.
/// Note that for the L2 metric the squared distances will be returned!!
template <class T>
void WriteNeighborsHybrid(const hipStream_t& stream,
int64_t* indices,
T* distances,
const int64_t* const point_index_table,
const int64_t* const hash_table_cell_splits,
size_t hash_table_cell_splits_size,
const T* const query_points,
size_t num_queries,
const T* const points,
const T inv_voxel_size,
const T radius,
const int max_knn,
const Metric metric,
const bool return_distances) {
// For L2 the kernel compares squared distances, so square the radius once.
const T threshold = (metric == L2 ? radius * radius : radius);
// one thread per query point, 64 threads per block
const int BLOCKSIZE = 64;
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(0, 1, 1);
grid.x = utility::DivUp(num_queries, block.x);
// skip the launch entirely when there are no queries
if (grid.x) {
// The macros instantiate the kernel for every (metric, return_distances)
// combination and dispatch on the runtime values; exactly one launches.
#define FN_PARAMETERS \
indices, distances, point_index_table, hash_table_cell_splits, \
hash_table_cell_splits_size - 1, query_points, num_queries, \
points, inv_voxel_size, radius, threshold, max_knn
#define CALL_TEMPLATE(METRIC, RETURN_DISTANCES) \
if (METRIC == metric && RETURN_DISTANCES == return_distances) { \
hipLaunchKernelGGL(( WriteNeighborsHybridKernel<T, METRIC, RETURN_DISTANCES>) \
, dim3(grid), dim3(block), 0, stream, FN_PARAMETERS); \
}
#define CALL_TEMPLATE2(METRIC) \
CALL_TEMPLATE(METRIC, true) \
CALL_TEMPLATE(METRIC, false)
#define CALL_TEMPLATE3 \
CALL_TEMPLATE2(L1) \
CALL_TEMPLATE2(L2) \
CALL_TEMPLATE2(Linf)
CALL_TEMPLATE3
#undef CALL_TEMPLATE
#undef CALL_TEMPLATE2
#undef CALL_TEMPLATE3
#undef FN_PARAMETERS
}
}
} // namespace
/// Builds the spatial hash table for fixed-radius search.
/// Two-phase protocol: when 'temp' is null the function only computes the
/// required temporary memory size and returns it in 'temp_size'; otherwise it
/// performs the actual build using 'temp' as scratch space.
template <class T>
void BuildSpatialHashTableCUDA(void* temp,
size_t& temp_size,
const size_t num_points,
const T* const points,
const T radius,
const size_t points_row_splits_size,
const int64_t* points_row_splits,
const int64_t* hash_table_splits,
const size_t hash_table_cell_splits_size,
int64_t* hash_table_cell_splits,
int64_t* hash_table_index) {
// null temp pointer selects the "measure only" phase
const bool get_temp_size = !temp;
// all work is issued on the default stream
const hipStream_t stream = 0;
int texture_alignment = 512;
if (get_temp_size) {
temp = (char*)1; // worst case pointer alignment
temp_size = std::numeric_limits<int64_t>::max();
}
// tracks allocations inside 'temp' (and the peak usage in measure mode)
MemoryAllocation mem_temp(temp, temp_size, texture_alignment);
std::pair<uint32_t*, size_t> count_tmp =
mem_temp.Alloc<uint32_t>(hash_table_cell_splits_size);
// one batch per entry of points_row_splits
const int batch_size = points_row_splits_size - 1;
// voxel edge is twice the search radius, so a radius neighborhood spans
// at most 8 voxels (exploited by the search kernels)
const T voxel_size = 2 * radius;
const T inv_voxel_size = 1 / voxel_size;
// count number of points per hash entry
if (!get_temp_size) {
hipMemsetAsync(count_tmp.first, 0, sizeof(uint32_t) * count_tmp.second,
stream);
for (int i = 0; i < batch_size; ++i) {
// each batch uses its own sub-range of the hash table
const size_t hash_table_size =
hash_table_splits[i + 1] - hash_table_splits[i];
const size_t first_cell_idx = hash_table_splits[i];
const size_t num_points_i =
points_row_splits[i + 1] - points_row_splits[i];
const T* const points_i = points + 3 * points_row_splits[i];
CountHashTableEntries(stream, count_tmp.first + first_cell_idx,
hash_table_size + 1, inv_voxel_size, points_i,
num_points_i);
}
}
// compute prefix sum of the hash entry counts and store in
// hash_table_cell_splits
{
// first call with a null temp pointer only queries CUB's required
// scratch size; the second call performs the scan
std::pair<void*, size_t> inclusive_scan_temp(nullptr, 0);
hipcub::DeviceScan::InclusiveSum(inclusive_scan_temp.first,
inclusive_scan_temp.second,
count_tmp.first, hash_table_cell_splits,
count_tmp.second, stream);
inclusive_scan_temp = mem_temp.Alloc(inclusive_scan_temp.second);
if (!get_temp_size) {
hipcub::DeviceScan::InclusiveSum(
inclusive_scan_temp.first, inclusive_scan_temp.second,
count_tmp.first, hash_table_cell_splits, count_tmp.second,
stream);
}
mem_temp.Free(inclusive_scan_temp);
}
// now compute the global indices which allows us to lookup the point index
// for the entries in the hash cell
if (!get_temp_size) {
for (int i = 0; i < batch_size; ++i) {
const size_t hash_table_size =
hash_table_splits[i + 1] - hash_table_splits[i];
const size_t first_cell_idx = hash_table_splits[i];
const size_t points_start_idx = points_row_splits[i];
const size_t points_end_idx = points_row_splits[i + 1];
ComputePointIndexTable(stream, hash_table_index, count_tmp.first,
hash_table_cell_splits + first_cell_idx,
hash_table_size + 1, inv_voxel_size, points,
points_start_idx, points_end_idx);
}
}
mem_temp.Free(count_tmp);
if (get_temp_size) {
// return the memory peak as the required temporary memory size.
temp_size = mem_temp.MaxUsed();
return;
}
}
// Sorts the neighbor (distance, index) pairs of each query segment by
// distance using hipcub's segmented radix sort. Segments are delimited by
// query_neighbors_row_splits / query_neighbors_row_splits + 1.
// Runs on the default stream (no stream argument is passed to hipcub here).
//
// Same two-phase temp-sizing protocol as BuildSpatialHashTableCUDA: a null
// `temp` requests only the required scratch size via `temp_size`.
template <class T>
void SortPairs(void* temp,
               size_t& temp_size,
               int64_t num_indices,
               int64_t num_segments,
               const int64_t* query_neighbors_row_splits,
               int64_t* indices_unsorted,
               T* distances_unsorted,
               int64_t* indices_sorted,
               T* distances_sorted) {
    const bool get_temp_size = !temp;
    // NOTE(review): hard-coded; presumably a safe upper bound for the device's
    // texture alignment -- confirm.
    int texture_alignment = 512;
    if (get_temp_size) {
        temp = (char*)1;  // worst case pointer alignment
        temp_size = std::numeric_limits<int64_t>::max();
    }
    MemoryAllocation mem_temp(temp, temp_size, texture_alignment);
    // Two-call hipcub pattern: first call (null buffer) sizes the scratch,
    // second call performs the sort.
    std::pair<void*, size_t> sort_temp(nullptr, 0);
    hipcub::DeviceSegmentedRadixSort::SortPairs(
            sort_temp.first, sort_temp.second, distances_unsorted,
            distances_sorted, indices_unsorted, indices_sorted, num_indices,
            num_segments, query_neighbors_row_splits,
            query_neighbors_row_splits + 1);
    sort_temp = mem_temp.Alloc(sort_temp.second);
    if (!get_temp_size) {
        hipcub::DeviceSegmentedRadixSort::SortPairs(
                sort_temp.first, sort_temp.second, distances_unsorted,
                distances_sorted, indices_unsorted, indices_sorted, num_indices,
                num_segments, query_neighbors_row_splits,
                query_neighbors_row_splits + 1);
    }
    mem_temp.Free(sort_temp);
    if (get_temp_size) {
        // return the memory peak as the required temporary memory size.
        temp_size = mem_temp.MaxUsed();
        return;
    }
}
// Fixed-radius neighbor search over a prebuilt spatial hash table (HIP port).
// For every query point, finds all points within `radius` (L2 metric is
// hard-coded below; squared distances are returned for L2) and writes the
// neighbor indices and distances through `output_allocator`.
//
// Two-phase protocol: a null `temp` performs a sizing dry run and returns the
// peak scratch requirement in `temp_size`; otherwise the search is executed.
// `query_neighbors_row_splits` (size num_queries + 1) receives the prefix sum
// delimiting each query's neighbors in the output arrays.
template <class T>
void FixedRadiusSearchCUDA(void* temp,
                           size_t& temp_size,
                           int64_t* query_neighbors_row_splits,
                           size_t num_points,
                           const T* const points,
                           size_t num_queries,
                           const T* const queries,
                           const T radius,
                           const size_t points_row_splits_size,
                           const int64_t* const points_row_splits,
                           const size_t queries_row_splits_size,
                           const int64_t* const queries_row_splits,
                           const int64_t* const hash_table_splits,
                           size_t hash_table_cell_splits_size,
                           const int64_t* const hash_table_cell_splits,
                           const int64_t* const hash_table_index,
                           NeighborSearchAllocator<T>& output_allocator) {
    const bool get_temp_size = !temp;
    const hipStream_t stream = 0;
    // NOTE(review): hard-coded alignment; confirm against device properties.
    int texture_alignment = 512;
    const Metric metric = Metric::L2;
    if (get_temp_size) {
        temp = (char*)1;  // worst case pointer alignment
        temp_size = std::numeric_limits<int64_t>::max();
    }
    // return empty output arrays if there are no points
    if ((0 == num_points || 0 == num_queries) && !get_temp_size) {
        hipMemsetAsync(query_neighbors_row_splits, 0,
                       sizeof(int64_t) * (num_queries + 1), stream);
        int64_t* indices_ptr;
        output_allocator.AllocIndices(&indices_ptr, 0);
        T* distances_ptr;
        output_allocator.AllocDistances(&distances_ptr, 0);
        return;
    }
    MemoryAllocation mem_temp(temp, temp_size, texture_alignment);
    const int batch_size = points_row_splits_size - 1;
    const T voxel_size = 2 * radius;
    const T inv_voxel_size = 1 / voxel_size;
    // Per-query neighbor counts; scanned below into the row splits.
    std::pair<int64_t*, size_t> query_neighbors_count =
            mem_temp.Alloc<int64_t>(num_queries);
    // we need this value to compute the size of the index array
    if (!get_temp_size) {
        for (int i = 0; i < batch_size; ++i) {
            const size_t hash_table_size =
                    hash_table_splits[i + 1] - hash_table_splits[i];
            const size_t first_cell_idx = hash_table_splits[i];
            const size_t queries_start_idx = queries_row_splits[i];
            const T* const queries_i = queries + 3 * queries_row_splits[i];
            const size_t num_queries_i =
                    queries_row_splits[i + 1] - queries_row_splits[i];
            CountNeighbors(
                    stream, query_neighbors_count.first + queries_start_idx,
                    hash_table_index, hash_table_cell_splits + first_cell_idx,
                    hash_table_size + 1, queries_i, num_queries_i, points,
                    inv_voxel_size, radius, metric);
        }
    }
    // we need this value to compute the size of the index array
    int64_t last_prefix_sum_entry = 0;
    {
        // Two-call hipcub pattern: the first call only sizes the scratch.
        std::pair<void*, size_t> inclusive_scan_temp(nullptr, 0);
        hipcub::DeviceScan::InclusiveSum(
                inclusive_scan_temp.first, inclusive_scan_temp.second,
                query_neighbors_count.first, query_neighbors_row_splits + 1,
                num_queries, stream);
        inclusive_scan_temp = mem_temp.Alloc(inclusive_scan_temp.second);
        if (!get_temp_size) {
            // set first element to zero
            hipMemsetAsync(query_neighbors_row_splits, 0, sizeof(int64_t),
                           stream);
            hipcub::DeviceScan::InclusiveSum(
                    inclusive_scan_temp.first, inclusive_scan_temp.second,
                    query_neighbors_count.first, query_neighbors_row_splits + 1,
                    num_queries, stream);
            // get the last value
            hipMemcpyAsync(&last_prefix_sum_entry,
                           query_neighbors_row_splits + num_queries,
                           sizeof(int64_t), hipMemcpyDeviceToHost, stream);
            // wait for the async copies
            // NOTE(review): busy-wait polling; hipStreamSynchronize(stream)
            // would be the idiomatic blocking wait -- presumably intentional
            // to avoid yielding, confirm before changing.
            while (hipErrorNotReady == hipStreamQuery(stream)) { /*empty*/
            }
        }
        mem_temp.Free(inclusive_scan_temp);
    }
    mem_temp.Free(query_neighbors_count);
    if (get_temp_size) {
        // return the memory peak as the required temporary memory size.
        temp_size = mem_temp.MaxUsed();
        return;
    }
    if (!get_temp_size) {
        // allocate the output array for the neighbor indices
        const size_t num_indices = last_prefix_sum_entry;
        int64_t* indices_ptr;
        T* distances_ptr;
        output_allocator.AllocIndices(&indices_ptr, num_indices);
        output_allocator.AllocDistances(&distances_ptr, num_indices);
        for (int i = 0; i < batch_size; ++i) {
            const size_t hash_table_size =
                    hash_table_splits[i + 1] - hash_table_splits[i];
            const size_t first_cell_idx = hash_table_splits[i];
            const T* const queries_i = queries + 3 * queries_row_splits[i];
            const size_t num_queries_i =
                    queries_row_splits[i + 1] - queries_row_splits[i];
            WriteNeighborsIndicesAndDistances(
                    stream, indices_ptr, distances_ptr,
                    query_neighbors_row_splits + queries_row_splits[i],
                    hash_table_index, hash_table_cell_splits + first_cell_idx,
                    hash_table_size + 1, queries_i, num_queries_i, points,
                    inv_voxel_size, radius, metric, true);
        }
    }
}
//// Hybrid Search
// Hybrid neighbor search (HIP port): for each query point returns up to
// `max_knn` neighbors within `radius` (L2 metric hard-coded below). The
// output arrays have fixed size num_queries * max_knn; unused index slots are
// initialized to -1 and unused distance slots to 0. Runs on stream 0.
//
// NOTE(review): queries_row_splits_size and hash_table_cell_splits_size are
// unused in the body; they appear to be kept for interface symmetry with
// FixedRadiusSearchCUDA -- confirm.
template <class T>
void HybridSearchCUDA(size_t num_points,
                      const T* const points,
                      size_t num_queries,
                      const T* const queries,
                      const T radius,
                      const int max_knn,
                      const size_t points_row_splits_size,
                      const int64_t* const points_row_splits,
                      const size_t queries_row_splits_size,
                      const int64_t* const queries_row_splits,
                      const int64_t* const hash_table_splits,
                      size_t hash_table_cell_splits_size,
                      const int64_t* const hash_table_cell_splits,
                      const int64_t* const hash_table_index,
                      NeighborSearchAllocator<T>& output_allocator) {
    const hipStream_t stream = 0;
    const Metric metric = Metric::L2;
    // return empty output arrays if there are no points
    if (0 == num_points || 0 == num_queries) {
        int64_t* indices_ptr;
        output_allocator.AllocIndices(&indices_ptr, 0);
        T* distances_ptr;
        output_allocator.AllocDistances(&distances_ptr, 0);
        return;
    }
    const int batch_size = points_row_splits_size - 1;
    const T voxel_size = 2 * radius;
    const T inv_voxel_size = 1 / voxel_size;
    // Allocate output pointers.
    const size_t num_indices = num_queries * max_knn;
    int64_t* indices_ptr;
    output_allocator.AllocIndices(&indices_ptr, num_indices, -1);
    T* distances_ptr;
    output_allocator.AllocDistances(&distances_ptr, num_indices, 0);
    for (int i = 0; i < batch_size; ++i) {
        const size_t hash_table_size =
                hash_table_splits[i + 1] - hash_table_splits[i];
        const size_t first_cell_idx = hash_table_splits[i];
        const T* const queries_i = queries + 3 * queries_row_splits[i];
        const size_t num_queries_i =
                queries_row_splits[i + 1] - queries_row_splits[i];
        WriteNeighborsHybrid(
                stream, indices_ptr, distances_ptr, hash_table_index,
                hash_table_cell_splits + first_cell_idx, hash_table_size + 1,
                queries_i, num_queries_i, points, inv_voxel_size, radius,
                max_knn, metric, true);
    }
}
////
// Explicit template instantiations for float and double so the definitions
// above can live in this translation unit while callers link against them.
template void BuildSpatialHashTableCUDA(
        void* temp,
        size_t& temp_size,
        const size_t num_points,
        const float* const points,
        const float radius,
        const size_t points_row_splits_size,
        const int64_t* points_row_splits,
        const int64_t* hash_table_splits,
        const size_t hash_table_cell_splits_size,
        int64_t* hash_table_cell_splits,
        int64_t* hash_table_index);
template void BuildSpatialHashTableCUDA(
        void* temp,
        size_t& temp_size,
        const size_t num_points,
        const double* const points,
        const double radius,
        const size_t points_row_splits_size,
        const int64_t* points_row_splits,
        const int64_t* hash_table_splits,
        const size_t hash_table_cell_splits_size,
        int64_t* hash_table_cell_splits,
        int64_t* hash_table_index);
template void SortPairs(void* temp,
                        size_t& temp_size,
                        int64_t num_indices,
                        int64_t num_segments,
                        const int64_t* query_neighbors_row_splits,
                        int64_t* indices_unsorted,
                        float* distances_unsorted,
                        int64_t* indices_sorted,
                        float* distances_sorted);
template void SortPairs(void* temp,
                        size_t& temp_size,
                        int64_t num_indices,
                        int64_t num_segments,
                        const int64_t* query_neighbors_row_splits,
                        int64_t* indices_unsorted,
                        double* distances_unsorted,
                        int64_t* indices_sorted,
                        double* distances_sorted);
template void FixedRadiusSearchCUDA(
        void* temp,
        size_t& temp_size,
        int64_t* query_neighbors_row_splits,
        size_t num_points,
        const float* const points,
        size_t num_queries,
        const float* const queries,
        const float radius,
        const size_t points_row_splits_size,
        const int64_t* const points_row_splits,
        const size_t queries_row_splits_size,
        const int64_t* const queries_row_splits,
        const int64_t* const hash_table_splits,
        size_t hash_table_cell_splits_size,
        const int64_t* const hash_table_cell_splits,
        const int64_t* const hash_table_index,
        NeighborSearchAllocator<float>& output_allocator);
template void FixedRadiusSearchCUDA(
        void* temp,
        size_t& temp_size,
        int64_t* query_neighbors_row_splits,
        size_t num_points,
        const double* const points,
        size_t num_queries,
        const double* const queries,
        const double radius,
        const size_t points_row_splits_size,
        const int64_t* const points_row_splits,
        const size_t queries_row_splits_size,
        const int64_t* const queries_row_splits,
        const int64_t* const hash_table_splits,
        size_t hash_table_cell_splits_size,
        const int64_t* const hash_table_cell_splits,
        const int64_t* const hash_table_index,
        NeighborSearchAllocator<double>& output_allocator);
template void HybridSearchCUDA(
        size_t num_points,
        const float* const points,
        size_t num_queries,
        const float* const queries,
        const float radius,
        const int max_knn,
        const size_t points_row_splits_size,
        const int64_t* const points_row_splits,
        const size_t queries_row_splits_size,
        const int64_t* const queries_row_splits,
        const int64_t* const hash_table_splits,
        size_t hash_table_cell_splits_size,
        const int64_t* const hash_table_cell_splits,
        const int64_t* const hash_table_index,
        NeighborSearchAllocator<float>& output_allocator);
template void HybridSearchCUDA(
        size_t num_points,
        const double* const points,
        size_t num_queries,
        const double* const queries,
        const double radius,
        const int max_knn,
        const size_t points_row_splits_size,
        const int64_t* const points_row_splits,
        const size_t queries_row_splits_size,
        const int64_t* const queries_row_splits,
        const int64_t* const hash_table_splits,
        size_t hash_table_cell_splits_size,
        const int64_t* const hash_table_cell_splits,
        const int64_t* const hash_table_index,
        NeighborSearchAllocator<double>& output_allocator);
} // namespace nns
} // namespace core
} // namespace open3d
| 19ef837fcecbaa33b201c96c432719ed3de8fba6.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <math.h>
#include <cub/cub.cuh>
#include "open3d/core/nns/FixedRadiusSearch.h"
#include "open3d/core/nns/MemoryAllocation.h"
#include "open3d/core/nns/NeighborSearchCommon.h"
#include "open3d/utility/Helper.h"
#include "open3d/utility/MiniVec.h"
namespace open3d {
namespace core {
namespace nns {
namespace {
template <class T>
using Vec3 = utility::MiniVec<T, 3>;

/// Computes the distance between two points under the chosen metric and
/// checks it against a threshold.
///
/// \tparam METRIC The distance metric: one of L1, L2, Linf.
/// \tparam T Floating point type for positions and distances.
///
/// \param p1 First 3D point.
/// \param p2 Second 3D point.
/// \param dist Output parameter receiving the computed distance. Note that
///        for L2 this is the squared distance.
/// \param threshold Scalar value the distance is compared against.
///
/// \return True if the computed distance is <= threshold.
///
template <int METRIC = L2, class T>
inline __device__ bool NeighborTest(const Vec3<T>& p1,
                                    const Vec3<T>& p2,
                                    T* dist,
                                    T threshold) {
    if (METRIC == Linf) {
        // Chebyshev distance: maximum component of the absolute difference.
        const Vec3<T> diff = (p1 - p2).abs();
        T m = diff[0] > diff[1] ? diff[0] : diff[1];
        *dist = m > diff[2] ? m : diff[2];
    } else if (METRIC == L1) {
        // Manhattan distance: sum of absolute differences.
        const Vec3<T> diff = (p1 - p2).abs();
        *dist = diff[0] + diff[1] + diff[2];
    } else {
        // L2: squared Euclidean distance (no sqrt taken).
        const Vec3<T> diff = p1 - p2;
        *dist = diff.dot(diff);
    }
    return *dist <= threshold;
}
/// Kernel for CountHashTableEntries
// One thread per point: computes the point's voxel hash and atomically
// increments that cell's counter. Counts are accumulated in
// count_table[hash + 1] (not [hash]) so that a later inclusive prefix sum
// over count_table yields the cell row-splits directly.
// Launched 1D; threads beyond num_points exit via the bounds check.
template <class T>
__global__ void CountHashTableEntriesKernel(uint32_t* count_table,
                                            size_t hash_table_size,
                                            T inv_voxel_size,
                                            const T* const __restrict__ points,
                                            size_t num_points) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= num_points) return;
    Vec3<T> pos(&points[idx * 3]);
    Vec3<int> voxel_index = ComputeVoxelIndex(pos, inv_voxel_size);
    size_t hash = SpatialHash(voxel_index) % hash_table_size;
    // Shifted by one; see note above.
    atomicAdd(&count_table[hash + 1], 1);
}
/// Counts for each hash entry the number of points that map to this entry.
///
/// \param count_table Pointer to the table for counting.
/// The first element will not be used, i.e. the
/// number of points for the first hash entry is in count_table[1].
/// This array must be initialized before calling this function.
///
/// \param count_table_size This is the size of the hash table + 1.
///
/// \param inv_voxel_size Reciproval of the voxel size
///
/// \param points Array with the 3D point positions.
///
/// \param num_points The number of points.
///
// Host-side launcher for CountHashTableEntriesKernel. Counts, for each hash
// cell, how many points fall into it; see the kernel for the +1 offset used
// in count_table. count_table must be zero-initialized by the caller.
template <class T>
void CountHashTableEntries(const cudaStream_t& stream,
                           uint32_t* count_table,
                           size_t count_table_size,
                           T inv_voxel_size,
                           const T* points,
                           size_t num_points) {
    constexpr int kBlockSize = 64;
    const dim3 block(kBlockSize, 1, 1);
    const dim3 grid(utility::DivUp(num_points, block.x), 1, 1);
    if (grid.x == 0) return;  // nothing to do for zero points
    CountHashTableEntriesKernel<T><<<grid, block, 0, stream>>>(
            count_table, count_table_size - 1, inv_voxel_size, points,
            num_points);
}
/// Kernel for ComputePointIndexTable
// One thread per point in [points_start_idx, points_end_idx): recomputes the
// point's voxel hash and writes the point index into that cell's slice of
// point_index_table. count_tmp holds a per-cell write cursor (must be zeroed
// beforehand); atomicAdd claims a unique slot within the cell.
template <class T>
__global__ void ComputePointIndexTableKernel(
        int64_t* __restrict__ point_index_table,
        uint32_t* __restrict__ count_tmp,
        const int64_t* const __restrict__ hash_table_cell_splits,
        size_t hash_table_size,
        T inv_voxel_size,
        const T* const __restrict__ points,
        const size_t points_start_idx,
        const size_t points_end_idx) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x + points_start_idx;
    if (idx >= points_end_idx) return;
    Vec3<T> pos(&points[idx * 3]);
    Vec3<int> voxel_index = ComputeVoxelIndex(pos, inv_voxel_size);
    size_t hash = SpatialHash(voxel_index[0], voxel_index[1], voxel_index[2]) %
                  hash_table_size;
    // Cell base offset + per-cell cursor gives this point's unique slot.
    point_index_table[hash_table_cell_splits[hash] +
                      atomicAdd(&count_tmp[hash], 1)] = idx;
}
/// Writes the index of the points to the hash cells.
///
/// \param point_index_table The output array storing the point indices for
/// all cells. Start and end of each cell is defined by
/// \p hash_table_prefix_sum
///
/// \param count_tmp Temporary memory of size \p hash_table_cell_splits_size
/// .
///
/// \param hash_table_cell_splits The row splits array describing the start
/// and end of each cell.
///
/// \param hash_table_cell_splits_size The size of the hash table.
///
/// \param inv_voxel_size Reciproval of the voxel size
///
/// \param points Array with the 3D point positions.
///
/// \param num_points The number of points.
///
// Host-side launcher for ComputePointIndexTableKernel. Zeroes the per-cell
// write cursors (count_tmp) on the stream, then scatters the point indices
// of the range [points_start_idx, points_end_idx) into point_index_table.
template <class T>
void ComputePointIndexTable(
        const cudaStream_t& stream,
        int64_t* __restrict__ point_index_table,
        uint32_t* __restrict__ count_tmp,
        const int64_t* const __restrict__ hash_table_cell_splits,
        size_t hash_table_cell_splits_size,
        T inv_voxel_size,
        const T* const __restrict__ points,
        size_t points_start_idx,
        size_t points_end_idx) {
    // Reset the write cursors used by the kernel's atomicAdd.
    cudaMemsetAsync(count_tmp, 0,
                    sizeof(uint32_t) * hash_table_cell_splits_size, stream);
    size_t num_points = points_end_idx - points_start_idx;
    const int BLOCKSIZE = 64;
    dim3 block(BLOCKSIZE, 1, 1);
    dim3 grid(0, 1, 1);
    grid.x = utility::DivUp(num_points, block.x);
    if (grid.x)
        ComputePointIndexTableKernel<T><<<grid, block, 0, stream>>>(
                point_index_table, count_tmp, hash_table_cell_splits,
                hash_table_cell_splits_size - 1, inv_voxel_size, points,
                points_start_idx, points_end_idx);
}
/// Kernel for CountNeighbors
// One thread per query point: counts the points within the search radius.
// The 8 corners of the cube query_pos +- radius determine (at most 8) hash
// cells to inspect; duplicates are filtered in the small bins_to_visit array.
// This relies on the voxel size being 2*radius so the neighborhood cannot
// span more than 8 voxels.
template <int METRIC, class T>
__global__ void CountNeighborsKernel(
        int64_t* __restrict__ neighbors_count,
        const int64_t* const __restrict__ point_index_table,
        const int64_t* const __restrict__ hash_table_cell_splits,
        size_t hash_table_size,
        const T* const __restrict__ query_points,
        size_t num_queries,
        const T* const __restrict__ points,
        const T inv_voxel_size,
        const T radius,
        const T threshold) {
    int query_idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (query_idx >= num_queries) return;
    int count = 0;  // counts the number of neighbors for this query point
    Vec3<T> query_pos(query_points[query_idx * 3 + 0],
                      query_points[query_idx * 3 + 1],
                      query_points[query_idx * 3 + 2]);
    Vec3<int> voxel_index = ComputeVoxelIndex(query_pos, inv_voxel_size);
    int hash = SpatialHash(voxel_index[0], voxel_index[1], voxel_index[2]) %
               hash_table_size;
    // Candidate cells; -1 marks unused slots.
    int bins_to_visit[8] = {hash, -1, -1, -1, -1, -1, -1, -1};
    // Visit the 8 cube corners at distance radius in each axis direction.
    for (int dz = -1; dz <= 1; dz += 2)
        for (int dy = -1; dy <= 1; dy += 2)
            for (int dx = -1; dx <= 1; dx += 2) {
                Vec3<T> p = query_pos + radius * Vec3<T>(T(dx), T(dy), T(dz));
                voxel_index = ComputeVoxelIndex(p, inv_voxel_size);
                hash = SpatialHash(voxel_index[0], voxel_index[1],
                                   voxel_index[2]) %
                       hash_table_size;
                // insert without duplicates
                for (int i = 0; i < 8; ++i) {
                    if (bins_to_visit[i] == hash) {
                        break;
                    } else if (bins_to_visit[i] == -1) {
                        bins_to_visit[i] = hash;
                        break;
                    }
                }
            }
    // Test every point stored in the candidate cells against the threshold.
    for (int bin_i = 0; bin_i < 8; ++bin_i) {
        int bin = bins_to_visit[bin_i];
        if (bin == -1) break;
        size_t begin_idx = hash_table_cell_splits[bin];
        size_t end_idx = hash_table_cell_splits[bin + 1];
        for (size_t j = begin_idx; j < end_idx; ++j) {
            int64_t idx = point_index_table[j];
            Vec3<T> p(&points[idx * 3 + 0]);
            T dist;
            if (NeighborTest<METRIC>(p, query_pos, &dist, threshold)) ++count;
        }
    }
    neighbors_count[query_idx] = count;
}
/// Count the number of neighbors for each query point
///
/// \param neighbors_count Output array for counting the number of neighbors.
/// The size of the array is \p num_queries.
///
/// \param point_index_table The array storing the point indices for all
/// cells. Start and end of each cell is defined by \p
/// hash_table_cell_splits
///
/// \param hash_table_cell_splits The row splits array describing the start
/// and end of each cell.
///
/// \param hash_table_cell_splits_size This is the length of the
/// hash_table_cell_splits array.
///
/// \param query_points Array with the 3D query positions. This may be the
/// same array as \p points.
///
/// \param num_queries The number of query points.
///
/// \param points Array with the 3D point positions.
///
/// \param num_points The number of points.
///
/// \param inv_voxel_size Reciproval of the voxel size
///
/// \param radius The search radius.
///
/// \param metric One of L1, L2, Linf. Defines the distance metric for the
/// search.
///
/// \param ignore_query_point If true then points with the same position as
/// the query point will be ignored.
///
// Host-side launcher for CountNeighborsKernel; dispatches to the kernel
// instantiation matching the runtime `metric` via the CALL_TEMPLATE macro.
template <class T>
void CountNeighbors(const cudaStream_t& stream,
                    int64_t* neighbors_count,
                    const int64_t* const point_index_table,
                    const int64_t* const hash_table_cell_splits,
                    size_t hash_table_cell_splits_size,
                    const T* const query_points,
                    size_t num_queries,
                    const T* const points,
                    const T inv_voxel_size,
                    const T radius,
                    const Metric metric) {
    // const bool ignore_query_point) {
    // For L2 the kernel computes squared distances, so square the radius.
    const T threshold = (metric == L2 ? radius * radius : radius);
    const int BLOCKSIZE = 64;
    dim3 block(BLOCKSIZE, 1, 1);
    dim3 grid(0, 1, 1);
    grid.x = utility::DivUp(num_queries, block.x);
    if (grid.x) {
// Expands to one guarded kernel launch per metric value.
#define FN_PARAMETERS \
    neighbors_count, point_index_table, hash_table_cell_splits, \
            hash_table_cell_splits_size - 1, query_points, num_queries, \
            points, inv_voxel_size, radius, threshold
#define CALL_TEMPLATE(METRIC) \
    if (METRIC == metric) { \
        CountNeighborsKernel<METRIC, T> \
                <<<grid, block, 0, stream>>>(FN_PARAMETERS); \
    }
        CALL_TEMPLATE(L1)
        CALL_TEMPLATE(L2)
        CALL_TEMPLATE(Linf)
#undef CALL_TEMPLATE
#undef FN_PARAMETERS
    }
}
/// Kernel for WriteNeighborsIndicesAndDistances
// One thread per query point: writes the indices (and optionally distances)
// of all points within the radius. Uses the same 8-corner cell enumeration as
// CountNeighborsKernel; neighbors_row_splits (computed from the counting
// pass) gives each query's write offset, so the passes must agree exactly.
template <class T, int METRIC, bool RETURN_DISTANCES>
__global__ void WriteNeighborsIndicesAndDistancesKernel(
        int64_t* __restrict__ indices,
        T* __restrict__ distances,
        const int64_t* const __restrict__ neighbors_row_splits,
        const int64_t* const __restrict__ point_index_table,
        const int64_t* const __restrict__ hash_table_cell_splits,
        size_t hash_table_size,
        const T* const __restrict__ query_points,
        size_t num_queries,
        const T* const __restrict__ points,
        const T inv_voxel_size,
        const T radius,
        const T threshold) {
    int query_idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (query_idx >= num_queries) return;
    int count = 0;  // counts the number of neighbors for this query point
    // Start of this query's slice in the flat output arrays.
    size_t indices_offset = neighbors_row_splits[query_idx];
    Vec3<T> query_pos(query_points[query_idx * 3 + 0],
                      query_points[query_idx * 3 + 1],
                      query_points[query_idx * 3 + 2]);
    Vec3<int> voxel_index = ComputeVoxelIndex(query_pos, inv_voxel_size);
    int hash = SpatialHash(voxel_index) % hash_table_size;
    // Candidate cells; -1 marks unused slots.
    int bins_to_visit[8] = {hash, -1, -1, -1, -1, -1, -1, -1};
    for (int dz = -1; dz <= 1; dz += 2) {
        for (int dy = -1; dy <= 1; dy += 2) {
            for (int dx = -1; dx <= 1; dx += 2) {
                Vec3<T> p = query_pos + radius * Vec3<T>(T(dx), T(dy), T(dz));
                voxel_index = ComputeVoxelIndex(p, inv_voxel_size);
                hash = SpatialHash(voxel_index) % hash_table_size;
                // insert without duplicates
                for (int i = 0; i < 8; ++i) {
                    if (bins_to_visit[i] == hash) {
                        break;
                    } else if (bins_to_visit[i] == -1) {
                        bins_to_visit[i] = hash;
                        break;
                    }
                }
            }
        }
    }
    for (int bin_i = 0; bin_i < 8; ++bin_i) {
        int bin = bins_to_visit[bin_i];
        if (bin == -1) break;
        size_t begin_idx = hash_table_cell_splits[bin];
        size_t end_idx = hash_table_cell_splits[bin + 1];
        for (size_t j = begin_idx; j < end_idx; ++j) {
            int64_t idx = point_index_table[j];
            Vec3<T> p(&points[idx * 3 + 0]);
            T dist;
            if (NeighborTest<METRIC>(p, query_pos, &dist, threshold)) {
                indices[indices_offset + count] = idx;
                if (RETURN_DISTANCES) {
                    // For L2, dist is the squared distance (see NeighborTest).
                    distances[indices_offset + count] = dist;
                }
                ++count;
            }
        }
    }
}
/// Write indices and distances of neighbors for each query point
///
/// \param indices Output array with the neighbors indices.
///
/// \param distances Output array with the neighbors distances. May be null
/// if return_distances is false.
///
/// \param neighbors_row_splits This is the prefix sum which describes
/// start and end of the neighbors and distances for each query point.
///
/// \param point_index_table The array storing the point indices for all
/// cells. Start and end of each cell is defined by \p
/// hash_table_cell_splits
///
/// \param hash_table_cell_splits The row splits array describing the start
/// and end of each cell.
///
/// \param hash_table_cell_splits_size This is the length of the
/// hash_table_cell_splits array.
///
/// \param query_points Array with the 3D query positions. This may be the
/// same array as \p points.
///
/// \param num_queries The number of query points.
///
/// \param points Array with the 3D point positions.
///
/// \param num_points The number of points.
///
/// \param inv_voxel_size Reciproval of the voxel size
///
/// \param radius The search radius.
///
/// \param metric One of L1, L2, Linf. Defines the distance metric for the
/// search.
///
/// \param ignore_query_point If true then points with the same position as
/// the query point will be ignored.
///
/// \param return_distances If true then this function will return the
/// distances for each neighbor to its query point in the same format
/// as the indices.
/// Note that for the L2 metric the squared distances will be returned!!
// Host-side launcher for WriteNeighborsIndicesAndDistancesKernel; dispatches
// to the instantiation matching (metric, return_distances) via nested macros.
template <class T>
void WriteNeighborsIndicesAndDistances(
        const cudaStream_t& stream,
        int64_t* indices,
        T* distances,
        const int64_t* const neighbors_row_splits,
        const int64_t* const point_index_table,
        const int64_t* const hash_table_cell_splits,
        size_t hash_table_cell_splits_size,
        const T* const query_points,
        size_t num_queries,
        const T* const points,
        const T inv_voxel_size,
        const T radius,
        const Metric metric,
        const bool return_distances) {
    // For L2 the kernel computes squared distances, so square the radius.
    const T threshold = (metric == L2 ? radius * radius : radius);
    const int BLOCKSIZE = 64;
    dim3 block(BLOCKSIZE, 1, 1);
    dim3 grid(0, 1, 1);
    grid.x = utility::DivUp(num_queries, block.x);
    if (grid.x) {
// CALL_TEMPLATE3 expands to one guarded launch per (metric, bool) pair.
#define FN_PARAMETERS \
    indices, distances, neighbors_row_splits, point_index_table, \
            hash_table_cell_splits, hash_table_cell_splits_size - 1, \
            query_points, num_queries, points, inv_voxel_size, radius, \
            threshold
#define CALL_TEMPLATE(METRIC, RETURN_DISTANCES) \
    if (METRIC == metric && RETURN_DISTANCES == return_distances) { \
        WriteNeighborsIndicesAndDistancesKernel<T, METRIC, RETURN_DISTANCES> \
                <<<grid, block, 0, stream>>>(FN_PARAMETERS); \
    }
#define CALL_TEMPLATE2(METRIC) \
    CALL_TEMPLATE(METRIC, true) \
    CALL_TEMPLATE(METRIC, false)
#define CALL_TEMPLATE3 \
    CALL_TEMPLATE2(L1) \
    CALL_TEMPLATE2(L2) \
    CALL_TEMPLATE2(Linf)
        CALL_TEMPLATE3
#undef CALL_TEMPLATE
#undef CALL_TEMPLATE2
#undef CALL_TEMPLATE3
#undef FN_PARAMETERS
    }
}
/// Kernel for WriteNeighborsHybrid
// One thread per query point: collects up to max_knn neighbors within the
// radius (hybrid search). Each query owns a fixed slice of max_knn slots at
// indices_offset. While scanning candidates it maintains the position
// (max_index) and value (max_value) of the farthest stored neighbor so a
// closer candidate can replace it once the slice is full; finally the slice
// is sorted ascending by distance with a bubble sort (count <= max_knn, so
// the quadratic sort stays cheap).
// NOTE(review): the RETURN_DISTANCES template parameter is not used in this
// kernel -- distances are always written; presumably kept for symmetry with
// WriteNeighborsIndicesAndDistancesKernel.
template <class T, int METRIC, bool RETURN_DISTANCES>
__global__ void WriteNeighborsHybridKernel(
        int64_t* __restrict__ indices,
        T* __restrict__ distances,
        const int64_t* const __restrict__ point_index_table,
        const int64_t* const __restrict__ hash_table_cell_splits,
        size_t hash_table_size,
        const T* const __restrict__ query_points,
        size_t num_queries,
        const T* const __restrict__ points,
        const T inv_voxel_size,
        const T radius,
        const T threshold,
        const int max_knn) {
    int query_idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (query_idx >= num_queries) return;
    int count = 0;  // counts the number of neighbors for this query point
    // Fixed-size slice for this query in the flat output arrays.
    size_t indices_offset = max_knn * query_idx;
    Vec3<T> query_pos(query_points[query_idx * 3 + 0],
                      query_points[query_idx * 3 + 1],
                      query_points[query_idx * 3 + 2]);
    Vec3<int> voxel_index = ComputeVoxelIndex(query_pos, inv_voxel_size);
    int hash = SpatialHash(voxel_index) % hash_table_size;
    // Candidate cells (8-corner enumeration); -1 marks unused slots.
    int bins_to_visit[8] = {hash, -1, -1, -1, -1, -1, -1, -1};
    for (int dz = -1; dz <= 1; dz += 2) {
        for (int dy = -1; dy <= 1; dy += 2) {
            for (int dx = -1; dx <= 1; dx += 2) {
                Vec3<T> p = query_pos + radius * Vec3<T>(T(dx), T(dy), T(dz));
                voxel_index = ComputeVoxelIndex(p, inv_voxel_size);
                hash = SpatialHash(voxel_index) % hash_table_size;
                // insert without duplicates
                for (int i = 0; i < 8; ++i) {
                    if (bins_to_visit[i] == hash) {
                        break;
                    } else if (bins_to_visit[i] == -1) {
                        bins_to_visit[i] = hash;
                        break;
                    }
                }
            }
        }
    }
    // Position and value of the farthest neighbor currently stored.
    // Initialized explicitly: the original left these uninitialized, which
    // made the `else` branch below read an indeterminate value (UB) when
    // max_knn <= 0. For max_knn > 0 behavior is unchanged because the
    // `count == 0` test short-circuits the first comparison.
    int max_index = 0;
    T max_value = T(0);
    for (int bin_i = 0; bin_i < 8; ++bin_i) {
        int bin = bins_to_visit[bin_i];
        if (bin == -1) break;
        size_t begin_idx = hash_table_cell_splits[bin];
        size_t end_idx = hash_table_cell_splits[bin + 1];
        for (size_t j = begin_idx; j < end_idx; ++j) {
            int64_t idx = point_index_table[j];
            Vec3<T> p(&points[idx * 3 + 0]);
            T dist;
            if (NeighborTest<METRIC>(p, query_pos, &dist, threshold)) {
                // If count if less than max_knn, record idx and dist.
                if (count < max_knn) {
                    indices[indices_offset + count] = idx;
                    distances[indices_offset + count] = dist;
                    // Update max_index and max_value.
                    if (count == 0 || max_value < dist) {
                        max_index = count;
                        max_value = dist;
                    }
                    // Increase count
                    ++count;
                } else {
                    // If dist is smaller than current max_value.
                    if (max_value > dist) {
                        // Replace idx and dist at current max_index.
                        indices[indices_offset + max_index] = idx;
                        distances[indices_offset + max_index] = dist;
                        // Update max_value
                        max_value = dist;
                        // Find max_index.
                        for (auto k = 0; k < max_knn; ++k) {
                            if (distances[indices_offset + k] > max_value) {
                                max_index = k;
                                max_value = distances[indices_offset + k];
                            }
                        }
                    }
                }
            }
        }
    }
    // bubble sort: order the collected neighbors ascending by distance.
    for (int i = 0; i < count - 1; ++i) {
        for (int j = 0; j < count - i - 1; ++j) {
            if (distances[indices_offset + j] >
                distances[indices_offset + j + 1]) {
                T dist_tmp = distances[indices_offset + j];
                int64_t ind_tmp = indices[indices_offset + j];
                distances[indices_offset + j] =
                        distances[indices_offset + j + 1];
                indices[indices_offset + j] = indices[indices_offset + j + 1];
                distances[indices_offset + j + 1] = dist_tmp;
                indices[indices_offset + j + 1] = ind_tmp;
            }
        }
    }
}
/// Write indices and distances for each query point in hybrid search mode.
///
/// \param indices Output array with the neighbors indices.
///
/// \param distances Output array with the neighbors distances. May be null
/// if return_distances is false.
///
/// \param point_index_table The array storing the point indices for all
/// cells. Start and end of each cell is defined by \p
/// hash_table_cell_splits
///
/// \param hash_table_cell_splits The row splits array describing the start
/// and end of each cell.
///
/// \param hash_table_cell_splits_size This is the length of the
/// hash_table_cell_splits array.
///
/// \param query_points Array with the 3D query positions. This may be the
/// same array as \p points.
///
/// \param num_queries The number of query points.
///
/// \param points Array with the 3D point positions.
///
/// \param num_points The number of points.
///
/// \param inv_voxel_size Reciproval of the voxel size
///
/// \param radius The search radius.
///
/// \param metric One of L1, L2, Linf. Defines the distance metric for the
/// search.
///
/// \param ignore_query_point If true then points with the same position as
/// the query point will be ignored.
///
/// \param return_distances If true then this function will return the
/// distances for each neighbor to its query point in the same format
/// as the indices.
/// Note that for the L2 metric the squared distances will be returned!!
template <class T>
void WriteNeighborsHybrid(const cudaStream_t& stream,
                          int64_t* indices,
                          T* distances,
                          const int64_t* const point_index_table,
                          const int64_t* const hash_table_cell_splits,
                          size_t hash_table_cell_splits_size,
                          const T* const query_points,
                          size_t num_queries,
                          const T* const points,
                          const T inv_voxel_size,
                          const T radius,
                          const int max_knn,
                          const Metric metric,
                          const bool return_distances) {
    // For the L2 metric the kernel compares squared distances, so the
    // comparison threshold is the squared radius.
    const T threshold = (metric == L2 ? radius * radius : radius);
    const int BLOCKSIZE = 64;
    dim3 block(BLOCKSIZE, 1, 1);
    dim3 grid(0, 1, 1);
    grid.x = utility::DivUp(num_queries, block.x);
    if (grid.x) {
        // Select the kernel instantiation matching the runtime
        // (metric, return_distances) pair; exactly one of the expanded
        // CALL_TEMPLATE branches launches.
#define FN_PARAMETERS \
    indices, distances, point_index_table, hash_table_cell_splits, \
            hash_table_cell_splits_size - 1, query_points, num_queries, \
            points, inv_voxel_size, radius, threshold, max_knn
#define CALL_TEMPLATE(METRIC, RETURN_DISTANCES) \
    if (METRIC == metric && RETURN_DISTANCES == return_distances) { \
        WriteNeighborsHybridKernel<T, METRIC, RETURN_DISTANCES> \
                <<<grid, block, 0, stream>>>(FN_PARAMETERS); \
    }
#define CALL_TEMPLATE2(METRIC) \
    CALL_TEMPLATE(METRIC, true) \
    CALL_TEMPLATE(METRIC, false)
#define CALL_TEMPLATE3 \
    CALL_TEMPLATE2(L1) \
    CALL_TEMPLATE2(L2) \
    CALL_TEMPLATE2(Linf)
        CALL_TEMPLATE3
#undef CALL_TEMPLATE
#undef CALL_TEMPLATE2
#undef CALL_TEMPLATE3
#undef FN_PARAMETERS
    }
}
} // namespace
/// Builds the spatial hash table used by the fixed-radius and hybrid search.
/// Follows the usual CUB-style two-pass protocol: when \p temp is null the
/// function only measures the peak temporary memory and writes it to
/// \p temp_size; a second call with an allocated buffer does the real work.
template <class T>
void BuildSpatialHashTableCUDA(void* temp,
                               size_t& temp_size,
                               const size_t num_points,
                               const T* const points,
                               const T radius,
                               const size_t points_row_splits_size,
                               const int64_t* points_row_splits,
                               const int64_t* hash_table_splits,
                               const size_t hash_table_cell_splits_size,
                               int64_t* hash_table_cell_splits,
                               int64_t* hash_table_index) {
    // A null temp pointer selects the "measure temp memory" pass.
    const bool get_temp_size = !temp;
    const cudaStream_t stream = 0;
    int texture_alignment = 512;
    if (get_temp_size) {
        temp = (char*)1;  // worst case pointer alignment
        temp_size = std::numeric_limits<int64_t>::max();
    }
    MemoryAllocation mem_temp(temp, temp_size, texture_alignment);
    std::pair<uint32_t*, size_t> count_tmp =
            mem_temp.Alloc<uint32_t>(hash_table_cell_splits_size);
    const int batch_size = points_row_splits_size - 1;
    // Cell edge length is the search diameter (2*radius).
    const T voxel_size = 2 * radius;
    const T inv_voxel_size = 1 / voxel_size;
    // count number of points per hash entry
    if (!get_temp_size) {
        cudaMemsetAsync(count_tmp.first, 0, sizeof(uint32_t) * count_tmp.second,
                        stream);
        // One hash table per batch item; offsets come from hash_table_splits
        // and points_row_splits.
        for (int i = 0; i < batch_size; ++i) {
            const size_t hash_table_size =
                    hash_table_splits[i + 1] - hash_table_splits[i];
            const size_t first_cell_idx = hash_table_splits[i];
            const size_t num_points_i =
                    points_row_splits[i + 1] - points_row_splits[i];
            const T* const points_i = points + 3 * points_row_splits[i];
            CountHashTableEntries(stream, count_tmp.first + first_cell_idx,
                                  hash_table_size + 1, inv_voxel_size, points_i,
                                  num_points_i);
        }
    }
    // compute prefix sum of the hash entry counts and store in
    // hash_table_cell_splits
    {
        // First InclusiveSum call only queries the required temp size
        // (null temp pointer); it must run in BOTH passes so mem_temp sees
        // the same allocation sequence and MaxUsed() is accurate.
        std::pair<void*, size_t> inclusive_scan_temp(nullptr, 0);
        cub::DeviceScan::InclusiveSum(inclusive_scan_temp.first,
                                      inclusive_scan_temp.second,
                                      count_tmp.first, hash_table_cell_splits,
                                      count_tmp.second, stream);
        inclusive_scan_temp = mem_temp.Alloc(inclusive_scan_temp.second);
        if (!get_temp_size) {
            cub::DeviceScan::InclusiveSum(
                    inclusive_scan_temp.first, inclusive_scan_temp.second,
                    count_tmp.first, hash_table_cell_splits, count_tmp.second,
                    stream);
        }
        mem_temp.Free(inclusive_scan_temp);
    }
    // now compute the global indices which allows us to lookup the point index
    // for the entries in the hash cell
    if (!get_temp_size) {
        for (int i = 0; i < batch_size; ++i) {
            const size_t hash_table_size =
                    hash_table_splits[i + 1] - hash_table_splits[i];
            const size_t first_cell_idx = hash_table_splits[i];
            const size_t points_start_idx = points_row_splits[i];
            const size_t points_end_idx = points_row_splits[i + 1];
            ComputePointIndexTable(stream, hash_table_index, count_tmp.first,
                                   hash_table_cell_splits + first_cell_idx,
                                   hash_table_size + 1, inv_voxel_size, points,
                                   points_start_idx, points_end_idx);
        }
    }
    mem_temp.Free(count_tmp);
    if (get_temp_size) {
        // return the memory peak as the required temporary memory size.
        temp_size = mem_temp.MaxUsed();
        return;
    }
}
/// Sorts each segment of (distance, index) pairs by distance using CUB's
/// segmented radix sort.  Two-pass protocol: call with \p temp == null to get
/// the required \p temp_size, then call again with an allocated buffer.
template <class T>
void SortPairs(void* temp,
               size_t& temp_size,
               int64_t num_indices,
               int64_t num_segments,
               const int64_t* query_neighbors_row_splits,
               int64_t* indices_unsorted,
               T* distances_unsorted,
               int64_t* indices_sorted,
               T* distances_sorted) {
    const bool get_temp_size = !temp;
    int texture_alignment = 512;
    if (get_temp_size) {
        temp = (char*)1;  // worst case pointer alignment
        temp_size = std::numeric_limits<int64_t>::max();
    }
    MemoryAllocation mem_temp(temp, temp_size, texture_alignment);
    // First call with a null temp buffer only computes the required
    // scratch size; it runs in both passes so the allocation sequence in
    // mem_temp is identical.
    std::pair<void*, size_t> sort_temp(nullptr, 0);
    cub::DeviceSegmentedRadixSort::SortPairs(
            sort_temp.first, sort_temp.second, distances_unsorted,
            distances_sorted, indices_unsorted, indices_sorted, num_indices,
            num_segments, query_neighbors_row_splits,
            query_neighbors_row_splits + 1);
    sort_temp = mem_temp.Alloc(sort_temp.second);
    if (!get_temp_size) {
        cub::DeviceSegmentedRadixSort::SortPairs(
                sort_temp.first, sort_temp.second, distances_unsorted,
                distances_sorted, indices_unsorted, indices_sorted, num_indices,
                num_segments, query_neighbors_row_splits,
                query_neighbors_row_splits + 1);
    }
    mem_temp.Free(sort_temp);
    if (get_temp_size) {
        // return the memory peak as the required temporary memory size.
        temp_size = mem_temp.MaxUsed();
        return;
    }
}
/// Fixed-radius neighbor search over a prebuilt spatial hash table.
/// Two-pass protocol: with \p temp == null only the required \p temp_size is
/// computed; with a real buffer the neighbor counts, row splits, and the
/// variable-length index/distance arrays are produced via output_allocator.
template <class T>
void FixedRadiusSearchCUDA(void* temp,
                           size_t& temp_size,
                           int64_t* query_neighbors_row_splits,
                           size_t num_points,
                           const T* const points,
                           size_t num_queries,
                           const T* const queries,
                           const T radius,
                           const size_t points_row_splits_size,
                           const int64_t* const points_row_splits,
                           const size_t queries_row_splits_size,
                           const int64_t* const queries_row_splits,
                           const int64_t* const hash_table_splits,
                           size_t hash_table_cell_splits_size,
                           const int64_t* const hash_table_cell_splits,
                           const int64_t* const hash_table_index,
                           NeighborSearchAllocator<T>& output_allocator) {
    const bool get_temp_size = !temp;
    const cudaStream_t stream = 0;
    int texture_alignment = 512;
    const Metric metric = Metric::L2;
    if (get_temp_size) {
        temp = (char*)1;  // worst case pointer alignment
        temp_size = std::numeric_limits<int64_t>::max();
    }
    // return empty output arrays if there are no points
    if ((0 == num_points || 0 == num_queries) && !get_temp_size) {
        cudaMemsetAsync(query_neighbors_row_splits, 0,
                        sizeof(int64_t) * (num_queries + 1), stream);
        int64_t* indices_ptr;
        output_allocator.AllocIndices(&indices_ptr, 0);
        T* distances_ptr;
        output_allocator.AllocDistances(&distances_ptr, 0);
        return;
    }
    MemoryAllocation mem_temp(temp, temp_size, texture_alignment);
    const int batch_size = points_row_splits_size - 1;
    // Cell edge length is the search diameter (2*radius).
    const T voxel_size = 2 * radius;
    const T inv_voxel_size = 1 / voxel_size;
    std::pair<int64_t*, size_t> query_neighbors_count =
            mem_temp.Alloc<int64_t>(num_queries);
    // we need this value to compute the size of the index array
    if (!get_temp_size) {
        // Pass 1: count neighbors per query, one batch item at a time.
        for (int i = 0; i < batch_size; ++i) {
            const size_t hash_table_size =
                    hash_table_splits[i + 1] - hash_table_splits[i];
            const size_t first_cell_idx = hash_table_splits[i];
            const size_t queries_start_idx = queries_row_splits[i];
            const T* const queries_i = queries + 3 * queries_row_splits[i];
            const size_t num_queries_i =
                    queries_row_splits[i + 1] - queries_row_splits[i];
            CountNeighbors(
                    stream, query_neighbors_count.first + queries_start_idx,
                    hash_table_index, hash_table_cell_splits + first_cell_idx,
                    hash_table_size + 1, queries_i, num_queries_i, points,
                    inv_voxel_size, radius, metric);
        }
    }
    // we need this value to compute the size of the index array
    int64_t last_prefix_sum_entry = 0;
    {
        // Size-query call runs in both passes so mem_temp records the same
        // allocation sequence for MaxUsed().
        std::pair<void*, size_t> inclusive_scan_temp(nullptr, 0);
        cub::DeviceScan::InclusiveSum(
                inclusive_scan_temp.first, inclusive_scan_temp.second,
                query_neighbors_count.first, query_neighbors_row_splits + 1,
                num_queries, stream);
        inclusive_scan_temp = mem_temp.Alloc(inclusive_scan_temp.second);
        if (!get_temp_size) {
            // set first element to zero
            cudaMemsetAsync(query_neighbors_row_splits, 0, sizeof(int64_t),
                            stream);
            cub::DeviceScan::InclusiveSum(
                    inclusive_scan_temp.first, inclusive_scan_temp.second,
                    query_neighbors_count.first, query_neighbors_row_splits + 1,
                    num_queries, stream);
            // get the last value
            cudaMemcpyAsync(&last_prefix_sum_entry,
                            query_neighbors_row_splits + num_queries,
                            sizeof(int64_t), cudaMemcpyDeviceToHost, stream);
            // wait for the async copies
            while (cudaErrorNotReady == cudaStreamQuery(stream)) { /*empty*/
            }
        }
        mem_temp.Free(inclusive_scan_temp);
    }
    mem_temp.Free(query_neighbors_count);
    if (get_temp_size) {
        // return the memory peak as the required temporary memory size.
        temp_size = mem_temp.MaxUsed();
        return;
    }
    if (!get_temp_size) {
        // Pass 2: allocate exact-sized outputs and write the neighbor lists.
        // allocate the output array for the neighbor indices
        const size_t num_indices = last_prefix_sum_entry;
        int64_t* indices_ptr;
        T* distances_ptr;
        output_allocator.AllocIndices(&indices_ptr, num_indices);
        output_allocator.AllocDistances(&distances_ptr, num_indices);
        for (int i = 0; i < batch_size; ++i) {
            const size_t hash_table_size =
                    hash_table_splits[i + 1] - hash_table_splits[i];
            const size_t first_cell_idx = hash_table_splits[i];
            const T* const queries_i = queries + 3 * queries_row_splits[i];
            const size_t num_queries_i =
                    queries_row_splits[i + 1] - queries_row_splits[i];
            WriteNeighborsIndicesAndDistances(
                    stream, indices_ptr, distances_ptr,
                    query_neighbors_row_splits + queries_row_splits[i],
                    hash_table_index, hash_table_cell_splits + first_cell_idx,
                    hash_table_size + 1, queries_i, num_queries_i, points,
                    inv_voxel_size, radius, metric, true);
        }
    }
}
//// Hybrid Search
/// Hybrid (radius-limited k-NN) search: for every query returns up to
/// \p max_knn neighbors within \p radius, written into a fixed
/// num_queries x max_knn output layout.
template <class T>
void HybridSearchCUDA(size_t num_points,
                      const T* const points,
                      size_t num_queries,
                      const T* const queries,
                      const T radius,
                      const int max_knn,
                      const size_t points_row_splits_size,
                      const int64_t* const points_row_splits,
                      const size_t queries_row_splits_size,
                      const int64_t* const queries_row_splits,
                      const int64_t* const hash_table_splits,
                      size_t hash_table_cell_splits_size,
                      const int64_t* const hash_table_cell_splits,
                      const int64_t* const hash_table_index,
                      NeighborSearchAllocator<T>& output_allocator) {
    const cudaStream_t stream = 0;
    const Metric metric = Metric::L2;
    // return empty output arrays if there are no points
    if (0 == num_points || 0 == num_queries) {
        int64_t* indices_ptr;
        output_allocator.AllocIndices(&indices_ptr, 0);
        T* distances_ptr;
        output_allocator.AllocDistances(&distances_ptr, 0);
        return;
    }
    const int batch_size = points_row_splits_size - 1;
    // Cell edge length is the search diameter (2*radius).
    const T voxel_size = 2 * radius;
    const T inv_voxel_size = 1 / voxel_size;
    // Allocate output pointers.
    // NOTE(review): the extra -1 / 0 arguments presumably are fill values for
    // unused slots -- confirm against the allocator's interface.
    const size_t num_indices = num_queries * max_knn;
    int64_t* indices_ptr;
    output_allocator.AllocIndices(&indices_ptr, num_indices, -1);
    T* distances_ptr;
    output_allocator.AllocDistances(&distances_ptr, num_indices, 0);
    for (int i = 0; i < batch_size; ++i) {
        const size_t hash_table_size =
                hash_table_splits[i + 1] - hash_table_splits[i];
        const size_t first_cell_idx = hash_table_splits[i];
        const T* const queries_i = queries + 3 * queries_row_splits[i];
        const size_t num_queries_i =
                queries_row_splits[i + 1] - queries_row_splits[i];
        WriteNeighborsHybrid(
                stream, indices_ptr, distances_ptr, hash_table_index,
                hash_table_cell_splits + first_cell_idx, hash_table_size + 1,
                queries_i, num_queries_i, points, inv_voxel_size, radius,
                max_knn, metric, true);
    }
}
////
// Explicit template instantiations for float and double so the definitions
// above can be compiled in this translation unit only.
template void BuildSpatialHashTableCUDA(
        void* temp,
        size_t& temp_size,
        const size_t num_points,
        const float* const points,
        const float radius,
        const size_t points_row_splits_size,
        const int64_t* points_row_splits,
        const int64_t* hash_table_splits,
        const size_t hash_table_cell_splits_size,
        int64_t* hash_table_cell_splits,
        int64_t* hash_table_index);
template void BuildSpatialHashTableCUDA(
        void* temp,
        size_t& temp_size,
        const size_t num_points,
        const double* const points,
        const double radius,
        const size_t points_row_splits_size,
        const int64_t* points_row_splits,
        const int64_t* hash_table_splits,
        const size_t hash_table_cell_splits_size,
        int64_t* hash_table_cell_splits,
        int64_t* hash_table_index);
// SortPairs for float/double distances.
template void SortPairs(void* temp,
                        size_t& temp_size,
                        int64_t num_indices,
                        int64_t num_segments,
                        const int64_t* query_neighbors_row_splits,
                        int64_t* indices_unsorted,
                        float* distances_unsorted,
                        int64_t* indices_sorted,
                        float* distances_sorted);
template void SortPairs(void* temp,
                        size_t& temp_size,
                        int64_t num_indices,
                        int64_t num_segments,
                        const int64_t* query_neighbors_row_splits,
                        int64_t* indices_unsorted,
                        double* distances_unsorted,
                        int64_t* indices_sorted,
                        double* distances_sorted);
// FixedRadiusSearchCUDA for float/double point data.
template void FixedRadiusSearchCUDA(
        void* temp,
        size_t& temp_size,
        int64_t* query_neighbors_row_splits,
        size_t num_points,
        const float* const points,
        size_t num_queries,
        const float* const queries,
        const float radius,
        const size_t points_row_splits_size,
        const int64_t* const points_row_splits,
        const size_t queries_row_splits_size,
        const int64_t* const queries_row_splits,
        const int64_t* const hash_table_splits,
        size_t hash_table_cell_splits_size,
        const int64_t* const hash_table_cell_splits,
        const int64_t* const hash_table_index,
        NeighborSearchAllocator<float>& output_allocator);
template void FixedRadiusSearchCUDA(
        void* temp,
        size_t& temp_size,
        int64_t* query_neighbors_row_splits,
        size_t num_points,
        const double* const points,
        size_t num_queries,
        const double* const queries,
        const double radius,
        const size_t points_row_splits_size,
        const int64_t* const points_row_splits,
        const size_t queries_row_splits_size,
        const int64_t* const queries_row_splits,
        const int64_t* const hash_table_splits,
        size_t hash_table_cell_splits_size,
        const int64_t* const hash_table_cell_splits,
        const int64_t* const hash_table_index,
        NeighborSearchAllocator<double>& output_allocator);
// HybridSearchCUDA for float/double point data.
template void HybridSearchCUDA(
        size_t num_points,
        const float* const points,
        size_t num_queries,
        const float* const queries,
        const float radius,
        const int max_knn,
        const size_t points_row_splits_size,
        const int64_t* const points_row_splits,
        const size_t queries_row_splits_size,
        const int64_t* const queries_row_splits,
        const int64_t* const hash_table_splits,
        size_t hash_table_cell_splits_size,
        const int64_t* const hash_table_cell_splits,
        const int64_t* const hash_table_index,
        NeighborSearchAllocator<float>& output_allocator);
template void HybridSearchCUDA(
        size_t num_points,
        const double* const points,
        size_t num_queries,
        const double* const queries,
        const double radius,
        const int max_knn,
        const size_t points_row_splits_size,
        const int64_t* const points_row_splits,
        const size_t queries_row_splits_size,
        const int64_t* const queries_row_splits,
        const int64_t* const hash_table_splits,
        size_t hash_table_cell_splits_size,
        const int64_t* const hash_table_cell_splits,
        const int64_t* const hash_table_index,
        NeighborSearchAllocator<double>& output_allocator);
} // namespace nns
} // namespace core
} // namespace open3d
|
388cf30b409e50aed6220ef85c09742888cd7479.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
//
// gpu-poly
//
//
// Polygon functions for the GPU
//
#include <cstdlib>
#include <cstring>
#include <cstdio>
#include "spatial.cuh"
#define CLIP 0
#define SUBJ 1
//
// Determines of the specified point is in the specified polygon
//
//
//
// Even/odd ray-casting point-in-polygon test: cast a horizontal ray from
// `point` and toggle `inside` at every polygon edge it crosses.
//
__device__ bool PointInPoly(vertex point, vertex *poly, int polySize)
{
    bool inside = false;
    int prev = polySize - 1;
    for( int cur = 0; cur < polySize; ++cur ) {
        // Edge (prev, cur) straddles the ray's y level?
        if( (poly[cur].y > point.y) != (poly[prev].y > point.y) ) {
            // X coordinate where the edge crosses the ray's y level.
            const float xCross = (poly[prev].x - poly[cur].x) *
                    ( point.y - poly[cur].y) /
                    (poly[prev].y - poly[cur].y) + poly[cur].x;
            if( point.x < xCross )
                inside = !inside;
        }
        prev = cur;
    }
    return inside;
}
//
// Trace the subject polygon and determine the vertices interior
// to the clip polygon using the even/odd rule. In other words,
// determine if the first vertex of subj is inside the clip poly,
// then toggle internal every time an intersection vertex is
// encountered. (Intersection vertices have a non-zero alpha value)
//
__device__ void MarkEntry(vertex *clip, int clipSize, vertex *subj, int subjSize)
{
    // Seed the even/odd parity with an explicit point-in-polygon test on the
    // first subject vertex, then walk the `next` links.
    bool in = PointInPoly(subj[0], clip, clipSize);
    int i = subj[0].next;
    subj[0].internal = in;
    while( i < subjSize ) {
        // Every intersection vertex (non-zero alpha) flips inside/outside.
        if( subj[i].alpha != 0.0f ) {
            in = !in;
        }
        // Need to mark exit points also
        // (an intersection vertex where we just left the clip polygon is
        // still flagged internal so the extraction trace can pivot on it).
        if( subj[i].alpha != 0.0f && in == false )
            subj[i].internal = !in;
        else
            subj[i].internal = in;
        i = subj[i].next;
    }
}
//
// Combine the points and arrange the intersection points by alpha value.
// Note!!! - This function needs to be called by a single thread. There's
// probably a better way to do this...//
//
__device__ void CombinePoints(vertex *newArray, vertex *srcArray,
                              int srcSize, int &newSize)
{
    // index  : next write position in newArray
    // srcIdx : read position in srcArray
    // base   : index of the most recent non-intersection vertex; alpha-sorted
    //          insertion of intersection points starts from here
    int index = 0, srcIdx = 0, next, prev, base = 0;
    newSize = 0;
    // Hold on to index of last non-intersect point... Place new point at
    // the end of the array, Adjust next index of intersection points.
    while( srcIdx < srcSize ) {
        // x == -1 marks an empty slot (no intersection computed); skip those.
        if( srcArray[srcIdx].x != -1 ) {
            // Copy to new array in order.
            newArray[index] = srcArray[srcIdx++];
            newArray[index].next = index + 1;
            newSize++;
            if( newArray[index].alpha != 0 ) {
                // Intersection point, set index to be in alpha order.
                // Start at base and insert new point in appropriate spot.
                next = newArray[base].next;
                prev = base;
                while( next < index ) {
                    if( newArray[index].alpha < newArray[next].alpha ) {
                        // Splice the new point into the linked order before
                        // `next` and bridge the gap it leaves at the tail.
                        newArray[prev].next = index;
                        newArray[index].next = next;
                        newArray[index - 1].next = index + 1;
                        break;
                    }
                    prev = next;
                    next = newArray[next].next;
                }
            } else {
                // Not an intersection point, use as next base
                base = index;
            }
            index++;
        } else
            srcIdx++;
    }
}
//
// Adjusts the link to place intersection points in the proper order. Also
// skip over empty vertices. Changed to this method so that the linkTag will
// refer to the proper vertex of the other polygon when extracting the intersecting
// or union polygon.
//
// NOTE! This function needs to be called with a single thread
//
// Re-threads the `next` links so intersection vertices appear in ascending
// alpha order after their base (non-intersection) vertex, skipping the empty
// slots (x == -1) left by CalcIntersections.  Single-thread only.
// NOTE(review): the `stride` parameter is unused here -- confirm whether it
// was intended to skip rows of the intersection grid.
void __device__ LinkPoints(vertex *verts, int size, int stride)
{
    // base : last non-intersection vertex (start of the alpha-ordered run)
    // last : vertex whose `next` link points at the slot being processed
    int index = 0, next, prev, base = 0, last = 0;
    // Hold on to index of last non-intersect point... Insert index of
    // intersection point in the proper place...
    while(index < size ) {
        if( verts[index].x != -1 ) {
            verts[last].next = index;
            if( verts[index].alpha != 0 ) {
                // Intersection point, adjust next pointer
                next = verts[base].next;
                prev = base;
                while( next < index ) {
                    if( verts[index].alpha < verts[next].alpha ) {
                        // Insert before `next` to keep alpha order.
                        verts[prev].next = index;
                        verts[index].next = next;
                        break;
                    }
                    prev = next;
                    next = verts[next].next;
                }
                // Only advance `last` when the point went at the end of the
                // current alpha run.
                if( next >= index )
                    last = index;
            } else {
                // Not an intersection point, use as base.
                base = index;
                last = index;
            }
        }
        index++;
    }
    // Terminate the list one past the last slot.
    verts[size - 1].next = size;
}
//
// Calculates the euclidean distance between (x1, y1) and
// (x2, y2)
//
//
//
// Euclidean distance between (x1, y1) and (x2, y2).
//
__device__ float Dist(float x1, float y1, float x2, float y2)
{
    const float dx = x1 - x2;
    const float dy = y1 - y2;
    return sqrtf(dx * dx + dy * dy);
}
//
// Calculate the intersection between the two line segments p and q.
// The alpha value is a measure between 0 and 1 that indicated the
// distance from the respective point. Used to order the intersection
// points.
// Note! - This function is called by only one thread.
//
//
__device__ void Intersect(vertex *p1, vertex *p2, vertex *q1, vertex *q2,
                          float *xInt, float *yInt, float *alphaP, float *alphaQ)
{
    float det;
    // -1 in all outputs signals "no intersection".
    *yInt = *xInt = *alphaP = *alphaQ = -1.0f;
    // Check if lines are parallel
    // (2x2 determinant of the direction vectors; zero means parallel).
    det = (p2->x - p1->x) * (q2->y - q1->y) -
          (p2->y - p1->y) * (q2->x - q1->x);
    if( det != 0 ) {
        // Lines are not parallel
        float tp, tq;
        // Check if the segments actually intersect
        // (tp, tq are the line parameters along p and q respectively).
        tp = ((q1->x - p1->x) * (q2->y - q1->y) - (q1->y - p1->y) * (q2->x - q1->x)) / det;
        tq = ((p2->y - p1->y) * (q1->x - p1->x) - (p2->x - p1->x) * (q1->y - p1->y)) / det;
        if( tp >= 0 && tp <= 1 && tq >= 0 && tq <= 1 ) {
            // Line segments intersect
            // Calculate the actual intersection
            *xInt = p1->x + tp * (p2->x - p1->x);
            *yInt = p1->y + tp * (p2->y - p1->y);
            // Alpha = fractional position of the intersection along each
            // segment (0 at the first endpoint, 1 at the second).
            *alphaP = Dist(p1->x, p1->y, *xInt, *yInt) / Dist(p1->x, p1->y, p2->x, p2->y);
            *alphaQ = Dist(q1->x, q1->y, *xInt, *yInt) / Dist(q1->x, q1->y, q2->x, q2->y);
        }
    }
}
//
// Calculates the intersection, if it exists, of each pair of edges from
// clip and subj. Returns two polygons consisting of the original vertices
// and the intersection points.
//
//
// One thread per (subject-slot, clip-slot) pair: computes every pairwise
// edge intersection and scatters original vertices plus intersection points
// into newSubj/newClip, cross-linked via linkTag.  The cleanup phase (link
// ordering and entry marking) runs on threads 0 and 1 only, so the launch
// must use at least 2 threads.
__global__ void CalcIntersections(vertex *clip, int clipSize, vertex *subj, int subjSize,
                                  vertex *newClip, vertex *newSubj, int *newPolySizes)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x,
        subjStart, clipStart, index;
    float xInt, yInt, alphaSubj, alphaClip;
    // Calculate the intersections between clip and subj polygons
    // (loop strides by the total thread count to cover subjSize*clipSize
    // work items with any launch size).
    while( tid < (subjSize * clipSize) ) {
        if( (tid % clipSize) == 0 ) {
            // Slot 0 of each row holds the original subject vertex.
            newSubj[tid] = subj[tid / clipSize];
        } else {
            subjStart = tid / clipSize;
            clipStart = ((tid % clipSize) - 1);
            // Mirror position of this intersection in the clip layout.
            index = ( clipStart * subjSize) + (tid / clipSize) + 1;
            Intersect(&subj[subjStart], &subj[subjStart + 1],
                      &clip[clipStart], &clip[clipStart + 1],
                      &xInt, &yInt, &alphaSubj, &alphaClip);
            newSubj[tid].x = xInt;
            newSubj[tid].y = yInt;
            newSubj[tid].next = -1;
            newSubj[tid].alpha = alphaSubj;
            newClip[index].x = xInt;
            newClip[index].y = yInt;
            newClip[index].next = -1;
            newClip[index].alpha = alphaClip;
            // Link the the two polygons at the intersection
            if( xInt != -1 ) {
                newSubj[tid].linkTag = index;
                newClip[index].linkTag = tid;
            }
        }
        // NOTE(review): the subj seeding above tests (tid % clipSize) but the
        // clip seeding here tests (tid % subjSize) with the same tid layout;
        // confirm this asymmetry is intentional when subjSize != clipSize.
        if( (tid % subjSize) == 0 ) {
            newClip[tid] = clip[tid / subjSize];
        }
        tid += blockDim.x * gridDim.x;
    }
    tid = threadIdx.x + blockIdx.x * blockDim.x;
    // Cleanup and order the intersections
    // NOTE(review): no barrier between the fill loop and the single-thread
    // cleanup; this is only safe within one block (launch uses dim3(1)).
    if( tid == 0 ) {
        //CombinePoints(newClip, newClip, ((clipSize - 1) * subjSize) + 1, newPolySizes[0]);
        newPolySizes[CLIP] = ((clipSize - 1) * subjSize) + 1;
        LinkPoints(newClip, newPolySizes[CLIP], subjSize);
    }
    if( tid == 1 ) {
        //CombinePoints(newSubj, newSubj, ((subjSize - 1) * clipSize) + 1, newPolySizes[1]);
        newPolySizes[SUBJ] = ((subjSize - 1) * clipSize) + 1;
        LinkPoints(newSubj, newPolySizes[SUBJ], clipSize);
    }
    if( tid == 0 ) {
        MarkEntry(newClip, newPolySizes[CLIP], newSubj, newPolySizes[SUBJ]);
    }
    if( tid == 1 ) {
        MarkEntry(newSubj, newPolySizes[SUBJ], newClip, newPolySizes[CLIP]);
    }
}
//
// Allocates device memory for the 4 polygons. Clip, Subj
// clip w/ intersections, subj w/ intersections.
// e.g. |clip| = m , |subj|= n , |intClip| = n*m , |intSubj| = n*m
// Allocates device memory for the 4 polygons plus the 2-int size buffer.
// |clip| = m, |subj| = n, |intClip| = |intSubj| = n*m.
// Returns false on any allocation failure.  All out-parameters are nulled
// first so a partial failure never leaves garbage pointers for the caller's
// later FreeDevMem()/hipFree() calls.
bool AllocateDevMem(int *&size, vertex *&clip, vertex *&subj, vertex *&intClip, vertex *&intSubj,
                    int clipSize, int subjSize)
{
    // BUGFIX: previously the out-parameters were left uninitialized, so a
    // failed allocation made the caller free indeterminate pointers.
    size = NULL;
    clip = subj = intClip = intSubj = NULL;
    bool result = true;
    hipError_t devResult;
    devResult = hipMalloc((void**)&clip, clipSize * sizeof(vertex));
    if( devResult != hipSuccess ) {
        result = false;
    }
    if( result ) {
        devResult = hipMalloc((void**)&subj, subjSize * sizeof(vertex));
        if( devResult != hipSuccess ) {
            result = false;
        }
    }
    if( result ) {
        devResult = hipMalloc((void**)&intSubj, subjSize * clipSize * sizeof(vertex));
        if( devResult != hipSuccess ) {
            result = false;
        }
    }
    if( result ) {
        devResult = hipMalloc((void**)&intClip, subjSize * clipSize * sizeof(vertex));
        if( devResult != hipSuccess ) {
            result = false;
        }
    }
    if( result ) {
        devResult = hipMalloc((void**)&size, 2 * sizeof(int));
        if( devResult != hipSuccess ) {
            result = false;
        }
    }
    return result;
}
// Releases the four device polygon buffers and nulls the caller's pointers.
// Always returns true.
bool FreeDevMem(vertex **clip, vertex **subj, vertex **intClip, vertex **intSubj)
{
    vertex **buffers[] = { clip, subj, intClip, intSubj };
    for( int i = 0; i < 4; ++i ) {
        hipFree(*buffers[i]);
        *buffers[i] = NULL;
    }
    return true;
}
// Allocate host memory for the two polygon
// |clip| = n*m |subj| = n*m
// Allocates the host staging buffers for the two augmented polygons,
// |clip| = |subj| = n*m vertices.  Returns false on failure; on failure
// neither pointer owns memory (no partial allocation is leaked).
bool AllocateIntBuffers(vertex **clip, vertex **subj, int clipSize, int subjSize)
{
    bool result = true;
    *clip = (vertex*)malloc(clipSize * subjSize * sizeof(vertex));
    if( *clip == NULL ) {
        result = false;
    }
    if( result ) {
        *subj = (vertex*)malloc(clipSize * subjSize * sizeof(vertex));
        if( *subj == NULL ) {
            // BUGFIX: previously the first buffer leaked (and stayed
            // non-null) when the second allocation failed.
            free(*clip);
            *clip = NULL;
            result = false;
        }
    }
    return result;
}
// Thin wrapper around hipMemcpy that folds the status code into a bool:
// true on hipSuccess, false otherwise.
bool CopyData(vertex *dest, vertex *src, int size, enum hipMemcpyKind dir)
{
    return hipMemcpy(dest, src, size, dir) == hipSuccess;
}
//
// Adds a vertex to the specified polygon by reallocating the
// buffer. May want to change this to work with chunks of memory
// rather than a single vertex at a time.
//
//
// Appends one vertex to the polygon by growing the buffer with realloc.
// On failure the original buffer and size are left untouched and false is
// returned.  (Reallocating per vertex is simple but O(n^2) overall; a
// capacity-doubling scheme would be the usual alternative.)
//
bool AddVertex(vertex *&poly, int& size, vertex *vert)
{
    vertex *grown = (vertex*)realloc(poly, (size + 1) * sizeof(vertex));
    if( grown == NULL )
        return false;
    poly = grown;
    poly[size] = *vert;
    ++size;
    return true;
}
//
// Builds the intersection polygon by tracing the subject
// polygon from the first intersection point. When the next
// vertex is not internal we jump to the corresponding vertex
// in the clip polygon and continue form there. This pattern is
// continued (back and forth between clip & subj for internal
// vertices) until we reach the starting vertex.
//
//
//
// Builds the intersection polygon by tracing the subject polygon from the
// first internal vertex; whenever the next vertex is not internal the trace
// hops to the cross-linked vertex in the clip polygon, and vice versa, until
// the start vertex is reached again.  Returns false if appending a vertex
// fails.  (Debug printf scaffolding removed from the trace loop.)
//
bool BuildIntPoly(vertex *&intPoly, int& intPolySize,
                  vertex *subj, int subjSize,
                  vertex *clip, int clipSize)
{
    bool result = true, inSubj = true;
    vertex *curVert = subj, *firstVert;
    // Find first intersection point in the subj poly.
    // NOTE(review): if no vertex is internal (polygons don't overlap) this
    // walk never terminates -- confirm callers guarantee an overlap.
    while( curVert->internal == false ) {
        curVert = &subj[curVert->next];
    }
    firstVert = curVert;
    if( AddVertex(intPoly, intPolySize, curVert) ) {
        do {
            if( inSubj ) {
                if( subj[curVert->next].internal ) {
                    curVert = &subj[curVert->next];
                } else {
                    // Next subj vertex is outside: hop to the linked clip
                    // vertex and continue tracing there.
                    curVert = &clip[clip[curVert->linkTag].next];
                    inSubj = false;
                }
            } else {
                if( clip[curVert->next].internal ) {
                    curVert = &clip[curVert->next];
                } else {
                    curVert = &subj[subj[curVert->linkTag].next];
                    inSubj = true;
                }
            }
            if( !AddVertex(intPoly, intPolySize, curVert) ) {
                result = false;
                break;
            }
        } while( !(curVert->x == firstVert->x &&
                   curVert->y == firstVert->y) );
    } else {
        result = false;
    }
    return result;
}
//
// Builds the union polygon similar to the way the
// intersection polygon is built. Though instead of
// looking for internal vertices, we want external ones.
//
//
//
// Builds the union polygon similar to the way the
// intersection polygon is built. Though instead of
// looking for internal vertices, we want external ones.
//
//
bool BuildUnionPoly(vertex *&unionPoly, int& unionPolySize,
                    vertex *subj, int subjSize,
                    vertex *clip, int clipSize)
{
    bool result = true, inSubj = true;
    vertex *curVert = subj, *firstVert;
    // Find first intersection point in the subj poly
    // NOTE(review): same unbounded walk as BuildIntPoly -- never terminates
    // when no vertex is internal; confirm callers guarantee an overlap.
    while( curVert->internal == false ) {
        curVert = &subj[curVert->next];
    }
    firstVert = curVert;
    if( AddVertex(unionPoly, unionPolySize, curVert) ) {
        do {
            // Same two-polygon trace as BuildIntPoly but with the internal
            // tests negated: follow external vertices, hop at entries.
            if( inSubj ) {
                if( !subj[curVert->next].internal ) {
                    curVert = &subj[curVert->next];
                } else {
                    curVert = &clip[clip[curVert->linkTag].next];
                    inSubj = false;
                }
            } else {
                if( !clip[curVert->next].internal ) {
                    curVert = &clip[curVert->next];
                } else {
                    curVert = &subj[subj[curVert->linkTag].next];
                    inSubj = true;
                }
            }
            if( !AddVertex(unionPoly, unionPolySize, curVert) ) {
                result = false;
                break;
            }
        } while( !(curVert->x == firstVert->x &&
                   curVert->y == firstVert->y) );
    } else {
        result = false;
    }
    return result;
}
//
// Calculate the intersection polygon
//
//
//
// Calculates the intersection polygon of subj and clip on the GPU.
// Returns nonzero on success; *intPoly/*intPolySize receive the result
// (grown via AddVertex).
//
int gpuIntersect(VERTEX *subj, int subjSize, VERTEX *clip, int clipSize,
                 VERTEX **intPoly, int *intPolySize)
{
    int result = 1;
    // Null everything up front so the unconditional cleanup below is safe
    // even when an early allocation fails.
    vertex *devSubj = NULL, *devClip = NULL, *devIntSubj = NULL, *devIntClip = NULL,
           *intClip = NULL, *intSubj = NULL;
    int *devSize = NULL, size[2];
    hipError_t devResult;
    result = AllocateDevMem(devSize, devClip, devSubj, devIntClip, devIntSubj, clipSize, subjSize);
    if( result )
        result = AllocateIntBuffers(&intClip, &intSubj, clipSize, subjSize);
    if( result )
        result = CopyData(devSubj, subj, subjSize * sizeof(vertex), hipMemcpyHostToDevice);
    if( result )
        result = CopyData(devClip, clip, clipSize * sizeof(vertex), hipMemcpyHostToDevice);
    // Calulate the intersection points.
    if( result ) {
        hipLaunchKernelGGL(( CalcIntersections), dim3(1), dim3(128), 0, 0, devClip, clipSize, devSubj, subjSize,
                           devIntClip, devIntSubj, devSize);
        devResult = hipMemcpy(size, devSize, 2 * sizeof(int), hipMemcpyDeviceToHost);
        if( devResult != hipSuccess ) {
            result = 0;
        }
    }
    // BUGFIX: these device-to-host copies were attached to a dangling `else`
    // and never executed; the debug exit(1)/stderr dumps that followed have
    // been removed so the function can actually produce a result.
    if( result )
        result = CopyData(intSubj, devIntSubj, clipSize * subjSize * sizeof(vertex), hipMemcpyDeviceToHost);
    if( result )
        result = CopyData(intClip, devIntClip, clipSize * subjSize * sizeof(vertex), hipMemcpyDeviceToHost);
    if( result ) {
        result = BuildIntPoly(*intPoly, *intPolySize, intSubj, size[SUBJ], intClip, size[CLIP]);
    }
    // Cleanup.  BUGFIX: FreeDevMem always returns true, so its return value
    // must not overwrite `result`; also release the previously leaked
    // devSize and host staging buffers.
    FreeDevMem(&devClip, &devSubj, &devIntClip, &devIntSubj);
    hipFree(devSize);
    free(intClip);
    free(intSubj);
    return result;
}
//
// Calculate the union polygon
//
//
//
// Calculates the union polygon of subj and clip on the GPU.
// Returns nonzero on success; *intPoly/*intPolySize receive the result
// (grown via AddVertex).
//
int gpuUnion(VERTEX *subj, int subjSize, VERTEX *clip, int clipSize,
             VERTEX **intPoly, int *intPolySize)
{
    int result = 1;
    // Null everything up front so the unconditional cleanup below is safe
    // even when an early allocation fails.
    vertex *devSubj = NULL, *devClip = NULL, *devIntSubj = NULL, *devIntClip = NULL,
           *intClip = NULL, *intSubj = NULL;
    int *devSize = NULL, size[2];
    hipError_t devResult;
    result = AllocateDevMem(devSize, devClip, devSubj, devIntClip, devIntSubj, clipSize, subjSize);
    if( result )
        result = AllocateIntBuffers(&intClip, &intSubj, clipSize, subjSize);
    if( result )
        result = CopyData(devSubj, subj, subjSize * sizeof(vertex), hipMemcpyHostToDevice);
    if( result )
        result = CopyData(devClip, clip, clipSize * sizeof(vertex), hipMemcpyHostToDevice);
    // Calulate the intersection points.
    if( result ) {
        hipLaunchKernelGGL(( CalcIntersections), dim3(1), dim3(128), 0, 0, devClip, clipSize, devSubj, subjSize,
                           devIntClip, devIntSubj, devSize);
        devResult = hipMemcpy(size, devSize, 2 * sizeof(int), hipMemcpyDeviceToHost);
        if( devResult != hipSuccess ) {
            result = 0;
        }
    }
    // BUGFIX: these device-to-host copies were attached to a dangling `else`
    // and never executed, so the union was built from uninitialized buffers.
    if( result )
        result = CopyData(intSubj, devIntSubj, clipSize * subjSize * sizeof(vertex), hipMemcpyDeviceToHost);
    if( result )
        result = CopyData(intClip, devIntClip, clipSize * subjSize * sizeof(vertex), hipMemcpyDeviceToHost);
    if( result ) {
        result = BuildUnionPoly(*intPoly, *intPolySize, intSubj, size[SUBJ], intClip, size[CLIP]);
    }
    // Cleanup.  BUGFIX: FreeDevMem always returns true, so its return value
    // must not overwrite `result`; also release the previously leaked
    // devSize and host staging buffers.
    FreeDevMem(&devClip, &devSubj, &devIntClip, &devIntSubj);
    hipFree(devSize);
    free(intClip);
    free(intSubj);
    return result;
}
| 388cf30b409e50aed6220ef85c09742888cd7479.cu | //
//
// gpu-poly
//
//
// Polygon functions for the GPU
//
#include <cstdlib>
#include <cstring>
#include <cstdio>
#include "spatial.cuh"
#define CLIP 0
#define SUBJ 1
//
// Determines of the specified point is in the specified polygon
//
//
//
// Even/odd ray-casting point-in-polygon test: cast a horizontal ray from
// `point` and toggle `inside` at every polygon edge it crosses.
//
__device__ bool PointInPoly(vertex point, vertex *poly, int polySize)
{
    bool inside = false;
    int prev = polySize - 1;
    for( int cur = 0; cur < polySize; ++cur ) {
        // Edge (prev, cur) straddles the ray's y level?
        if( (poly[cur].y > point.y) != (poly[prev].y > point.y) ) {
            // X coordinate where the edge crosses the ray's y level.
            const float xCross = (poly[prev].x - poly[cur].x) *
                    ( point.y - poly[cur].y) /
                    (poly[prev].y - poly[cur].y) + poly[cur].x;
            if( point.x < xCross )
                inside = !inside;
        }
        prev = cur;
    }
    return inside;
}
//
// Trace the subject polygon and determine the vertices interior
// to the clip polygon using the even/odd rule. In other words,
// determine if the first vertex of subj is inside the clip poly,
// then toggle internal every time an intersection vertex is
// encountered. (Intersection vertices have a non-zero alpha value)
//
__device__ void MarkEntry(vertex *clip, int clipSize, vertex *subj, int subjSize)
{
    // Seed the even/odd parity with an explicit point-in-polygon test on the
    // first subject vertex, then walk the `next` links.
    bool in = PointInPoly(subj[0], clip, clipSize);
    int i = subj[0].next;
    subj[0].internal = in;
    while( i < subjSize ) {
        // Every intersection vertex (non-zero alpha) flips inside/outside.
        if( subj[i].alpha != 0.0f ) {
            in = !in;
        }
        // Need to mark exit points also
        // (an intersection vertex where we just left the clip polygon is
        // still flagged internal so the extraction trace can pivot on it).
        if( subj[i].alpha != 0.0f && in == false )
            subj[i].internal = !in;
        else
            subj[i].internal = in;
        i = subj[i].next;
    }
}
//
// Combine the points and arrange the intersection points by alpha value.
// Note!!! - This function needs to be called by a single thread. There's
// probably a better way to do this...//
//
// Compacts srcArray (dropping empty slots marked x == -1) into newArray and
// threads intersection vertices into the 'next' chain in ascending alpha
// order after their base (non-intersection) vertex.
// NOTE(review): currently unused -- both call sites in CalcIntersections are
// commented out in favor of LinkPoints(); kept for reference.
__device__ void CombinePoints(vertex *newArray, vertex *srcArray,
								int srcSize, int &newSize)
{
	int index = 0, srcIdx = 0, next, prev, base = 0;
	newSize = 0;
	// Hold on to index of last non-intersect point... Place new point at
	// the end of the array, Adjust next index of intersection points.
	while( srcIdx < srcSize ) {
		if( srcArray[srcIdx].x != -1 ) {
			// Copy to new array in order.
			newArray[index] = srcArray[srcIdx++];
			newArray[index].next = index + 1;
			newSize++;
			if( newArray[index].alpha != 0 ) {
				// Intersection point, set index to be in alpha order.
				// Start at base and insert new point in appropriate spot.
				next = newArray[base].next;
				prev = base;
				while( next < index ) {
					if( newArray[index].alpha < newArray[next].alpha ) {
						newArray[prev].next = index;
						newArray[index].next = next;
						newArray[index - 1].next = index + 1;
						break;
					}
					prev = next;
					next = newArray[next].next;
				}
			} else {
				// Not an intersection point, use as next base
				base = index;
			}
			index++;
		} else
			srcIdx++;
	}
}
//
// Adjusts the link to place intersection points in the proper order. Also
// skip over empty vertices. Changed to this method so that the linkTag will
// refer to the proper vertex of the other polygon when extracting the intersecting
// or union polygon.
//
// NOTE! This function needs to be called with a single thread
//
void __device__ LinkPoints(vertex *verts, int size, int stride)
{
	// Rewires 'next' links in place: skips empty slots (x == -1) and
	// inserts intersection vertices (alpha != 0) into the chain in
	// ascending alpha order after their preceding base vertex.
	// NOTE(review): the 'stride' parameter is never read in this body --
	// presumably a leftover; confirm with callers before removing.
	int index = 0, next, prev, base = 0, last = 0;
	// Hold on to index of last non-intersect point... Insert index of
	// intersection point in the proper place...
	while(index < size ) {
		if( verts[index].x != -1 ) {
			verts[last].next = index;
			if( verts[index].alpha != 0 ) {
				// Intersection point, adjust next pointer
				next = verts[base].next;
				prev = base;
				while( next < index ) {
					if( verts[index].alpha < verts[next].alpha ) {
						verts[prev].next = index;
						verts[index].next = next;
						break;
					}
					prev = next;
					next = verts[next].next;
				}
				// Only advance 'last' when this vertex stayed at the tail
				// (it was not spliced earlier into the alpha chain).
				if( next >= index )
					last = index;
			} else {
				// Not an intersection point, use as base.
				base = index;
				last = index;
			}
		}
		index++;
	}
	// Terminate the chain: 'size' acts as the end-of-list sentinel.
	verts[size - 1].next = size;
}
//
// Calculates the euclidean distance between (x1, y1) and
// (x2, y2)
//
//
__device__ float Dist(float x1, float y1, float x2, float y2)
{
	// Euclidean distance between (x1, y1) and (x2, y2).
	float dx = x1 - x2;
	float dy = y1 - y2;
	return sqrtf(dx * dx + dy * dy);
}
//
// Calculate the intersection between the two line segments p and q.
// The alpha value is a measure between 0 and 1 that indicated the
// distance from the respective point. Used to order the intersection
// points.
// Note! - This function is called by only one thread.
//
//
__device__ void Intersect(vertex *p1, vertex *p2, vertex *q1, vertex *q2,
							float *xInt, float *yInt, float *alphaP, float *alphaQ)
{
	float det;
	// -1 in all four outputs is the "no intersection" sentinel.
	*yInt = *xInt = *alphaP = *alphaQ = -1.0f;
	// Check if lines are parallel: det is the 2D cross product of the two
	// direction vectors; zero means parallel (or degenerate) segments.
	det = (p2->x - p1->x) * (q2->y - q1->y) -
		(p2->y - p1->y) * (q2->x - q1->x);
	if( det != 0 ) {
		// Lines are not parallel
		float tp, tq;
		// Check if the segments actually intersect: tp/tq are the
		// parametric positions of the crossing along p and q; both must
		// lie in [0, 1] for the *segments* (not just lines) to meet.
		tp = ((q1->x - p1->x) * (q2->y - q1->y) - (q1->y - p1->y) * (q2->x - q1->x)) / det;
		tq = ((p2->y - p1->y) * (q1->x - p1->x) - (p2->x - p1->x) * (q1->y - p1->y)) / det;
		if( tp >= 0 && tp <= 1 && tq >= 0 && tq <= 1 ) {
			// Line segments intersect
			// Calculate the actual intersection
			*xInt = p1->x + tp * (p2->x - p1->x);
			*yInt = p1->y + tp * (p2->y - p1->y);
			// Alphas: fractional distance of the crossing from p1 / q1,
			// used later to order intersections along an edge.
			*alphaP = Dist(p1->x, p1->y, *xInt, *yInt) / Dist(p1->x, p1->y, p2->x, p2->y);
			*alphaQ = Dist(q1->x, q1->y, *xInt, *yInt) / Dist(q1->x, q1->y, q2->x, q2->y);
		}
	}
}
//
// Calculates the intersection, if it exists, of each pair of edges from
// clip and subj. Returns two polygons consisting of the original vertices
// and the intersection points.
//
//
__global__ void CalcIntersections(vertex *clip, int clipSize, vertex *subj, int subjSize,
									vertex *newClip, vertex *newSubj, int *newPolySizes)
{
	// Each tid covers one (subject vertex, clip edge) pair; the
	// tid += blockDim.x * gridDim.x step below makes this a grid-stride
	// loop over all subjSize * clipSize pairs.
	int tid = threadIdx.x + blockIdx.x * blockDim.x,
		subjStart, clipStart, index;
	float xInt, yInt, alphaSubj, alphaClip;
	// Calculate the intersections between clip and subj polygons
	while( tid < (subjSize * clipSize) ) {
		if( (tid % clipSize) == 0 ) {
			// Slot 0 of each clipSize-group keeps the original subj vertex.
			newSubj[tid] = subj[tid / clipSize];
		} else {
			subjStart = tid / clipSize;
			clipStart = ((tid % clipSize) - 1);
			// Mirror slot for the same intersection inside newClip, whose
			// layout is grouped by clip edge (stride subjSize) instead.
			index = ( clipStart * subjSize) + (tid / clipSize) + 1;
			Intersect(&subj[subjStart], &subj[subjStart + 1],
						&clip[clipStart], &clip[clipStart + 1],
						&xInt, &yInt, &alphaSubj, &alphaClip);
			newSubj[tid].x = xInt;
			newSubj[tid].y = yInt;
			newSubj[tid].next = -1;
			newSubj[tid].alpha = alphaSubj;
			newClip[index].x = xInt;
			newClip[index].y = yInt;
			newClip[index].next = -1;
			newClip[index].alpha = alphaClip;
			// Link the the two polygons at the intersection
			if( xInt != -1 ) {
				newSubj[tid].linkTag = index;
				newClip[index].linkTag = tid;
			}
		}
		// NOTE(review): original clip vertices are placed with the
		// transposed stride (subjSize); verify these writes cannot collide
		// with the intersection slots above when clipSize != subjSize.
		if( (tid % subjSize) == 0 ) {
			newClip[tid] = clip[tid / subjSize];
		}
		tid += blockDim.x * gridDim.x;
	}
	tid = threadIdx.x + blockIdx.x * blockDim.x;
	// Cleanup and order the intersections
	// NOTE(review): no __syncthreads() separates the LinkPoints calls from
	// the MarkEntry calls, yet each MarkEntry walks links written by the
	// *other* thread -- confirm ordering is actually guaranteed here.
	if( tid == 0 ) {
		//CombinePoints(newClip, newClip, ((clipSize - 1) * subjSize) + 1, newPolySizes[0]);
		newPolySizes[CLIP] = ((clipSize - 1) * subjSize) + 1;
		LinkPoints(newClip, newPolySizes[CLIP], subjSize);
	}
	if( tid == 1 ) {
		//CombinePoints(newSubj, newSubj, ((subjSize - 1) * clipSize) + 1, newPolySizes[1]);
		newPolySizes[SUBJ] = ((subjSize - 1) * clipSize) + 1;
		LinkPoints(newSubj, newPolySizes[SUBJ], clipSize);
	}
	if( tid == 0 ) {
		MarkEntry(newClip, newPolySizes[CLIP], newSubj, newPolySizes[SUBJ]);
	}
	if( tid == 1 ) {
		MarkEntry(newSubj, newPolySizes[SUBJ], newClip, newPolySizes[CLIP]);
	}
}
//
// Allocates device memory for the 4 polygons. Clip, Subj
// clip w/ intersections, subj w/ intersections.
// e.g. |clip| = m , |subj|= n , |intClip| = n*m , |intSubj| = n*m
// Allocates the five device buffers (clip, subj, the two intersection
// work arrays of clipSize*subjSize vertices, and the 2-int size buffer).
// All output pointers are NULL-initialized first and released again on
// failure, so the caller can always cudaFree() them safely (cudaFree(NULL)
// is a no-op) no matter which allocation failed -- previously a partial
// failure left later pointers uninitialized and leaked earlier ones.
// Returns true only when every allocation succeeded.
bool AllocateDevMem(int *&size, vertex *&clip, vertex *&subj, vertex *&intClip, vertex *&intSubj,
					int clipSize, int subjSize)
{
	size = NULL;
	clip = subj = intClip = intSubj = NULL;
	size_t intBytes = (size_t)clipSize * subjSize * sizeof(vertex);
	// Short-circuit keeps the original allocation order and stops at the
	// first failure; failed cudaMalloc leaves its pointer NULL.
	bool result =
		cudaMalloc((void**)&clip, clipSize * sizeof(vertex)) == cudaSuccess &&
		cudaMalloc((void**)&subj, subjSize * sizeof(vertex)) == cudaSuccess &&
		cudaMalloc((void**)&intSubj, intBytes) == cudaSuccess &&
		cudaMalloc((void**)&intClip, intBytes) == cudaSuccess &&
		cudaMalloc((void**)&size, 2 * sizeof(int)) == cudaSuccess;
	if( !result ) {
		// Roll back whatever did get allocated.
		cudaFree(clip);
		cudaFree(subj);
		cudaFree(intSubj);
		cudaFree(intClip);
		cudaFree(size);
		size = NULL;
		clip = subj = intClip = intSubj = NULL;
	}
	return result;
}
// Releases the four device-side polygon buffers and NULLs the caller's
// pointers.  Always reports success (same contract as before); passing an
// already-NULL pointer is harmless since cudaFree(NULL) is a no-op.
bool FreeDevMem(vertex **clip, vertex **subj, vertex **intClip, vertex **intSubj)
{
	vertex **buffers[] = { clip, subj, intClip, intSubj };
	for( int i = 0; i < 4; i++ ) {
		cudaFree(*buffers[i]);
		*buffers[i] = NULL;
	}
	return true;
}
// Allocate host memory for the two polygon
// |clip| = n*m |subj| = n*m
// Allocates the two host-side scratch polygons, each clipSize*subjSize
// vertices.  On failure both outputs are NULL and nothing leaks --
// previously *clip leaked when the *subj allocation failed, and *subj was
// left uninitialized when *clip failed.  Returns true on success.
bool AllocateIntBuffers(vertex **clip, vertex **subj, int clipSize, int subjSize)
{
	size_t bytes = (size_t)clipSize * subjSize * sizeof(vertex);
	*clip = (vertex*)malloc(bytes);
	*subj = (vertex*)malloc(bytes);
	if( *clip == NULL || *subj == NULL ) {
		// free(NULL) is a no-op, so this is safe whichever one failed.
		free(*clip);
		free(*subj);
		*clip = NULL;
		*subj = NULL;
		return false;
	}
	return true;
}
// Thin cudaMemcpy wrapper that collapses the status code to a bool
// (true == cudaSuccess).  'size' is a byte count, 'dir' the copy kind.
bool CopyData(vertex *dest, vertex *src, int size, enum cudaMemcpyKind dir)
{
	return cudaMemcpy(dest, src, size, dir) == cudaSuccess;
}
//
// Adds a vertex to the specified polygon by reallocating the
// buffer. May want to change this to work with chunks of memory
// rather than a single vertex at a time.
//
// Appends *vert to 'poly', growing the buffer by one vertex via realloc.
// On allocation failure the original buffer and 'size' are left untouched
// (realloc keeps the old block valid) and false is returned.
bool AddVertex(vertex *&poly, int& size, vertex *vert)
{
	vertex *grown = (vertex*)realloc(poly, (size + 1) * sizeof(vertex));
	if( grown == NULL )
		return false;
	poly = grown;
	poly[size++] = *vert;
	return true;
}
//
// Builds the intersection polygon by tracing the subject
// polygon from the first intersection point. When the next
// vertex is not internal we jump to the corresponding vertex
// in the clip polygon and continue form there. This pattern is
// continued (back and forth between clip & subj for internal
// vertices) until we reach the starting vertex.
//
//
// Builds the intersection polygon by tracing the subject polygon from its
// first internal vertex, hopping to the linked vertex in the clip polygon
// (via linkTag) whenever the next subject vertex is not internal, and back
// again, until the starting vertex is reached.  The result is appended to
// intPoly (grown with realloc through AddVertex).
// Fix: removed leftover debug printf() dumps that polluted stdout on every
// traversed vertex.  Returns false only when growing intPoly fails.
bool BuildIntPoly(vertex *&intPoly, int& intPolySize,
				  vertex *subj, int subjSize,
				  vertex *clip, int clipSize)
{
	bool result = true, inSubj = true;
	vertex *curVert = subj, *firstVert;
	// Find the first subject vertex lying inside the clip polygon.
	while( curVert->internal == false ) {
		curVert = &subj[curVert->next];
	}
	firstVert = curVert;
	if( AddVertex(intPoly, intPolySize, curVert) ) {
		do {
			if( inSubj ) {
				if( subj[curVert->next].internal ) {
					curVert = &subj[curVert->next];
				} else {
					// Leaving subj: continue from the linked clip vertex.
					curVert = &clip[clip[curVert->linkTag].next];
					inSubj = false;
				}
			} else {
				if( clip[curVert->next].internal ) {
					curVert = &clip[curVert->next];
				} else {
					// Leaving clip: continue from the linked subj vertex.
					curVert = &subj[subj[curVert->linkTag].next];
					inSubj = true;
				}
			}
			if( !AddVertex(intPoly, intPolySize, curVert) ) {
				result = false;
				break;
			}
		} while( !(curVert->x == firstVert->x &&
				curVert->y == firstVert->y) );
	} else {
		result = false;
	}
	return result;
}
//
// Builds the union polygon similar to the way the
// intersection polygon is built. Though instead of
// looking for internal vertices, we want external ones.
//
//
bool BuildUnionPoly(vertex *&unionPoly, int& unionPolySize,
					vertex *subj, int subjSize,
					vertex *clip, int clipSize)
{
	// Mirror of BuildIntPoly, but at each step it follows *non*-internal
	// neighbors and switches polygons when the next vertex IS internal.
	// Appends vertices to unionPoly via AddVertex; returns false only when
	// growing the buffer fails.
	bool result = true, inSubj = true;
	vertex *curVert = subj, *firstVert;
	// Find first intersection point in the subj poly
	// NOTE(review): this advances until internal == true (same seed as the
	// intersection builder) even though the union trace wants external
	// vertices -- verify the intended starting vertex.
	while( curVert->internal == false ) {
		curVert = &subj[curVert->next];
	}
	firstVert = curVert;
	if( AddVertex(unionPoly, unionPolySize, curVert) ) {
		do {
			if( inSubj ) {
				if( !subj[curVert->next].internal ) {
					curVert = &subj[curVert->next];
				} else {
					// Entering the clip polygon: pivot via the linkTag.
					curVert = &clip[clip[curVert->linkTag].next];
					inSubj = false;
				}
			} else {
				if( !clip[curVert->next].internal ) {
					curVert = &clip[curVert->next];
				} else {
					curVert = &subj[subj[curVert->linkTag].next];
					inSubj = true;
				}
			}
			if( !AddVertex(unionPoly, unionPolySize, curVert) ) {
				result = false;
				break;
			}
		} while( !(curVert->x == firstVert->x &&
				curVert->y == firstVert->y) );
	} else {
		result = false;
	}
	return result;
}
//
// Calculate the intersection polygon
//
//
// Calculates the intersection polygon of subj and clip on the GPU.
// On success returns non-zero and *intPoly / *intPolySize hold the result
// (grown via realloc; freed by the caller).
// Fixes vs. the previous revision:
//  - the device->host copy of intSubj was dead code behind an
//    "else if( result )" and never executed; it is now sequenced like the
//    other steps,
//  - removed the debug coordinate dumps and the unconditional exit(1)
//    that aborted the process before the polygon was ever built,
//  - the return status is no longer clobbered by FreeDevMem()'s
//    always-true result,
//  - the host scratch buffers and the device size buffer are released.
int gpuIntersect(VERTEX *subj, int subjSize, VERTEX *clip, int clipSize,
				 VERTEX **intPoly, int *intPolySize)
{
	int result;
	vertex *devSubj = NULL, *devClip = NULL, *devIntSubj = NULL, *devIntClip = NULL,
		*intClip = NULL, *intSubj = NULL;
	int *devSize = NULL, size[2];
	result = AllocateDevMem(devSize, devClip, devSubj, devIntClip, devIntSubj, clipSize, subjSize);
	if( result )
		result = AllocateIntBuffers(&intClip, &intSubj, clipSize, subjSize);
	// Upload both input polygons.
	if( result )
		result = CopyData(devSubj, subj, subjSize * sizeof(vertex), cudaMemcpyHostToDevice);
	if( result )
		result = CopyData(devClip, clip, clipSize * sizeof(vertex), cudaMemcpyHostToDevice);
	// Calculate the intersection points and fetch the two output sizes
	// (the blocking cudaMemcpy also synchronizes with the kernel).
	if( result ) {
		CalcIntersections<<<1, 128>>>(devClip, clipSize, devSubj, subjSize,
									  devIntClip, devIntSubj, devSize);
		if( cudaMemcpy(size, devSize, 2 * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess )
			result = 0;
	}
	// Download the augmented (original + intersection) polygons.
	if( result )
		result = CopyData(intSubj, devIntSubj, clipSize * subjSize * sizeof(vertex), cudaMemcpyDeviceToHost);
	if( result )
		result = CopyData(intClip, devIntClip, clipSize * subjSize * sizeof(vertex), cudaMemcpyDeviceToHost);
	if( result )
		result = BuildIntPoly(*intPoly, *intPolySize, intSubj, size[SUBJ], intClip, size[CLIP]);
	// Cleanup never affects the returned status.
	FreeDevMem(&devClip, &devSubj, &devIntClip, &devIntSubj);
	cudaFree(devSize);
	free(intClip);
	free(intSubj);
	return result;
}
//
// Calculate the union polygon
//
//
// Calculates the union polygon of subj and clip on the GPU.
// On success returns non-zero and *intPoly / *intPolySize hold the result
// (grown via realloc; freed by the caller).
// Fixes vs. the previous revision:
//  - the device->host copy of intSubj was dead code behind an
//    "else if( result )" and never executed,
//  - the return status is no longer clobbered by FreeDevMem()'s
//    always-true result,
//  - the host scratch buffers and the device size buffer are released.
int gpuUnion(VERTEX *subj, int subjSize, VERTEX *clip, int clipSize,
			 VERTEX **intPoly, int *intPolySize)
{
	int result;
	vertex *devSubj = NULL, *devClip = NULL, *devIntSubj = NULL, *devIntClip = NULL,
		*intClip = NULL, *intSubj = NULL;
	int *devSize = NULL, size[2];
	result = AllocateDevMem(devSize, devClip, devSubj, devIntClip, devIntSubj, clipSize, subjSize);
	if( result )
		result = AllocateIntBuffers(&intClip, &intSubj, clipSize, subjSize);
	// Upload both input polygons.
	if( result )
		result = CopyData(devSubj, subj, subjSize * sizeof(vertex), cudaMemcpyHostToDevice);
	if( result )
		result = CopyData(devClip, clip, clipSize * sizeof(vertex), cudaMemcpyHostToDevice);
	// Calculate the intersection points and fetch the two output sizes
	// (the blocking cudaMemcpy also synchronizes with the kernel).
	if( result ) {
		CalcIntersections<<<1, 128>>>(devClip, clipSize, devSubj, subjSize,
									  devIntClip, devIntSubj, devSize);
		if( cudaMemcpy(size, devSize, 2 * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess )
			result = 0;
	}
	// Download the augmented (original + intersection) polygons.
	if( result )
		result = CopyData(intSubj, devIntSubj, clipSize * subjSize * sizeof(vertex), cudaMemcpyDeviceToHost);
	if( result )
		result = CopyData(intClip, devIntClip, clipSize * subjSize * sizeof(vertex), cudaMemcpyDeviceToHost);
	if( result )
		result = BuildUnionPoly(*intPoly, *intPolySize, intSubj, size[SUBJ], intClip, size[CLIP]);
	// Cleanup never affects the returned status.
	FreeDevMem(&devClip, &devSubj, &devIntClip, &devIntSubj);
	cudaFree(devSize);
	free(intClip);
	free(intSubj);
	return result;
}
|
d68ffccab17e601de3e3e877f2e05cb7af364f32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void get_sort_keys( unsigned char* d_hashes, uint32_t *d_sort_keys, uint32_t *d_sort_indices, uint32_t r, uint32_t num_keys)
{
uint32_t index = blockDim.x * blockIdx.x + threadIdx.x;
if(index < num_keys) {
d_sort_indices[index] = index;
unsigned char* input = d_hashes+index*30*sizeof(unsigned char)+3*r;
uint32_t sort_key = input[0] << 16 | input[1] << 8 | input[2];
d_sort_keys[index] = sort_key;
}
} | d68ffccab17e601de3e3e877f2e05cb7af364f32.cu | #include "includes.h"
__global__ void get_sort_keys( unsigned char* d_hashes, uint32_t *d_sort_keys, uint32_t *d_sort_indices, uint32_t r, uint32_t num_keys)
{
uint32_t index = blockDim.x * blockIdx.x + threadIdx.x;
if(index < num_keys) {
d_sort_indices[index] = index;
unsigned char* input = d_hashes+index*30*sizeof(unsigned char)+3*r;
uint32_t sort_key = input[0] << 16 | input[1] << 8 | input[2];
d_sort_keys[index] = sort_key;
}
} |
157d7c5a605c85bbcd812070aa8f713b0b22ba01.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "binomialOptions.h"
#include "realtype.h"
//Preprocessed input option data
typedef struct
{
real S;
real X;
real vDt;
real puByDf;
real pdByDf;
} __TOptionData;
// Overloaded shortcut functions for different precision modes
#ifndef DOUBLE_PRECISION
__device__ inline float expiryCallValue(float S, float X, float vDt, int i)
{
  // European call payoff at expiry for binomial-tree node i:
  // max(S * exp(vDt * (2i - NUM_STEPS)) - X, 0).
  float payoff = S * __expf(vDt * (2.0f * i - NUM_STEPS)) - X;
  if (payoff > 0.0F)
    return payoff;
  return 0.0F;
}
#else
__device__ inline double expiryCallValue(double S, double X, double vDt, int i)
{
  // Double-precision variant: max(S * exp(vDt * (2i - NUM_STEPS)) - X, 0).
  double payoff = S * exp(vDt * (2.0 * i - NUM_STEPS)) - X;
  if (payoff > 0.0)
    return payoff;
  return 0.0;
}
#endif
// GPU kernel
#define THREADBLOCK_SIZE 128
#define ELEMS_PER_THREAD (NUM_STEPS/THREADBLOCK_SIZE)
#if NUM_STEPS % THREADBLOCK_SIZE
#error Bad constants
#endif
// One block per option; THREADBLOCK_SIZE threads each own ELEMS_PER_THREAD
// consecutive tree nodes and perform backward induction over NUM_STEPS
// levels.  call_exchange passes each thread's boundary value to its left
// neighbor between iterations (hence the two barriers per level).
__global__ void binomialOptionsKernel(const __TOptionData *__restrict d_OptionData,
                                      real *__restrict d_CallValue)
{
  __shared__ real call_exchange[THREADBLOCK_SIZE + 1];
  const int tid = threadIdx.x;
  const real S = d_OptionData[blockIdx.x].S;
  const real X = d_OptionData[blockIdx.x].X;
  const real vDt = d_OptionData[blockIdx.x].vDt;
  const real puByDf = d_OptionData[blockIdx.x].puByDf;
  const real pdByDf = d_OptionData[blockIdx.x].pdByDf;
  real call[ELEMS_PER_THREAD + 1];
  // Seed the leaves with the expiry payoffs of this thread's nodes.
#pragma unroll
  for(int i = 0; i < ELEMS_PER_THREAD; ++i)
    call[i] = expiryCallValue(S, X, vDt, tid * ELEMS_PER_THREAD + i);
  if (tid == 0)
    call_exchange[THREADBLOCK_SIZE] = expiryCallValue(S, X, vDt, NUM_STEPS);
  // Levels at or below final_it no longer influence this thread's call[0],
  // so the update can be skipped (the barriers still run for everyone).
  int final_it = max(0, tid * ELEMS_PER_THREAD - 1);
#pragma unroll 16
  for(int i = NUM_STEPS; i > 0; --i)
  {
    // Publish our lowest node, then read the right neighbor's.
    call_exchange[tid] = call[0];
    __syncthreads();
    call[ELEMS_PER_THREAD] = call_exchange[tid + 1];
    __syncthreads();
    if (i > final_it)
    {
#pragma unroll
      for(int j = 0; j < ELEMS_PER_THREAD; ++j)
        call[j] = puByDf * call[j + 1] + pdByDf * call[j];
    }
  }
  // After full induction, thread 0's call[0] is the option value at t = 0.
  if (tid == 0)
  {
    d_CallValue[blockIdx.x] = call[0];
  }
}
// Host-side interface to GPU binomialOptions
// Host driver: precomputes per-option tree parameters, uploads them,
// launches the kernel numIterations times (one block per option) for
// timing, and copies the resulting call values back into callValue.
// NOTE(review): HIP API return codes are unchecked throughout -- failures
// would surface only as garbage results; consider adding error checks.
extern "C" void binomialOptionsGPU(
    real *callValue,
    TOptionData *optionData,
    int optN,
    int numIterations
)
{
  __TOptionData h_OptionData[MAX_OPTIONS];
  for (int i = 0; i < optN; i++)
  {
    const real T = optionData[i].T;
    const real R = optionData[i].R;
    const real V = optionData[i].V;
    const real dt = T / (real)NUM_STEPS;
    const real vDt = V * sqrt(dt);
    const real rDt = R * dt;
    //Per-step interest and discount factors
    const real If = exp(rDt);
    const real Df = exp(-rDt);
    //Values and pseudoprobabilities of upward and downward moves
    const real u = exp(vDt);
    const real d = exp(-vDt);
    const real pu = (If - d) / (u - d);
    const real pd = (real)1.0 - pu;
    const real puByDf = pu * Df;
    const real pdByDf = pd * Df;
    h_OptionData[i].S = (real)optionData[i].S;
    h_OptionData[i].X = (real)optionData[i].X;
    h_OptionData[i].vDt = (real)vDt;
    h_OptionData[i].puByDf = (real)puByDf;
    h_OptionData[i].pdByDf = (real)pdByDf;
  }
  __TOptionData *d_OptionData;
  hipMalloc ((void**)&d_OptionData, sizeof(__TOptionData) * MAX_OPTIONS);
  hipMemcpy(d_OptionData, h_OptionData, optN * sizeof(__TOptionData), hipMemcpyHostToDevice);
  real *d_CallValue;
  hipMalloc ((void**)&d_CallValue, sizeof(real) * MAX_OPTIONS);
  // Synchronize so the timed region measures only the kernel launches.
  hipDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < numIterations; i++)
    hipLaunchKernelGGL(( binomialOptionsKernel), dim3(optN), dim3(THREADBLOCK_SIZE), 0, 0, d_OptionData, d_CallValue);
  hipDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average kernel execution time : %f (us)\n", time * 1e-3f / numIterations);
  hipMemcpy(callValue, d_CallValue, optN *sizeof(real), hipMemcpyDeviceToHost);
  hipFree(d_OptionData);
  hipFree(d_CallValue);
}
| 157d7c5a605c85bbcd812070aa8f713b0b22ba01.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <cuda.h>
#include "binomialOptions.h"
#include "realtype.h"
//Preprocessed input option data
typedef struct
{
real S;
real X;
real vDt;
real puByDf;
real pdByDf;
} __TOptionData;
// Overloaded shortcut functions for different precision modes
#ifndef DOUBLE_PRECISION
__device__ inline float expiryCallValue(float S, float X, float vDt, int i)
{
  // European call payoff at expiry for binomial-tree node i:
  // max(S * exp(vDt * (2i - NUM_STEPS)) - X, 0).
  float payoff = S * __expf(vDt * (2.0f * i - NUM_STEPS)) - X;
  if (payoff > 0.0F)
    return payoff;
  return 0.0F;
}
#else
__device__ inline double expiryCallValue(double S, double X, double vDt, int i)
{
  // Double-precision variant: max(S * exp(vDt * (2i - NUM_STEPS)) - X, 0).
  double payoff = S * exp(vDt * (2.0 * i - NUM_STEPS)) - X;
  if (payoff > 0.0)
    return payoff;
  return 0.0;
}
#endif
// GPU kernel
#define THREADBLOCK_SIZE 128
#define ELEMS_PER_THREAD (NUM_STEPS/THREADBLOCK_SIZE)
#if NUM_STEPS % THREADBLOCK_SIZE
#error Bad constants
#endif
// One block per option; THREADBLOCK_SIZE threads each own ELEMS_PER_THREAD
// consecutive tree nodes and perform backward induction over NUM_STEPS
// levels.  call_exchange passes each thread's boundary value to its left
// neighbor between iterations (hence the two barriers per level).
__global__ void binomialOptionsKernel(const __TOptionData *__restrict d_OptionData,
                                      real *__restrict d_CallValue)
{
  __shared__ real call_exchange[THREADBLOCK_SIZE + 1];
  const int tid = threadIdx.x;
  const real S = d_OptionData[blockIdx.x].S;
  const real X = d_OptionData[blockIdx.x].X;
  const real vDt = d_OptionData[blockIdx.x].vDt;
  const real puByDf = d_OptionData[blockIdx.x].puByDf;
  const real pdByDf = d_OptionData[blockIdx.x].pdByDf;
  real call[ELEMS_PER_THREAD + 1];
  // Seed the leaves with the expiry payoffs of this thread's nodes.
#pragma unroll
  for(int i = 0; i < ELEMS_PER_THREAD; ++i)
    call[i] = expiryCallValue(S, X, vDt, tid * ELEMS_PER_THREAD + i);
  if (tid == 0)
    call_exchange[THREADBLOCK_SIZE] = expiryCallValue(S, X, vDt, NUM_STEPS);
  // Levels at or below final_it no longer influence this thread's call[0],
  // so the update can be skipped (the barriers still run for everyone).
  int final_it = max(0, tid * ELEMS_PER_THREAD - 1);
#pragma unroll 16
  for(int i = NUM_STEPS; i > 0; --i)
  {
    // Publish our lowest node, then read the right neighbor's.
    call_exchange[tid] = call[0];
    __syncthreads();
    call[ELEMS_PER_THREAD] = call_exchange[tid + 1];
    __syncthreads();
    if (i > final_it)
    {
#pragma unroll
      for(int j = 0; j < ELEMS_PER_THREAD; ++j)
        call[j] = puByDf * call[j + 1] + pdByDf * call[j];
    }
  }
  // After full induction, thread 0's call[0] is the option value at t = 0.
  if (tid == 0)
  {
    d_CallValue[blockIdx.x] = call[0];
  }
}
// Host-side interface to GPU binomialOptions
// Host driver: precomputes per-option tree parameters, uploads them,
// launches the kernel numIterations times (one block per option) for
// timing, and copies the resulting call values back into callValue.
// NOTE(review): CUDA API return codes are unchecked throughout -- failures
// would surface only as garbage results; consider adding error checks.
extern "C" void binomialOptionsGPU(
    real *callValue,
    TOptionData *optionData,
    int optN,
    int numIterations
)
{
  __TOptionData h_OptionData[MAX_OPTIONS];
  for (int i = 0; i < optN; i++)
  {
    const real T = optionData[i].T;
    const real R = optionData[i].R;
    const real V = optionData[i].V;
    const real dt = T / (real)NUM_STEPS;
    const real vDt = V * sqrt(dt);
    const real rDt = R * dt;
    //Per-step interest and discount factors
    const real If = exp(rDt);
    const real Df = exp(-rDt);
    //Values and pseudoprobabilities of upward and downward moves
    const real u = exp(vDt);
    const real d = exp(-vDt);
    const real pu = (If - d) / (u - d);
    const real pd = (real)1.0 - pu;
    const real puByDf = pu * Df;
    const real pdByDf = pd * Df;
    h_OptionData[i].S = (real)optionData[i].S;
    h_OptionData[i].X = (real)optionData[i].X;
    h_OptionData[i].vDt = (real)vDt;
    h_OptionData[i].puByDf = (real)puByDf;
    h_OptionData[i].pdByDf = (real)pdByDf;
  }
  __TOptionData *d_OptionData;
  cudaMalloc ((void**)&d_OptionData, sizeof(__TOptionData) * MAX_OPTIONS);
  cudaMemcpy(d_OptionData, h_OptionData, optN * sizeof(__TOptionData), cudaMemcpyHostToDevice);
  real *d_CallValue;
  cudaMalloc ((void**)&d_CallValue, sizeof(real) * MAX_OPTIONS);
  // Synchronize so the timed region measures only the kernel launches.
  cudaDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < numIterations; i++)
    binomialOptionsKernel<<<optN, THREADBLOCK_SIZE>>>(d_OptionData, d_CallValue);
  cudaDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average kernel execution time : %f (us)\n", time * 1e-3f / numIterations);
  cudaMemcpy(callValue, d_CallValue, optN *sizeof(real), cudaMemcpyDeviceToHost);
  cudaFree(d_OptionData);
  cudaFree(d_CallValue);
}
|
6730b696ad6cec69431f97586fea92e2561da247.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <bits/stdc++.h>
#include "wb.h"
using namespace std;
#define BLUR_SIZE 5
#define CHANNELS 3
#define CEIL(a, b) ((a-1)/b +1)
// Box blur over a (2*BLUR_SIZE+1)^2 window, computed per channel of an
// interleaved row-major image.  One thread per pixel; x indexes rows
// (height), y indexes columns (width), matching the bounds check below.
__global__ void imageBlur(float *inputImageData, float *outputImageData, int height, int width) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard the grid tail: block counts rarely divide the image exactly.
    if(x>=height || y>=width)
        return;
    for (int channel=0; channel<CHANNELS; channel++) {
        float pixVal = 0;
        int pixels = 0;  // number of in-bounds taps actually averaged
        for (int blurrow = -BLUR_SIZE; blurrow <= BLUR_SIZE; ++blurrow) {
            for (int blurcol = -BLUR_SIZE; blurcol <= BLUR_SIZE; ++blurcol) {
                int currow = x + blurrow;
                int curcol = y + blurcol;
                // Clip the blur window at the image borders.
                if (currow > -1 && currow < height && curcol > -1 && curcol < width) {
                    pixVal += inputImageData[CHANNELS*(currow * width + curcol) + channel];
                    pixels++;
                }
            }
        }
        // Average of the in-bounds neighborhood for this channel.
        outputImageData[CHANNELS*(x * width + y) + channel] = (pixVal / pixels);
    }
}
// Loads an image via the wb harness, blurs it on the GPU, and checks the
// result against the expected solution.  Grid x maps to rows, y to columns
// (matching the kernel's x>=height / y>=width guard).
// NOTE(review): HIP API return codes are unchecked -- consider wrapping.
int main(int argc, char *argv[]) {
    int imageWidth;
    int imageHeight;
    char *inputImageFile;
    wbImage_t inputImage;
    wbImage_t outputImage;
    float *hostInputImageData;
    float *hostOutputImageData;
    float *deviceInputImageData;
    float *deviceOutputImageData;
    /* parse the input arguments */
    wbArg_t args = wbArg_read(argc, argv);
    inputImageFile = wbArg_getInputFile(args, 0);
    inputImage = wbImport(inputImageFile);
    imageWidth = wbImage_getWidth(inputImage);
    imageHeight = wbImage_getHeight(inputImage);
    outputImage = wbImage_new(imageWidth, imageHeight, CHANNELS);
    hostInputImageData = wbImage_getData(inputImage);
    hostOutputImageData = wbImage_getData(outputImage);
    wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
    // Allocate data
    wbTime_start(GPU, "Doing GPU memory allocation");
    hipMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * CHANNELS * sizeof(float));
    hipMalloc((void **)&deviceOutputImageData, imageWidth * imageHeight * CHANNELS * sizeof(float));
    wbTime_stop(GPU, "Doing GPU memory allocation");
    // Copy data
    wbTime_start(Copy, "Copying data to the GPU");
    hipMemcpy(deviceInputImageData, hostInputImageData,
            imageWidth * imageHeight * CHANNELS * sizeof(float), hipMemcpyHostToDevice);
    wbTime_stop(Copy, "Copying data to the GPU");
    wbTime_start(Compute, "Doing the computation on the GPU");
    // Kernel call: 32x32 threads per block, ceil-div grid over rows x cols.
    dim3 block(32, 32, 1);
    dim3 grid(CEIL(imageHeight, 32), CEIL(imageWidth, 32), 1);
    hipLaunchKernelGGL(( imageBlur) , dim3(grid), dim3(block), 0, 0, deviceInputImageData, deviceOutputImageData,
                                imageHeight, imageWidth);
    wbTime_stop(Compute, "Doing the computation on the GPU");
    // Copy data back into the output wbImage's pixel buffer.
    wbTime_start(Copy, "Copying data from the GPU");
    hipMemcpy(hostOutputImageData, deviceOutputImageData,
            imageWidth * imageHeight * CHANNELS * sizeof(float), hipMemcpyDeviceToHost);
    wbTime_stop(Copy, "Copying data from the GPU");
    wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
    // Check solution
    wbSolution(args, outputImage);
    hipFree(deviceInputImageData);
    hipFree(deviceOutputImageData);
    wbImage_delete(outputImage);
    wbImage_delete(inputImage);
}
| 6730b696ad6cec69431f97586fea92e2561da247.cu | #include <bits/stdc++.h>
#include "wb.h"
using namespace std;
#define BLUR_SIZE 5
#define CHANNELS 3
#define CEIL(a, b) ((a-1)/b +1)
// Box blur over a (2*BLUR_SIZE+1)^2 window, computed per channel of an
// interleaved row-major image.  One thread per pixel; x indexes rows
// (height), y indexes columns (width), matching the bounds check below.
__global__ void imageBlur(float *inputImageData, float *outputImageData, int height, int width) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard the grid tail: block counts rarely divide the image exactly.
    if(x>=height || y>=width)
        return;
    for (int channel=0; channel<CHANNELS; channel++) {
        float pixVal = 0;
        int pixels = 0;  // number of in-bounds taps actually averaged
        for (int blurrow = -BLUR_SIZE; blurrow <= BLUR_SIZE; ++blurrow) {
            for (int blurcol = -BLUR_SIZE; blurcol <= BLUR_SIZE; ++blurcol) {
                int currow = x + blurrow;
                int curcol = y + blurcol;
                // Clip the blur window at the image borders.
                if (currow > -1 && currow < height && curcol > -1 && curcol < width) {
                    pixVal += inputImageData[CHANNELS*(currow * width + curcol) + channel];
                    pixels++;
                }
            }
        }
        // Average of the in-bounds neighborhood for this channel.
        outputImageData[CHANNELS*(x * width + y) + channel] = (pixVal / pixels);
    }
}
// Loads an image via the wb harness, blurs it on the GPU, and checks the
// result against the expected solution.  Grid x maps to rows, y to columns
// (matching the kernel's x>=height / y>=width guard).
// NOTE(review): CUDA API return codes are unchecked -- consider wrapping.
int main(int argc, char *argv[]) {
    int imageWidth;
    int imageHeight;
    char *inputImageFile;
    wbImage_t inputImage;
    wbImage_t outputImage;
    float *hostInputImageData;
    float *hostOutputImageData;
    float *deviceInputImageData;
    float *deviceOutputImageData;
    /* parse the input arguments */
    wbArg_t args = wbArg_read(argc, argv);
    inputImageFile = wbArg_getInputFile(args, 0);
    inputImage = wbImport(inputImageFile);
    imageWidth = wbImage_getWidth(inputImage);
    imageHeight = wbImage_getHeight(inputImage);
    outputImage = wbImage_new(imageWidth, imageHeight, CHANNELS);
    hostInputImageData = wbImage_getData(inputImage);
    hostOutputImageData = wbImage_getData(outputImage);
    wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
    // Allocate data
    wbTime_start(GPU, "Doing GPU memory allocation");
    cudaMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * CHANNELS * sizeof(float));
    cudaMalloc((void **)&deviceOutputImageData, imageWidth * imageHeight * CHANNELS * sizeof(float));
    wbTime_stop(GPU, "Doing GPU memory allocation");
    // Copy data
    wbTime_start(Copy, "Copying data to the GPU");
    cudaMemcpy(deviceInputImageData, hostInputImageData,
            imageWidth * imageHeight * CHANNELS * sizeof(float), cudaMemcpyHostToDevice);
    wbTime_stop(Copy, "Copying data to the GPU");
    wbTime_start(Compute, "Doing the computation on the GPU");
    // Kernel call: 32x32 threads per block, ceil-div grid over rows x cols.
    dim3 block(32, 32, 1);
    dim3 grid(CEIL(imageHeight, 32), CEIL(imageWidth, 32), 1);
    imageBlur <<<grid, block>>> (deviceInputImageData, deviceOutputImageData,
                                imageHeight, imageWidth);
    wbTime_stop(Compute, "Doing the computation on the GPU");
    // Copy data back into the output wbImage's pixel buffer.
    wbTime_start(Copy, "Copying data from the GPU");
    cudaMemcpy(hostOutputImageData, deviceOutputImageData,
            imageWidth * imageHeight * CHANNELS * sizeof(float), cudaMemcpyDeviceToHost);
    wbTime_stop(Copy, "Copying data from the GPU");
    wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
    // Check solution
    wbSolution(args, outputImage);
    cudaFree(deviceInputImageData);
    cudaFree(deviceOutputImageData);
    wbImage_delete(outputImage);
    wbImage_delete(inputImage);
}
|
776ea438d3b6be9b92ca874b0ea89696cdc81c91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
#include <math.h>
#include <assert.h>
#include <xmmintrin.h>
#include <immintrin.h>
#include "cudnn.h"
#include "util.h"
#include "Kernel256_winograd.h"
#define cudaCheckError() { \
hipError_t e=hipGetLastError(); \
if(e!=hipSuccess) { \
printf("Cuda failure %s:%d:'%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
#define MY_KERNEL 0
#define d(input, i, j, Inz) ( input[Inz + i*768 + (j<<7)] )
// Winograd F(4x4, 3x3) input transform: computes B^T * d * B for each 6x6
// input tile and scatters the 36 transformed values to pOutputs.
// blockIdx.x/y select the 4-strided tile origin, blockIdx.z (Part) selects
// a 128-channel half of the 256 channels.  Shared-memory layout is
// [row*768 + col*128 + channel] via the d() macro above.
// NOTE(review): indexing implies blockDim = (128, 6) -- confirm against
// the host launcher.
__global__ void kernel_256_winograd_BtdB(float *pInputs, float *pOutputs) {
	int Inx = blockIdx.x<<2, Iny0 = blockIdx.y<<2, Part = blockIdx.z, Iny1 = threadIdx.y, Inz = threadIdx.x;
	int Iny = Iny0+Iny1, stride_r = 4096, stride_c = 256; // 4096 = 16*256
	int c_glb_start = Inx*stride_r + Iny*stride_c + Inz + (Part<<7), c_input = Iny1*128 + Inz;
	extern __shared__ float input[];
	int stride_768[6] = {0, 768, 1536, 2304, 3072, 3840}; // 768 = 6*128
	// Stage the 6x6 tile (this thread loads one column of 6 rows).
	for (int i = 0; i < 6; i++) {
		input[c_input + stride_768[i]] = pInputs[c_glb_start + i*stride_r];
	}
	__syncthreads();
	// Row transform: each Iny1 computes one row of B^T * d.
	float BTd[6];
	switch(Iny1) {
	case 0:
		for (int j = 0; j < 6; j++) {
			BTd[j] = d(input, 0, j, Inz)*4 - d(input, 2, j, Inz)*5 + d(input, 4, j, Inz);
		}
		break;
	case 1:
		for (int j = 0; j < 6; j++) {
			BTd[j] = -d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 + d(input, 3, j, Inz) + d(input, 4, j, Inz);
		}
		break;
	case 2:
		for (int j = 0; j < 6; j++) {
			BTd[j] = d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 - d(input, 3, j, Inz) + d(input, 4, j, Inz);
		}
		break;
	case 3:
		for (int j = 0; j < 6; j++) {
			BTd[j] = -d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) + d(input, 3, j, Inz)*2 + d(input, 4, j, Inz);
		}
		break;
	case 4:
		for (int j = 0; j < 6; j++) {
			BTd[j] = d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) - d(input, 3, j, Inz)*2 + d(input, 4, j, Inz);
		}
		break;
	case 5:
		for (int j = 0; j < 6; j++) {
			BTd[j] = d(input, 1, j, Inz)*4 - d(input, 3, j, Inz)*5 + d(input, 5, j, Inz);
		}
		break;
	}
	__syncthreads();
	// Write the intermediate B^T*d back to shared memory in place.
	int tmp_offset = Iny1*768+Inz;
	for (int i = 0; i < 6; i++) {
		input[tmp_offset + i*128] = BTd[i];
	}
	__syncthreads();
	// Column transform: multiply the intermediate by B on the right.
	float BTdB[6];
	switch(Iny1) {
	case 0:
		for (int i = 0; i < 6; i++) {
			BTdB[i] = 4*d(input, i, 0, Inz) - 5*d(input, i, 2, Inz) + d(input, i, 4, Inz);
		}
		break;
	case 1:
		for (int i = 0; i < 6; i++) {
			BTdB[i] = -4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) + d(input, i, 3, Inz) + d(input, i, 4, Inz);
		}
		break;
	case 2:
		for (int i = 0; i < 6; i++) {
			BTdB[i] = 4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) - d(input, i, 3, Inz) + d(input, i, 4, Inz);
		}
		break;
	case 3:
		for (int i = 0; i < 6; i++) {
			BTdB[i] = -2*d(input, i, 1, Inz) - d(input, i, 2, Inz) + 2*d(input, i, 3, Inz) + d(input, i, 4, Inz);
		}
		break;
	case 4:
		for (int i = 0; i < 6; i++) {
			BTdB[i] = 2*d(input, i, 1, Inz) - d(input, i, 2, Inz) - 2*d(input, i, 3, Inz) + d(input, i, 4, Inz);
		}
		break;
	case 5:
		for (int i = 0; i < 6; i++) {
			BTdB[i] = 4*d(input, i, 1, Inz) - 5*d(input, i, 3, Inz) + d(input, i, 5, Inz);
		}
		break;
	}
	__syncthreads();
	// Scatter the 36 transformed values, grouped by (row, col) plane.
	for (int i = 0; i < 6; i++) {
		pOutputs[(Iny1 + i*6)*4096 + (blockIdx.x*4+blockIdx.y)*256 + Inz + (Part<<7)] = BTdB[i];
	}
}
__global__ void kernel_256_winograd_AtIA(float *pInputs, float *pBiases, float *pScales, float *pOutputs) {
int Tilex = blockIdx.x, Tiley = blockIdx.y, Iny = threadIdx.y, kz = blockIdx.z, Inx = threadIdx.x;
int c_input = Inx*6 + Iny;
__shared__ float bias, scale;
extern __shared__ float input[];
input[c_input] = pInputs[c_input*16*256 + (Tilex*4+Tiley)*256 + kz];
bias = pBiases[kz];
scale = pScales[kz];
__syncthreads();
float tmp = 0;
switch(Inx) {
case 0:
tmp = input[Iny] + input[6+Iny] + input[12+Iny] + input[18+Iny] + input[24+Iny];
break;
case 1:
tmp = input[6+Iny] - input[12+Iny] + 2*input[18+Iny] - 2*input[24+Iny];
break;
case 2:
tmp = input[6+Iny] + input[12+Iny] + 4*input[18+Iny] + 4*input[24+Iny];
break;
case 3:
tmp = input[6+Iny] - input[12+Iny] + 8*input[18+Iny] - 8*input[24+Iny] + input[30+Iny];
break;
}
__syncthreads();
input[c_input] = tmp;
__syncthreads();
if (Inx > 3 || (Tilex == 3 && Inx > 1)) return;
int x;
float o;
switch(Iny) {
case 0:
x = Inx*6;
o = scale*(input[x]+input[x+1]+input[x+2]+input[x+3]+input[x+4]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+1)*256 + kz] = o > 0 ? o : 0;
break;
case 1:
x = Inx*6;
o = scale*(input[x+1] - input[x+2] + 2*input[x+3] - 2*input[x+4]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+2)*256 + kz] = o > 0 ? o : 0;
break;
case 2:
if (Tiley == 3) break;
x = Inx*6;
o = scale*(input[x+1] + input[x+2] + 4*input[x+3] + 4*input[x+4]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+3)*256 + kz] = o > 0 ? o : 0;
break;
case 3:
if (Tiley == 3) break;
x = Inx*6;
o = scale*(input[x+1] - input[x+2] + 8*input[x+3] - 8*input[x+4] + input[x+5]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+4)*256 + kz] = o > 0 ? o : 0;
break;
}
}
__global__ void kernel_256_OuterProduct_256(float *A, float *B, float *C) {
int Tile = blockIdx.x, Part = blockIdx.y, tX = threadIdx.x, tY = threadIdx.y;
int c_input = tY*256 + tX, c_kernel = c_input, T_offset = (Tile<<12) + (Part<<11) + c_input, B_offset = (Tile<<16) + c_kernel;
extern __shared__ float input[];
float *kernel = input + 2048, *out = kernel + 8192;
int B_stride[32] = {0, 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, 4096, 4352, 4608, 4864, 5120, 5376, 5632, 5888, 6144, 6400, 6656, 6912, 7168, 7424, 7680, 7936};
out[c_input] = 0.0f;
out[c_input+1024] = 0;
input[c_input] = A[T_offset];
input[c_input+1024] = A[T_offset+1024];
for (int k = 0; k < 8; k++) {
int B_start = B_offset + (k<<13); // 32*64
kernel[c_kernel] = B[B_start], kernel[c_kernel+1024] = B[B_start+1024];
kernel[c_kernel+2048] = B[B_start+2048], kernel[c_kernel+3072] = B[B_start+3072];
kernel[c_kernel+4096] = B[B_start+4096], kernel[c_kernel+5120] = B[B_start+5120];
kernel[c_kernel+6144] = B[B_start+6144], kernel[c_kernel+7168] = B[B_start+7168];
__syncthreads();
float sum = 0, sum1 = 0;
int y_tmp = (tY<<8)+(k<<5), y_tmp1 = y_tmp+1024;
for (int j = 0; j < 32; j++) {
sum += input[y_tmp + j] * kernel[tX + B_stride[j]];
sum1 += input[y_tmp1 + j] * kernel[tX + B_stride[j]];
}
out[c_input] += sum;
out[c_input+1024] += sum1;
__syncthreads();
}
C[T_offset] = out[c_input];
C[T_offset+1024] = out[c_input+1024];
}
int kernel_256() {
float *input_ = get_parameter(inputName256, 16*16*256);
float *bias = get_parameter(biasName256, 256);
float *input, *output, *l_weights, *l_bias;
uint64_t nT1 = 0, nT2 = 0, nT1_cudnn = 0, nT2_cudnn = 0;
hipError_t s;
/////////////////////////////////
// My Kernel
/////////////////////////////////
float *kernel = get_parameter(weight_winograd_Name256, 36*256*256), *t_input, *ip;
int nInput = 16*16*256, nOutput = 16*16*256, nWeights = 36*256*256, nBias = 256, nTransInput = 16*6*6*256, nInnerProd = 16*6*6*256;
float *l_bnBias, *l_bnScale, *bnBias, *bnScale;
hipMalloc((void **) &input, nInput<<3);
hipMalloc((void **) &output, nOutput<<2);
hipMalloc((void **) &l_weights, nWeights<<2);
hipMalloc((void **) &l_bias, nBias<<2);
hipMalloc((void **) &t_input, nTransInput<<2);
hipMalloc((void **) &ip, nInnerProd<<2);
hipMemset((void *) input, 0, nInput<<3);
hipMemset((void *) output, 0, nOutput<<2);
hipMemset((void *) t_input, 0, nTransInput<<2);
hipMemset((void *) l_weights, 0, nWeights<<2);
hipMemset((void *) ip, 0, nInnerProd<<2);
hipMemcpy(input, input_, nInput<<2, hipMemcpyHostToDevice);
hipMemcpy(l_weights, kernel, nWeights<<2, hipMemcpyHostToDevice);
hipMemcpy(l_bias, bias, nBias<<2, hipMemcpyHostToDevice);
bnBias = get_parameter(bnBias_winograd_Name256, 256);
bnScale = get_parameter(bnScale_winograd_Name256, 256);
hipMalloc((void **) &l_bnBias, nBias<<2);
hipMalloc((void **) &l_bnScale, nBias<<2);
hipMemcpy(l_bnBias, bnBias, nBias<<2, hipMemcpyHostToDevice);
hipMemcpy(l_bnScale, bnScale, nBias<<2, hipMemcpyHostToDevice);
float tmp[nOutput];
nT1 = getTimeMicroseconds64();
hipLaunchKernelGGL(( kernel_256_winograd_BtdB) , dim3(dim3(4, 4, 2)), dim3(dim3(128, 6)), (6*6*128)<<2 , 0, 0, input, t_input);
hipLaunchKernelGGL(( kernel_256_OuterProduct_256), dim3(dim3(36, 2)), dim3(dim3(256, 4)), (8*256 + 32*256 + 8*256)<<2 , 0, 0, t_input, l_weights, ip);
hipLaunchKernelGGL(( kernel_256_winograd_AtIA) , dim3(dim3(4, 4, 256)), dim3(dim3(6, 6)), ((6*6)<<2), 0, 0, ip, l_bnBias, l_bnScale, output);
//cudaCheckError();
hipDeviceSynchronize();
nT2 = getTimeMicroseconds64();
printf("TotalTime = %d us\n", nT2-nT1);
s = hipMemcpy(tmp, output, nOutput<<2, hipMemcpyDeviceToHost);
printf("%s\n", hipGetErrorName(s));
//cudaCheckError();
hipFree(t_input);
hipFree(output);
hipFree(l_weights);
hipFree(l_bias);
hipFree(ip);
free(kernel);
free(bnScale);
free(bnBias);
/////////////////////////////////
// cuDNN
/////////////////////////////////
kernel = get_parameter(weight_NCHW_Name256, 9*256*256);
bnBias = get_parameter(bnBiasName256, 256);
bnScale = get_parameter(bnScaleName256, 256);
float* eMean = get_parameter(eMeanName256, 256);
float* eVar = get_parameter(eVarName256, 256);
float *l_eMean, *l_eVar;
nInput = 16*16*256, nOutput = 14*14*256, nWeights = 3*3*256*256, nBias = 256;
hipMalloc((void **) &output, nOutput<<2);
hipMalloc((void **) &l_weights, nWeights<<2);
hipMalloc((void **) &l_bias, nBias<<2);
hipMemcpy(l_weights, kernel, nWeights<<2, hipMemcpyHostToDevice);
hipMemcpy(l_bias, bias, nBias<<2, hipMemcpyHostToDevice);
hipMalloc((void **) &l_eMean, nBias<<2);
hipMalloc((void **) &l_eVar, nBias<<2);
hipMemcpy(l_bnBias, bnBias, nBias<<2, hipMemcpyHostToDevice);
hipMemcpy(l_bnScale, bnScale, nBias<<2, hipMemcpyHostToDevice);
hipMemcpy(l_eMean, eMean, nBias<<2, hipMemcpyHostToDevice);
hipMemcpy(l_eVar, eVar, nBias<<2, hipMemcpyHostToDevice);
hipMemset((void *) output, 0, nOutput<<2);
float tmp_cudnn[nOutput];
cudnnStatus_t status;
float one = 1.0, zero = 0.0;
int size;
cudnnHandle_t handle;
status = cudnnCreate(&handle);
if (status != CUDNN_STATUS_SUCCESS) printf("failed1\n");
cudnnTensorDescriptor_t xdesc, ydesc, bdesc;
cudnnFilterDescriptor_t wdesc; // CUDNN_TENSOR_NHWC, CUDNN_TENSOR_NCHW
status = cudnnCreateTensorDescriptor(&xdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed2\n");
status = cudnnSetTensor4dDescriptor(xdesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 16, 16);
if (status != CUDNN_STATUS_SUCCESS) printf("failed3\n");
status = cudnnCreateTensorDescriptor(&ydesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed4\n");
status = cudnnSetTensor4dDescriptor(ydesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 14, 14);
if (status != CUDNN_STATUS_SUCCESS) printf("failed5\n");
status = cudnnCreateFilterDescriptor(&wdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed6\n");
status = cudnnSetFilter4dDescriptor(wdesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3);
if (status != CUDNN_STATUS_SUCCESS) printf("failed7\n");
status = cudnnCreateTensorDescriptor(&bdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed8\n");
status = cudnnSetTensor4dDescriptor(bdesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 1, 1);
if (status != CUDNN_STATUS_SUCCESS) printf("failed9\n");
cudnnConvolutionDescriptor_t conv_desc;
status = cudnnCreateConvolutionDescriptor(&conv_desc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed10\n");
status = cudnnSetConvolution2dDescriptor(conv_desc, 0,0, 1,1,1,1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); //CUDNN_CONVOLUTION
if (status != CUDNN_STATUS_SUCCESS) printf("failed11\n");
cudnnActivationDescriptor_t act_desc;
status = cudnnCreateActivationDescriptor(&act_desc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed12\n");
status = cudnnSetActivationDescriptor(act_desc, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0);
if (status != CUDNN_STATUS_SUCCESS) printf("failed13\n");
cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc;
status = cudnnCreateTensorDescriptor(&bnScaleBiasMeanVarDesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed14\n");
status = cudnnSetTensor4dDescriptor(bnScaleBiasMeanVarDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1);
if (status != CUDNN_STATUS_SUCCESS) printf("failed15\n");
cudnnConvolutionFwdAlgo_t algo = (cudnnConvolutionFwdAlgo_t)6;
status = cudnnGetConvolutionForwardWorkspaceSize(handle,
xdesc,
wdesc,
conv_desc,
ydesc,
algo,
(size_t *)&(size));
float *extra;
hipMalloc((void **) &extra, size);
nT1_cudnn = getTimeMicroseconds64();
status = cudnnConvolutionForward(handle, &one,
xdesc, input, wdesc, l_weights,
conv_desc, algo,
extra, size, &zero,
ydesc, output);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed1\n");
status = cudnnBatchNormalizationForwardInference(handle, CUDNN_BATCHNORM_SPATIAL,
&one, &zero,
ydesc, output, ydesc, output,
bnScaleBiasMeanVarDesc, l_bnScale, l_bnBias, l_eMean, l_eVar, CUDNN_BN_MIN_EPSILON);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed2\n");
status = cudnnActivationForward(handle, act_desc, &one,
ydesc, output, &zero,
ydesc, output);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed3\n");
hipDeviceSynchronize();
nT2_cudnn = getTimeMicroseconds64();
printf("cuDNN TotalTime = %d us\n", nT2_cudnn-nT1_cudnn);
s = hipMemcpy(tmp_cudnn, output, nOutput<<2, hipMemcpyDeviceToHost);
printf("%s\n", hipGetErrorName(s));
hipFree(extra);
hipFree(input);
hipFree(output);
hipFree(l_weights);
hipFree(l_bias);
hipFree(l_bnScale);
hipFree(l_bnBias);
hipFree(l_eMean);
hipFree(l_eVar);
free(bias);
free(kernel);
free(bnScale);
free(bnBias);
free(eMean);
free(eVar);
free(input_);
output_checker(tmp, tmp_cudnn, 14, 256, 1);
return ((nT2-nT1) << 16) | (nT2_cudnn-nT1_cudnn);
} | 776ea438d3b6be9b92ca874b0ea89696cdc81c91.cu | #include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
#include <math.h>
#include <assert.h>
#include <xmmintrin.h>
#include <immintrin.h>
#include "cudnn.h"
#include "util.h"
#include "Kernel256_winograd.h"
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d:'%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
#define MY_KERNEL 0
#define d(input, i, j, Inz) ( input[Inz + i*768 + (j<<7)] )
__global__ void kernel_256_winograd_BtdB(float *pInputs, float *pOutputs) {
int Inx = blockIdx.x<<2, Iny0 = blockIdx.y<<2, Part = blockIdx.z, Iny1 = threadIdx.y, Inz = threadIdx.x;
int Iny = Iny0+Iny1, stride_r = 4096, stride_c = 256; // 4096 = 16*256
int c_glb_start = Inx*stride_r + Iny*stride_c + Inz + (Part<<7), c_input = Iny1*128 + Inz;
extern __shared__ float input[];
int stride_768[6] = {0, 768, 1536, 2304, 3072, 3840}; // 768 = 6*128
for (int i = 0; i < 6; i++) {
input[c_input + stride_768[i]] = pInputs[c_glb_start + i*stride_r];
}
__syncthreads();
float BTd[6];
switch(Iny1) {
case 0:
for (int j = 0; j < 6; j++) {
BTd[j] = d(input, 0, j, Inz)*4 - d(input, 2, j, Inz)*5 + d(input, 4, j, Inz);
}
break;
case 1:
for (int j = 0; j < 6; j++) {
BTd[j] = -d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 + d(input, 3, j, Inz) + d(input, 4, j, Inz);
}
break;
case 2:
for (int j = 0; j < 6; j++) {
BTd[j] = d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 - d(input, 3, j, Inz) + d(input, 4, j, Inz);
}
break;
case 3:
for (int j = 0; j < 6; j++) {
BTd[j] = -d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) + d(input, 3, j, Inz)*2 + d(input, 4, j, Inz);
}
break;
case 4:
for (int j = 0; j < 6; j++) {
BTd[j] = d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) - d(input, 3, j, Inz)*2 + d(input, 4, j, Inz);
}
break;
case 5:
for (int j = 0; j < 6; j++) {
BTd[j] = d(input, 1, j, Inz)*4 - d(input, 3, j, Inz)*5 + d(input, 5, j, Inz);
}
break;
}
__syncthreads();
int tmp_offset = Iny1*768+Inz;
for (int i = 0; i < 6; i++) {
input[tmp_offset + i*128] = BTd[i];
}
__syncthreads();
float BTdB[6];
switch(Iny1) {
case 0:
for (int i = 0; i < 6; i++) {
BTdB[i] = 4*d(input, i, 0, Inz) - 5*d(input, i, 2, Inz) + d(input, i, 4, Inz);
}
break;
case 1:
for (int i = 0; i < 6; i++) {
BTdB[i] = -4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) + d(input, i, 3, Inz) + d(input, i, 4, Inz);
}
break;
case 2:
for (int i = 0; i < 6; i++) {
BTdB[i] = 4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) - d(input, i, 3, Inz) + d(input, i, 4, Inz);
}
break;
case 3:
for (int i = 0; i < 6; i++) {
BTdB[i] = -2*d(input, i, 1, Inz) - d(input, i, 2, Inz) + 2*d(input, i, 3, Inz) + d(input, i, 4, Inz);
}
break;
case 4:
for (int i = 0; i < 6; i++) {
BTdB[i] = 2*d(input, i, 1, Inz) - d(input, i, 2, Inz) - 2*d(input, i, 3, Inz) + d(input, i, 4, Inz);
}
break;
case 5:
for (int i = 0; i < 6; i++) {
BTdB[i] = 4*d(input, i, 1, Inz) - 5*d(input, i, 3, Inz) + d(input, i, 5, Inz);
}
break;
}
__syncthreads();
for (int i = 0; i < 6; i++) {
pOutputs[(Iny1 + i*6)*4096 + (blockIdx.x*4+blockIdx.y)*256 + Inz + (Part<<7)] = BTdB[i];
}
}
__global__ void kernel_256_winograd_AtIA(float *pInputs, float *pBiases, float *pScales, float *pOutputs) {
int Tilex = blockIdx.x, Tiley = blockIdx.y, Iny = threadIdx.y, kz = blockIdx.z, Inx = threadIdx.x;
int c_input = Inx*6 + Iny;
__shared__ float bias, scale;
extern __shared__ float input[];
input[c_input] = pInputs[c_input*16*256 + (Tilex*4+Tiley)*256 + kz];
bias = pBiases[kz];
scale = pScales[kz];
__syncthreads();
float tmp = 0;
switch(Inx) {
case 0:
tmp = input[Iny] + input[6+Iny] + input[12+Iny] + input[18+Iny] + input[24+Iny];
break;
case 1:
tmp = input[6+Iny] - input[12+Iny] + 2*input[18+Iny] - 2*input[24+Iny];
break;
case 2:
tmp = input[6+Iny] + input[12+Iny] + 4*input[18+Iny] + 4*input[24+Iny];
break;
case 3:
tmp = input[6+Iny] - input[12+Iny] + 8*input[18+Iny] - 8*input[24+Iny] + input[30+Iny];
break;
}
__syncthreads();
input[c_input] = tmp;
__syncthreads();
if (Inx > 3 || (Tilex == 3 && Inx > 1)) return;
int x;
float o;
switch(Iny) {
case 0:
x = Inx*6;
o = scale*(input[x]+input[x+1]+input[x+2]+input[x+3]+input[x+4]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+1)*256 + kz] = o > 0 ? o : 0;
break;
case 1:
x = Inx*6;
o = scale*(input[x+1] - input[x+2] + 2*input[x+3] - 2*input[x+4]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+2)*256 + kz] = o > 0 ? o : 0;
break;
case 2:
if (Tiley == 3) break;
x = Inx*6;
o = scale*(input[x+1] + input[x+2] + 4*input[x+3] + 4*input[x+4]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+3)*256 + kz] = o > 0 ? o : 0;
break;
case 3:
if (Tiley == 3) break;
x = Inx*6;
o = scale*(input[x+1] - input[x+2] + 8*input[x+3] - 8*input[x+4] + input[x+5]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+4)*256 + kz] = o > 0 ? o : 0;
break;
}
}
__global__ void kernel_256_OuterProduct_256(float *A, float *B, float *C) {
int Tile = blockIdx.x, Part = blockIdx.y, tX = threadIdx.x, tY = threadIdx.y;
int c_input = tY*256 + tX, c_kernel = c_input, T_offset = (Tile<<12) + (Part<<11) + c_input, B_offset = (Tile<<16) + c_kernel;
extern __shared__ float input[];
float *kernel = input + 2048, *out = kernel + 8192;
int B_stride[32] = {0, 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, 4096, 4352, 4608, 4864, 5120, 5376, 5632, 5888, 6144, 6400, 6656, 6912, 7168, 7424, 7680, 7936};
out[c_input] = 0.0f;
out[c_input+1024] = 0;
input[c_input] = A[T_offset];
input[c_input+1024] = A[T_offset+1024];
for (int k = 0; k < 8; k++) {
int B_start = B_offset + (k<<13); // 32*64
kernel[c_kernel] = B[B_start], kernel[c_kernel+1024] = B[B_start+1024];
kernel[c_kernel+2048] = B[B_start+2048], kernel[c_kernel+3072] = B[B_start+3072];
kernel[c_kernel+4096] = B[B_start+4096], kernel[c_kernel+5120] = B[B_start+5120];
kernel[c_kernel+6144] = B[B_start+6144], kernel[c_kernel+7168] = B[B_start+7168];
__syncthreads();
float sum = 0, sum1 = 0;
int y_tmp = (tY<<8)+(k<<5), y_tmp1 = y_tmp+1024;
for (int j = 0; j < 32; j++) {
sum += input[y_tmp + j] * kernel[tX + B_stride[j]];
sum1 += input[y_tmp1 + j] * kernel[tX + B_stride[j]];
}
out[c_input] += sum;
out[c_input+1024] += sum1;
__syncthreads();
}
C[T_offset] = out[c_input];
C[T_offset+1024] = out[c_input+1024];
}
int kernel_256() {
float *input_ = get_parameter(inputName256, 16*16*256);
float *bias = get_parameter(biasName256, 256);
float *input, *output, *l_weights, *l_bias;
uint64_t nT1 = 0, nT2 = 0, nT1_cudnn = 0, nT2_cudnn = 0;
cudaError_t s;
/////////////////////////////////
// My Kernel
/////////////////////////////////
float *kernel = get_parameter(weight_winograd_Name256, 36*256*256), *t_input, *ip;
int nInput = 16*16*256, nOutput = 16*16*256, nWeights = 36*256*256, nBias = 256, nTransInput = 16*6*6*256, nInnerProd = 16*6*6*256;
float *l_bnBias, *l_bnScale, *bnBias, *bnScale;
cudaMalloc((void **) &input, nInput<<3);
cudaMalloc((void **) &output, nOutput<<2);
cudaMalloc((void **) &l_weights, nWeights<<2);
cudaMalloc((void **) &l_bias, nBias<<2);
cudaMalloc((void **) &t_input, nTransInput<<2);
cudaMalloc((void **) &ip, nInnerProd<<2);
cudaMemset((void *) input, 0, nInput<<3);
cudaMemset((void *) output, 0, nOutput<<2);
cudaMemset((void *) t_input, 0, nTransInput<<2);
cudaMemset((void *) l_weights, 0, nWeights<<2);
cudaMemset((void *) ip, 0, nInnerProd<<2);
cudaMemcpy(input, input_, nInput<<2, cudaMemcpyHostToDevice);
cudaMemcpy(l_weights, kernel, nWeights<<2, cudaMemcpyHostToDevice);
cudaMemcpy(l_bias, bias, nBias<<2, cudaMemcpyHostToDevice);
bnBias = get_parameter(bnBias_winograd_Name256, 256);
bnScale = get_parameter(bnScale_winograd_Name256, 256);
cudaMalloc((void **) &l_bnBias, nBias<<2);
cudaMalloc((void **) &l_bnScale, nBias<<2);
cudaMemcpy(l_bnBias, bnBias, nBias<<2, cudaMemcpyHostToDevice);
cudaMemcpy(l_bnScale, bnScale, nBias<<2, cudaMemcpyHostToDevice);
float tmp[nOutput];
nT1 = getTimeMicroseconds64();
kernel_256_winograd_BtdB <<<dim3(4, 4, 2), dim3(128, 6), (6*6*128)<<2 >>> (input, t_input);
kernel_256_OuterProduct_256<<<dim3(36, 2), dim3(256, 4), (8*256 + 32*256 + 8*256)<<2 >>> (t_input, l_weights, ip);
kernel_256_winograd_AtIA <<<dim3(4, 4, 256), dim3(6, 6), ((6*6)<<2)>>> (ip, l_bnBias, l_bnScale, output);
//cudaCheckError();
cudaDeviceSynchronize();
nT2 = getTimeMicroseconds64();
printf("TotalTime = %d us\n", nT2-nT1);
s = cudaMemcpy(tmp, output, nOutput<<2, cudaMemcpyDeviceToHost);
printf("%s\n", cudaGetErrorName(s));
//cudaCheckError();
cudaFree(t_input);
cudaFree(output);
cudaFree(l_weights);
cudaFree(l_bias);
cudaFree(ip);
free(kernel);
free(bnScale);
free(bnBias);
/////////////////////////////////
// cuDNN
/////////////////////////////////
kernel = get_parameter(weight_NCHW_Name256, 9*256*256);
bnBias = get_parameter(bnBiasName256, 256);
bnScale = get_parameter(bnScaleName256, 256);
float* eMean = get_parameter(eMeanName256, 256);
float* eVar = get_parameter(eVarName256, 256);
float *l_eMean, *l_eVar;
nInput = 16*16*256, nOutput = 14*14*256, nWeights = 3*3*256*256, nBias = 256;
cudaMalloc((void **) &output, nOutput<<2);
cudaMalloc((void **) &l_weights, nWeights<<2);
cudaMalloc((void **) &l_bias, nBias<<2);
cudaMemcpy(l_weights, kernel, nWeights<<2, cudaMemcpyHostToDevice);
cudaMemcpy(l_bias, bias, nBias<<2, cudaMemcpyHostToDevice);
cudaMalloc((void **) &l_eMean, nBias<<2);
cudaMalloc((void **) &l_eVar, nBias<<2);
cudaMemcpy(l_bnBias, bnBias, nBias<<2, cudaMemcpyHostToDevice);
cudaMemcpy(l_bnScale, bnScale, nBias<<2, cudaMemcpyHostToDevice);
cudaMemcpy(l_eMean, eMean, nBias<<2, cudaMemcpyHostToDevice);
cudaMemcpy(l_eVar, eVar, nBias<<2, cudaMemcpyHostToDevice);
cudaMemset((void *) output, 0, nOutput<<2);
float tmp_cudnn[nOutput];
cudnnStatus_t status;
float one = 1.0, zero = 0.0;
int size;
cudnnHandle_t handle;
status = cudnnCreate(&handle);
if (status != CUDNN_STATUS_SUCCESS) printf("failed1\n");
cudnnTensorDescriptor_t xdesc, ydesc, bdesc;
cudnnFilterDescriptor_t wdesc; // CUDNN_TENSOR_NHWC, CUDNN_TENSOR_NCHW
status = cudnnCreateTensorDescriptor(&xdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed2\n");
status = cudnnSetTensor4dDescriptor(xdesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 16, 16);
if (status != CUDNN_STATUS_SUCCESS) printf("failed3\n");
status = cudnnCreateTensorDescriptor(&ydesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed4\n");
status = cudnnSetTensor4dDescriptor(ydesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 14, 14);
if (status != CUDNN_STATUS_SUCCESS) printf("failed5\n");
status = cudnnCreateFilterDescriptor(&wdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed6\n");
status = cudnnSetFilter4dDescriptor(wdesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3);
if (status != CUDNN_STATUS_SUCCESS) printf("failed7\n");
status = cudnnCreateTensorDescriptor(&bdesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed8\n");
status = cudnnSetTensor4dDescriptor(bdesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 1, 1);
if (status != CUDNN_STATUS_SUCCESS) printf("failed9\n");
cudnnConvolutionDescriptor_t conv_desc;
status = cudnnCreateConvolutionDescriptor(&conv_desc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed10\n");
status = cudnnSetConvolution2dDescriptor(conv_desc, 0,0, 1,1,1,1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); //CUDNN_CONVOLUTION
if (status != CUDNN_STATUS_SUCCESS) printf("failed11\n");
cudnnActivationDescriptor_t act_desc;
status = cudnnCreateActivationDescriptor(&act_desc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed12\n");
status = cudnnSetActivationDescriptor(act_desc, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0);
if (status != CUDNN_STATUS_SUCCESS) printf("failed13\n");
cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc;
status = cudnnCreateTensorDescriptor(&bnScaleBiasMeanVarDesc);
if (status != CUDNN_STATUS_SUCCESS) printf("failed14\n");
status = cudnnSetTensor4dDescriptor(bnScaleBiasMeanVarDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1);
if (status != CUDNN_STATUS_SUCCESS) printf("failed15\n");
cudnnConvolutionFwdAlgo_t algo = (cudnnConvolutionFwdAlgo_t)6;
status = cudnnGetConvolutionForwardWorkspaceSize(handle,
xdesc,
wdesc,
conv_desc,
ydesc,
algo,
(size_t *)&(size));
float *extra;
cudaMalloc((void **) &extra, size);
nT1_cudnn = getTimeMicroseconds64();
status = cudnnConvolutionForward(handle, &one,
xdesc, input, wdesc, l_weights,
conv_desc, algo,
extra, size, &zero,
ydesc, output);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed1\n");
status = cudnnBatchNormalizationForwardInference(handle, CUDNN_BATCHNORM_SPATIAL,
&one, &zero,
ydesc, output, ydesc, output,
bnScaleBiasMeanVarDesc, l_bnScale, l_bnBias, l_eMean, l_eVar, CUDNN_BN_MIN_EPSILON);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed2\n");
status = cudnnActivationForward(handle, act_desc, &one,
ydesc, output, &zero,
ydesc, output);
if (status != CUDNN_STATUS_SUCCESS) printf("Not Successed3\n");
cudaDeviceSynchronize();
nT2_cudnn = getTimeMicroseconds64();
printf("cuDNN TotalTime = %d us\n", nT2_cudnn-nT1_cudnn);
s = cudaMemcpy(tmp_cudnn, output, nOutput<<2, cudaMemcpyDeviceToHost);
printf("%s\n", cudaGetErrorName(s));
cudaFree(extra);
cudaFree(input);
cudaFree(output);
cudaFree(l_weights);
cudaFree(l_bias);
cudaFree(l_bnScale);
cudaFree(l_bnBias);
cudaFree(l_eMean);
cudaFree(l_eVar);
free(bias);
free(kernel);
free(bnScale);
free(bnBias);
free(eMean);
free(eVar);
free(input_);
output_checker(tmp, tmp_cudnn, 14, 256, 1);
return ((nT2-nT1) << 16) | (nT2_cudnn-nT1_cudnn);
} |
eee4bdf4b6968ce6a3b8592e48f2613a5230d43c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "upsample_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t N = XSIZE*YSIZE;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int w = XSIZE;
int h = YSIZE;
int c = 2;
int batch = 2;
int stride = 2;
int forward = XSIZE*YSIZE;
float scale = 2;
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
upsample_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,x,w,h,c,batch,stride,forward,scale,out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
upsample_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,x,w,h,c,batch,stride,forward,scale,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
upsample_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,x,w,h,c,batch,stride,forward,scale,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | eee4bdf4b6968ce6a3b8592e48f2613a5230d43c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "upsample_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t N = XSIZE*YSIZE;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int w = XSIZE;
int h = YSIZE;
int c = 2;
int batch = 2;
int stride = 2;
int forward = XSIZE*YSIZE;
float scale = 2;
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
upsample_kernel<<<gridBlock,threadBlock>>>(N,x,w,h,c,batch,stride,forward,scale,out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
upsample_kernel<<<gridBlock,threadBlock>>>(N,x,w,h,c,batch,stride,forward,scale,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
upsample_kernel<<<gridBlock,threadBlock>>>(N,x,w,h,c,batch,stride,forward,scale,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b968ab3905cc43ffee1221205de17b7441d46933.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Azzam Haidar
@author Tingxing Dong
@generated from magmablas/zgeqr2_batched.cu normal z -> c, Tue Feb 9 16:05:39 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "batched_kernel_param.h"
#define BLOCK_SIZE 256
#define dA(a_1,a_2) (dA + (a_1) + (a_2)*(local_lda))
#include "clarfg_devicesfunc_hip.cuh"
//==============================================================================
static __device__
void clarfx_device( int m, int n, magmaFloatComplex *v, magmaFloatComplex *tau,
magmaFloatComplex *dc, magma_int_t ldc, magmaFloatComplex* sum)
{
if (n <= 0) return;
if (MAGMA_C_EQUAL(*tau, MAGMA_C_ZERO) ) return; // check singularity
const int tx = threadIdx.x;
magmaFloatComplex lsum;
for (int k=0; k < n; k++)
{
/* perform w := v' * C */
if (tx < BLOCK_SIZE)
{
if (tx == 0)
lsum = dc[0+ldc*k]; //since V[0] should be one
else
lsum = MAGMA_C_ZERO;
for (int j = tx+1; j < m; j += BLOCK_SIZE) {
lsum += MAGMA_C_MUL( MAGMA_C_CONJ( v[j] ), dc[j+ldc*k] );
}
sum[tx] = lsum;
}
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
__syncthreads();
magmaFloatComplex z__1 = - MAGMA_C_CONJ(*tau) * sum[0];
/* C := C - v * w */
if (tx < BLOCK_SIZE)
{
for (int j = tx+1; j < m; j += BLOCK_SIZE)
dc[j+ldc*k] += z__1 * v[j];
}
if (tx == 0) dc[0+ldc*k] += z__1;
__syncthreads();
}
}
//==============================================================================
static __device__
void cgeqr2_device( magma_int_t m, magma_int_t n,
magmaFloatComplex* dA, magma_int_t lda,
magmaFloatComplex *dtau,
magmaFloatComplex *dv,
magmaFloatComplex *sum,
float *swork,
magmaFloatComplex *scale,
float *sscale)
{
//lapack clarfg, compute the norm, scale and generate the householder vector
clarfg_device(m, dv, &(dv[1]), 1, dtau, swork, sscale, scale);
__syncthreads();
//update the trailing matix with the householder
clarfx_device(m, n, dv, dtau, dA, lda, sum);
__syncthreads();
}
//==============================================================================
extern __shared__ magmaFloatComplex shared_data[];
__global__
void cgeqr2_sm_kernel_batched( int m, int n, magmaFloatComplex** dA_array, magma_int_t lda,
magmaFloatComplex **dtau_array)
{
magmaFloatComplex* dA = dA_array[blockIdx.z];
magmaFloatComplex* dtau = dtau_array[blockIdx.z];
magmaFloatComplex *sdata = (magmaFloatComplex*)shared_data;
const int tx = threadIdx.x;
__shared__ magmaFloatComplex scale;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
__shared__ float swork[ BLOCK_SIZE ];
__shared__ float sscale;
//load data from global to shared memory
for (int s=0; s < n; s++)
{
for (int j = tx; j < m; j += BLOCK_SIZE)
{
sdata[j + s * m] = dA[j + s * lda];
}
}
__syncthreads();
for (int s=0; s < min(m,n); s++)
{
cgeqr2_device( m-s, n-(s+1),
&(sdata[s+(s+1)*m]), m,
dtau+s,
&(sdata[s+s*m]),
sum,
swork,
&scale,
&sscale);
} // end of s
//copy back to global memory
for (int s=0; s < n; s++)
{
for (int j = tx; j < m; j += BLOCK_SIZE)
{
dA[j + s * lda] = sdata[j + s * m];
}
}
}
//==============================================================================
__global__
void cgeqr2_column_sm_kernel_batched( int m, int n, magmaFloatComplex** dA_array, magma_int_t lda,
magmaFloatComplex **dtau_array)
{
magmaFloatComplex* dA = dA_array[blockIdx.z];
magmaFloatComplex* dtau = dtau_array[blockIdx.z];
magmaFloatComplex *sdata = (magmaFloatComplex*)shared_data;
__shared__ magmaFloatComplex scale;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
__shared__ float swork[ BLOCK_SIZE ];
__shared__ float sscale;
const int tx = threadIdx.x;
for (int s=0; s < min(m,n); s++)
{
//load one vector in shared memory: sdata
for (int j = tx; j < m-s; j += BLOCK_SIZE)
{
sdata[j] = dA[s + j + s * lda];
}
__syncthreads();
//sdata is written
cgeqr2_device(m-s, n-(s+1),
&(dA[s+(s+1)*lda]), lda,
dtau+s,
sdata,
sum,
swork,
&scale,
&sscale);
for (int j = tx; j < m-s; j += BLOCK_SIZE)
{
dA[s + j + s * lda] = sdata[j];
}
__syncthreads();
}
}
__global__
void cgeqr2_kernel_batched( int m, int n, magmaFloatComplex** dA_array, magma_int_t lda,
magmaFloatComplex **dtau_array)
{
magmaFloatComplex* dA = dA_array[blockIdx.z];
magmaFloatComplex* dtau = dtau_array[blockIdx.z];
__shared__ magmaFloatComplex scale;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
__shared__ float swork[ BLOCK_SIZE ];
__shared__ float sscale;
for (int s=0; s < min(m,n); s++)
{
cgeqr2_device( m-s, n-(s+1),
&(dA[s+(s+1)*lda]), lda,
dtau+s,
&(dA[s+s*lda]),
sum,
swork,
&scale,
&sscale );
}
}
//==============================================================================
/**
Purpose
-------
CGEQR2 computes a QR factorization of a complex m by n matrix A:
A = Q * R.
This version implements the right-looking QR with non-blocking.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array on the GPU, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, the elements on and above the diagonal of the array
contain the min(M,N)-by-N upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the orthogonal matrix Q as a
product of min(m,n) elementary reflectors (see Further
Details).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
To benefit from coalescent memory accesses LDDA must be
divisible by 16.
@param[out]
dtau_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array, dimension (min(M,N))
The scalar factors of the elementary reflectors (see Further
Details).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occured, such as memory allocation failed.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
Further Details
---------------
The matrix Q is represented as a product of elementary reflectors
Q = H(1) H(2) . . . H(k), where k = min(m,n).
Each H(i) has the form
H(i) = I - tau * v * v'
where tau is a complex scalar, and v is a complex vector with
v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
and tau in TAU(i).
@ingroup magma_cgeqrf_aux
********************************************************************/
extern "C" magma_int_t
magma_cgeqr2_batched(magma_int_t m, magma_int_t n,
magmaFloatComplex **dA_array, magma_int_t ldda,
magmaFloatComplex **dtau_array,
magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t k;
/* Check arguments */
magma_int_t arginfo = 0;
if (m < 0)
arginfo = -1;
else if (n < 0)
arginfo = -2;
else if (ldda < max(1,m))
arginfo = -4;
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
k = min(m,n);
dim3 blocks(1, 1, batchCount);
dim3 threads(BLOCK_SIZE);
if (sizeof(magmaFloatComplex)*(m*k) <= 42000 /*sizeof(magmaFloatComplex) * 128 * k*/) // there are some static shared memory besides of dynamic ones
{
//load panel in shared memory and factorize it and copy back to gloabl memory
//intend for small panel to avoid overfill of shared memory.
//this kernel is composed of device routine and thus clean
hipLaunchKernelGGL(( cgeqr2_sm_kernel_batched), dim3(blocks), dim3(threads), sizeof(magmaFloatComplex)*(m*k), queue->cuda_stream() ,
m, k, dA_array, ldda, dtau_array);
}
else
{
//load one column vector in shared memory and householder it and used it to update trailing matrix which is global memory
// one vector is normally smaller than 48K shared memory
if (sizeof(magmaFloatComplex)*(m) < 42000)
hipLaunchKernelGGL(( cgeqr2_column_sm_kernel_batched), dim3(blocks), dim3(threads), sizeof(magmaFloatComplex)*(m), queue->cuda_stream() ,
m, k, dA_array, ldda, dtau_array);
else
//not use dynamic shared memory at all
hipLaunchKernelGGL(( cgeqr2_kernel_batched), dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
m, k, dA_array, ldda, dtau_array);
}
return arginfo;
}
//==============================================================================
| b968ab3905cc43ffee1221205de17b7441d46933.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Azzam Haidar
@author Tingxing Dong
@generated from magmablas/zgeqr2_batched.cu normal z -> c, Tue Feb 9 16:05:39 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "batched_kernel_param.h"
#define BLOCK_SIZE 256
#define dA(a_1,a_2) (dA + (a_1) + (a_2)*(local_lda))
#include "clarfg_devicesfunc.cuh"
//==============================================================================
static __device__
void clarfx_device( int m, int n, magmaFloatComplex *v, magmaFloatComplex *tau,
magmaFloatComplex *dc, magma_int_t ldc, magmaFloatComplex* sum)
{
if (n <= 0) return;
if (MAGMA_C_EQUAL(*tau, MAGMA_C_ZERO) ) return; // check singularity
const int tx = threadIdx.x;
magmaFloatComplex lsum;
for (int k=0; k < n; k++)
{
/* perform w := v' * C */
if (tx < BLOCK_SIZE)
{
if (tx == 0)
lsum = dc[0+ldc*k]; //since V[0] should be one
else
lsum = MAGMA_C_ZERO;
for (int j = tx+1; j < m; j += BLOCK_SIZE) {
lsum += MAGMA_C_MUL( MAGMA_C_CONJ( v[j] ), dc[j+ldc*k] );
}
sum[tx] = lsum;
}
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
__syncthreads();
magmaFloatComplex z__1 = - MAGMA_C_CONJ(*tau) * sum[0];
/* C := C - v * w */
if (tx < BLOCK_SIZE)
{
for (int j = tx+1; j < m; j += BLOCK_SIZE)
dc[j+ldc*k] += z__1 * v[j];
}
if (tx == 0) dc[0+ldc*k] += z__1;
__syncthreads();
}
}
//==============================================================================
static __device__
void cgeqr2_device( magma_int_t m, magma_int_t n,
magmaFloatComplex* dA, magma_int_t lda,
magmaFloatComplex *dtau,
magmaFloatComplex *dv,
magmaFloatComplex *sum,
float *swork,
magmaFloatComplex *scale,
float *sscale)
{
//lapack clarfg, compute the norm, scale and generate the householder vector
clarfg_device(m, dv, &(dv[1]), 1, dtau, swork, sscale, scale);
__syncthreads();
//update the trailing matix with the householder
clarfx_device(m, n, dv, dtau, dA, lda, sum);
__syncthreads();
}
//==============================================================================
extern __shared__ magmaFloatComplex shared_data[];
__global__
void cgeqr2_sm_kernel_batched( int m, int n, magmaFloatComplex** dA_array, magma_int_t lda,
magmaFloatComplex **dtau_array)
{
magmaFloatComplex* dA = dA_array[blockIdx.z];
magmaFloatComplex* dtau = dtau_array[blockIdx.z];
magmaFloatComplex *sdata = (magmaFloatComplex*)shared_data;
const int tx = threadIdx.x;
__shared__ magmaFloatComplex scale;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
__shared__ float swork[ BLOCK_SIZE ];
__shared__ float sscale;
//load data from global to shared memory
for (int s=0; s < n; s++)
{
for (int j = tx; j < m; j += BLOCK_SIZE)
{
sdata[j + s * m] = dA[j + s * lda];
}
}
__syncthreads();
for (int s=0; s < min(m,n); s++)
{
cgeqr2_device( m-s, n-(s+1),
&(sdata[s+(s+1)*m]), m,
dtau+s,
&(sdata[s+s*m]),
sum,
swork,
&scale,
&sscale);
} // end of s
//copy back to global memory
for (int s=0; s < n; s++)
{
for (int j = tx; j < m; j += BLOCK_SIZE)
{
dA[j + s * lda] = sdata[j + s * m];
}
}
}
//==============================================================================
__global__
void cgeqr2_column_sm_kernel_batched( int m, int n, magmaFloatComplex** dA_array, magma_int_t lda,
magmaFloatComplex **dtau_array)
{
magmaFloatComplex* dA = dA_array[blockIdx.z];
magmaFloatComplex* dtau = dtau_array[blockIdx.z];
magmaFloatComplex *sdata = (magmaFloatComplex*)shared_data;
__shared__ magmaFloatComplex scale;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
__shared__ float swork[ BLOCK_SIZE ];
__shared__ float sscale;
const int tx = threadIdx.x;
for (int s=0; s < min(m,n); s++)
{
//load one vector in shared memory: sdata
for (int j = tx; j < m-s; j += BLOCK_SIZE)
{
sdata[j] = dA[s + j + s * lda];
}
__syncthreads();
//sdata is written
cgeqr2_device(m-s, n-(s+1),
&(dA[s+(s+1)*lda]), lda,
dtau+s,
sdata,
sum,
swork,
&scale,
&sscale);
for (int j = tx; j < m-s; j += BLOCK_SIZE)
{
dA[s + j + s * lda] = sdata[j];
}
__syncthreads();
}
}
__global__
void cgeqr2_kernel_batched( int m, int n, magmaFloatComplex** dA_array, magma_int_t lda,
magmaFloatComplex **dtau_array)
{
magmaFloatComplex* dA = dA_array[blockIdx.z];
magmaFloatComplex* dtau = dtau_array[blockIdx.z];
__shared__ magmaFloatComplex scale;
__shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
__shared__ float swork[ BLOCK_SIZE ];
__shared__ float sscale;
for (int s=0; s < min(m,n); s++)
{
cgeqr2_device( m-s, n-(s+1),
&(dA[s+(s+1)*lda]), lda,
dtau+s,
&(dA[s+s*lda]),
sum,
swork,
&scale,
&sscale );
}
}
//==============================================================================
/**
Purpose
-------
CGEQR2 computes a QR factorization of a complex m by n matrix A:
A = Q * R.
This version implements the right-looking QR with non-blocking.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array on the GPU, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, the elements on and above the diagonal of the array
contain the min(M,N)-by-N upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the orthogonal matrix Q as a
product of min(m,n) elementary reflectors (see Further
Details).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
To benefit from coalescent memory accesses LDDA must be
divisible by 16.
@param[out]
dtau_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array, dimension (min(M,N))
The scalar factors of the elementary reflectors (see Further
Details).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occured, such as memory allocation failed.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
Further Details
---------------
The matrix Q is represented as a product of elementary reflectors
Q = H(1) H(2) . . . H(k), where k = min(m,n).
Each H(i) has the form
H(i) = I - tau * v * v'
where tau is a complex scalar, and v is a complex vector with
v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
and tau in TAU(i).
@ingroup magma_cgeqrf_aux
********************************************************************/
extern "C" magma_int_t
magma_cgeqr2_batched(magma_int_t m, magma_int_t n,
magmaFloatComplex **dA_array, magma_int_t ldda,
magmaFloatComplex **dtau_array,
magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t k;
/* Check arguments */
magma_int_t arginfo = 0;
if (m < 0)
arginfo = -1;
else if (n < 0)
arginfo = -2;
else if (ldda < max(1,m))
arginfo = -4;
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
k = min(m,n);
dim3 blocks(1, 1, batchCount);
dim3 threads(BLOCK_SIZE);
if (sizeof(magmaFloatComplex)*(m*k) <= 42000 /*sizeof(magmaFloatComplex) * 128 * k*/) // there are some static shared memory besides of dynamic ones
{
//load panel in shared memory and factorize it and copy back to gloabl memory
//intend for small panel to avoid overfill of shared memory.
//this kernel is composed of device routine and thus clean
cgeqr2_sm_kernel_batched<<< blocks, threads, sizeof(magmaFloatComplex)*(m*k), queue->cuda_stream() >>>
(m, k, dA_array, ldda, dtau_array);
}
else
{
//load one column vector in shared memory and householder it and used it to update trailing matrix which is global memory
// one vector is normally smaller than 48K shared memory
if (sizeof(magmaFloatComplex)*(m) < 42000)
cgeqr2_column_sm_kernel_batched<<< blocks, threads, sizeof(magmaFloatComplex)*(m), queue->cuda_stream() >>>
(m, k, dA_array, ldda, dtau_array);
else
//not use dynamic shared memory at all
cgeqr2_kernel_batched<<< blocks, threads, 0, queue->cuda_stream() >>>
(m, k, dA_array, ldda, dtau_array);
}
return arginfo;
}
//==============================================================================
|
e0de3c209d3b908ecf850c11b1b186f7d7b7a68a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***********************************************************/
/** \file
\brief alloc voxel blocks within truncation distance of depth pixel
\details
\author Yizhong Zhang
\date 12/7/2013
*/
/***********************************************************/
#include "voxel_hashing_device.h"
#include "voxel_hashing_internal.h"
#include "voxel_block_hash_table.cuh"
#include <helper_math.h>
#include "device_utils.h"
#define GPRINT(a) {cudaSafeCall(hipDeviceSynchronize(), #a);printf("%s\n",#a);}
// ==================================================================
__global__ void initHashBucketAtomicLock(PtrSz<unsigned int> hash_bucket_atomic_lock){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if( idx < hash_bucket_atomic_lock.size )
hash_bucket_atomic_lock[idx] = 0;
}
// ==================================================================
struct VoxelBlockAllocator : public VoxelBlockHashTable{
enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 };
PtrStepSz<float> depth; // the input depth map
dfusion::Intr intr; // intrinsic parameters of camera
int cols, rows; // resolution of depth image
dfusion::Mat33 Rc2w; // camera to world
float3 tc2w;
float trunc_dist; // truncation distance
__device__ __forceinline__ float3 get_point_in_camera_coord(int x, int y, float d) const {
return intr.uvd2xyz(x, y, d);
}
__device__ __forceinline__ void operator () () const {
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (x >= cols || y >= rows)
return;
float d = depth.ptr (y)[x];
if( d < 0.3f ) // input depth < 0.3m, illegal depth
return;
// the following code traverses each voxel block that the line ray intersect near depth
// algorithm comes from the paper: A fast voxel traversal algorithm for ray tracing
float3 ray_origin = tc2w;
float3 ray_end = Rc2w * get_point_in_camera_coord (x, y, d) + tc2w;
float3 ray_direction = normalize (ray_end - ray_origin);
ray_origin = ray_end - ray_direction * trunc_dist;
ray_end = ray_end + ray_direction * trunc_dist;
ray_direction = ray_end - ray_origin;
// the following code traverses the voxels between ray_origin and ray_end
int X, Y, Z;
coord2block(X, Y, Z, ray_origin.x, ray_origin.y, ray_origin.z, block_size);
int stepX, stepY, stepZ;
stepX = sgn(ray_direction.x);
stepY = sgn(ray_direction.y);
stepZ = sgn(ray_direction.z);
float tMaxX, tMaxY, tMaxZ;
tMaxX = ( block_size*(X + (stepX+1)/2) - ray_origin.x ) / ray_direction.x;
tMaxY = ( block_size*(Y + (stepY+1)/2) - ray_origin.y ) / ray_direction.y;
tMaxZ = ( block_size*(Z + (stepZ+1)/2) - ray_origin.z ) / ray_direction.z;
if( isnan(tMaxX) || isinf(tMaxX) ) tMaxX = 1e6f;
if( isnan(tMaxY) || isinf(tMaxY) ) tMaxY = 1e6f;
if( isnan(tMaxZ) || isinf(tMaxZ) ) tMaxZ = 1e6f;
float tDeltaX, tDeltaY, tDeltaZ;
tDeltaX = stepX * block_size / ray_direction.x;
tDeltaY = stepY * block_size / ray_direction.y;
tDeltaZ = stepZ * block_size / ray_direction.z;
if( isnan(tDeltaX) || isinf(tDeltaX) ) tDeltaX = 1e6f;
if( isnan(tDeltaY) || isinf(tDeltaY) ) tDeltaY = 1e6f;
if( isnan(tDeltaZ) || isinf(tDeltaZ) ) tDeltaZ = 1e6f;
int count = 0;
while(count<50){// infinite loop guard
count ++;
// -------- inside this code segment, X Y Z is the cell we want to process --------
// we insert an entry into the hash table, but it is not guaranteed to insert successfully
// but it doesn't matter even if insert failed this time.
int entry_id;
InsertHashEntryStaggered(entry_id, X, Y, Z);
// ------------------------------------------------------------------------------------
if( tMaxX > 1.0f && tMaxY > 1.0f && tMaxZ > 1.0f )
break;
if( tMaxX < tMaxY && tMaxX < tMaxZ ){
X += stepX;
tMaxX += tDeltaX;
}
else if(tMaxY < tMaxZ){
Y += stepY;
tMaxY += tDeltaY;
}
else{
Z += stepZ;
tMaxZ += tDeltaZ;
}
}
}
};
__global__ void voxelBlockAllocKernel (const VoxelBlockAllocator allocator) {
allocator ();
}
// ==================================================================
__global__ void resetVoxelBlock(
PtrSz<VoxelBlock> voxel_block,
PtrSz<int> available_voxel_block,
int start_id )
{
int voxel_id = threadIdx.x;
int block_id = available_voxel_block[start_id + blockIdx.x];
voxel_block[block_id].voxel[voxel_id].sdf = 0.0f;
voxel_block[block_id].voxel[voxel_id].colorRGB[0] = 0;
voxel_block[block_id].voxel[voxel_id].colorRGB[1] = 0;
voxel_block[block_id].voxel[voxel_id].colorRGB[2] = 0;
voxel_block[block_id].voxel[voxel_id].weight = 0;
}
// ==================================================================
void allocVoxelBlock(
const PtrStepSz<float>& depth,
const dfusion::Intr& intr,
const dfusion::Mat33& Rc2w,
const float3& tc2w,
float block_size,
float trunc_dist,
DeviceArray<HashEntry>& hash_entry,
int bucket_size,
DeviceArray<unsigned int>& hash_bucket_atomic_lock,
DeviceArray<VoxelBlock>& voxel_block,
DeviceArray<int>& available_voxel_block,
DeviceArray<int>& hash_parameters,
int& voxel_block_number,
int3 chunk_dim,
float3 chunk_min_xyz,
float chunk_size,
DeviceArray<unsigned char>& chunk_on_CPU )
{
// setup block bucket atomic lock
int threadPerBlock = 256;
int blocksPerGrid = divUp(hash_bucket_atomic_lock.size(), threadPerBlock);
hipLaunchKernelGGL(( initHashBucketAtomicLock), dim3(blocksPerGrid), dim3(threadPerBlock), 0, 0, hash_bucket_atomic_lock);
cudaSafeCall(hipGetLastError(), "allocVoxelBlock::initHashBucketAtomicLock");
GPRINT(1);
// initial voxel block number
std::vector<int> param;
param.resize( hash_parameters.size() );
hash_parameters.download(param.data());
voxel_block_number = param[0];
GPRINT(2);
// alloc block
VoxelBlockAllocator allocator;
allocator.hash_table_size = hash_entry.size() / bucket_size;
allocator.bucket_size = bucket_size;
allocator.hash_entry = hash_entry;
allocator.hash_bucket_atomic_lock = hash_bucket_atomic_lock;
allocator.available_voxel_block = available_voxel_block;
allocator.hash_parameters = hash_parameters;
allocator.depth = depth;
allocator.intr = intr;
allocator.cols = depth.cols;
allocator.rows = depth.rows;
allocator.Rc2w = Rc2w;
allocator.tc2w = tc2w;
allocator.block_size = block_size;
allocator.trunc_dist = trunc_dist;
allocator.chunk_dim = chunk_dim;
allocator.chunk_min_xyz = chunk_min_xyz;
allocator.chunk_size = chunk_size;
allocator.chunk_on_CPU = chunk_on_CPU;
dim3 block (32, 8);
dim3 grid (divUp (depth.cols, block.x), divUp (depth.rows, block.y));
voxelBlockAllocKernel << <grid, block >> >(allocator);
cudaSafeCall(hipGetLastError(), "allocVoxelBlock::voxelBlockAllocKernel");
GPRINT(3);
// clear the data of new allocated voxel blocks
// in the previous function, only allocation is performed for hash table
// so the new allocated voxel blocks are from voxel_block_number to new voxel_block_number
param.resize( hash_parameters.size() );
hash_parameters.download(param.data());
GPRINT(4);
int start_id = voxel_block_number;
int end_id = param[0];
if( end_id > start_id ){
int threadPerBlock = BLOCK_DIM*BLOCK_DIM*BLOCK_DIM;
int blocksPerGrid = end_id - start_id;
hipLaunchKernelGGL(( resetVoxelBlock), dim3(blocksPerGrid), dim3(threadPerBlock), 0, 0,
voxel_block, available_voxel_block, start_id);
cudaSafeCall(hipGetLastError(), "allocVoxelBlock::resetVoxelBlock");
voxel_block_number = end_id;
}
}
| e0de3c209d3b908ecf850c11b1b186f7d7b7a68a.cu | /***********************************************************/
/** \file
\brief alloc voxel blocks within truncation distance of depth pixel
\details
\author Yizhong Zhang
\date 12/7/2013
*/
/***********************************************************/
#include "voxel_hashing_device.h"
#include "voxel_hashing_internal.h"
#include "voxel_block_hash_table.cuh"
#include <helper_math.h>
#include "device_utils.h"
#define GPRINT(a) {cudaSafeCall(cudaThreadSynchronize(), #a);printf("%s\n",#a);}
// ==================================================================
__global__ void initHashBucketAtomicLock(PtrSz<unsigned int> hash_bucket_atomic_lock){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if( idx < hash_bucket_atomic_lock.size )
hash_bucket_atomic_lock[idx] = 0;
}
// ==================================================================
struct VoxelBlockAllocator : public VoxelBlockHashTable{
enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 };
PtrStepSz<float> depth; // the input depth map
dfusion::Intr intr; // intrinsic parameters of camera
int cols, rows; // resolution of depth image
dfusion::Mat33 Rc2w; // camera to world
float3 tc2w;
float trunc_dist; // truncation distance
__device__ __forceinline__ float3 get_point_in_camera_coord(int x, int y, float d) const {
return intr.uvd2xyz(x, y, d);
}
__device__ __forceinline__ void operator () () const {
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (x >= cols || y >= rows)
return;
float d = depth.ptr (y)[x];
if( d < 0.3f ) // input depth < 0.3m, illegal depth
return;
// the following code traverses each voxel block that the line ray intersect near depth
// algorithm comes from the paper: A fast voxel traversal algorithm for ray tracing
float3 ray_origin = tc2w;
float3 ray_end = Rc2w * get_point_in_camera_coord (x, y, d) + tc2w;
float3 ray_direction = normalize (ray_end - ray_origin);
ray_origin = ray_end - ray_direction * trunc_dist;
ray_end = ray_end + ray_direction * trunc_dist;
ray_direction = ray_end - ray_origin;
// the following code traverses the voxels between ray_origin and ray_end
int X, Y, Z;
coord2block(X, Y, Z, ray_origin.x, ray_origin.y, ray_origin.z, block_size);
int stepX, stepY, stepZ;
stepX = sgn(ray_direction.x);
stepY = sgn(ray_direction.y);
stepZ = sgn(ray_direction.z);
float tMaxX, tMaxY, tMaxZ;
tMaxX = ( block_size*(X + (stepX+1)/2) - ray_origin.x ) / ray_direction.x;
tMaxY = ( block_size*(Y + (stepY+1)/2) - ray_origin.y ) / ray_direction.y;
tMaxZ = ( block_size*(Z + (stepZ+1)/2) - ray_origin.z ) / ray_direction.z;
if( isnan(tMaxX) || isinf(tMaxX) ) tMaxX = 1e6f;
if( isnan(tMaxY) || isinf(tMaxY) ) tMaxY = 1e6f;
if( isnan(tMaxZ) || isinf(tMaxZ) ) tMaxZ = 1e6f;
float tDeltaX, tDeltaY, tDeltaZ;
tDeltaX = stepX * block_size / ray_direction.x;
tDeltaY = stepY * block_size / ray_direction.y;
tDeltaZ = stepZ * block_size / ray_direction.z;
if( isnan(tDeltaX) || isinf(tDeltaX) ) tDeltaX = 1e6f;
if( isnan(tDeltaY) || isinf(tDeltaY) ) tDeltaY = 1e6f;
if( isnan(tDeltaZ) || isinf(tDeltaZ) ) tDeltaZ = 1e6f;
int count = 0;
while(count<50){// infinite loop guard
count ++;
// -------- inside this code segment, X Y Z is the cell we want to process --------
// we insert an entry into the hash table, but it is not guaranteed to insert successfully
// but it doesn't matter even if insert failed this time.
int entry_id;
InsertHashEntryStaggered(entry_id, X, Y, Z);
// ------------------------------------------------------------------------------------
if( tMaxX > 1.0f && tMaxY > 1.0f && tMaxZ > 1.0f )
break;
if( tMaxX < tMaxY && tMaxX < tMaxZ ){
X += stepX;
tMaxX += tDeltaX;
}
else if(tMaxY < tMaxZ){
Y += stepY;
tMaxY += tDeltaY;
}
else{
Z += stepZ;
tMaxZ += tDeltaZ;
}
}
}
};
__global__ void voxelBlockAllocKernel (const VoxelBlockAllocator allocator) {
allocator ();
}
// ==================================================================
__global__ void resetVoxelBlock(
PtrSz<VoxelBlock> voxel_block,
PtrSz<int> available_voxel_block,
int start_id )
{
int voxel_id = threadIdx.x;
int block_id = available_voxel_block[start_id + blockIdx.x];
voxel_block[block_id].voxel[voxel_id].sdf = 0.0f;
voxel_block[block_id].voxel[voxel_id].colorRGB[0] = 0;
voxel_block[block_id].voxel[voxel_id].colorRGB[1] = 0;
voxel_block[block_id].voxel[voxel_id].colorRGB[2] = 0;
voxel_block[block_id].voxel[voxel_id].weight = 0;
}
// ==================================================================
void allocVoxelBlock(
const PtrStepSz<float>& depth,
const dfusion::Intr& intr,
const dfusion::Mat33& Rc2w,
const float3& tc2w,
float block_size,
float trunc_dist,
DeviceArray<HashEntry>& hash_entry,
int bucket_size,
DeviceArray<unsigned int>& hash_bucket_atomic_lock,
DeviceArray<VoxelBlock>& voxel_block,
DeviceArray<int>& available_voxel_block,
DeviceArray<int>& hash_parameters,
int& voxel_block_number,
int3 chunk_dim,
float3 chunk_min_xyz,
float chunk_size,
DeviceArray<unsigned char>& chunk_on_CPU )
{
// setup block bucket atomic lock
int threadPerBlock = 256;
int blocksPerGrid = divUp(hash_bucket_atomic_lock.size(), threadPerBlock);
initHashBucketAtomicLock<<<blocksPerGrid, threadPerBlock>>>(hash_bucket_atomic_lock);
cudaSafeCall(cudaGetLastError(), "allocVoxelBlock::initHashBucketAtomicLock");
GPRINT(1);
// initial voxel block number
std::vector<int> param;
param.resize( hash_parameters.size() );
hash_parameters.download(param.data());
voxel_block_number = param[0];
GPRINT(2);
// alloc block
VoxelBlockAllocator allocator;
allocator.hash_table_size = hash_entry.size() / bucket_size;
allocator.bucket_size = bucket_size;
allocator.hash_entry = hash_entry;
allocator.hash_bucket_atomic_lock = hash_bucket_atomic_lock;
allocator.available_voxel_block = available_voxel_block;
allocator.hash_parameters = hash_parameters;
allocator.depth = depth;
allocator.intr = intr;
allocator.cols = depth.cols;
allocator.rows = depth.rows;
allocator.Rc2w = Rc2w;
allocator.tc2w = tc2w;
allocator.block_size = block_size;
allocator.trunc_dist = trunc_dist;
allocator.chunk_dim = chunk_dim;
allocator.chunk_min_xyz = chunk_min_xyz;
allocator.chunk_size = chunk_size;
allocator.chunk_on_CPU = chunk_on_CPU;
dim3 block (32, 8);
dim3 grid (divUp (depth.cols, block.x), divUp (depth.rows, block.y));
voxelBlockAllocKernel << <grid, block >> >(allocator);
cudaSafeCall(cudaGetLastError(), "allocVoxelBlock::voxelBlockAllocKernel");
GPRINT(3);
// clear the data of new allocated voxel blocks
// in the previous function, only allocation is performed for hash table
// so the new allocated voxel blocks are from voxel_block_number to new voxel_block_number
param.resize( hash_parameters.size() );
hash_parameters.download(param.data());
GPRINT(4);
int start_id = voxel_block_number;
int end_id = param[0];
if( end_id > start_id ){
int threadPerBlock = BLOCK_DIM*BLOCK_DIM*BLOCK_DIM;
int blocksPerGrid = end_id - start_id;
resetVoxelBlock<<<blocksPerGrid, threadPerBlock>>>(
voxel_block, available_voxel_block, start_id);
cudaSafeCall(cudaGetLastError(), "allocVoxelBlock::resetVoxelBlock");
voxel_block_number = end_id;
}
}
|
9a597f3e88f61120ba20269d0b647bdfbc037d28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#define EPSILON (1e-6)
namespace caffe {
template <typename Dtype>
__global__ void kernel_norm(int m, int k, const Dtype* D, Dtype* diagDtD) {
CUDA_KERNEL_LOOP(index, k) {
Dtype res = (Dtype)0.;
for (int j =0; j < m; ++j)
res += D[index+j*k] * D[index+j*k];
diagDtD[index] = res;
}
}
template <typename Dtype>
__global__ void kernel_vector_to_column(int m, int k, int j, const Dtype* v,
Dtype* A) {
CUDA_KERNEL_LOOP(index, m) {
A[j+index*k] = v[index];
}
}
template <typename Dtype>
__global__ void kernel_column_to_vector(int m, int k, int j, const Dtype* A,
Dtype* v) {
CUDA_KERNEL_LOOP(index, m) {
v[index] = A[j+index*k];
}
}
// Versions of caffe_gpu_scal that take scale factor as reference, so we can
// pass a pointer to the GPU.
// Remember, before calling this function, we need to call
// hipblasSetPointerMode(HIPBLAS_POINTER_MODE_DEVICE);
// and, afterwards, we need to call
// hipblasSetPointerMode(HIPBLAS_POINTER_MODE_HOST);
template <typename Dtype>
void caffe_gpu_scal(const int N, const Dtype* alpha, Dtype *X);
template <>
void caffe_gpu_scal<float>(const int N, const float* alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double* alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, alpha, X, 1));
}
// Normalize if norm > 1
template <typename Dtype>
__global__ void kernel_conditional_normalize(const int N, const Dtype* norm, Dtype* v) {
CUDA_KERNEL_LOOP(index, N) {
if (*norm > (Dtype)1.)
v[index] /= sqrt(*norm);
}
}
// Swap 2 columns in a matrix
template <typename Dtype>
__global__ void kernel_swap_columns(int m, int k, int j0, int j1, Dtype* A) {
CUDA_KERNEL_LOOP(index, m) {
Dtype tmp = A[j0+index*k];
A[j0+index*k] = A[j1+index*k];
A[j1+index*k] = tmp;
}
}
// Swap 2 vectors
template <typename Dtype>
__global__ void kernel_swap_vectors(const int N, Dtype* v0, Dtype* v1) {
CUDA_KERNEL_LOOP(index, N) {
Dtype tmp = v0[index];
v0[index] = v1[index];
v1[index] = tmp;
}
}
template <typename Dtype>
void DictionaryLayer<Dtype>::normalize_dictionary_gpu(int m, int k, Dtype* D) {
// Normalize in a temporary matrix Z (D transposed)
Dtype* Z = Z_buffer_.mutable_gpu_data();
transpose_gpu(m, k, D, Z);
// Normalize columns whose norm is greater than 1
Dtype* norm = tmp_buffer_.mutable_gpu_data();
// Tell cuBLAS that vector "norm" is in device memory
hipblasSetPointerMode(Caffe::cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE);
for (int i = 0; i < k; ++i) {
Dtype* vi = Z + i*m;
caffe_gpu_dot(m, vi, vi, norm);
hipLaunchKernelGGL(( kernel_conditional_normalize<Dtype>), dim3(CAFFE_GET_BLOCKS(m)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, m, norm, vi);
}
// Switch pointer mode back to host
hipblasSetPointerMode(Caffe::cublas_handle(), HIPBLAS_POINTER_MODE_HOST);
transpose_gpu(k, m, Z, D);
}
// Same as forward_preprocess_cpu, but assuming that matrix Vt has been computed
// in previous iteration.
// Refreshes the normalized dictionary D and its rank-r decomposition
// (U, singular values W, Vt) via power iteration warm-started from the
// previous Vt, then rebuilds the derived matrices (pseudoinverse Ddagger,
// Vt_sn2, Vt_s2, Dlow_rank_). Runs only when the dictionary is flagged
// dirty (*Dflag) or not yet normalized.
template <typename Dtype>
void DictionaryLayer<Dtype>::fast_preprocess_gpu() {
  // Host-side dirty flag; set to 1 by the backward pass after a weight update.
  Dtype* Dflag = this->blobs_[bias_idx_ + 1]->mutable_cpu_data();
  int m = kernel_dim_;
  int k = num_output_;
  int r = rank_;
  // Normalize dictionary (make sure that the norm for each column is <= 1)
  // Orthonormalize dictionary (make sure that D^T*D=diag(D^T*D))
  if (!is_dict_normalized_ || (*Dflag)) {
    Dtype* D = this->blobs_[0]->mutable_gpu_data();
    if (orthogonalize_)
      NOT_IMPLEMENTED; //orthogonalize_dictionary_gpu(m, k, D, &dict_order_[0]);
    else
      normalize_dictionary_gpu(m, k, D);
    // Precompute SVD and pseudoinverse of D
    // We assume that matrix D has not changed much since previous iteration,
    // so we use previous vectors in Vt and refine
    // Note: we use Ddagger as temporary storage
    Dtype* work = Ddagger_buffer_.mutable_gpu_data(); // mxk
    Dtype* tmp = tmp_buffer_.mutable_gpu_data();
    caffe_gpu_memcpy(m*k*sizeof(Dtype), D, work);
    // Low-rank approximation on D
    Dtype* W = SV_buffer_.mutable_gpu_data(); // rx1
    Dtype* U = U_buffer_.mutable_gpu_data(); // mxr
    Dtype* Vt = this->blobs_[bias_idx_ + 2]->mutable_gpu_data(); // r*k
    Dtype* u = tmp;
    Dtype* prev_u = tmp + m;
    vector<Dtype> hostW(r);
    // Extract one singular triplet (u, W[ri], v) at a time by power
    // iteration on the deflated matrix "work".
    for (int ri = 0; ri < r; ++ri) {
      // Copy column ri of U into u
      hipLaunchKernelGGL(( kernel_column_to_vector<Dtype>), dim3(CAFFE_GET_BLOCKS(m)),
          dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, m, r, ri, U, u);
      Dtype* v = Vt + ri*k;
      const int max_iter = 10;
      for (int iter = 0; iter < max_iter; ++iter) {
        // u = work * v, then normalize u (norm computed on device).
        caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, 1, k, (Dtype)1., work, v,
            (Dtype)0., u);
        // Tell cuBLAS that vector W is in device memory
        hipblasSetPointerMode(Caffe::cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE);
        caffe_gpu_dot<Dtype>(m, u, u, &W[ri]);
        // Do W[ri] = 1/sqrt(W[ri]) on device memory
        caffe_gpu_powx(1, &W[ri], -(Dtype)0.5, &W[ri]);
        caffe_gpu_scal(m, &W[ri], u);
        // Copy u back into corresponding column in U
        hipLaunchKernelGGL(( kernel_vector_to_column<Dtype>), dim3(CAFFE_GET_BLOCKS(m)),
            dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, m, r, ri, u, U);
        // The constants provided to gemm are in host memory
        hipblasSetPointerMode(Caffe::cublas_handle(), HIPBLAS_POINTER_MODE_HOST);
        // v = work^T * u, then normalize v the same way.
        caffe_gpu_gemm(CblasTrans, CblasNoTrans, k, 1, m, (Dtype)1., work, u,
            (Dtype)0., v);
        // Tell cuBLAS that vector W is in device memory
        hipblasSetPointerMode(Caffe::cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE);
        caffe_gpu_dot<Dtype>(k, v, v, &W[ri]);
        // Do W[ri] = 1/sqrt(W[ri]) on device memory
        caffe_gpu_powx(1, &W[ri], -(Dtype)0.5, &W[ri]);
        caffe_gpu_scal(k, &W[ri], v);
        // Set W[ri] to s (invert 1/s back into the singular value estimate)
        caffe_gpu_powx(1, &W[ri], -(Dtype)1., &W[ri]);
        // Switch pointer mode back to host
        hipblasSetPointerMode(Caffe::cublas_handle(), HIPBLAS_POINTER_MODE_HOST);
        // Check for convergence: mean squared change of u between iterations.
        caffe_gpu_sub(m, u, prev_u, prev_u);
        Dtype delta = (Dtype)0.;
        caffe_gpu_dot<Dtype>(m, prev_u, prev_u, &delta);
        delta /= m;
        if (delta < 2 * EPSILON || iter == max_iter-1) {
          //LOG(INFO) << "Converged after " << iter << " iterations, " <<
          //    " delta = " << delta;
          break;
        }
        caffe_gpu_memcpy(m*sizeof(Dtype), u, prev_u);
      }
      // Bring the converged singular value to the host for bookkeeping.
      caffe_gpu_memcpy(sizeof(Dtype), &W[ri], &hostW[ri]);
      // Check that singular vectors are in non-increasing order
      int ri1 = ri;
      for (int i = 0; i < ri; ++i) {
        if (hostW[i] < hostW[ri]) {
          ri1 = i;
          break;
        }
      }
      if (ri1 != ri) {
        // Out of order: move the new triplet to slot ri1 and redo the
        // deflation from scratch up to that slot.
        //LOG(INFO) << "Swapping vectors W[" << ri << "] = " << W[ri] <<
        //    " and W[" << ri1 << "] = " << W[ri1];
        std::swap(hostW[ri], hostW[ri1]);
        // Swap u[ri] and u[ri1]
        hipLaunchKernelGGL(( kernel_swap_columns<Dtype>), dim3(CAFFE_GET_BLOCKS(m)),
            dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, m, r, ri, ri1, U);
        hipLaunchKernelGGL(( kernel_swap_vectors<Dtype>), dim3(CAFFE_GET_BLOCKS(k)),
            dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, k, Vt + ri*k, Vt + ri1*k);
        // Inflate: restore the undeflated dictionary.
        caffe_gpu_memcpy(m*k*sizeof(Dtype), D, work);
        // Re-deflate using the triplets accepted so far.
        for (int i = 0; i < ri1; ++i) {
          // Copy column i of U into u
          hipLaunchKernelGGL(( kernel_column_to_vector<Dtype>), dim3(CAFFE_GET_BLOCKS(m)),
              dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, m, r, i, U, u);
          Dtype* v = Vt + i*k;
          caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, k, 1, -(Dtype)hostW[i], u, v,
              (Dtype)1., work);
        }
        // Recompute new singular vector: rewind the loop index so the next
        // iteration runs with ri == ri1.
        ri = ri1-1;
      }
      else {
        // Deflate: work -= W[ri] * u * v^T so the next iteration finds the
        // next-largest singular triplet.
        caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, k, 1, -(Dtype)hostW[ri],
            u, v, (Dtype)1., work);
      }
    }
    // Reconstruct D from rank r approximation: D = U * diag(W) * Vt.
    caffe_gpu_memcpy(r*k*sizeof(Dtype), Vt, tmp);
    for (int i = 0; i < r; ++i) {
      //hostW[i] = ::max(hostW[i], (Dtype)EPSILON);
      caffe_gpu_scal(k, hostW[i], tmp + i*k);
    }
    Dtype* Dlr = Dlow_rank_.mutable_gpu_data();
    caffe_gpu_memcpy(m*k*sizeof(Dtype), D, work);
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, k, r, (Dtype)1., U, tmp,
        (Dtype)0., D);
    caffe_gpu_sub(m*k, D, work, Dlr); // Now Dlr contains the gradient
    // Precompute pseudoinverse of D:
    //   Ddagger = D * V * diag(1/s^2) * Vt; Vt_s2 keeps the s^2-scaled rows.
    Dtype* Ddagger = Ddagger_buffer_.mutable_gpu_data();
    Dtype* Vt_sn2 = Vt_sn2_buffer_.mutable_gpu_data();
    Dtype* Vt_s2 = this->blobs_[bias_idx_ + 3]->mutable_gpu_data();
    caffe_gpu_memcpy(r*k*sizeof(Dtype), Vt, Vt_sn2);
    caffe_gpu_memcpy(r*k*sizeof(Dtype), Vt, Vt_s2);
    for (int i = 0; i < r; ++i) {
      if (hostW[i] > EPSILON) {
        caffe_gpu_scal(k, (Dtype)1./(hostW[i]*hostW[i]), Vt_sn2 + i*k);
        caffe_gpu_scal(k, hostW[i]*hostW[i], Vt_s2 + i*k);
      }
      else {
        // Numerically-zero singular values are dropped entirely.
        caffe_gpu_scal(k, (Dtype)0., Vt_sn2 + i*k);
        caffe_gpu_scal(k, (Dtype)0., Vt_s2 + i*k);
      }
    }
    caffe_gpu_gemm(CblasTrans, CblasNoTrans, k, k, r, (Dtype)1., Vt, Vt_sn2,
        (Dtype)0., tmp);
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, k, k, (Dtype)1., D, tmp,
        (Dtype)0., Ddagger);
    is_dict_normalized_ = true;
    *Dflag = (Dtype)0.;
  }
}
// Forward pass: refresh the normalized dictionary and its decomposition
// if dirty, then sparse-code every input sample. Outputs come in pairs:
// top[2*i] receives the sparse codes for bottom[i], and top[2*i+1]
// receives the mean sparse-coding objective value.
template <typename Dtype>
void DictionaryLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // No-op when the dictionary has not changed since the last call.
  fast_preprocess_gpu();
  // Perform sparse coding (and optionally dictionary learning) on each input vector
  const Dtype* D = this->blobs_[0]->gpu_data();
  const Dtype* Vt = this->blobs_[bias_idx_ + 2]->gpu_data();
  const Dtype* Vt_s2 = this->blobs_[bias_idx_ + 3]->gpu_data();
  for (int i = 0; i < top.size()/2; ++i) {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    Dtype* top_data = top[2*i]->mutable_gpu_data();
    double loss = 0.;
    for (int n = 0; n < this->num_; ++n) {
      // Perform forward sparse coding; accumulate the per-sample objective.
      loss += this->forward_gpu_sparse_coding(bottom_data + bottom[i]->offset(n),
          D, Vt, Vt_s2, top_data + top[2*i]->offset(n));
      if (this->bias_term_) {
        const Dtype* bias = this->blobs_[bias_idx_]->gpu_data();
        // BUGFIX: the offset must come from top[2*i] (the blob top_data
        // points into), not top[i] as the original code had it.
        this->forward_gpu_bias(top_data + top[2*i]->offset(n), bias);
      }
    }
    // Put the mean objective value in the second output of the pair.
    top_data = top[2*i+1]->mutable_cpu_data();
    *top_data = Dtype(loss/num_);
  }
}
// B = A^T, where A is m x k row-major and B is k x m row-major; n == m*k.
template <typename Dtype>
__global__ void transpose_kernel(int n, int m, int k, const Dtype* A, Dtype* B) {
  CUDA_KERNEL_LOOP(out, n) {
    const int b_row = out / m;  // row in B == column in A
    const int b_col = out % m;  // column in B == row in A
    B[out] = A[b_col*k + b_row];
  }
}
// Compute B = A^T for the m x k row-major matrix A.
template<typename Dtype>
void DictionaryLayer<Dtype>::transpose_gpu(int m, int k, const Dtype* A, Dtype* B) {
  const int count = m*k;
  hipLaunchKernelGGL(( transpose_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, m, k, A, B);
}
// IRLS reweighting diagonal: diag[i] = 2*lambda / (|vec_alpha[i]| + EPSILON).
template <typename Dtype>
__global__ void kernel_compute_diag_weights(int n, Dtype lambda,
    const Dtype* vec_alpha, Dtype* diag) {
  CUDA_KERNEL_LOOP(i, n) {
    diag[i] = 2*lambda/(fabs(vec_alpha[i])+EPSILON);
  }
}
// Zero out every coefficient whose magnitude falls below lambda.
template <typename Dtype>
__global__ void kernel_hard_threshold(int n, Dtype lambda, Dtype* vec_alpha) {
  CUDA_KERNEL_LOOP(i, n) {
    if (fabs(vec_alpha[i]) < lambda)
      vec_alpha[i] = (Dtype)0.;
  }
}
// Sparse-code one input sample on the GPU. For each of the
// conv_out_spatial_dim_ input columns (after optional im2col), solve an
// L1-regularized coding problem via IRLS (reweighted diagonal +
// conjugate gradient), apply a hard threshold, and accumulate the
// objective. Sparse codes are written to `output` in channel-first
// order; returns the mean objective over the spatial locations.
template <typename Dtype>
double DictionaryLayer<Dtype>::forward_gpu_sparse_coding(const Dtype* input,
    const Dtype* D, const Dtype* Vt, const Dtype *Vt_s2, Dtype* output,
    bool skip_im2col) {
  const Dtype* col_buff = input;
  if (!is_1x1_) {
    if (!skip_im2col) {
      // Lay the input patches out as columns for per-location coding.
      conv_im2col_gpu(input, col_buffer_.mutable_gpu_data());
    }
    col_buff = col_buffer_.gpu_data();
  }
  // Perform sparse coding for each input vector
  int m = kernel_dim_;
  int k = num_output_;
  Dtype* vec_d = vec_d_buffer_.mutable_gpu_data(); // D^T * x
  Dtype* vec_r = vec_r_buffer_.mutable_gpu_data(); // Residual vector
  Dtype* vec_p = vec_p_buffer_.mutable_gpu_data(); // Descent direction
  Dtype* vec_w = vec_w_buffer_.mutable_gpu_data(); // Vector w
  Dtype* sparse_codes = sparse_codes_buffer_.mutable_gpu_data();
  Dtype* diag = tmp_buffer_.mutable_gpu_data() + ::max(k,m);
  Dtype* tmp = tmp_buffer_.mutable_gpu_data() + 2*::max(k,m);
  // Initialize loss
  double loss = 0.;
  for (int i = 0; i < conv_out_spatial_dim_; ++i)
  {
    const Dtype* x = col_buff + i*m; // Input sample
    Dtype* vec_alpha = sparse_codes + i*k; // Sparse code vector
    // Initialize sparse code with all ones.
    caffe_gpu_set<Dtype>(k, (Dtype)1., vec_alpha);
    // Perform num_iter_irls iterations of iteratively reweighted
    // least squares using the previous result as starting value
    for (int iter_irls = 0; iter_irls < num_iter_irls_; ++iter_irls)
    {
      // Build matrix w = diag(2*lambda/fabs(alpha[])
      hipLaunchKernelGGL(( kernel_compute_diag_weights<Dtype>), dim3(CAFFE_GET_BLOCKS(k)),
          dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, k, (Dtype)lambda_, vec_alpha, diag);
      // Build vector d = D^T * x
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, k, 1, m,
          (Dtype)1., D, x,
          (Dtype)0., vec_d);
      // Perform conjugate gradient descent to approximately solve
      // C * alpha = d for alpha
      // Note: We do not compute matrix C explicitly, since
      // C * alpha = diag(2*lambda/fabs(alpha[]) .* alpha + V * Vt_s2 * alpha
      // is more efficient to compute
      conjugate_gradient_gpu(k, rank_, diag, Vt, Vt_s2, vec_d, vec_alpha,
          num_iter_cg_, vec_p, vec_r, vec_w, tmp);
    }
    // Apply hard threshold to sparse codes
    hipLaunchKernelGGL(( kernel_hard_threshold), dim3(CAFFE_GET_BLOCKS(k)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        k, (Dtype)lambda_, vec_alpha);
    loss += objective_function_gpu(m, k, D, x, vec_alpha);
  }
  // Sparse codes are in pixel-first order, we need to transpose them so they
  // are in channel-first order
  transpose_gpu(conv_out_spatial_dim_, num_output_, sparse_codes, output);
  // Fixed: removed stray double semicolon from the original return statement.
  return loss/conv_out_spatial_dim_;
}
// Add the bias vector (length num_output_) to the sparse-code output in place.
template <typename Dtype>
void DictionaryLayer<Dtype>::forward_gpu_bias(Dtype* output,
    const Dtype* bias) {
  caffe_gpu_add(num_output_, bias, output, output);
}
// Evaluate the sparse-coding objective for one sample:
//   Cost(alpha) = 0.5*||x - D*alpha||_2^2 + lambda_*||alpha||_1
// D is m x k, x is m x 1, alpha is k x 1; tmp_buffer_ holds the residual.
template <typename Dtype>
double DictionaryLayer<Dtype>::objective_function_gpu(int m, int k,
    const Dtype* D, const Dtype* x, const Dtype* alpha) {
  Dtype* residual = tmp_buffer_.mutable_gpu_data();
  // residual = x - D*alpha
  caffe_gpu_memcpy(m*sizeof(Dtype), x, residual);
  caffe_gpu_gemv<Dtype>(CblasNoTrans, m, k,
      (Dtype)(-1.), D, alpha,
      (Dtype)1., residual);
  // Squared reconstruction error ||residual||^2.
  Dtype cost = (Dtype)0.;
  caffe_gpu_dot<Dtype>(m, residual, residual, &cost);
  // L1 penalty: sum of absolute values of the code.
  Dtype asum = (Dtype)0.;
  caffe_gpu_asum<Dtype>(k, alpha, &asum);
  cost = 0.5 * cost + lambda_ * asum;
  return cost;
}
// Compute Cx = w .* x + V * (Vt2 * x) without forming C explicitly.
//   w: length-k diagonal, Vt and Vt2: r x k, x: k x 1,
//   tmp: scratch of at least r elements, Cx: k x 1 result.
template <typename Dtype>
void DictionaryLayer<Dtype>::compute_Cx_gpu(int k, int r, const Dtype* w,
    const Dtype* Vt, const Dtype* Vt2, const Dtype* x,
    Dtype* tmp, Dtype* Cx) {
  // tmp = Vt2 * x  (r x 1)
  caffe_gpu_gemv<Dtype>(CblasNoTrans, r, k, (Dtype)1., Vt2, x, (Dtype)0.,
      tmp);
  // Diagonal part: Cx = w .* x
  caffe_gpu_mul(k, w, x, Cx);
  // Accumulate the low-rank part: Cx += Vt^T * tmp
  caffe_gpu_gemv<Dtype>(CblasTrans, r, k, (Dtype)1., Vt, tmp, (Dtype)1., Cx);
}
// One fused conjugate-gradient step. On entry *ret_norm_r holds ||r||^2;
// the kernel computes
//   alpha = ||r||^2 / (p.w);  x += alpha*p;  r -= alpha*w;
//   beta  = ||r_new||^2 / ||r||^2;  p = r + beta*p;
// and writes the new ||r||^2 back to *ret_norm_r.
// MUST be launched as a SINGLE block of exactly 1024 threads: the shared
// memory reduction ladders below hard-code blockDim.x == 1024, and the
// single block makes __syncthreads() a barrier over all participating data.
template <typename Dtype>
__global__ void kernel_step_cg(int k, Dtype* w, Dtype* p, Dtype* r,
    Dtype* x, Dtype* ret_norm_r) {
  __shared__ Dtype prev_norm_r;
  __shared__ Dtype dot_p_w[1024];
  __shared__ Dtype norm_r[1024];
  if (threadIdx.x == 0)
    prev_norm_r = *ret_norm_r;
  // Per-thread partial sums of p.w
  dot_p_w[threadIdx.x] = (Dtype)0.;
  CUDA_KERNEL_LOOP(j, k) {
    dot_p_w[threadIdx.x] += w[j]*p[j];
  }
  __syncthreads();
  // Reduce sum and leave result in dot_p_w[0]
  if (threadIdx.x < 512) dot_p_w[threadIdx.x] += dot_p_w[threadIdx.x + 512];
  __syncthreads();
  if (threadIdx.x < 256) dot_p_w[threadIdx.x] += dot_p_w[threadIdx.x + 256];
  __syncthreads();
  if (threadIdx.x < 128) dot_p_w[threadIdx.x] += dot_p_w[threadIdx.x + 128];
  __syncthreads();
  if (threadIdx.x < 64) dot_p_w[threadIdx.x] += dot_p_w[threadIdx.x + 64];
  __syncthreads();
  if (threadIdx.x < 32) dot_p_w[threadIdx.x] += dot_p_w[threadIdx.x + 32];
  __syncthreads();
  if (threadIdx.x < 16) dot_p_w[threadIdx.x] += dot_p_w[threadIdx.x + 16];
  __syncthreads();
  if (threadIdx.x < 8) dot_p_w[threadIdx.x] += dot_p_w[threadIdx.x + 8];
  __syncthreads();
  if (threadIdx.x < 4) dot_p_w[threadIdx.x] += dot_p_w[threadIdx.x + 4];
  __syncthreads();
  if (threadIdx.x < 2) dot_p_w[threadIdx.x] += dot_p_w[threadIdx.x + 2];
  __syncthreads();
  if (threadIdx.x < 1) dot_p_w[threadIdx.x] += dot_p_w[threadIdx.x + 1];
  __syncthreads();
  // Compute the step size (prev_norm_r was published before the barriers above)
  Dtype alpha = prev_norm_r / dot_p_w[0];
  // Update x and r
  CUDA_KERNEL_LOOP(j, k) {
    x[j] += alpha*p[j];
    r[j] -= alpha*w[j];
  }
  __syncthreads();
  // Per-thread partial sums of ||r||^2
  norm_r[threadIdx.x] = (Dtype)0.;
  CUDA_KERNEL_LOOP(j, k) {
    norm_r[threadIdx.x] += r[j]*r[j];
  }
  // BUGFIX: a barrier is required here — the reduction below reads other
  // threads' partial sums, and the original code raced with the writes above.
  __syncthreads();
  // Reduce sum and leave result in norm_r[0]
  if (threadIdx.x < 512) norm_r[threadIdx.x] += norm_r[threadIdx.x + 512];
  __syncthreads();
  if (threadIdx.x < 256) norm_r[threadIdx.x] += norm_r[threadIdx.x + 256];
  __syncthreads();
  if (threadIdx.x < 128) norm_r[threadIdx.x] += norm_r[threadIdx.x + 128];
  __syncthreads();
  if (threadIdx.x < 64) norm_r[threadIdx.x] += norm_r[threadIdx.x + 64];
  __syncthreads();
  if (threadIdx.x < 32) norm_r[threadIdx.x] += norm_r[threadIdx.x + 32];
  __syncthreads();
  if (threadIdx.x < 16) norm_r[threadIdx.x] += norm_r[threadIdx.x + 16];
  __syncthreads();
  if (threadIdx.x < 8) norm_r[threadIdx.x] += norm_r[threadIdx.x + 8];
  __syncthreads();
  if (threadIdx.x < 4) norm_r[threadIdx.x] += norm_r[threadIdx.x + 4];
  __syncthreads();
  if (threadIdx.x < 2) norm_r[threadIdx.x] += norm_r[threadIdx.x + 2];
  __syncthreads();
  if (threadIdx.x < 1) norm_r[threadIdx.x] += norm_r[threadIdx.x + 1];
  __syncthreads();
  // Compute beta and update the descent direction
  Dtype beta = norm_r[0] / prev_norm_r;
  CUDA_KERNEL_LOOP(j, k) {
    p[j] = r[j] + beta*p[j];
  }
  // Return new norm of r
  if (threadIdx.x == 0)
    *ret_norm_r = norm_r[0];
}
// Approximately solve C * x = d by conjugate gradient, where C is applied
// implicitly as diag(weights) + V * Vt2 (see compute_Cx_gpu). x serves as
// both the initial guess and the result. temp_p/temp_r/temp_w are length-k
// scratch vectors; the first slot of tmp is claimed for the device-resident
// residual norm, the rest is scratch for compute_Cx_gpu.
template <typename Dtype>
void DictionaryLayer<Dtype>::conjugate_gradient_gpu(int k, int r,
    const Dtype* weights, const Dtype* Vt, const Dtype* Vt2, const Dtype* d,
    Dtype* x, int num_iter, Dtype* temp_p, Dtype* temp_r, Dtype* temp_w,
    Dtype* tmp) {
  // Temporary scalar variable on the GPU (consumes the first tmp slot).
  // NOTE: dereferencing this thrust pointer on the host performs a
  // device-to-host copy each time.
  thrust::device_ptr<Dtype> norm_r(tmp++);
  // Initialize the residual: r = d - C*x
  compute_Cx_gpu(k, r, weights, Vt, Vt2, x, tmp, temp_r);
  caffe_gpu_sub(k, d, temp_r, temp_r);
  // Compute norm of the residual, writing the result to device memory.
  hipblasSetPointerMode(Caffe::cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE);
  caffe_gpu_dot<Dtype>(k, temp_r, temp_r, norm_r.get());
  hipblasSetPointerMode(Caffe::cublas_handle(), HIPBLAS_POINTER_MODE_HOST);
  if (fabs(*norm_r) < EPSILON) {
    return; // Accept initial solution
  }
  // Initialize the descent direction: p = r
  caffe_gpu_memcpy(k*sizeof(Dtype), temp_r, temp_p);
  // Perform num_iter_cg iterations of conjugate gradient descent
  for (int iter_cg = 0; iter_cg < num_iter; ++iter_cg)
  {
    // w = C * p
    compute_Cx_gpu(k, r, weights, Vt, Vt2, temp_p, tmp, temp_w);
    // Invoke kernel that does
    //   dot_p_w = sum_j(p[j]*w[j])
    //   alpha = prev_norm_r / dot_p_w;
    //   x = x + alpha*p
    //   r = r - alpha*w
    //   norm_r = sum_j(r[j]*r[j])
    //   beta = norm_r / prev_norm_r
    //   p = r + beta*p
    // and returns norm_r.
    // Our kernel only has 1 block to allow thread synchronization.
    hipLaunchKernelGGL(( kernel_step_cg<Dtype>), dim3(1), dim3(1024), 0, 0, k, temp_w, temp_p,
        temp_r, x, norm_r.get());
    // Host-side convergence check (the dereference synchronizes).
    if (fabs(*norm_r) < EPSILON) {
      return;
    }
  }
}
// Gradient through the hard threshold: pass alpha_diff where the code is
// non-zero, and zero elsewhere (thresholded coefficients get no gradient).
template <typename Dtype>
__global__ void kernel_mod_gradient(int k, const Dtype* alpha,
    const Dtype* alpha_diff, Dtype* mod_alpha_diff) {
  CUDA_KERNEL_LOOP(i, k) {
    if (alpha[i] == (Dtype)0.)
      mod_alpha_diff[i] = (Dtype)0.;
    else
      mod_alpha_diff[i] = alpha_diff[i];
  }
}
// Backward pass: accumulates gradients w.r.t. the dictionary (D_diff) and,
// when requested, the bottom data and bias. Top blobs come in pairs
// (codes, objective); only the code blob top[2*idx] carries diffs used here.
template <typename Dtype>
void DictionaryLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Backward_cpu(top, propagate_down, bottom);
  const Dtype* D = this->blobs_[0]->gpu_data();
  const Dtype* Dlr = this->Dlow_rank_.gpu_data();
  Dtype* D_diff = this->blobs_[0]->mutable_gpu_diff();
  if (this->param_propagate_down_[0]) {
    // Dictionary diffs are accumulated below, so start from zero.
    caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), D_diff);
  }
  // The decomposition computed by fast_preprocess_gpu must still be valid.
  CHECK(is_dict_normalized_);
  CHECK_EQ(conv_out_spatial_dim_, 1) << "Convolutional dictionaries not implemented, yet!";
  // Temporary storage and precomputed constants
  int m = kernel_dim_;
  int k = num_output_;
  Dtype* tmp1 = tmp_buffer_.mutable_gpu_data();
  Dtype* tmp2 = tmp_buffer_.mutable_gpu_data() + ::max(k,m);
  Dtype* tmp3 = tmp_buffer_.mutable_gpu_data() + 2*::max(k,m);
  // Scratch for dl/dx when the bottom gradient itself is not needed.
  Dtype* tmp_dl_dx = tmp_buffer_.mutable_gpu_data() + 3*::max(k,m);
  // Precomputed matrices from fast_preprocess_gpu.
  const Dtype* Vt = this->blobs_[bias_idx_ + 2]->gpu_data();
  const Dtype* Vt_sn2 = Vt_sn2_buffer_.gpu_data();
  const Dtype* Ddagger = Ddagger_buffer_.gpu_data();
  for (int idx = 0; idx < top.size()/2; ++idx) {
    const Dtype* top_diff = top[2*idx]->gpu_diff();
    const Dtype* top_data = top[2*idx]->gpu_data();
    const Dtype* bottom_data = bottom[idx]->gpu_data();
    Dtype* bottom_diff = bottom[idx]->mutable_gpu_diff();
    // Bias gradient, if necessary.
    if (this->bias_term_ && this->param_propagate_down_[1]) {
      Dtype* bias_diff = this->blobs_[bias_idx_]->mutable_gpu_diff();
      for (int n = 0; n < this->num_; ++n) {
        this->backward_gpu_bias(bias_diff, top_diff + top[idx*2]->offset(n));
      }
    }
    if (this->param_propagate_down_[0] || propagate_down[idx]) {
      for (int n = 0; n < this->num_; ++n) {
        const Dtype* alpha = top_data + top[idx*2]->offset(n);
        const Dtype* alpha_diff = top_diff + top[idx*2]->offset(n);
        // Precompute modified output gradient (zeroed where alpha == 0,
        // mirroring the forward hard threshold).
        Dtype* mod_alpha_diff = mod_alpha_diff_buffer_.mutable_gpu_data();
        hipLaunchKernelGGL(( kernel_mod_gradient<Dtype>), dim3(CAFFE_GET_BLOCKS(k)),
            dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, k, alpha, alpha_diff, mod_alpha_diff);
        // dl/dx is necessary for both gradients
        Dtype* dl_dx = propagate_down[idx] ?
            bottom_diff + bottom[idx]->offset(n) : tmp_dl_dx;
        // gradient w.r.t. bottom data, if necessary.
        if (propagate_down[idx] || this->param_propagate_down_[0]) {
          this->backward_gpu_gemm(mod_alpha_diff, Ddagger, dl_dx);
        }
        // gradient w.r.t. dictionary. Note that we will accumulate diffs.
        if (this->param_propagate_down_[0]) {
          this->dict_gpu_backprop(bottom_data + bottom[idx]->offset(n),
              dl_dx, mod_alpha_diff, Ddagger, Vt, Vt_sn2, tmp1, tmp2, tmp3,
              D_diff);
          this->dict_gpu_optimize(bottom_data + bottom[idx]->offset(n), alpha,
              D, Dlr, (Dtype)etha_rec_, (Dtype)etha_lr_, tmp1, tmp2, D_diff);
          // Mark dictionary as unnormalized so the next forward pass
          // re-runs fast_preprocess_gpu.
          is_dict_normalized_ = false;
          Dtype* Dflag = this->blobs_[bias_idx_ + 1]->mutable_cpu_data();
          *Dflag = (Dtype)1.;
        }
        // gradient of reconstruction error w.r.t bottom data, if necessary
        if (propagate_down[idx]) {
          this->backward_gpu_optimize(alpha, D, Dlr,
              bottom_data + bottom[idx]->offset(n),
              (Dtype)(etha_rec_bp_), (Dtype)(etha_lr_bp_), dl_dx);
        }
      }
    }
  }
}
// Accumulate the dictionary gradient for one sample into D_diff:
//   D_diff += -2 * dl_dx * (x^T * Dtdagger)
//           + x * (mod_alpha_diff * V * Vt_sn2)
// x is m x 1, dl_dx is m x 1, mod_alpha_diff is 1 x k, Dtdagger is treated
// as an m x k matrix (presumably the transposed pseudoinverse — TODO
// confirm against fast_preprocess_gpu). tmp1/tmp2/tmp3 are row-vector
// scratch buffers of sizes k, r, and k respectively.
template <typename Dtype>
void DictionaryLayer<Dtype>::dict_gpu_backprop(const Dtype* x, const Dtype* dl_dx,
    const Dtype* mod_alpha_diff, const Dtype* Dtdagger, const Dtype* Vt,
    const Dtype* Vt_sn2, Dtype* tmp1, Dtype* tmp2, Dtype* tmp3, Dtype* D_diff) {
  int m = kernel_dim_;
  int k = num_output_;
  int r = rank_;
  // Compute intermediate products
  // tmp1 = x^T * Dtdagger  (1 x k)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, k, m, (Dtype)1., x,
      Dtdagger, (Dtype)0., tmp1);
  // tmp2 = mod_alpha_diff * Vt^T  (1 x r)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 1, r, k, (Dtype)1.,
      mod_alpha_diff, Vt, (Dtype)0., tmp2);
  // tmp3 = tmp2 * Vt_sn2  (1 x k)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, k, r, (Dtype)1.,
      tmp2, Vt_sn2, (Dtype)0., tmp3);
  // Compute gradient of dictionary and add it to D_diff (rank-1 updates).
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, m, k, 1, -(Dtype)2., dl_dx,
      tmp1, (Dtype)1., D_diff);
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, m, k, 1, (Dtype)1., x,
      tmp3, (Dtype)1., D_diff);
  // Result to be D_diff = D_diff - 2 * (D^dagger)^T * x^T * mod_alpha_diff^t * (D^dagger)^T
  //   + x * mod_alpha_diff * V * Vt_sn2
}
// Accumulate the reconstruction-loss gradient w.r.t. the dictionary:
//   dl/dD += etha_rec * (D*alpha - x) * alpha^T
// The etha_lr low-rank term was disabled in the original and remains so;
// Dlr, etha_lr and tmp2 are accepted but unused.
template <typename Dtype>
void DictionaryLayer<Dtype>::dict_gpu_optimize(const Dtype* x,
    const Dtype* alpha, const Dtype* D, const Dtype* Dlr, Dtype etha_rec,
    Dtype etha_lr, Dtype* tmp1, Dtype* tmp2, Dtype* D_diff) {
  int m = kernel_dim_;
  int k = num_output_;
  if (etha_rec == (Dtype)0.)
    return;
  // tmp1 = D*alpha - x  (reconstruction residual)
  caffe_copy(m, x, tmp1);
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, m, 1, k,
      (Dtype)1., D, alpha, -(Dtype)1., tmp1);
  // D_diff += etha_rec * tmp1 * alpha^T  (rank-1 update)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, m, k, 1,
      etha_rec, tmp1, alpha, (Dtype)1., D_diff);
}
// Accumulate the backpropagated reconstruction/low-rank losses into dl_dx:
//   dl/dx += etha_rec*(x - D*alpha) + etha_lr*(Dlr*alpha)
// Either term is skipped when its weight is exactly zero.
template <typename Dtype>
void DictionaryLayer<Dtype>::backward_gpu_optimize(const Dtype* alpha,
    const Dtype* D, const Dtype* Dlr, const Dtype* x, Dtype etha_rec,
    Dtype etha_lr, Dtype* dl_dx) {
  const int m = kernel_dim_;
  const int k = num_output_;
  if (etha_rec != (Dtype)0.) {
    // Reconstruction term: dl/dx += etha_rec*x - etha_rec*(D*alpha)
    caffe_gpu_axpy(m, etha_rec, x, dl_dx);
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, 1, k, -etha_rec, D, alpha,
        (Dtype)1., dl_dx);
  }
  if (etha_lr != (Dtype)0.) {
    // Low-rank term: dl/dx += etha_lr*(Dlr*alpha)
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, 1, k, etha_lr, Dlr, alpha,
        (Dtype)1., dl_dx);
  }
}
// Propagate the modified code gradient back through the dictionary:
// input = D * mod_alpha_diff, routed through col2im for non-1x1 kernels.
template <typename Dtype>
void DictionaryLayer<Dtype>::backward_gpu_gemm(const Dtype* mod_alpha_diff,
    const Dtype* D, Dtype* input) {
  Dtype* col_buff = col_buffer_.mutable_gpu_data();
  if (is_1x1_) {
    // 1x1 case: write the result straight into the input gradient.
    col_buff = input;
  }
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, kernel_dim_,
      1, num_output_,
      (Dtype)1., D, mod_alpha_diff,
      (Dtype)0., col_buff);
  if (!is_1x1_) {
    conv_col2im_gpu(col_buff, input);
  }
}
// Accumulate the output gradient into the bias gradient (length num_output_).
template <typename Dtype>
void DictionaryLayer<Dtype>::backward_gpu_bias(Dtype* bias,
    const Dtype* input) {
  caffe_gpu_add(num_output_, input, bias, bias);
}
INSTANTIATE_LAYER_GPU_FUNCS(DictionaryLayer);
} // namespace caffe
| 9a597f3e88f61120ba20269d0b647bdfbc037d28.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#define EPSILON (1e-6)
namespace caffe {
// Squared L2 norm of every column of the row-major m x k matrix D:
// diagDtD[j] = sum_i D[i][j]^2 (i.e. the diagonal of D^T*D).
template <typename Dtype>
__global__ void kernel_norm(int m, int k, const Dtype* D, Dtype* diagDtD) {
  CUDA_KERNEL_LOOP(col, k) {
    Dtype acc = (Dtype)0.;
    for (int row = 0; row < m; ++row) {
      const Dtype d = D[col + row*k];
      acc += d * d;
    }
    diagDtD[col] = acc;
  }
}
// Scatter the m-vector v into column j of the row-major m x k matrix A.
template <typename Dtype>
__global__ void kernel_vector_to_column(int m, int k, int j, const Dtype* v,
    Dtype* A) {
  CUDA_KERNEL_LOOP(row, m) {
    A[row*k + j] = v[row];
  }
}
// Gather column j of the row-major m x k matrix A into the m-vector v.
template <typename Dtype>
__global__ void kernel_column_to_vector(int m, int k, int j, const Dtype* A,
    Dtype* v) {
  CUDA_KERNEL_LOOP(row, m) {
    v[row] = A[row*k + j];
  }
}
// Versions of caffe_gpu_scal that take the scale factor by pointer, so we
// can pass a pointer to GPU memory. Before calling these functions the
// caller must switch cuBLAS to device pointer mode with
//   cublasSetPointerMode(CUBLAS_POINTER_MODE_DEVICE);
// and afterwards restore the default with
//   cublasSetPointerMode(CUBLAS_POINTER_MODE_HOST);
template <typename Dtype>
void caffe_gpu_scal(const int N, const Dtype* alpha, Dtype *X);
// float specialization: X[i] *= *alpha for i in [0, N).
template <>
void caffe_gpu_scal<float>(const int N, const float* alpha, float *X) {
  CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, alpha, X, 1));
}
// double specialization: X[i] *= *alpha for i in [0, N).
template <>
void caffe_gpu_scal<double>(const int N, const double* alpha, double *X) {
  CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, alpha, X, 1));
}
// Divide every element of v by sqrt(*norm), but only when the squared
// norm stored at *norm exceeds one; vectors with norm <= 1 are left as-is.
template <typename Dtype>
__global__ void kernel_conditional_normalize(const int N, const Dtype* norm, Dtype* v) {
  const Dtype sq_norm = *norm;  // single device scalar, uniform for all threads
  if (sq_norm > (Dtype)1.) {
    CUDA_KERNEL_LOOP(i, N) {
      v[i] /= sqrt(sq_norm);
    }
  }
}
// Exchange columns j0 and j1 of the row-major m x k matrix A.
template <typename Dtype>
__global__ void kernel_swap_columns(int m, int k, int j0, int j1, Dtype* A) {
  CUDA_KERNEL_LOOP(row, m) {
    const Dtype held = A[row*k + j0];
    A[row*k + j0] = A[row*k + j1];
    A[row*k + j1] = held;
  }
}
// Exchange the contents of two length-N device vectors element-wise.
template <typename Dtype>
__global__ void kernel_swap_vectors(const int N, Dtype* v0, Dtype* v1) {
  CUDA_KERNEL_LOOP(i, N) {
    const Dtype held = v0[i];
    v0[i] = v1[i];
    v1[i] = held;
  }
}
// Clamp the L2 norm of every column of the dictionary D (m x k, row-major)
// to at most 1. Works on Z = D^T so each dictionary column is contiguous,
// conditionally rescales each one in place, then transposes back into D.
template <typename Dtype>
void DictionaryLayer<Dtype>::normalize_dictionary_gpu(int m, int k, Dtype* D) {
  Dtype* Z = Z_buffer_.mutable_gpu_data();  // scratch holding D^T
  transpose_gpu(m, k, D, Z);
  Dtype* norm = tmp_buffer_.mutable_gpu_data();  // one squared norm at a time
  // Dot-product results are written straight to device memory.
  cublasSetPointerMode(Caffe::cublas_handle(), CUBLAS_POINTER_MODE_DEVICE);
  for (int col = 0; col < k; ++col) {
    Dtype* column = Z + col*m;
    caffe_gpu_dot(m, column, column, norm);
    kernel_conditional_normalize<Dtype><<<CAFFE_GET_BLOCKS(m),
        CAFFE_CUDA_NUM_THREADS>>>(m, norm, column);
  }
  // Restore the default host pointer mode for subsequent BLAS calls.
  cublasSetPointerMode(Caffe::cublas_handle(), CUBLAS_POINTER_MODE_HOST);
  transpose_gpu(k, m, Z, D);
}
// Same as forward_preprocess_cpu, but assuming that matrix Vt has been computed
// in previous iteration.
// Refreshes the normalized dictionary D and its rank-r decomposition
// (U, singular values W, Vt) via power iteration warm-started from the
// previous Vt, then rebuilds the derived matrices (pseudoinverse Ddagger,
// Vt_sn2, Vt_s2, Dlow_rank_). Runs only when the dictionary is flagged
// dirty (*Dflag) or not yet normalized.
template <typename Dtype>
void DictionaryLayer<Dtype>::fast_preprocess_gpu() {
  // Host-side dirty flag; set to 1 by the backward pass after a weight update.
  Dtype* Dflag = this->blobs_[bias_idx_ + 1]->mutable_cpu_data();
  int m = kernel_dim_;
  int k = num_output_;
  int r = rank_;
  // Normalize dictionary (make sure that the norm for each column is <= 1)
  // Orthonormalize dictionary (make sure that D^T*D=diag(D^T*D))
  if (!is_dict_normalized_ || (*Dflag)) {
    Dtype* D = this->blobs_[0]->mutable_gpu_data();
    if (orthogonalize_)
      NOT_IMPLEMENTED; //orthogonalize_dictionary_gpu(m, k, D, &dict_order_[0]);
    else
      normalize_dictionary_gpu(m, k, D);
    // Precompute SVD and pseudoinverse of D
    // We assume that matrix D has not changed much since previous iteration,
    // so we use previous vectors in Vt and refine
    // Note: we use Ddagger as temporary storage
    Dtype* work = Ddagger_buffer_.mutable_gpu_data(); // mxk
    Dtype* tmp = tmp_buffer_.mutable_gpu_data();
    caffe_gpu_memcpy(m*k*sizeof(Dtype), D, work);
    // Low-rank approximation on D
    Dtype* W = SV_buffer_.mutable_gpu_data(); // rx1
    Dtype* U = U_buffer_.mutable_gpu_data(); // mxr
    Dtype* Vt = this->blobs_[bias_idx_ + 2]->mutable_gpu_data(); // r*k
    Dtype* u = tmp;
    Dtype* prev_u = tmp + m;
    vector<Dtype> hostW(r);
    // Extract one singular triplet (u, W[ri], v) at a time by power
    // iteration on the deflated matrix "work".
    for (int ri = 0; ri < r; ++ri) {
      // Copy column ri of U into u
      kernel_column_to_vector<Dtype><<<CAFFE_GET_BLOCKS(m),
          CAFFE_CUDA_NUM_THREADS>>>(m, r, ri, U, u);
      Dtype* v = Vt + ri*k;
      const int max_iter = 10;
      for (int iter = 0; iter < max_iter; ++iter) {
        // u = work * v, then normalize u (norm computed on device).
        caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, 1, k, (Dtype)1., work, v,
            (Dtype)0., u);
        // Tell cuBLAS that vector W is in device memory
        cublasSetPointerMode(Caffe::cublas_handle(), CUBLAS_POINTER_MODE_DEVICE);
        caffe_gpu_dot<Dtype>(m, u, u, &W[ri]);
        // Do W[ri] = 1/sqrt(W[ri]) on device memory
        caffe_gpu_powx(1, &W[ri], -(Dtype)0.5, &W[ri]);
        caffe_gpu_scal(m, &W[ri], u);
        // Copy u back into corresponding column in U
        kernel_vector_to_column<Dtype><<<CAFFE_GET_BLOCKS(m),
            CAFFE_CUDA_NUM_THREADS>>>(m, r, ri, u, U);
        // The constants provided to gemm are in host memory
        cublasSetPointerMode(Caffe::cublas_handle(), CUBLAS_POINTER_MODE_HOST);
        // v = work^T * u, then normalize v the same way.
        caffe_gpu_gemm(CblasTrans, CblasNoTrans, k, 1, m, (Dtype)1., work, u,
            (Dtype)0., v);
        // Tell cuBLAS that vector W is in device memory
        cublasSetPointerMode(Caffe::cublas_handle(), CUBLAS_POINTER_MODE_DEVICE);
        caffe_gpu_dot<Dtype>(k, v, v, &W[ri]);
        // Do W[ri] = 1/sqrt(W[ri]) on device memory
        caffe_gpu_powx(1, &W[ri], -(Dtype)0.5, &W[ri]);
        caffe_gpu_scal(k, &W[ri], v);
        // Set W[ri] to s (invert 1/s back into the singular value estimate)
        caffe_gpu_powx(1, &W[ri], -(Dtype)1., &W[ri]);
        // Switch pointer mode back to host
        cublasSetPointerMode(Caffe::cublas_handle(), CUBLAS_POINTER_MODE_HOST);
        // Check for convergence: mean squared change of u between iterations.
        caffe_gpu_sub(m, u, prev_u, prev_u);
        Dtype delta = (Dtype)0.;
        caffe_gpu_dot<Dtype>(m, prev_u, prev_u, &delta);
        delta /= m;
        if (delta < 2 * EPSILON || iter == max_iter-1) {
          //LOG(INFO) << "Converged after " << iter << " iterations, " <<
          //    " delta = " << delta;
          break;
        }
        caffe_gpu_memcpy(m*sizeof(Dtype), u, prev_u);
      }
      // Bring the converged singular value to the host for bookkeeping.
      caffe_gpu_memcpy(sizeof(Dtype), &W[ri], &hostW[ri]);
      // Check that singular vectors are in non-increasing order
      int ri1 = ri;
      for (int i = 0; i < ri; ++i) {
        if (hostW[i] < hostW[ri]) {
          ri1 = i;
          break;
        }
      }
      if (ri1 != ri) {
        // Out of order: move the new triplet to slot ri1 and redo the
        // deflation from scratch up to that slot.
        //LOG(INFO) << "Swapping vectors W[" << ri << "] = " << W[ri] <<
        //    " and W[" << ri1 << "] = " << W[ri1];
        std::swap(hostW[ri], hostW[ri1]);
        // Swap u[ri] and u[ri1]
        kernel_swap_columns<Dtype><<<CAFFE_GET_BLOCKS(m),
            CAFFE_CUDA_NUM_THREADS>>>(m, r, ri, ri1, U);
        kernel_swap_vectors<Dtype><<<CAFFE_GET_BLOCKS(k),
            CAFFE_CUDA_NUM_THREADS>>>(k, Vt + ri*k, Vt + ri1*k);
        // Inflate: restore the undeflated dictionary.
        caffe_gpu_memcpy(m*k*sizeof(Dtype), D, work);
        // Re-deflate using the triplets accepted so far.
        for (int i = 0; i < ri1; ++i) {
          // Copy column i of U into u
          kernel_column_to_vector<Dtype><<<CAFFE_GET_BLOCKS(m),
              CAFFE_CUDA_NUM_THREADS>>>(m, r, i, U, u);
          Dtype* v = Vt + i*k;
          caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, k, 1, -(Dtype)hostW[i], u, v,
              (Dtype)1., work);
        }
        // Recompute new singular vector: rewind the loop index so the next
        // iteration runs with ri == ri1.
        ri = ri1-1;
      }
      else {
        // Deflate: work -= W[ri] * u * v^T so the next iteration finds the
        // next-largest singular triplet.
        caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, k, 1, -(Dtype)hostW[ri],
            u, v, (Dtype)1., work);
      }
    }
    // Reconstruct D from rank r approximation: D = U * diag(W) * Vt.
    caffe_gpu_memcpy(r*k*sizeof(Dtype), Vt, tmp);
    for (int i = 0; i < r; ++i) {
      //hostW[i] = std::max(hostW[i], (Dtype)EPSILON);
      caffe_gpu_scal(k, hostW[i], tmp + i*k);
    }
    Dtype* Dlr = Dlow_rank_.mutable_gpu_data();
    caffe_gpu_memcpy(m*k*sizeof(Dtype), D, work);
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, k, r, (Dtype)1., U, tmp,
        (Dtype)0., D);
    caffe_gpu_sub(m*k, D, work, Dlr); // Now Dlr contains the gradient
    // Precompute pseudoinverse of D:
    //   Ddagger = D * V * diag(1/s^2) * Vt; Vt_s2 keeps the s^2-scaled rows.
    Dtype* Ddagger = Ddagger_buffer_.mutable_gpu_data();
    Dtype* Vt_sn2 = Vt_sn2_buffer_.mutable_gpu_data();
    Dtype* Vt_s2 = this->blobs_[bias_idx_ + 3]->mutable_gpu_data();
    caffe_gpu_memcpy(r*k*sizeof(Dtype), Vt, Vt_sn2);
    caffe_gpu_memcpy(r*k*sizeof(Dtype), Vt, Vt_s2);
    for (int i = 0; i < r; ++i) {
      if (hostW[i] > EPSILON) {
        caffe_gpu_scal(k, (Dtype)1./(hostW[i]*hostW[i]), Vt_sn2 + i*k);
        caffe_gpu_scal(k, hostW[i]*hostW[i], Vt_s2 + i*k);
      }
      else {
        // Numerically-zero singular values are dropped entirely.
        caffe_gpu_scal(k, (Dtype)0., Vt_sn2 + i*k);
        caffe_gpu_scal(k, (Dtype)0., Vt_s2 + i*k);
      }
    }
    caffe_gpu_gemm(CblasTrans, CblasNoTrans, k, k, r, (Dtype)1., Vt, Vt_sn2,
        (Dtype)0., tmp);
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, k, k, (Dtype)1., D, tmp,
        (Dtype)0., Ddagger);
    is_dict_normalized_ = true;
    *Dflag = (Dtype)0.;
  }
}
// Forward pass: refresh the normalized dictionary and its decomposition
// if dirty, then sparse-code every input sample. Outputs come in pairs:
// top[2*i] receives the sparse codes for bottom[i], and top[2*i+1]
// receives the mean sparse-coding objective value.
template <typename Dtype>
void DictionaryLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // No-op when the dictionary has not changed since the last call.
  fast_preprocess_gpu();
  // Perform sparse coding (and optionally dictionary learning) on each input vector
  const Dtype* D = this->blobs_[0]->gpu_data();
  const Dtype* Vt = this->blobs_[bias_idx_ + 2]->gpu_data();
  const Dtype* Vt_s2 = this->blobs_[bias_idx_ + 3]->gpu_data();
  for (int i = 0; i < top.size()/2; ++i) {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    Dtype* top_data = top[2*i]->mutable_gpu_data();
    double loss = 0.;
    for (int n = 0; n < this->num_; ++n) {
      // Perform forward sparse coding; accumulate the per-sample objective.
      loss += this->forward_gpu_sparse_coding(bottom_data + bottom[i]->offset(n),
          D, Vt, Vt_s2, top_data + top[2*i]->offset(n));
      if (this->bias_term_) {
        const Dtype* bias = this->blobs_[bias_idx_]->gpu_data();
        // BUGFIX: the offset must come from top[2*i] (the blob top_data
        // points into), not top[i] as the original code had it.
        this->forward_gpu_bias(top_data + top[2*i]->offset(n), bias);
      }
    }
    // Put the mean objective value in the second output of the pair.
    top_data = top[2*i+1]->mutable_cpu_data();
    *top_data = Dtype(loss/num_);
  }
}
// Transposes the row-major m-by-k matrix A into the k-by-m matrix B.
// n must equal m*k; one thread handles one element of B.
template <typename Dtype>
__global__ void transpose_kernel(int n, int m, int k, const Dtype* A, Dtype* B) {
  CUDA_KERNEL_LOOP(index, n) {
    const int out_row = index / m;  // row of B == column of A
    const int out_col = index % m;  // column of B == row of A
    B[index] = A[out_col * k + out_row];
  }
}
// Performs B = A^T on the GPU, where A is m x k (row-major).
template<typename Dtype>
void DictionaryLayer<Dtype>::transpose_gpu(int m, int k, const Dtype* A, Dtype* B) {
  const int count = m * k;
  transpose_kernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, m, k, A, B);
}
// Fills diag[] with the IRLS reweighting diagonal 2*lambda / (|alpha_i| + eps).
// The EPSILON term guards against division by zero for vanished coefficients.
template <typename Dtype>
__global__ void kernel_compute_diag_weights(int n, Dtype lambda,
    const Dtype* vec_alpha, Dtype* diag) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype denom = fabs(vec_alpha[index]) + EPSILON;
    diag[index] = 2 * lambda / denom;
  }
}
// Zeroes every coefficient whose magnitude falls below lambda; larger
// coefficients are left unchanged.
template <typename Dtype>
__global__ void kernel_hard_threshold(int n, Dtype lambda, Dtype* vec_alpha) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype value = vec_alpha[index];
    vec_alpha[index] = (fabs(value) < lambda) ? (Dtype)0. : value;
  }
}
// Sparse-codes every spatial column of one input sample via IRLS plus
// conjugate gradient, hard-thresholds the result, writes the channel-first
// codes to `output`, and returns the mean objective value over the spatial
// positions.
template <typename Dtype>
double DictionaryLayer<Dtype>::forward_gpu_sparse_coding(const Dtype* input,
      const Dtype* D, const Dtype* Vt, const Dtype *Vt_s2, Dtype* output,
      bool skip_im2col) {
  const Dtype* col_buff = input;
  if (!is_1x1_) {
    // Unfold the input into columns unless the caller already did so.
    if (!skip_im2col) {
      conv_im2col_gpu(input, col_buffer_.mutable_gpu_data());
    }
    col_buff = col_buffer_.gpu_data();
  }
  // Perform sparse coding for each input vector
  int m = kernel_dim_;   // input patch dimensionality
  int k = num_output_;   // number of dictionary atoms / output channels
  Dtype* vec_d = vec_d_buffer_.mutable_gpu_data(); // D^T * x
  Dtype* vec_r = vec_r_buffer_.mutable_gpu_data(); // CG residual vector
  Dtype* vec_p = vec_p_buffer_.mutable_gpu_data(); // CG descent direction
  Dtype* vec_w = vec_w_buffer_.mutable_gpu_data(); // CG vector w = C*p
  Dtype* sparse_codes = sparse_codes_buffer_.mutable_gpu_data();
  // tmp_buffer_ is carved into scratch slices of max(k,m) elements each.
  Dtype* diag = tmp_buffer_.mutable_gpu_data() + std::max(k,m);
  Dtype* tmp = tmp_buffer_.mutable_gpu_data() + 2*std::max(k,m);
  // Initialize loss
  double loss = 0.;
  for (int i = 0; i < conv_out_spatial_dim_; ++i)
  {
    const Dtype* x = col_buff + i*m;        // Input sample
    Dtype* vec_alpha = sparse_codes + i*k;  // Sparse code vector
    // Start IRLS from the all-ones code.
    caffe_gpu_set<Dtype>(k, (Dtype)1., vec_alpha);
    // Perform num_iter_irls iterations of iteratively reweighted
    // least squares using the previous result as starting value.
    for (int iter_irls = 0; iter_irls < num_iter_irls_; ++iter_irls)
    {
      // Build diagonal weight vector diag = 2*lambda/(|alpha| + eps).
      kernel_compute_diag_weights<Dtype><<<CAFFE_GET_BLOCKS(k),
          CAFFE_CUDA_NUM_THREADS>>>(k, (Dtype)lambda_, vec_alpha, diag);
      // Build vector d = D^T * x
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, k, 1, m,
          (Dtype)1., D, x,
          (Dtype)0., vec_d);
      // Approximately solve C * alpha = d with conjugate gradient.
      // C is never formed explicitly, since
      //   C * v = diag .* v + V * Vt_s2 * v
      // is cheaper to apply.
      conjugate_gradient_gpu(k, rank_, diag, Vt, Vt_s2, vec_d, vec_alpha,
          num_iter_cg_, vec_p, vec_r, vec_w, tmp);
    }
    // Hard-threshold small coefficients to exact zero.
    kernel_hard_threshold<<<CAFFE_GET_BLOCKS(k), CAFFE_CUDA_NUM_THREADS>>>(
        k, (Dtype)lambda_, vec_alpha);
    loss += objective_function_gpu(m, k, D, x, vec_alpha);
  }
  // Codes were produced pixel-first; transpose to channel-first order.
  transpose_gpu(conv_out_spatial_dim_, num_output_, sparse_codes, output);
  return loss/conv_out_spatial_dim_;  // fixed: removed stray ';;'
}
// Adds the per-atom bias to one output code vector in place.
template <typename Dtype>
void DictionaryLayer<Dtype>::forward_gpu_bias(Dtype* output,
    const Dtype* bias) {
  caffe_gpu_add(num_output_, bias, output, output);
}
// Evaluates Cost(alpha) = 0.5*||x - D*alpha||_2^2 + lambda*||alpha||_1 on
// the GPU and returns it as a host-side scalar. D is m x k, x has m
// elements, alpha has k elements. Overwrites the head of tmp_buffer_.
template <typename Dtype>
double DictionaryLayer<Dtype>::objective_function_gpu(int m, int k,
    const Dtype* D, const Dtype* x, const Dtype* alpha) {
  // residual = x - D*alpha, computed in the shared scratch buffer.
  Dtype* residual = tmp_buffer_.mutable_gpu_data();
  caffe_gpu_memcpy(m*sizeof(Dtype), x, residual);
  caffe_gpu_gemv<Dtype>(CblasNoTrans, m, k,
      (Dtype)(-1.), D, alpha,
      (Dtype)1., residual);
  // Squared reconstruction error.
  Dtype cost = (Dtype)0.;
  caffe_gpu_dot<Dtype>(m, residual, residual, &cost);
  // L1 penalty: sum of absolute values of elements in alpha.
  Dtype asum = (Dtype)0.;
  caffe_gpu_asum<Dtype>(k, alpha, &asum);
  cost = 0.5 * cost + lambda_ * asum;
  return cost;
}
// Computes the matrix-vector product C * x without forming C explicitly:
//   Cx = w .* x + V * Vt2 * x
//  (kx1) (kx1)  (kxr)(rxk)(kx1)
// where V = Vt^T. `tmp` must provide at least r elements of scratch space.
template <typename Dtype>
void DictionaryLayer<Dtype>::compute_Cx_gpu(int k, int r, const Dtype* w,
      const Dtype* Vt, const Dtype* Vt2, const Dtype* x,
      Dtype* tmp, Dtype* Cx) {
  // Diagonal term: Cx = w .* x (element-wise).
  caffe_gpu_mul(k, w, x, Cx);
  // tmp = Vt2 * x  (r x 1)
  caffe_gpu_gemv<Dtype>(CblasNoTrans, r, k, (Dtype)1., Vt2, x, (Dtype)0.,
      tmp);
  // Cx += Vt^T * tmp, i.e. the low-rank term V * Vt2 * x.
  caffe_gpu_gemv<Dtype>(CblasTrans, r, k, (Dtype)1., Vt, tmp, (Dtype)1., Cx);
}
// Performs one full conjugate-gradient iteration in a single kernel launch:
//   dot_p_w = dot(p, w)
//   alpha   = prev_norm_r / dot_p_w
//   x      += alpha * p;  r -= alpha * w
//   norm_r  = dot(r, r)
//   beta    = norm_r / prev_norm_r
//   p       = r + beta * p
// and writes the new squared residual norm back to *ret_norm_r.
// Must be launched with exactly ONE block of 1024 threads: the shared-memory
// reductions assume blockDim.x == 1024 and rely on block-wide barriers.
template <typename Dtype>
__global__ void kernel_step_cg(int k, Dtype* w, Dtype* p, Dtype* r,
    Dtype* x, Dtype* ret_norm_r) {
  __shared__ Dtype prev_norm_r;
  __shared__ Dtype dot_p_w[1024];
  __shared__ Dtype norm_r[1024];
  if (threadIdx.x == 0)
    prev_norm_r = *ret_norm_r;
  // Per-thread partial sums of dot(p, w).
  dot_p_w[threadIdx.x] = (Dtype)0.;
  CUDA_KERNEL_LOOP(j, k) {
    dot_p_w[threadIdx.x] += w[j]*p[j];
  }
  __syncthreads();
  // Tree-reduce; the total ends up in dot_p_w[0].
  for (int stride = 512; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride)
      dot_p_w[threadIdx.x] += dot_p_w[threadIdx.x + stride];
    __syncthreads();
  }
  // Step length along the current search direction.
  Dtype alpha = prev_norm_r / dot_p_w[0];
  // Update solution and residual.
  CUDA_KERNEL_LOOP(j, k) {
    x[j] += alpha*p[j];
    r[j] -= alpha*w[j];
  }
  __syncthreads();
  // Per-thread partial sums of dot(r, r).
  norm_r[threadIdx.x] = (Dtype)0.;
  CUDA_KERNEL_LOOP(j, k) {
    norm_r[threadIdx.x] += r[j]*r[j];
  }
  // BUGFIX: this barrier was missing; without it the reduction below raced
  // with the partial-sum writes above, producing nondeterministic norms.
  __syncthreads();
  for (int stride = 512; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride)
      norm_r[threadIdx.x] += norm_r[threadIdx.x + stride];
    __syncthreads();
  }
  // Direction-update coefficient.
  Dtype beta = norm_r[0] / prev_norm_r;
  // Update search direction.
  CUDA_KERNEL_LOOP(j, k) {
    p[j] = r[j] + beta*p[j];
  }
  // Publish the new squared residual norm.
  if (threadIdx.x == 0)
    *ret_norm_r = norm_r[0];
}
// Approximately solves C * x = d for x with at most `num_iter` iterations of
// conjugate gradient, where C*v = weights .* v + V * Vt2 * v is applied
// implicitly through compute_Cx_gpu. x holds the initial guess on entry and
// the refined solution on exit. temp_p/temp_r/temp_w are k-element scratch
// vectors; the first element of `tmp` stores the device-side squared
// residual norm and the remainder is scratch for compute_Cx_gpu.
template <typename Dtype>
void DictionaryLayer<Dtype>::conjugate_gradient_gpu(int k, int r,
    const Dtype* weights, const Dtype* Vt, const Dtype* Vt2, const Dtype* d,
    Dtype* x, int num_iter, Dtype* temp_p, Dtype* temp_r, Dtype* temp_w,
    Dtype* tmp) {
  // Device-resident scalar: dereferencing this thrust pointer below performs
  // an implicit device->host copy (and synchronization).
  thrust::device_ptr<Dtype> norm_r(tmp++);
  // r0 = d - C*x0
  compute_Cx_gpu(k, r, weights, Vt, Vt2, x, tmp, temp_r);
  caffe_gpu_sub(k, d, temp_r, temp_r);
  // Compute ||r0||^2 directly into device memory: temporarily switch cuBLAS
  // to device pointer mode so the dot result lands on the GPU.
  cublasSetPointerMode(Caffe::cublas_handle(), CUBLAS_POINTER_MODE_DEVICE);
  caffe_gpu_dot<Dtype>(k, temp_r, temp_r, norm_r.get());
  cublasSetPointerMode(Caffe::cublas_handle(), CUBLAS_POINTER_MODE_HOST);
  if (fabs(*norm_r) < EPSILON) {
    return; // Accept initial solution
  }
  // Initial descent direction p0 = r0.
  caffe_gpu_memcpy(k*sizeof(Dtype), temp_r, temp_p);
  // Perform num_iter iterations of conjugate gradient descent.
  for (int iter_cg = 0; iter_cg < num_iter; ++iter_cg)
  {
    // w = C * p
    compute_Cx_gpu(k, r, weights, Vt, Vt2, temp_p, tmp, temp_w);
    // Single-block kernel performs one complete CG step:
    //   dot_p_w = dot(p, w); alpha = prev_norm_r / dot_p_w
    //   x += alpha*p; r -= alpha*w
    //   norm_r = dot(r, r); beta = norm_r / prev_norm_r
    //   p = r + beta*p
    // One block only, so __syncthreads() can order the whole step.
    kernel_step_cg<Dtype><<<1, 1024>>>(k, temp_w, temp_p,
        temp_r, x, norm_r.get());
    // Host-side convergence check (implicit device->host copy + sync).
    if (fabs(*norm_r) < EPSILON) {
      return;
    }
  }
}
// Masks the output gradient: entries whose sparse code is exactly zero get a
// zero gradient (they were hard-thresholded); all others pass through.
template <typename Dtype>
__global__ void kernel_mod_gradient(int k, const Dtype* alpha,
    const Dtype* alpha_diff, Dtype* mod_alpha_diff) {
  CUDA_KERNEL_LOOP(index, k) {
    if (alpha[index] == (Dtype)0.)
      mod_alpha_diff[index] = (Dtype)0.;
    else
      mod_alpha_diff[index] = alpha_diff[index];
  }
}
// Backward pass: accumulates the dictionary gradient (pseudoinverse chain
// rule plus optional reconstruction term) and/or the input gradient for
// every sample. Requires a normalized dictionary and a 1x1 spatial output.
template <typename Dtype>
void DictionaryLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* D = this->blobs_[0]->gpu_data();
  const Dtype* Dlr = this->Dlow_rank_.gpu_data();
  Dtype* D_diff = this->blobs_[0]->mutable_gpu_diff();
  if (this->param_propagate_down_[0]) {
    caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), D_diff);
  }
  CHECK(is_dict_normalized_);
  CHECK_EQ(conv_out_spatial_dim_, 1) << "Convolutional dictionaries not implemented, yet!";
  // Temporary storage and precomputed constants
  int m = kernel_dim_;
  int k = num_output_;
  // Scratch slices of tmp_buffer_, max(k,m) elements apart.
  Dtype* tmp1 = tmp_buffer_.mutable_gpu_data();
  Dtype* tmp2 = tmp_buffer_.mutable_gpu_data() + std::max(k,m);
  Dtype* tmp3 = tmp_buffer_.mutable_gpu_data() + 2*std::max(k,m);
  // Fallback dl/dx storage when the bottom gradient is not requested.
  Dtype* tmp_dl_dx = tmp_buffer_.mutable_gpu_data() + 3*std::max(k,m);
  // Matrices precomputed during forward preprocessing.
  const Dtype* Vt = this->blobs_[bias_idx_ + 2]->gpu_data();
  const Dtype* Vt_sn2 = Vt_sn2_buffer_.gpu_data();
  const Dtype* Ddagger = Ddagger_buffer_.gpu_data();
  // Tops come in (codes, objective) pairs; only code blobs carry diffs.
  for (int idx = 0; idx < top.size()/2; ++idx) {
    const Dtype* top_diff = top[2*idx]->gpu_diff();
    const Dtype* top_data = top[2*idx]->gpu_data();
    const Dtype* bottom_data = bottom[idx]->gpu_data();
    Dtype* bottom_diff = bottom[idx]->mutable_gpu_diff();
    // Bias gradient, if necessary.
    if (this->bias_term_ && this->param_propagate_down_[1]) {
      Dtype* bias_diff = this->blobs_[bias_idx_]->mutable_gpu_diff();
      for (int n = 0; n < this->num_; ++n) {
        this->backward_gpu_bias(bias_diff, top_diff + top[idx*2]->offset(n));
      }
    }
    if (this->param_propagate_down_[0] || propagate_down[idx]) {
      for (int n = 0; n < this->num_; ++n) {
        const Dtype* alpha = top_data + top[idx*2]->offset(n);
        const Dtype* alpha_diff = top_diff + top[idx*2]->offset(n);
        // Zero the gradient wherever the code was thresholded to zero.
        Dtype* mod_alpha_diff = mod_alpha_diff_buffer_.mutable_gpu_data();
        kernel_mod_gradient<Dtype><<<CAFFE_GET_BLOCKS(k),
            CAFFE_CUDA_NUM_THREADS>>>(k, alpha, alpha_diff, mod_alpha_diff);
        // dl/dx is necessary for both gradients
        Dtype* dl_dx = propagate_down[idx] ?
            bottom_diff + bottom[idx]->offset(n) : tmp_dl_dx;
        // gradient w.r.t. bottom data, if necessary.
        if (propagate_down[idx] || this->param_propagate_down_[0]) {
          this->backward_gpu_gemm(mod_alpha_diff, Ddagger, dl_dx);
        }
        // gradient w.r.t. dictionary. Note that we will accumulate diffs.
        if (this->param_propagate_down_[0]) {
          this->dict_gpu_backprop(bottom_data + bottom[idx]->offset(n),
              dl_dx, mod_alpha_diff, Ddagger, Vt, Vt_sn2, tmp1, tmp2, tmp3,
              D_diff);
          this->dict_gpu_optimize(bottom_data + bottom[idx]->offset(n), alpha,
              D, Dlr, (Dtype)etha_rec_, (Dtype)etha_lr_, tmp1, tmp2, D_diff);
          // Mark dictionary as unnormalized so the next forward pass
          // re-runs normalization/decomposition.
          is_dict_normalized_ = false;
          Dtype* Dflag = this->blobs_[bias_idx_ + 1]->mutable_cpu_data();
          *Dflag = (Dtype)1.;
        }
        // gradient of reconstruction error w.r.t bottom data, if necessary
        if (propagate_down[idx]) {
          this->backward_gpu_optimize(alpha, D, Dlr,
              bottom_data + bottom[idx]->offset(n),
              (Dtype)(etha_rec_bp_), (Dtype)(etha_lr_bp_), dl_dx);
        }
      }
    }
  }
}
// Accumulates the gradient of the loss w.r.t. the dictionary into D_diff:
//   D_diff += -2 * dl_dx * (x^T * (D^dagger)^T)
//           +  x * (mod_alpha_diff * V * Vt_sn2)
// x is m x 1, dl_dx is m x 1, mod_alpha_diff is 1 x k.
// tmp1 (1 x k), tmp2 (1 x r) and tmp3 (1 x k) are scratch buffers.
template <typename Dtype>
void DictionaryLayer<Dtype>::dict_gpu_backprop(const Dtype* x, const Dtype* dl_dx,
    const Dtype* mod_alpha_diff, const Dtype* Dtdagger, const Dtype* Vt,
    const Dtype* Vt_sn2, Dtype* tmp1, Dtype* tmp2, Dtype* tmp3, Dtype* D_diff) {
  int m = kernel_dim_;
  int k = num_output_;
  int r = rank_;
  // tmp1 = x^T * (D^dagger)^T   (1 x k)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, k, m, (Dtype)1., x,
      Dtdagger, (Dtype)0., tmp1);
  // tmp2 = dl_dalpha * V        (1 x r)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 1, r, k, (Dtype)1.,
      mod_alpha_diff, Vt, (Dtype)0., tmp2);
  // tmp3 = tmp2 * Vt_sn2        (1 x k)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, k, r, (Dtype)1.,
      tmp2, Vt_sn2, (Dtype)0., tmp3);
  // D_diff += -2 * dl_dx * tmp1   (m x k outer product, accumulated)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, m, k, 1, -(Dtype)2., dl_dx,
      tmp1, (Dtype)1., D_diff);
  // D_diff += x * tmp3            (m x k outer product, accumulated)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, m, k, 1, (Dtype)1., x,
      tmp3, (Dtype)1., D_diff);
}
// Adds the gradient of the reconstruction loss w.r.t. the dictionary:
//   D_diff += etha_rec * (D*alpha - x) * alpha^T
// tmp1 must hold at least m elements.
// NOTE(review): the low-rank pull term below is commented out, so the Dlr,
// etha_lr and tmp2 parameters are currently unused -- confirm whether that
// regularizer should be restored.
template <typename Dtype>
void DictionaryLayer<Dtype>::dict_gpu_optimize(const Dtype* x,
    const Dtype* alpha, const Dtype* D, const Dtype* Dlr, Dtype etha_rec,
    Dtype etha_lr, Dtype* tmp1, Dtype* tmp2, Dtype* D_diff) {
  int m = kernel_dim_;
  int k = num_output_;
  if (etha_rec != (Dtype)0.) {
    // tmp1 = D*alpha - x  (gemm with beta = -1 on tmp1 initialized to x)
    caffe_copy(m, x, tmp1);
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, m, 1, k,
        (Dtype)1., D, alpha, -(Dtype)1., tmp1);
    // D_diff += etha_rec * tmp1 * alpha^T
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, m, k, 1,
        etha_rec, tmp1, alpha, (Dtype)1., D_diff);
  }
  // if (etha_lr != (Dtype)0.) {
  //   // Do dl/dD += etha_lr*(Dlr-D)
  //   caffe_gpu_axpy(m*k, etha_lr, Dlr, D_diff);
  //   caffe_gpu_axpy(m*k, -etha_lr, D, D_diff);
  // }
}
// Adds backpropagated regularization terms to the input gradient dl_dx:
//   dl_dx += etha_rec * (x - D*alpha)   (reconstruction error term)
//   dl_dx += etha_lr  * Dlr * alpha     (low-rank term)
template <typename Dtype>
void DictionaryLayer<Dtype>::backward_gpu_optimize(const Dtype* alpha,
    const Dtype* D, const Dtype* Dlr, const Dtype* x, Dtype etha_rec,
    Dtype etha_lr, Dtype* dl_dx) {
  int m = kernel_dim_;
  int k = num_output_;
  if (etha_rec != (Dtype)0.) {
    // dl_dx += etha_rec * x, then subtract etha_rec * D*alpha via gemm.
    caffe_gpu_axpy(m, etha_rec, x, dl_dx);
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, 1, k, -etha_rec, D, alpha,
        (Dtype)1., dl_dx);
  }
  if (etha_lr != (Dtype)0.) {
    // dl_dx += etha_lr * Dlr * alpha
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, m, 1, k, etha_lr, Dlr, alpha,
        (Dtype)1., dl_dx);
  }
}
// Backpropagates through the dictionary product: input = D * mod_alpha_diff,
// folded back with col2im when the kernel is not 1x1.
template <typename Dtype>
void DictionaryLayer<Dtype>::backward_gpu_gemm(const Dtype* mod_alpha_diff,
    const Dtype* D, Dtype* input) {
  Dtype* col_buff = col_buffer_.mutable_gpu_data();
  // In the 1x1 case the gemm writes straight into the input gradient.
  Dtype* target = is_1x1_ ? input : col_buff;
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, kernel_dim_,
      1, num_output_,
      (Dtype)1., D, mod_alpha_diff,
      (Dtype)0., target);
  if (!is_1x1_) {
    conv_col2im_gpu(target, input);
  }
}
// Accumulates one sample's output gradient into the bias gradient.
template <typename Dtype>
void DictionaryLayer<Dtype>::backward_gpu_bias(Dtype* bias,
    const Dtype* input) {
  caffe_gpu_add(num_output_, input, bias, bias);
}
INSTANTIATE_LAYER_GPU_FUNCS(DictionaryLayer);
} // namespace caffe
|
9c20c2733097ff2430cbbeb68e58db30aff0eab2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <boost/graph/buffer_concepts.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
/* ************************** */
// Component-wise (Hadamard) product of two float3 vectors.
__device__ __forceinline__ float3
float3_mul_elements(const float3 & a,const float3 & b)
{
  float3 out;
  out.x = a.x * b.x;
  out.y = a.y * b.y;
  out.z = a.z * b.z;
  return out;
}
// Generic TSDF-volume ray caster. One GPU thread handles one output pixel:
// it marches a ray from the camera through the rolling TSDF buffer.
//   - SearchCondition filters which world points along the ray are examined,
//   - StoreCondition decides when marching stops (and may request a finer
//     step),
//   - StoreAction writes the per-pixel result.
// Intended launch layout: 2D grid of CTA_SIZE_X x CTA_SIZE_Y blocks covering
// a cols x rows image.
template <class StoreCondition,class StoreAction,class SearchCondition>
struct RayCaster
{
  enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 };

  Mat33 Rcurr;                 // camera-to-world rotation
  float3 tcurr;                // camera origin (ray start) in world meters
  float time_step;             // nominal ray-marching step, in meters
  float3 volume_size;          // physical volume extents, in meters
  int3 voxels_size;            // volume resolution, in voxels
  int3 voxels_volume_padding;  // voxel border excluded by checkSafeInds
  float3 cell_size;            // meters per voxel, per axis
  float3 cell_size_inv;        // voxels per meter, per axis
  int cols, rows;              // output image dimensions

  mutable SearchCondition search_condition;
  mutable StoreCondition store_condition;
  mutable StoreAction store_action;

  PtrStep<short2> volume;      // packed TSDF value + weight storage
  float min_range;             // minimum accepted depth along the optical axis
  Intr intr;                   // camera intrinsics

  mutable PtrStep<float> vmap; // output vertex map (3 stacked row blocks)

  // Ray direction through pixel (x, y), in camera frame, unnormalized.
  __device__ __forceinline__ float3
  get_ray_next (int x, int y) const
  {
    float3 ray_next;
    ray_next.x = (x - intr.cx) / intr.fx;
    ray_next.y = (y - intr.cy) / intr.fy;
    ray_next.z = 1;
    return ray_next;
  }

  // True when g lies inside the voxel grid.
  __device__ __forceinline__ bool
  checkInds (const int3& g) const
  {
    return (g.x >= 0 && g.y >= 0 && g.z >= 0 && g.x < voxels_size.x && g.y < voxels_size.y && g.z < voxels_size.z);
  }

  // True when g lies inside the grid minus the configured padding border.
  __device__ __forceinline__ bool
  checkSafeInds (const int3& g) const
  {
    return (g.x >= voxels_volume_padding.x && g.y >= voxels_volume_padding.y && g.z >= voxels_volume_padding.z &&
        g.x < voxels_size.x - voxels_volume_padding.x &&
        g.y < voxels_size.y - voxels_volume_padding.y &&
        g.z < voxels_size.z - voxels_volume_padding.z);
  }

  // Maps logical voxel coordinates to physical ones in the cyclic (rolling)
  // buffer: add the buffer origin and wrap once past the buffer size.
  __device__ __forceinline__ void
  shift_coords (int & x, int & y, int & z, const pcl::gpu::kinfuLS::tsdf_buffer & buffer) const
  {
    x += buffer.origin_GRID.x;
    y += buffer.origin_GRID.y;
    z += buffer.origin_GRID.z;
    if (x >= buffer.voxels_size.x)
      x -= buffer.voxels_size.x;
    if (y >= buffer.voxels_size.y)
      y -= buffer.voxels_size.y;
    if (z >= buffer.voxels_size.z)
      z -= buffer.voxels_size.z;
  }

  // Reads the TSDF value at logical voxel (x, y, z).
  __device__ __forceinline__ float
  readTsdf (int x, int y, int z, const pcl::gpu::kinfuLS::tsdf_buffer & buffer) const
  {
    shift_coords(x,y,z,buffer);
    const short2* pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]);
    return unpack_tsdf (*pos);
  }

  // Reads both the TSDF value and its integration weight at (x, y, z).
  __device__ __forceinline__ void
  readTsdf (int x, int y, int z, const pcl::gpu::kinfuLS::tsdf_buffer & buffer,float& tsdf, int& weight) const
  {
    shift_coords(x,y,z,buffer);
    const short2* pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]);
    unpack_tsdf (*pos,tsdf,weight);
  }

  // Meters -> fractional voxel (cell) coordinates.
  __device__ __forceinline__ float3
  fromMetersToCells (const float3 & point) const
  {
    return float3_mul_elements(point,cell_size_inv);
  }

  // Fractional voxel (cell) coordinates -> meters.
  __device__ __forceinline__ float3
  fromCellsToMeters (const float3 & point) const
  {
    return float3_mul_elements(point,cell_size);
  }

  __device__ __forceinline__ int3
  getVoxelFromPoint (float3 point) const
  {
    return getVoxelFromCell(fromMetersToCells(point));
  }

  __device__ __forceinline__ int3
  getVoxelFromCell (const float3 & cell) const
  {
    int vx = __float2int_rd (cell.x); // round to negative infinity
    int vy = __float2int_rd (cell.y);
    int vz = __float2int_rd (cell.z);

    return make_int3 (vx, vy, vz);
  }

  // Trilinear TSDF interpolation at a world point given in meters.
  __device__ __forceinline__ float
  interpolateTrilineary (const float3& point, const pcl::gpu::kinfuLS::tsdf_buffer & buffer) const
  {
    const float3 cell = fromMetersToCells (point);
    return interpolateTrilinearyFromCell (cell,buffer);
  }

  // Trilinear TSDF interpolation at a point given in fractional cell
  // coordinates. Returns NaN when the 2x2x2 sample neighborhood would
  // leave the volume.
  __device__ __forceinline__ float
  interpolateTrilinearyFromCell (const float3& cell, const pcl::gpu::kinfuLS::tsdf_buffer & buffer) const
  {
    int3 g = getVoxelFromCell (cell);

    if (g.x <= 0 || g.x >= buffer.voxels_size.x - 1)
      return numeric_limits<float>::quiet_NaN ();

    if (g.y <= 0 || g.y >= buffer.voxels_size.y - 1)
      return numeric_limits<float>::quiet_NaN ();

    if (g.z <= 0 || g.z >= buffer.voxels_size.z - 1)
      return numeric_limits<float>::quiet_NaN ();

    // Interpolation weights are computed directly in cell space relative to
    // the voxel centers (at +0.5); when the sample lies in the lower half of
    // a cell, step back one voxel and shift the weight into [0, 1).
    float a = cell.x - (g.x + 0.5f); if (a<0) { g.x--; a+=1.0f; };
    float b = cell.y - (g.y + 0.5f); if (b<0) { g.y--; b+=1.0f; };
    float c = cell.z - (g.z + 0.5f); if (c<0) { g.z--; c+=1.0f; };

    // Standard 8-corner trilinear blend of the surrounding TSDF samples.
    float res = (1 - a) * (
        (1 - b) * (
          readTsdf (g.x + 0, g.y + 0, g.z + 0, buffer) * (1 - c) +
          readTsdf (g.x + 0, g.y + 0, g.z + 1, buffer) * c
          )
        + b * (
          readTsdf (g.x + 0, g.y + 1, g.z + 0, buffer) * (1 - c) +
          readTsdf (g.x + 0, g.y + 1, g.z + 1, buffer) * c
          )
        )
      + a * (
        (1 - b) * (
          readTsdf (g.x + 1, g.y + 0, g.z + 0, buffer) * (1 - c) +
          readTsdf (g.x + 1, g.y + 0, g.z + 1, buffer) * c
          )
        + b * (
          readTsdf (g.x + 1, g.y + 1, g.z + 0, buffer) * (1 - c) +
          readTsdf (g.x + 1, g.y + 1, g.z + 1, buffer) * c
          )
        )
      ;
    return res;
  }

  // Slab intersection of the ray with the volume AABB [0, box_max]:
  // tnear/tfar are the entry/exit parameters along ray_dir.
  // NOTE(review): the final folds reuse the x component (e.g.
  // fmaxf(tmin.x, tmin.z)); the result is still the max/min over all three
  // axes, just with a redundant operand.
  __device__ void find_min_max_time(float3 ray_org, float3 ray_dir, float3 box_max, float &tnear, float &tfar) const
  {
    const float3 box_min = make_float3(0.f, 0.f, 0.f);

    // compute intersection of ray with all six bbox planes
    float3 invR = make_float3(1.f/ray_dir.x, 1.f/ray_dir.y, 1.f/ray_dir.z);
    float3 tbot = float3_mul_elements(invR,box_min - ray_org);
    float3 ttop = float3_mul_elements(invR,box_max - ray_org);

    // re-order intersections to find smallest and largest on each axis
    float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
    float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));

    // find the largest tmin and the smallest tmax
    tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
    tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
  }

  // Marches one ray for the pixel owned by this thread; see the struct
  // comment for the roles of the three policy objects.
  __device__ __forceinline__ void
  operator () (pcl::gpu::kinfuLS::tsdf_buffer buffer) const
  {
    int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
    int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;

    if (x >= cols || y >= rows)
      return;

    // Reset this pixel's outputs to their "no result" values.
    store_action.Init(*this,x,y);

    const float3 ray_start = tcurr;
    float3 norm_ray_next = normalized (get_ray_next (x, y));
    float3 ray_dir = normalized (Rcurr * get_ray_next (x, y));

    // ensure that it isn't a degenerate case
    ray_dir.x = (ray_dir.x == 0.f) ? 1e-15 : ray_dir.x;
    ray_dir.y = (ray_dir.y == 0.f) ? 1e-15 : ray_dir.y;
    ray_dir.z = (ray_dir.z == 0.f) ? 1e-15 : ray_dir.z;

    // compute the times when the ray enters and exits the volume
    float time_start_volume;
    float time_exit_volume;
    find_min_max_time(ray_start,ray_dir,volume_size,time_start_volume,time_exit_volume);

    const float min_dist = 0.f; //in meters
    time_start_volume = fmax (time_start_volume, min_dist);
    if (time_start_volume >= time_exit_volume)
      return;
    time_exit_volume -= time_step;

    // Clamp the starting voxel into the grid and read the first sample.
    float time_curr = time_start_volume;
    int3 g = getVoxelFromPoint (ray_start + ray_dir * time_curr);
    g.x = max (0, min (g.x, buffer.voxels_size.x - 1));
    g.y = max (0, min (g.y, buffer.voxels_size.y - 1));
    g.z = max (0, min (g.z, buffer.voxels_size.z - 1));

    float tsdf;
    int weight;
    readTsdf (g.x, g.y, g.z, buffer, tsdf, weight);

    //infinite loop guard
    const float max_time = fmin(time_exit_volume,3.0 * (volume_size.x + volume_size.y + volume_size.z));

    float curr_time_step = time_step;
    bool zero_crossing = false;
    for (; time_curr < max_time; time_curr += curr_time_step)
    {
      float tsdf_prev = tsdf;
      int weight_prev = weight;

      // Next sample position along the ray, in world meters.
      const float3 world_pt = ray_start + ray_dir * (time_curr + curr_time_step);
      if (!search_condition.Evaluate(world_pt))
        continue;

      g = getVoxelFromPoint (world_pt);
      if (!checkInds (g))
        return;
      if (!checkSafeInds(g))
        continue;

      readTsdf (g.x, g.y, g.z, buffer, tsdf, weight);

      {
        // Give the store condition a chance to request a finer step; the
        // step is only refined while it stays above one cell size. On
        // refinement, rewind and retry the same segment.
        float new_time_step;
        if (store_condition.ChangeTimeStep(tsdf_prev,tsdf,weight_prev,weight,curr_time_step,new_time_step))
        {
          if (new_time_step > min(cell_size.x,min(cell_size.y,cell_size.z)))
          {
            tsdf = tsdf_prev;
            weight = weight_prev;
            time_curr -= curr_time_step;
            curr_time_step = new_time_step;
            continue;
          }
        }
      }

      // Negative-to-positive transition: the ray left a surface from
      // behind, so give up on this pixel.
      if (tsdf_prev < 0.f && tsdf > 0.f)
        return;

      zero_crossing = store_condition.Evaluate(tsdf_prev,tsdf,weight_prev,weight);
      // Reject hits closer than the configured minimum range along the
      // optical axis.
      if (zero_crossing && (time_curr * norm_ray_next.z) < min_range)
        return;
      if (zero_crossing)
        break; // break out of the cycle here, so Stores will be executed in sync by all threads
    }

    if (zero_crossing)
    {
      const float3 world_pt_prev = ray_start + ray_dir * (time_curr);
      const float3 world_pt = ray_start + ray_dir * (time_curr + curr_time_step);
      store_action.Store(world_pt_prev,world_pt,g,tsdf,weight,time_curr,time_step,ray_start,ray_dir,
          *this,x,y,buffer);
    }
  }
};
// Search region: accept only points strictly inside a given sphere.
struct SphereSearchCondition
{
  SphereSearchCondition () {}
  SphereSearchCondition (const float3 & c,const float & r): sphere_center(c), sphere_radius(r) {}

  // True when pt lies strictly within the sphere.
  __device__ __forceinline__ bool Evaluate (const float3 & pt) const
  {
    const float dx = sphere_center.x - pt.x;
    const float dy = sphere_center.y - pt.y;
    const float dz = sphere_center.z - pt.z;
    const float sq_dist = dx * dx + dy * dy + dz * dz;
    return sq_dist < sphere_radius * sphere_radius;
  }

  float3 sphere_center;
  float sphere_radius;
};
// Search region: accept only points inside an axis-aligned bounding box
// (min corner inclusive, max corner exclusive).
struct BBoxSearchCondition
{
  BBoxSearchCondition () {}
  BBoxSearchCondition (const float3 & m,const float3 & M): bbox_min(m), bbox_max(M) {}

  __device__ __forceinline__ bool Evaluate (const float3 & pt) const
  {
    const bool above_min = pt.x >= bbox_min.x && pt.y >= bbox_min.y && pt.z >= bbox_min.z;
    const bool below_max = pt.x < bbox_max.x && pt.y < bbox_max.y && pt.z < bbox_max.z;
    return above_min && below_max;
  }

  float3 bbox_min;
  float3 bbox_max;
};
// Search region: no restriction -- every point along the ray is considered.
struct TrueSearchCondition
{
  __device__ __forceinline__ bool Evaluate (const float3 & /*pt*/) const {return true; }
};
// Stop condition: a surface was hit when the TSDF crosses from positive
// (in front of the surface) to negative (behind it).
struct ZeroCrossingStoreCondition
{
  __device__ __forceinline__ bool Evaluate(float tsdf_prev,float tsdf_curr,int /*weight_prev*/,int /*weight_curr*/)
  {
    if (tsdf_prev <= 0.0f)
      return false;
    return tsdf_curr < 0.0f;
  }

  // Never requests a different marching step.
  __device__ __forceinline__ bool ChangeTimeStep(float /*tsdf_prev*/,float /*tsdf*/,int /*weight_prev*/,int /*weight_curr*/,
    float /*time_step*/,float & /*new_time_step*/)
  {
    return false;
  }
};
// Stop condition: halt on occupied space (tsdf < 0) or unknown space
// (weight == 0).
struct NotEmptyStoreCondition
{
  __device__ __forceinline__ bool Evaluate(float /*tsdf_prev*/,float tsdf_curr,int /*weight_prev*/,int weight_curr)
  {
    if (weight_curr == 0)
      return true;
    return tsdf_curr < 0.0f;
  }

  // When marching just crossed from measured into unknown space, ask the
  // caster to retry the segment with half the step for a sharper boundary.
  __device__ __forceinline__ bool ChangeTimeStep(float /*tsdf_prev*/,float /*tsdf*/,int weight_prev,int weight_curr,
    float time_step,float & new_time_step)
  {
    const bool entered_unknown = (weight_curr == 0) && (weight_prev != 0);
    if (!entered_unknown)
      return false;
    new_time_step = time_step / 2.0;
    return true;
  }
};
// Store action: on a zero crossing, refine the intersection via trilinear
// TSDF interpolation, write the surface vertex into parent.vmap and a
// central-difference normal into nmap (both maps hold 3 stacked row blocks
// of `rows` rows: x, y, z components).
struct InterpolatePointAndNormalStoreAction
{
  // Initialize the output maps to NaN so untouched pixels read as invalid.
  template <class _RayCaster>
  __device__ __forceinline__ void Init(_RayCaster & parent,int x,int y)
  {
    parent.vmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN ();
    nmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN ();
  }

  template <class _RayCaster>
  __device__ __forceinline__ void Store(const float3 & world_pt_prev,const float3 & world_pt,
    const int3 & /*voxel_id*/,float /*tsdf*/,float weight,
    float time_curr,float time_step,const float3 & ray_start,const float3 & ray_dir,
    const _RayCaster & parent,int x,int y,pcl::gpu::kinfuLS::tsdf_buffer & buffer)
  {
    if (weight == 0)
      return;
    // BUGFIX: use float literals and fabsf in device code instead of the
    // original double literals (0.5, 0.1) and unqualified abs, avoiding
    // silent double-precision promotion on the GPU.
    float step_correction = 0.5f;
    float Ftdt = parent.interpolateTrilineary (world_pt, buffer);
    if (isnan (Ftdt))
      return;
    float Ft = parent.interpolateTrilineary (world_pt_prev, buffer);
    if (isnan (Ft))
      return;
    // Linear refinement of the crossing position between the two samples,
    // guarded against a tiny denominator.
    if (fabsf(Ftdt - Ft) > 0.1f)
      step_correction = __fdividef(Ft, Ftdt - Ft);
    float Ts = time_curr - time_step * step_correction;

    float3 vertex_found = ray_start + ray_dir * Ts;

    parent.vmap.ptr (y                 )[x] = vertex_found.x;
    parent.vmap.ptr (y +     parent.rows)[x] = vertex_found.y;
    parent.vmap.ptr (y + 2 * parent.rows)[x] = vertex_found.z;

    // Normal from central differences of the TSDF, one cell apart per axis.
    float3 t;
    float3 n;

    t = vertex_found;
    t.x += parent.cell_size.x;
    float Fx1 = parent.interpolateTrilineary (t, buffer);

    t = vertex_found;
    t.x -= parent.cell_size.x;
    float Fx2 = parent.interpolateTrilineary (t, buffer);

    n.x = (Fx1 - Fx2);

    t = vertex_found;
    t.y += parent.cell_size.y;
    float Fy1 = parent.interpolateTrilineary (t, buffer);

    t = vertex_found;
    t.y -= parent.cell_size.y;
    float Fy2 = parent.interpolateTrilineary (t, buffer);

    n.y = (Fy1 - Fy2);

    t = vertex_found;
    t.z += parent.cell_size.z;
    float Fz1 = parent.interpolateTrilineary (t, buffer);

    t = vertex_found;
    t.z -= parent.cell_size.z;
    float Fz2 = parent.interpolateTrilineary (t, buffer);

    n.z = (Fz1 - Fz2);

    n = normalized (n);

    nmap.ptr (y                 )[x] = n.x;
    nmap.ptr (y +     parent.rows)[x] = n.y;
    nmap.ptr (y + 2 * parent.rows)[x] = n.z;
  }

  PtrStep<float> nmap; // output normal map (3 stacked row blocks)
};
// Bitmask controlling which hit-position outputs
// SignedSensorDistanceStoreAction writes.
enum // bitmask for STORE_POSE
{
  STORE_POSE_NONE = 0,   // store no hit position
  STORE_POSE_WORLD = 1,  // store the hit position in world coordinates (vmap)
  STORE_POSE_VOXEL = 2,  // store the hit position as voxel indices (voxel_map)
};
// Store action: writes the signed ray length of the hit into umap
// (positive = occupied, negative = unknown, 0 = no hit) and, depending on
// the STORE_POSE bitmask, the hit position in world and/or voxel
// coordinates (each map holds 3 stacked row blocks: x, y, z components).
template <int STORE_POSE>
struct SignedSensorDistanceStoreAction
{
  // Resets this pixel's outputs to their "no hit" values.
  template <class _RayCaster>
  __device__ __forceinline__ void Init(_RayCaster & parent,int x,int y)
  {
    if (STORE_POSE & STORE_POSE_WORLD)
      parent.vmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN ();
    if (STORE_POSE & STORE_POSE_VOXEL)
      // NOTE(review): only the middle row block (the y component) is reset
      // to -1 here -- presumably consumers test that component for
      // validity; confirm the intent.
      voxel_map.ptr (y + parent.rows)[x] = -1;
    umap.ptr (y)[x] = 0.0; // empty
  }

  template <class _RayCaster>
  __device__ __forceinline__ void Store(const float3 & /*world_pt_prev*/,const float3 & world_pt,
    const int3 & voxel_id,float /*tsdf*/,float weight,
    float time_curr,float /*time_step*/,const float3 & /*ray_start*/,const float3 & /*ray_dir*/,
    const _RayCaster & parent,int x,int y,pcl::gpu::kinfuLS::tsdf_buffer & buffer)
  {
    // Optional axis-aligned bounding-box filter on the hit position.
    if (filter.has_bbox)
    {
      if (world_pt.x < filter.bbox_min.x || world_pt.y < filter.bbox_min.y || world_pt.z < filter.bbox_min.z ||
          world_pt.x >= filter.bbox_max.x || world_pt.y >= filter.bbox_max.y || world_pt.z >= filter.bbox_max.z)
        return;
    }

    // Optional spherical filter on the hit position.
    if (filter.has_sphere)
    {
      float3 diff = world_pt - filter.sphere_center;
      float sqnorm = diff.x * diff.x + diff.y * diff.y + diff.z * diff.z;
      if (sqnorm > filter.sphere_radius * filter.sphere_radius)
        return;
    }

    if (STORE_POSE & STORE_POSE_WORLD)
    {
      parent.vmap.ptr (y                 )[x] = world_pt.x;
      parent.vmap.ptr (y +     parent.rows)[x] = world_pt.y;
      parent.vmap.ptr (y + 2 * parent.rows)[x] = world_pt.z;
    }

    if (STORE_POSE & STORE_POSE_VOXEL)
    {
      voxel_map.ptr (y                 )[x] = voxel_id.x;
      voxel_map.ptr (y +     parent.rows)[x] = voxel_id.y;
      voxel_map.ptr (y + 2 * parent.rows)[x] = voxel_id.z;
    }

    // The sign of the stored distance encodes the cell type at the stop.
    if (weight == 0)
      umap.ptr(y)[x] = -time_curr; // unknown
    else
      umap.ptr(y)[x] = time_curr; // occupied
  }

  PtrStep<float> umap;    // signed ray-length values, per pixel
  PtrStep<int> voxel_map; // hit voxel indices (3 stacked row blocks)
  RaycastFilter filter;   // optional bbox/sphere output filter
};
typedef SignedSensorDistanceStoreAction<STORE_POSE_NONE>
SignedSensorDistanceNoPoseStoreAction;
typedef SignedSensorDistanceStoreAction<STORE_POSE_WORLD>
SignedSensorDistanceWithPoseStoreAction;
typedef SignedSensorDistanceStoreAction<STORE_POSE_VOXEL>
SignedSensorDistanceWithVoxelIndexStoreAction;
// Kernel entry point: each thread casts the ray for one output pixel.
// Launch layout: 2D grid of CTA_SIZE_X x CTA_SIZE_Y blocks covering cols x rows.
// The fully configured RayCaster is passed by value.
template <class StoreCondition,class StoreAction,class SearchCondition>
__global__ void
rayCastKernel (const RayCaster<StoreCondition,StoreAction,SearchCondition> rc, pcl::gpu::kinfuLS::tsdf_buffer buffer) {
rc (buffer);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host-side driver: fills in a RayCaster from the camera pose/intrinsics and
// volume geometry, then launches rayCastKernel over the whole output map and
// blocks until it completes. vmap must be a 3-plane map (rows() divisible by 3).
template <class StoreCondition,class StoreAction,class SearchCondition>
void
templatedRaycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
float tranc_dist, float min_range, const float3& volume_size,
const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,MapArr& vmap,
const StoreCondition & store_condition,const StoreAction & store_action,
const SearchCondition & search_condition)
{
RayCaster<StoreCondition,StoreAction,SearchCondition> rc;
rc.Rcurr = Rcurr;
rc.tcurr = tcurr;
rc.time_step = tranc_dist * 0.8f; // march slightly finer than the truncation distance
rc.volume_size = volume_size;
rc.voxels_size = buffer->voxels_size;
rc.voxels_volume_padding = buffer->voxels_volume_padding;
rc.cell_size.x = volume_size.x / buffer->voxels_size.x;
rc.cell_size.y = volume_size.y / buffer->voxels_size.y;
rc.cell_size.z = volume_size.z / buffer->voxels_size.z;
rc.cell_size_inv.x = 1.0 / rc.cell_size.x;
rc.cell_size_inv.y = 1.0 / rc.cell_size.y;
rc.cell_size_inv.z = 1.0 / rc.cell_size.z;
rc.cols = vmap.cols ();
rc.rows = vmap.rows () / 3; // vmap stacks x,y,z planes vertically
rc.intr = intr;
rc.min_range = min_range;
rc.volume = volume;
rc.vmap = vmap;
rc.search_condition = search_condition;
rc.store_action = store_action;
rc.store_condition = store_condition;
dim3 block (RayCaster<StoreCondition,StoreAction,SearchCondition>::CTA_SIZE_X,
RayCaster<StoreCondition,StoreAction,SearchCondition>::CTA_SIZE_Y);
dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y));
hipLaunchKernelGGL(( rayCastKernel<StoreCondition,StoreAction,SearchCondition>), dim3(grid), dim3(block), 0, 0, rc, *buffer);
cudaSafeCall (hipGetLastError ()); // catch launch-configuration errors
cudaSafeCall(hipDeviceSynchronize()); // catch asynchronous execution errors
}
// Surface raycast: locates the TSDF zero crossing along every pixel ray and
// writes the interpolated vertex map (vmap) and normal map (nmap).
void
raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
         float tranc_dist, float min_range, const float3& volume_size,
         const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, MapArr& vmap, MapArr& nmap)
{
  typedef InterpolatePointAndNormalStoreAction StoreT;
  StoreT store;
  store.nmap = nmap;
  templatedRaycast<ZeroCrossingStoreCondition, StoreT, TrueSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      ZeroCrossingStoreCondition (), store, TrueSearchCondition ());
}
// Unknown-space raycast: stores signed sensor distances in umap and the
// terminating world point in vmap, optionally restricted by `filter`.
void
unkRaycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
            float tranc_dist, float min_range, const float3& volume_size,
            const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
            const RaycastFilter & filter, MapArr& vmap, MapArr& umap)
{
  typedef SignedSensorDistanceWithPoseStoreAction StoreT;
  StoreT store;
  store.filter = filter;
  store.umap = umap;
  templatedRaycast<NotEmptyStoreCondition, StoreT, TrueSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      NotEmptyStoreCondition (), store, TrueSearchCondition ());
}
// As unkRaycast, but the search is additionally clipped to the axis-aligned
// box [bbox_min, bbox_max).
void
unkRaycastBBox (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
                float tranc_dist, float min_range, const float3& volume_size,
                const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
                const RaycastFilter & filter, MapArr& vmap, MapArr& umap,
                const float3 & bbox_min,const float3 & bbox_max)
{
  typedef SignedSensorDistanceWithPoseStoreAction StoreT;
  StoreT store;
  store.filter = filter;
  store.umap = umap;
  const BBoxSearchCondition search (bbox_min, bbox_max);
  templatedRaycast<NotEmptyStoreCondition, StoreT, BBoxSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      NotEmptyStoreCondition (), store, search);
}
// As unkRaycast, but also records the voxel index of the terminating sample
// into voxel_ids (3 stacked planes).
void
unkRaycastVoxelIndex (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
                      float tranc_dist, float min_range, const float3& volume_size,
                      const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
                      const RaycastFilter & filter,
                      MapArr& vmap, MapArr& umap, PtrStep<int> voxel_ids)
{
  typedef SignedSensorDistanceWithVoxelIndexStoreAction StoreT;
  StoreT store;
  store.filter = filter;
  store.umap = umap;
  store.voxel_map = voxel_ids;
  templatedRaycast<NotEmptyStoreCondition, StoreT, TrueSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      NotEmptyStoreCondition (), store, TrueSearchCondition ());
}
// Combines unkRaycastVoxelIndex with a bounding-box search restriction.
void
unkRaycastBBoxVoxelIndex (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
                          float tranc_dist, float min_range, const float3& volume_size,
                          const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
                          const RaycastFilter & filter,
                          MapArr& vmap, MapArr& umap, PtrStep<int> voxel_ids,
                          const float3 & bbox_min, const float3 & bbox_max)
{
  typedef SignedSensorDistanceWithVoxelIndexStoreAction StoreT;
  StoreT store;
  store.filter = filter;
  store.umap = umap;
  store.voxel_map = voxel_ids;
  const BBoxSearchCondition search (bbox_min, bbox_max);
  templatedRaycast<NotEmptyStoreCondition, StoreT, BBoxSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      NotEmptyStoreCondition (), store, search);
}
// As unkRaycast, but stores only the signed distance (no world point).
void
unkRaycastNoVertex (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
                    float tranc_dist, float min_range, const float3& volume_size,
                    const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
                    const RaycastFilter & filter,
                    MapArr& vmap, MapArr& umap)
{
  typedef SignedSensorDistanceNoPoseStoreAction StoreT;
  StoreT store;
  store.filter = filter;
  store.umap = umap;
  templatedRaycast<NotEmptyStoreCondition, StoreT, TrueSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      NotEmptyStoreCondition (), store, TrueSearchCondition ());
}
// Combines unkRaycastNoVertex with a bounding-box search restriction.
void
unkRaycastBBoxNoVertex (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
                        float tranc_dist, float min_range, const float3& volume_size,
                        const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
                        const RaycastFilter & filter,
                        MapArr& vmap, MapArr& umap,
                        const float3 & bbox_min, const float3 & bbox_max)
{
  typedef SignedSensorDistanceNoPoseStoreAction StoreT;
  StoreT store;
  store.filter = filter;
  store.umap = umap;
  const BBoxSearchCondition search (bbox_min, bbox_max);
  templatedRaycast<NotEmptyStoreCondition, StoreT, BBoxSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      NotEmptyStoreCondition (), store, search);
}
}
}
}
| 9c20c2733097ff2430cbbeb68e58db30aff0eab2.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <boost/graph/buffer_concepts.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
/* ************************** */
// Component-wise (Hadamard) product of two float3 vectors.
__device__ __forceinline__ float3
float3_mul_elements(const float3 & a,const float3 & b)
{
  const float px = a.x * b.x;
  const float py = a.y * b.y;
  const float pz = a.z * b.z;
  return make_float3 (px, py, pz);
}
// Generic templated TSDF volume raycaster for kinfu_large_scale's cyclic
// (rolling-buffer) volume. An instance is configured on the host and passed
// by value to rayCastKernel; operator() marches the ray for one image pixel.
// Behaviour is customised through three policy types:
//   SearchCondition - spatial predicate deciding which sample points are examined,
//   StoreCondition  - predicate deciding when the ray terminates (and whether
//                     to refine the step length),
//   StoreAction     - writer invoked once for the terminating sample.
template <class StoreCondition,class StoreAction,class SearchCondition>
struct RayCaster
{
enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 }; // launch block dimensions
Mat33 Rcurr; // camera rotation
float3 tcurr; // camera translation = ray origin, meters
float time_step; // nominal marching step along the ray, meters
float3 volume_size; // physical volume extent, meters
int3 voxels_size; // volume resolution, voxels
int3 voxels_volume_padding; // voxel border excluded from the search
float3 cell_size; // meters per voxel
float3 cell_size_inv; // precomputed reciprocal of cell_size
int cols, rows; // output map size, pixels
mutable SearchCondition search_condition;
mutable StoreCondition store_condition;
mutable StoreAction store_action;
PtrStep<short2> volume; // packed (tsdf, weight) volume
float min_range; // minimum accepted depth along the camera z axis
Intr intr; // camera intrinsics
mutable PtrStep<float> vmap; // output vertex map, 3 stacked row planes
// Ray direction in camera coordinates (z = 1) through pixel (x,y).
__device__ __forceinline__ float3
get_ray_next (int x, int y) const
{
float3 ray_next;
ray_next.x = (x - intr.cx) / intr.fx;
ray_next.y = (y - intr.cy) / intr.fy;
ray_next.z = 1;
return ray_next;
}
// True iff voxel g lies inside the volume.
__device__ __forceinline__ bool
checkInds (const int3& g) const
{
return (g.x >= 0 && g.y >= 0 && g.z >= 0 && g.x < voxels_size.x && g.y < voxels_size.y && g.z < voxels_size.z);
}
// True iff voxel g lies inside the volume minus the padding border.
__device__ __forceinline__ bool
checkSafeInds (const int3& g) const
{
return (g.x >= voxels_volume_padding.x && g.y >= voxels_volume_padding.y && g.z >= voxels_volume_padding.z &&
g.x < voxels_size.x - voxels_volume_padding.x &&
g.y < voxels_size.y - voxels_volume_padding.y &&
g.z < voxels_size.z - voxels_volume_padding.z);
}
// Translate voxel coordinates by the rolling-buffer origin, wrapping at the
// buffer bounds (the volume is addressed cyclically).
__device__ __forceinline__ void
shift_coords (int & x, int & y, int & z, const pcl::gpu::kinfuLS::tsdf_buffer & buffer) const
{
x += buffer.origin_GRID.x;
y += buffer.origin_GRID.y;
z += buffer.origin_GRID.z;
if (x >= buffer.voxels_size.x)
x -= buffer.voxels_size.x;
if (y >= buffer.voxels_size.y)
y -= buffer.voxels_size.y;
if (z >= buffer.voxels_size.z)
z -= buffer.voxels_size.z;
}
// Read the TSDF value at voxel (x,y,z), applying the cyclic-buffer shift.
__device__ __forceinline__ float
readTsdf (int x, int y, int z, const pcl::gpu::kinfuLS::tsdf_buffer & buffer) const
{
shift_coords(x,y,z,buffer);
const short2* pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]);
return unpack_tsdf (*pos);
}
// Read both TSDF value and weight at voxel (x,y,z).
__device__ __forceinline__ void
readTsdf (int x, int y, int z, const pcl::gpu::kinfuLS::tsdf_buffer & buffer,float& tsdf, int& weight) const
{
shift_coords(x,y,z,buffer);
const short2* pos = &(volume.ptr (buffer.voxels_size.y * z + y)[x]);
unpack_tsdf (*pos,tsdf,weight);
}
// Convert a metric point to fractional voxel coordinates.
__device__ __forceinline__ float3
fromMetersToCells (const float3 & point) const
{
return float3_mul_elements(point,cell_size_inv);
}
// Convert fractional voxel coordinates back to meters.
__device__ __forceinline__ float3
fromCellsToMeters (const float3 & point) const
{
return float3_mul_elements(point,cell_size);
}
// Integer voxel containing a metric point.
__device__ __forceinline__ int3
getVoxelFromPoint (float3 point) const
{
return getVoxelFromCell(fromMetersToCells(point));
}
// Integer voxel containing a fractional cell coordinate (floor).
__device__ __forceinline__ int3
getVoxelFromCell (const float3 & cell) const
{
int vx = __float2int_rd (cell.x); // round to negative infinity
int vy = __float2int_rd (cell.y);
int vz = __float2int_rd (cell.z);
return make_int3 (vx, vy, vz);
}
// Trilinearly interpolated TSDF at a metric point.
__device__ __forceinline__ float
interpolateTrilineary (const float3& point, const pcl::gpu::kinfuLS::tsdf_buffer & buffer) const
{
const float3 cell = fromMetersToCells (point);
return interpolateTrilinearyFromCell (cell,buffer);
}
// Trilinearly interpolated TSDF at a fractional cell coordinate. Returns NaN
// when the 8-voxel neighbourhood would leave the volume.
__device__ __forceinline__ float
interpolateTrilinearyFromCell (const float3& cell, const pcl::gpu::kinfuLS::tsdf_buffer & buffer) const
{
int3 g = getVoxelFromCell (cell);
if (g.x <= 0 || g.x >= buffer.voxels_size.x - 1)
return numeric_limits<float>::quiet_NaN ();
if (g.y <= 0 || g.y >= buffer.voxels_size.y - 1)
return numeric_limits<float>::quiet_NaN ();
if (g.z <= 0 || g.z >= buffer.voxels_size.z - 1)
return numeric_limits<float>::quiet_NaN ();
/* //OLD CODE
float vx = (g.x + 0.5f) * cell_size.x;
float vy = (g.y + 0.5f) * cell_size.y;
float vz = (g.z + 0.5f) * cell_size.z;
g.x = (point.x < vx) ? (g.x - 1) : g.x;
g.y = (point.y < vy) ? (g.y - 1) : g.y;
g.z = (point.z < vz) ? (g.z - 1) : g.z;
float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;
float res = readTsdf (g.x + 0, g.y + 0, g.z + 0, buffer) * (1 - a) * (1 - b) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1, buffer) * (1 - a) * (1 - b) * c +
readTsdf (g.x + 0, g.y + 1, g.z + 0, buffer) * (1 - a) * b * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1, buffer) * (1 - a) * b * c +
readTsdf (g.x + 1, g.y + 0, g.z + 0, buffer) * a * (1 - b) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1, buffer) * a * (1 - b) * c +
readTsdf (g.x + 1, g.y + 1, g.z + 0, buffer) * a * b * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1, buffer) * a * b * c;
*/
//NEW CODE
// Fractional offsets relative to voxel centers; step back one voxel when
// the sample lies in the lower half of the cell.
float a = cell.x - (g.x + 0.5f); if (a<0) { g.x--; a+=1.0f; };
float b = cell.y - (g.y + 0.5f); if (b<0) { g.y--; b+=1.0f; };
float c = cell.z - (g.z + 0.5f); if (c<0) { g.z--; c+=1.0f; };
// Factored trilinear blend of the 8 surrounding voxel centers.
float res = (1 - a) * (
(1 - b) * (
readTsdf (g.x + 0, g.y + 0, g.z + 0, buffer) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1, buffer) * c
)
+ b * (
readTsdf (g.x + 0, g.y + 1, g.z + 0, buffer) * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1, buffer) * c
)
)
+ a * (
(1 - b) * (
readTsdf (g.x + 1, g.y + 0, g.z + 0, buffer) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1, buffer) * c
)
+ b * (
readTsdf (g.x + 1, g.y + 1, g.z + 0, buffer) * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1, buffer) * c
)
)
;
return res;
}
// Slab test: entry/exit ray parameters against the box [0, box_max].
__device__ void find_min_max_time(float3 ray_org, float3 ray_dir, float3 box_max, float &tnear, float &tfar) const
{
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f/ray_dir.x, 1.f/ray_dir.y, 1.f/ray_dir.z);
float3 tbot = float3_mul_elements(invR,box_min - ray_org);
float3 ttop = float3_mul_elements(invR,box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
// (tmin.x / tmax.x appear twice, which still yields the 3-way max/min)
tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
}
// March the ray for the pixel owned by this thread.
__device__ __forceinline__ void
operator () (pcl::gpu::kinfuLS::tsdf_buffer buffer) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (x >= cols || y >= rows)
return;
store_action.Init(*this,x,y);
const float3 ray_start = tcurr;
float3 norm_ray_next = normalized (get_ray_next (x, y));
float3 ray_dir = normalized (Rcurr * get_ray_next (x, y));
//ensure that it isn't a degenerate case
ray_dir.x = (ray_dir.x == 0.f) ? 1e-15 : ray_dir.x;
ray_dir.y = (ray_dir.y == 0.f) ? 1e-15 : ray_dir.y;
ray_dir.z = (ray_dir.z == 0.f) ? 1e-15 : ray_dir.z;
// computer time when entry and exit volume
float time_start_volume;
float time_exit_volume;
find_min_max_time(ray_start,ray_dir,volume_size,time_start_volume,time_exit_volume);
const float min_dist = 0.f; //in meters
time_start_volume = fmax (time_start_volume, min_dist);
if (time_start_volume >= time_exit_volume)
return;
time_exit_volume -= time_step;
float time_curr = time_start_volume;
// Clamp the starting voxel into the volume before the first read.
int3 g = getVoxelFromPoint (ray_start + ray_dir * time_curr);
g.x = max (0, min (g.x, buffer.voxels_size.x - 1));
g.y = max (0, min (g.y, buffer.voxels_size.y - 1));
g.z = max (0, min (g.z, buffer.voxels_size.z - 1));
float tsdf;
int weight;
readTsdf (g.x, g.y, g.z, buffer, tsdf, weight);
//infinite loop guard
const float max_time = fmin(time_exit_volume,3.0 * (volume_size.x + volume_size.y + volume_size.z));
float curr_time_step = time_step;
bool zero_crossing = false;
for (; time_curr < max_time; time_curr += curr_time_step)
{
float tsdf_prev = tsdf;
int weight_prev = weight;
const float3 world_pt = ray_start + ray_dir * (time_curr + curr_time_step);
if (!search_condition.Evaluate(world_pt))
continue;
g = getVoxelFromPoint (world_pt);
if (!checkInds (g))
return;
if (!checkSafeInds(g))
continue;
readTsdf (g.x, g.y, g.z, buffer, tsdf, weight);
// Give the store condition a chance to shrink the step and re-sample
// (used to refine the known->unknown boundary).
{
float new_time_step;
if (store_condition.ChangeTimeStep(tsdf_prev,tsdf,weight_prev,weight,curr_time_step,new_time_step))
{
if (new_time_step > min(cell_size.x,min(cell_size.y,cell_size.z)))
{
tsdf = tsdf_prev;
weight = weight_prev;
time_curr -= curr_time_step;
curr_time_step = new_time_step;
continue;
}
}
}
// Crossing from inside a surface to outside: ray exits geometry, stop.
if (tsdf_prev < 0.f && tsdf > 0.f)
return;
zero_crossing = store_condition.Evaluate(tsdf_prev,tsdf,weight_prev,weight);
if (zero_crossing && (time_curr * norm_ray_next.z) < min_range)
return;
if (zero_crossing)
break; // break out of the cycle here, so Stores will be executed in sync by all threads
}
if (zero_crossing)
{
const float3 world_pt_prev = ray_start + ray_dir * (time_curr);
const float3 world_pt = ray_start + ray_dir * (time_curr + curr_time_step);
store_action.Store(world_pt_prev,world_pt,g,tsdf,weight,time_curr,time_step,ray_start,ray_dir,
*this,x,y,buffer);
}
}
};
// Search condition restricting the raycast to points strictly inside a
// sphere (squared-distance comparison, no sqrt).
struct SphereSearchCondition
{
SphereSearchCondition () {}
SphereSearchCondition (const float3 & c,const float & r): sphere_center(c), sphere_radius(r) {}
__device__ __forceinline__ bool Evaluate (const float3 & pt) const
{
  const float dx = sphere_center.x - pt.x;
  const float dy = sphere_center.y - pt.y;
  const float dz = sphere_center.z - pt.z;
  const float sq_dist = dx * dx + dy * dy + dz * dz;
  return sq_dist < sphere_radius * sphere_radius;
}
float3 sphere_center;
float sphere_radius;
};
// Search condition restricting the raycast to the half-open axis-aligned
// box [bbox_min, bbox_max).
struct BBoxSearchCondition
{
BBoxSearchCondition () {}
BBoxSearchCondition (const float3 & m,const float3 & M): bbox_min(m), bbox_max(M) {}
__device__ __forceinline__ bool Evaluate (const float3 & pt) const
{
  if (pt.x < bbox_min.x || pt.x >= bbox_max.x)
    return false;
  if (pt.y < bbox_min.y || pt.y >= bbox_max.y)
    return false;
  return pt.z >= bbox_min.z && pt.z < bbox_max.z;
}
float3 bbox_min;
float3 bbox_max;
};
// Search condition that accepts every point (no spatial restriction).
struct TrueSearchCondition
{
__device__ __forceinline__ bool Evaluate (const float3 & /*pt*/) const {return true; }
};
// Store condition for surface raycasts: the ray terminates at a positive->
// negative TSDF transition, i.e. when it passes from free space into a
// surface. Never asks for step refinement.
struct ZeroCrossingStoreCondition
{
__device__ __forceinline__ bool Evaluate(float tsdf_prev,float tsdf_curr,int /*weight_prev*/,int /*weight_curr*/)
{
return tsdf_prev > 0.0f && tsdf_curr < 0.0f;
}
__device__ __forceinline__ bool ChangeTimeStep(float /*tsdf_prev*/,float /*tsdf*/,int /*weight_prev*/,int /*weight_curr*/,
float /*time_step*/,float & /*new_time_step*/)
{
return false;
}
};
// Store condition for unknown-space raycasts: the ray terminates when it
// reaches a voxel that is either inside a surface (tsdf < 0) or has never
// been observed (weight == 0).
struct NotEmptyStoreCondition
{
  // True when the current voxel is occupied or unknown.
  __device__ __forceinline__ bool Evaluate(float /*tsdf_prev*/,float tsdf_curr,int /*weight_prev*/,int weight_curr)
  {
    return tsdf_curr < 0.0f || weight_curr == 0;
  }
  // On a known -> unknown transition, ask the caster to retry the step at
  // half length so the boundary is located more precisely.
  __device__ __forceinline__ bool ChangeTimeStep(float /*tsdf_prev*/,float /*tsdf*/,int weight_prev,int weight_curr,
  float time_step,float & new_time_step)
  {
    if (weight_curr == 0 && weight_prev != 0)
    {
      // Halve in single precision. The previous `time_step / 2.0` promoted
      // to double and back; dividing by two is exact in both widths, so the
      // value is bit-identical while avoiding the double round trip on device.
      new_time_step = time_step * 0.5f;
      return true;
    }
    return false;
  }
};
// Store action for surface raycasts: refines the zero-crossing position by
// linear interpolation of the TSDF between the last two samples, then writes
// the vertex into parent.vmap and a central-difference TSDF-gradient normal
// into nmap. Both maps stack x/y/z planes vertically, `rows` apart.
struct InterpolatePointAndNormalStoreAction
{
// Mark pixel (x,y) invalid (NaN in the first plane) before the ray is cast.
template <class _RayCaster>
__device__ __forceinline__ void Init(_RayCaster & parent,int x,int y)
{
parent.vmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN ();
nmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN ();
}
template <class _RayCaster>
__device__ __forceinline__ void Store(const float3 & world_pt_prev,const float3 & world_pt,
const int3 & /*voxel_id*/,float /*tsdf*/,float weight,
float time_curr,float time_step,const float3 & ray_start,const float3 & ray_dir,
const _RayCaster & parent,int x,int y,pcl::gpu::kinfuLS::tsdf_buffer & buffer)
{
if (weight == 0)
return;
// Default: assume the crossing lies halfway between the two samples.
float step_correction = 0.5;
float Ftdt = parent.interpolateTrilineary (world_pt, buffer);
if (isnan (Ftdt))
return;
float Ft = parent.interpolateTrilineary (world_pt_prev, buffer);
if (isnan (Ft))
return;
// Refine by secant interpolation when the denominator is well conditioned.
// NOTE(review): `abs` on a float — presumably resolves to the float overload
// in device code, but fabsf would be unambiguous; confirm.
if (abs(Ftdt - Ft) > 0.1)
step_correction = __fdividef(Ft,Ftdt - Ft);
// NOTE(review): uses the nominal time_step, while the caster may have
// shrunk its current step — assumed equivalent here; verify against caller.
float Ts = time_curr - time_step * step_correction;
float3 vetex_found = ray_start + ray_dir * Ts;
parent.vmap.ptr (y )[x] = vetex_found.x;
parent.vmap.ptr (y + parent.rows)[x] = vetex_found.y;
parent.vmap.ptr (y + 2 * parent.rows)[x] = vetex_found.z;
// Normal = normalized central difference of the TSDF, one cell apart
// along each axis.
float3 t;
float3 n;
t = vetex_found;
t.x += parent.cell_size.x;
float Fx1 = parent.interpolateTrilineary (t, buffer);
t = vetex_found;
t.x -= parent.cell_size.x;
float Fx2 = parent.interpolateTrilineary (t, buffer);
n.x = (Fx1 - Fx2);
t = vetex_found;
t.y += parent.cell_size.y;
float Fy1 = parent.interpolateTrilineary (t, buffer);
t = vetex_found;
t.y -= parent.cell_size.y;
float Fy2 = parent.interpolateTrilineary (t, buffer);
n.y = (Fy1 - Fy2);
t = vetex_found;
t.z += parent.cell_size.z;
float Fz1 = parent.interpolateTrilineary (t, buffer);
t = vetex_found;
t.z -= parent.cell_size.z;
float Fz2 = parent.interpolateTrilineary (t, buffer);
n.z = (Fz1 - Fz2);
n = normalized (n);
nmap.ptr (y )[x] = n.x;
nmap.ptr (y + parent.rows)[x] = n.y;
nmap.ptr (y + 2 * parent.rows)[x] = n.z;
}
PtrStep<float> nmap; // output normal map (3 stacked row planes)
};
// Bitmask selecting which outputs SignedSensorDistanceStoreAction writes.
enum // bitmask for STORE_POSE
{
STORE_POSE_NONE = 0, // write only the signed sensor distance (umap)
STORE_POSE_WORLD = 1, // additionally write the world-space hit point (vmap)
STORE_POSE_VOXEL = 2, // additionally write the voxel index of the hit (voxel_map)
};
// Store action for "unknown space" raycasts. For each pixel it records in
// umap the signed distance travelled along the ray: negative when the ray
// ended in unobserved space (weight == 0), positive when it hit an occupied
// voxel, and 0 when the ray never terminated (left empty by Init). The
// STORE_POSE bitmask optionally adds the world-space hit point (vmap) and
// the voxel index (voxel_map); both maps stack x/y/z planes vertically,
// `rows` apart.
template <int STORE_POSE>
struct SignedSensorDistanceStoreAction
{
// Reset the outputs for pixel (x,y) before the ray is cast.
template <class _RayCaster>
__device__ __forceinline__ void Init(_RayCaster & parent,int x,int y)
{
if (STORE_POSE & STORE_POSE_WORLD)
parent.vmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN ();
if (STORE_POSE & STORE_POSE_VOXEL)
voxel_map.ptr (y + parent.rows)[x] = -1; // NOTE(review): only the middle (y) plane is reset to the -1 sentinel — presumably readers test this plane for validity; confirm
umap.ptr (y)[x] = 0.0; // empty
}
// Record the terminating sample at pixel (x,y), subject to the optional
// bbox/sphere filter. `weight == 0` marks unknown space.
template <class _RayCaster>
__device__ __forceinline__ void Store(const float3 & /*world_pt_prev*/,const float3 & world_pt,
const int3 & voxel_id,float /*tsdf*/,float weight,
float time_curr,float /*time_step*/,const float3 & /*ray_start*/,const float3 & /*ray_dir*/,
const _RayCaster & parent,int x,int y,pcl::gpu::kinfuLS::tsdf_buffer & buffer)
{
// Discard hits outside the optional axis-aligned box ([min, max) bounds).
if (filter.has_bbox)
{
if (world_pt.x < filter.bbox_min.x || world_pt.y < filter.bbox_min.y || world_pt.z < filter.bbox_min.z ||
world_pt.x >= filter.bbox_max.x || world_pt.y >= filter.bbox_max.y || world_pt.z >= filter.bbox_max.z)
return;
}
// Discard hits outside the optional sphere (squared-distance test).
if (filter.has_sphere)
{
float3 diff = world_pt - filter.sphere_center;
float sqnorm = diff.x * diff.x + diff.y * diff.y + diff.z * diff.z;
if (sqnorm > filter.sphere_radius * filter.sphere_radius)
return;
}
if (STORE_POSE & STORE_POSE_WORLD)
{
parent.vmap.ptr (y )[x] = world_pt.x;
parent.vmap.ptr (y + parent.rows)[x] = world_pt.y;
parent.vmap.ptr (y + 2 * parent.rows)[x] = world_pt.z;
}
if (STORE_POSE & STORE_POSE_VOXEL)
{
voxel_map.ptr (y )[x] = voxel_id.x;
voxel_map.ptr (y + parent.rows)[x] = voxel_id.y;
voxel_map.ptr (y + 2 * parent.rows)[x] = voxel_id.z;
}
// Sign of umap encodes why the ray terminated.
if (weight == 0)
umap.ptr(y)[x] = -time_curr; // unknown
else
umap.ptr(y)[x] = time_curr; // occupied
}
PtrStep<float> umap; // intensity values
PtrStep<int> voxel_map;
RaycastFilter filter;
};
typedef SignedSensorDistanceStoreAction<STORE_POSE_NONE>
SignedSensorDistanceNoPoseStoreAction;
typedef SignedSensorDistanceStoreAction<STORE_POSE_WORLD>
SignedSensorDistanceWithPoseStoreAction;
typedef SignedSensorDistanceStoreAction<STORE_POSE_VOXEL>
SignedSensorDistanceWithVoxelIndexStoreAction;
// Kernel entry point: each thread casts the ray for one output pixel.
// Launch layout: 2D grid of CTA_SIZE_X x CTA_SIZE_Y blocks covering cols x rows.
// The fully configured RayCaster is passed by value.
template <class StoreCondition,class StoreAction,class SearchCondition>
__global__ void
rayCastKernel (const RayCaster<StoreCondition,StoreAction,SearchCondition> rc, pcl::gpu::kinfuLS::tsdf_buffer buffer) {
rc (buffer);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host-side driver: fills in a RayCaster from the camera pose/intrinsics and
// volume geometry, then launches rayCastKernel over the whole output map and
// blocks until it completes. vmap must be a 3-plane map (rows() divisible by 3).
template <class StoreCondition,class StoreAction,class SearchCondition>
void
templatedRaycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
float tranc_dist, float min_range, const float3& volume_size,
const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,MapArr& vmap,
const StoreCondition & store_condition,const StoreAction & store_action,
const SearchCondition & search_condition)
{
RayCaster<StoreCondition,StoreAction,SearchCondition> rc;
rc.Rcurr = Rcurr;
rc.tcurr = tcurr;
rc.time_step = tranc_dist * 0.8f; // march slightly finer than the truncation distance
rc.volume_size = volume_size;
rc.voxels_size = buffer->voxels_size;
rc.voxels_volume_padding = buffer->voxels_volume_padding;
rc.cell_size.x = volume_size.x / buffer->voxels_size.x;
rc.cell_size.y = volume_size.y / buffer->voxels_size.y;
rc.cell_size.z = volume_size.z / buffer->voxels_size.z;
rc.cell_size_inv.x = 1.0 / rc.cell_size.x;
rc.cell_size_inv.y = 1.0 / rc.cell_size.y;
rc.cell_size_inv.z = 1.0 / rc.cell_size.z;
rc.cols = vmap.cols ();
rc.rows = vmap.rows () / 3; // vmap stacks x,y,z planes vertically
rc.intr = intr;
rc.min_range = min_range;
rc.volume = volume;
rc.vmap = vmap;
rc.search_condition = search_condition;
rc.store_action = store_action;
rc.store_condition = store_condition;
dim3 block (RayCaster<StoreCondition,StoreAction,SearchCondition>::CTA_SIZE_X,
RayCaster<StoreCondition,StoreAction,SearchCondition>::CTA_SIZE_Y);
dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y));
rayCastKernel<StoreCondition,StoreAction,SearchCondition><<<grid, block>>>(rc, *buffer);
cudaSafeCall (cudaGetLastError ()); // catch launch-configuration errors
cudaSafeCall(cudaDeviceSynchronize()); // catch asynchronous execution errors
}
// Surface raycast: locates the TSDF zero crossing along every pixel ray and
// writes the interpolated vertex map (vmap) and normal map (nmap).
void
raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
         float tranc_dist, float min_range, const float3& volume_size,
         const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, MapArr& vmap, MapArr& nmap)
{
  typedef InterpolatePointAndNormalStoreAction StoreT;
  StoreT store;
  store.nmap = nmap;
  templatedRaycast<ZeroCrossingStoreCondition, StoreT, TrueSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      ZeroCrossingStoreCondition (), store, TrueSearchCondition ());
}
// Unknown-space raycast: stores signed sensor distances in umap and the
// terminating world point in vmap, optionally restricted by `filter`.
void
unkRaycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
            float tranc_dist, float min_range, const float3& volume_size,
            const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
            const RaycastFilter & filter, MapArr& vmap, MapArr& umap)
{
  typedef SignedSensorDistanceWithPoseStoreAction StoreT;
  StoreT store;
  store.filter = filter;
  store.umap = umap;
  templatedRaycast<NotEmptyStoreCondition, StoreT, TrueSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      NotEmptyStoreCondition (), store, TrueSearchCondition ());
}
// As unkRaycast, but the search is additionally clipped to the axis-aligned
// box [bbox_min, bbox_max).
void
unkRaycastBBox (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
                float tranc_dist, float min_range, const float3& volume_size,
                const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
                const RaycastFilter & filter, MapArr& vmap, MapArr& umap,
                const float3 & bbox_min,const float3 & bbox_max)
{
  typedef SignedSensorDistanceWithPoseStoreAction StoreT;
  StoreT store;
  store.filter = filter;
  store.umap = umap;
  const BBoxSearchCondition search (bbox_min, bbox_max);
  templatedRaycast<NotEmptyStoreCondition, StoreT, BBoxSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      NotEmptyStoreCondition (), store, search);
}
// As unkRaycast, but also records the voxel index of the terminating sample
// into voxel_ids (3 stacked planes).
void
unkRaycastVoxelIndex (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
                      float tranc_dist, float min_range, const float3& volume_size,
                      const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
                      const RaycastFilter & filter,
                      MapArr& vmap, MapArr& umap, PtrStep<int> voxel_ids)
{
  typedef SignedSensorDistanceWithVoxelIndexStoreAction StoreT;
  StoreT store;
  store.filter = filter;
  store.umap = umap;
  store.voxel_map = voxel_ids;
  templatedRaycast<NotEmptyStoreCondition, StoreT, TrueSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      NotEmptyStoreCondition (), store, TrueSearchCondition ());
}
// Combines unkRaycastVoxelIndex with a bounding-box search restriction.
void
unkRaycastBBoxVoxelIndex (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
                          float tranc_dist, float min_range, const float3& volume_size,
                          const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
                          const RaycastFilter & filter,
                          MapArr& vmap, MapArr& umap, PtrStep<int> voxel_ids,
                          const float3 & bbox_min, const float3 & bbox_max)
{
  typedef SignedSensorDistanceWithVoxelIndexStoreAction StoreT;
  StoreT store;
  store.filter = filter;
  store.umap = umap;
  store.voxel_map = voxel_ids;
  const BBoxSearchCondition search (bbox_min, bbox_max);
  templatedRaycast<NotEmptyStoreCondition, StoreT, BBoxSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      NotEmptyStoreCondition (), store, search);
}
// As unkRaycast, but stores only the signed distance (no world point).
void
unkRaycastNoVertex (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
                    float tranc_dist, float min_range, const float3& volume_size,
                    const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
                    const RaycastFilter & filter,
                    MapArr& vmap, MapArr& umap)
{
  typedef SignedSensorDistanceNoPoseStoreAction StoreT;
  StoreT store;
  store.filter = filter;
  store.umap = umap;
  templatedRaycast<NotEmptyStoreCondition, StoreT, TrueSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      NotEmptyStoreCondition (), store, TrueSearchCondition ());
}
// Combines unkRaycastNoVertex with a bounding-box search restriction.
void
unkRaycastBBoxNoVertex (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
                        float tranc_dist, float min_range, const float3& volume_size,
                        const PtrStep<short2>& volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,
                        const RaycastFilter & filter,
                        MapArr& vmap, MapArr& umap,
                        const float3 & bbox_min, const float3 & bbox_max)
{
  typedef SignedSensorDistanceNoPoseStoreAction StoreT;
  StoreT store;
  store.filter = filter;
  store.umap = umap;
  const BBoxSearchCondition search (bbox_min, bbox_max);
  templatedRaycast<NotEmptyStoreCondition, StoreT, BBoxSearchCondition> (
      intr, Rcurr, tcurr, tranc_dist, min_range, volume_size, volume, buffer, vmap,
      NotEmptyStoreCondition (), store, search);
}
}
}
}
|
74d7d6cddb9ef9a2d9b20f7f179d7795e2a14735.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/selected_rows/adam_kernel.h"
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/adam_functors.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
namespace phi {
namespace sr {
// Single-thread kernel: advances the running powers of beta1/beta2 by one
// Adam step (beta_pow_out = beta * beta_pow). Intended for a <<<1, 1>>>
// launch; every thread would otherwise write the same two scalars.
template <typename T>
__global__ void UpdateBetaPow(T beta1,
                              T beta2,
                              const T* beta1_pow_,
                              const T* beta2_pow_,
                              T* beta1_pow_out,
                              T* beta2_pow_out) {
  beta1_pow_out[0] = beta1_pow_[0] * beta1;
  beta2_pow_out[0] = beta2_pow_[0] * beta2;
}
// Adam update for a dense parameter with a sparse (SelectedRows) gradient.
// Grid-stride loop over all `ndim` parameter elements; for each element the
// owning sparse row is located by binary search and a zero gradient is used
// when the row is absent. T is the storage type, MT the (possibly wider)
// math type; master_param/master_param_out carry the MT copy for mixed
// precision and may be null.
template <typename T, typename MT>
__global__ void SparseAdamCUDAKernelREG(MT beta1,
MT beta2,
MT epsilon,
const MT beta1_pow,
const MT beta2_pow,
const MT* mom1_,
MT* mom1_out_,
const MT* mom2_,
MT* mom2_out_,
const MT* lr_,
const T* grad_,
const T* param_,
T* param_out_,
const MT* master_param,
MT* master_param_out,
const int64_t* rows_,
int64_t row_numel,
int64_t row_count,
bool lazy_mode,
int ndim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
MT lr = *lr_; // scalar learning rate, read once per thread
for (; id < ndim; id += blockDim.x * gridDim.x) {
// Row of the sparse gradient that would hold this element, or -1.
auto row_idx =
phi::funcs::BinarySearch<int64_t>(rows_, row_count, id / row_numel);
if (lazy_mode && row_idx < 0) {
return; // NOTE(review): `return` (not `continue`) ends this thread's grid-stride loop on the first missing row — presumably intentional upstream behavior; confirm
} else {
MT mom1 = mom1_[id];
MT mom2 = mom2_[id];
MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]);
// Missing rows contribute a zero gradient in non-lazy mode.
MT g = row_idx >= 0
? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel])
: static_cast<MT>(0);
// Standard Adam moment updates with bias correction folded into the
// step below.
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
MT denom =
(sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
// Write back to global memory
mom1_out_[id] = mom1;
mom2_out_[id] = mom2;
param_out_[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
}
// Adam optimizer step for a dense parameter whose gradient arrives as
// SelectedRows (sparse rows). Duplicate gradient rows are merged first; then
// the update runs either as a register-parameter kernel (when both beta-pow
// tensors live on the CPU) or as a SparseAdamFunctor over ForRange (when the
// beta pows live on the device). Supports multi-precision training (T = fp16
// storage with an fp32 master copy).
template <typename T, typename Context>
void AdamDenseParamSparseGradKernel(
    const Context& dev_ctx,
    const DenseTensor& param,
    const SelectedRows& grad,
    const DenseTensor& learning_rate,
    const DenseTensor& moment1,
    const DenseTensor& moment2,
    const DenseTensor& beta1_pow,
    const DenseTensor& beta2_pow,
    const paddle::optional<DenseTensor>& master_param,
    const paddle::optional<DenseTensor>& skip_update,
    const Scalar& beta1,
    const Scalar& beta2,
    const Scalar& epsilon,
    bool lazy_mode,
    int64_t min_row_size_to_use_multithread,
    bool multi_precision,
    bool use_global_beta_pow,
    DenseTensor* param_out,
    DenseTensor* moment1_out,
    DenseTensor* moment2_out,
    DenseTensor* beta1_pow_out,
    DenseTensor* beta2_pow_out,
    DenseTensor* master_param_outs) {
  // Math type: fp32 when T is fp16, otherwise T itself.
  using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
  VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
  // Read the optional 1-element skip_update flag back to the host.
  bool skip_update_ = false;
  if (skip_update.is_initialized()) {
    PADDLE_ENFORCE_EQ(
        skip_update->numel(),
        1,
        errors::InvalidArgument("Input(SkipUpdate) size must be 1, but get %d",
                                skip_update->numel()));
    std::vector<bool> skip_update_vec;
    phi::TensorToVector(*skip_update, dev_ctx, &skip_update_vec);
    skip_update_ = skip_update_vec[0];
  }
  // skip_update=true, just copy input to output, and TensorCopy will call
  // mutable_data
  if (skip_update_) {
    VLOG(4) << "Adam skip update";
    phi::Copy(dev_ctx, param, dev_ctx.GetPlace(), false, param_out);
    phi::Copy(dev_ctx, moment1, dev_ctx.GetPlace(), false, moment1_out);
    phi::Copy(dev_ctx, moment2, dev_ctx.GetPlace(), false, moment2_out);
    if (!use_global_beta_pow) {
      // The beta pows keep whatever place they arrived on.
      phi::Copy(dev_ctx, beta1_pow, beta1_pow.place(), false, beta1_pow_out);
      phi::Copy(dev_ctx, beta2_pow, beta2_pow.place(), false, beta2_pow_out);
    }
    return;
  }
  // Hyper-parameters as host scalars in math precision.
  MPDType beta1_ = beta1.to<MPDType>();
  MPDType beta2_ = beta2.to<MPDType>();
  MPDType epsilon_ = epsilon.to<MPDType>();
  VLOG(3) << "beta1_pow.numel() : " << beta1_pow.numel()
          << "beta2_pow.numel() : " << beta2_pow.numel();
  VLOG(3) << "param.numel(): " << param.numel();
  PADDLE_ENFORCE_EQ(
      beta1_pow_out->numel(),
      1,
      errors::InvalidArgument("beta1 pow output size should be 1, but received "
                              "value is:%d.",
                              beta1_pow_out->numel()));
  PADDLE_ENFORCE_EQ(
      beta2_pow_out->numel(),
      1,
      errors::InvalidArgument("beta2 pow output size should be 1, but received "
                              "value is:%d.",
                              beta2_pow_out->numel()));
  // Master (fp32) parameter copies, only used in multi-precision mode.
  const MPDType* master_in_data =
      multi_precision ? master_param->data<MPDType>() : nullptr;
  MPDType* master_out_data =
      multi_precision ? dev_ctx.template Alloc<MPDType>(master_param_outs)
                      : nullptr;
  if (grad.rows().size() == 0) {
    // NOTE(review): this early return leaves param_out/moment*_out unwritten;
    // presumably outputs alias inputs in this op — confirm against callers.
    VLOG(3) << "grad row size is 0!!";
    return;
  }
  // The kernel's binary search requires strictly sorted, unique rows; check
  // on the host and merge duplicates (which also sorts) if necessary.
  std::vector<int64_t> cpu_rows(grad.rows().begin(), grad.rows().end());
  bool is_strict_sorted = true;
  for (size_t i = 1; i < cpu_rows.size(); ++i) {
    if (cpu_rows[i - 1] >= cpu_rows[i]) {
      is_strict_sorted = false;
      break;
    }
  }
  phi::SelectedRows tmp_grad_merge;
  const phi::SelectedRows* grad_merge_ptr;
  if (is_strict_sorted) {
    grad_merge_ptr = &grad;
  } else {
    // merge duplicated rows if any.
    // The rows of grad_merge have been sorted inside MergeAdd functor
    phi::funcs::scatter::MergeAdd<Context, T> merge_func;
    merge_func(dev_ctx, grad, &tmp_grad_merge, true);
    grad_merge_ptr = &tmp_grad_merge;
  }
  auto& grad_merge = *grad_merge_ptr;
  auto& grad_tensor = grad_merge.value();
  const T* grad_data = grad_tensor.template data<T>();
  // Device-visible copy of the (sorted) row index list.
  auto* grad_merge_rows = &grad_merge.rows();
  phi::MixVector<int64_t> mixv_grad_merge_rows(grad_merge_rows);
  const int64_t* rows = mixv_grad_merge_rows.Data(dev_ctx.GetPlace());
  auto row_numel = grad_tensor.numel() / grad_merge.rows().size();
  if (beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) {
    // Beta pows are host-resident: pass them by value into the kernel and
    // advance them on the CPU afterwards.
    int threads = 512;
    int ndim = param.numel();
    int blocks = (ndim + threads - 1) / threads;
    hipLaunchKernelGGL(( SparseAdamCUDAKernelREG<T, MPDType>)
        , dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
        beta1_,
        beta2_,
        epsilon_,
        *beta1_pow.data<MPDType>(),
        *beta2_pow.data<MPDType>(),
        moment1.data<MPDType>(),
        dev_ctx.template Alloc<MPDType>(moment1_out),
        moment2.data<MPDType>(),
        dev_ctx.template Alloc<MPDType>(moment2_out),
        learning_rate.data<MPDType>(),
        grad_data,
        param.data<T>(),
        dev_ctx.template Alloc<T>(param_out),
        master_in_data,
        master_out_data,
        rows,
        row_numel,
        grad_merge.rows().size(),
        lazy_mode,
        ndim);
    if (!use_global_beta_pow) {
      // Update with cpu
      dev_ctx.template HostAlloc<MPDType>(beta1_pow_out)[0] =
          beta1_ * beta1_pow.data<MPDType>()[0];
      dev_ctx.template HostAlloc<MPDType>(beta2_pow_out)[0] =
          beta2_ * beta2_pow.data<MPDType>()[0];
    }
  } else {
    // Beta pows are device-resident: the functor dereferences them on the
    // GPU, and a follow-up kernel advances them in place.
    funcs::SparseAdamFunctor<T, funcs::GPUAdam, MPDType> functor(
        beta1_,
        beta2_,
        epsilon_,
        beta1_pow.data<MPDType>(),
        beta2_pow.data<MPDType>(),
        moment1.data<MPDType>(),
        dev_ctx.template Alloc<MPDType>(moment1_out),
        moment2.data<MPDType>(),
        dev_ctx.template Alloc<MPDType>(moment2_out),
        learning_rate.data<MPDType>(),
        grad_data,
        param.data<T>(),
        dev_ctx.template Alloc<T>(param_out),
        master_in_data,
        master_out_data,
        rows,
        row_numel,
        grad_merge.rows().size(),
        lazy_mode);
    // FIXME(minqiyang): remove BinarySearch in GPU later
    funcs::ForRange<Context> for_range(dev_ctx, param.numel());
    for_range(functor);
    if (!use_global_beta_pow) {
      // update beta1 and beta2
      hipLaunchKernelGGL(( UpdateBetaPow<MPDType>), dim3(1), dim3(32), 0, dev_ctx.stream(),
          beta1_,
          beta2_,
          beta1_pow.data<MPDType>(),
          beta2_pow.data<MPDType>(),
          dev_ctx.template Alloc<MPDType>(beta1_pow_out),
          dev_ctx.template Alloc<MPDType>(beta2_pow_out));
    }
  }
}
} // namespace sr
} // namespace phi
// Register the GPU kernel for float/double/float16. For fp16 parameters the
// moment, beta-pow and master-param outputs stay fp32 (multi-precision).
PD_REGISTER_KERNEL(adam_dense_param_sparse_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::sr::AdamDenseParamSparseGradKernel,
                   float,
                   double,
                   phi::dtype::float16) {
  // Skip beta1_pow, beta2_pow, skip_update data transform
  kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(8).SetBackend(phi::Backend::ALL_BACKEND);
  if (kernel_key.dtype() == phi::DataType::FLOAT16) {
    kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
    kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
    kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
    kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
    kernel->OutputAt(5).SetDataType(phi::DataType::FLOAT32);
  }
  // Beta-pow outputs may live on CPU or GPU; leave their backend undecided.
  kernel->OutputAt(3).SetBackend(phi::Backend::UNDEFINED);
  kernel->OutputAt(4).SetBackend(phi::Backend::UNDEFINED);
}
| 74d7d6cddb9ef9a2d9b20f7f179d7795e2a14735.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/selected_rows/adam_kernel.h"
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/adam_functors.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
namespace phi {
namespace sr {
// Advance the accumulated beta powers used for Adam bias correction:
// *out = beta * (*in). Every launched thread stores the same value, so the
// redundant writes from a tiny grid are harmless.
template <typename T>
__global__ void UpdateBetaPow(T beta1,
                              T beta2,
                              const T* beta1_pow_,
                              const T* beta2_pow_,
                              T* beta1_pow_out,
                              T* beta2_pow_out) {
  *beta1_pow_out = beta1 * beta1_pow_[0];
  *beta2_pow_out = beta2 * beta2_pow_[0];
}
// Adam update for a dense parameter with a SelectedRows (sparse row-wise)
// gradient; beta1_pow/beta2_pow are passed by value (host-resident).
// Grid-stride loop over all `ndim` elements; the gradient row for an element
// is found by binary search over the sorted `rows_` list.
//   T  : storage type (may be fp16), MT : math type (fp32 for fp16 params).
template <typename T, typename MT>
__global__ void SparseAdamCUDAKernelREG(MT beta1,
                                        MT beta2,
                                        MT epsilon,
                                        const MT beta1_pow,
                                        const MT beta2_pow,
                                        const MT* mom1_,
                                        MT* mom1_out_,
                                        const MT* mom2_,
                                        MT* mom2_out_,
                                        const MT* lr_,
                                        const T* grad_,
                                        const T* param_,
                                        T* param_out_,
                                        const MT* master_param,
                                        MT* master_param_out,
                                        const int64_t* rows_,
                                        int64_t row_numel,
                                        int64_t row_count,
                                        bool lazy_mode,
                                        int ndim) {
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  MT lr = *lr_;
  // Grid-stride loop over parameter elements.
  for (; id < ndim; id += blockDim.x * gridDim.x) {
    // Position of this element's row in the sparse gradient (-1 if absent).
    auto row_idx =
        phi::funcs::BinarySearch<int64_t>(rows_, row_count, id / row_numel);
    if (lazy_mode && row_idx < 0) {
      // Lazy mode: untouched rows keep their state. NOTE(review): `return`
      // also skips this thread's remaining grid-stride iterations — verify.
      return;
    } else {
      MT mom1 = mom1_[id];
      MT mom2 = mom2_[id];
      // Prefer the fp32 master copy of the parameter when available.
      MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]);
      // Missing rows contribute a zero gradient in non-lazy mode.
      MT g = row_idx >= 0
                 ? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel])
                 : static_cast<MT>(0);
      // Standard Adam moment updates with bias-corrected step below.
      mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
      mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
      MT denom =
          (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
      p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
      // Write back to global memory
      mom1_out_[id] = mom1;
      mom2_out_[id] = mom2;
      param_out_[id] = static_cast<T>(p);
      if (master_param_out) {
        master_param_out[id] = p;
      }
    }
  }
}
// Adam step for a dense parameter with a SelectedRows (sparse row) gradient.
// Merges duplicate gradient rows, then dispatches to a register-parameter
// kernel (beta pows on CPU) or a SparseAdamFunctor via ForRange (beta pows on
// GPU). Supports multi-precision (fp16 param + fp32 master copy).
template <typename T, typename Context>
void AdamDenseParamSparseGradKernel(
    const Context& dev_ctx,
    const DenseTensor& param,
    const SelectedRows& grad,
    const DenseTensor& learning_rate,
    const DenseTensor& moment1,
    const DenseTensor& moment2,
    const DenseTensor& beta1_pow,
    const DenseTensor& beta2_pow,
    const paddle::optional<DenseTensor>& master_param,
    const paddle::optional<DenseTensor>& skip_update,
    const Scalar& beta1,
    const Scalar& beta2,
    const Scalar& epsilon,
    bool lazy_mode,
    int64_t min_row_size_to_use_multithread,
    bool multi_precision,
    bool use_global_beta_pow,
    DenseTensor* param_out,
    DenseTensor* moment1_out,
    DenseTensor* moment2_out,
    DenseTensor* beta1_pow_out,
    DenseTensor* beta2_pow_out,
    DenseTensor* master_param_outs) {
  // Math type: fp32 when T is fp16, otherwise T itself.
  using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
  VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
  // Read the optional 1-element skip_update flag back to the host.
  bool skip_update_ = false;
  if (skip_update.is_initialized()) {
    PADDLE_ENFORCE_EQ(
        skip_update->numel(),
        1,
        errors::InvalidArgument("Input(SkipUpdate) size must be 1, but get %d",
                                skip_update->numel()));
    std::vector<bool> skip_update_vec;
    phi::TensorToVector(*skip_update, dev_ctx, &skip_update_vec);
    skip_update_ = skip_update_vec[0];
  }
  // skip_update=true, just copy input to output, and TensorCopy will call
  // mutable_data
  if (skip_update_) {
    VLOG(4) << "Adam skip update";
    phi::Copy(dev_ctx, param, dev_ctx.GetPlace(), false, param_out);
    phi::Copy(dev_ctx, moment1, dev_ctx.GetPlace(), false, moment1_out);
    phi::Copy(dev_ctx, moment2, dev_ctx.GetPlace(), false, moment2_out);
    if (!use_global_beta_pow) {
      // The beta pows keep whatever place they arrived on.
      phi::Copy(dev_ctx, beta1_pow, beta1_pow.place(), false, beta1_pow_out);
      phi::Copy(dev_ctx, beta2_pow, beta2_pow.place(), false, beta2_pow_out);
    }
    return;
  }
  // Hyper-parameters as host scalars in math precision.
  MPDType beta1_ = beta1.to<MPDType>();
  MPDType beta2_ = beta2.to<MPDType>();
  MPDType epsilon_ = epsilon.to<MPDType>();
  VLOG(3) << "beta1_pow.numel() : " << beta1_pow.numel()
          << "beta2_pow.numel() : " << beta2_pow.numel();
  VLOG(3) << "param.numel(): " << param.numel();
  PADDLE_ENFORCE_EQ(
      beta1_pow_out->numel(),
      1,
      errors::InvalidArgument("beta1 pow output size should be 1, but received "
                              "value is:%d.",
                              beta1_pow_out->numel()));
  PADDLE_ENFORCE_EQ(
      beta2_pow_out->numel(),
      1,
      errors::InvalidArgument("beta2 pow output size should be 1, but received "
                              "value is:%d.",
                              beta2_pow_out->numel()));
  // Master (fp32) parameter copies, only used in multi-precision mode.
  const MPDType* master_in_data =
      multi_precision ? master_param->data<MPDType>() : nullptr;
  MPDType* master_out_data =
      multi_precision ? dev_ctx.template Alloc<MPDType>(master_param_outs)
                      : nullptr;
  if (grad.rows().size() == 0) {
    // NOTE(review): this early return leaves param_out/moment*_out unwritten;
    // presumably outputs alias inputs in this op — confirm against callers.
    VLOG(3) << "grad row size is 0!!";
    return;
  }
  // The kernel's binary search requires strictly sorted, unique rows; check
  // on the host and merge duplicates (which also sorts) if necessary.
  std::vector<int64_t> cpu_rows(grad.rows().begin(), grad.rows().end());
  bool is_strict_sorted = true;
  for (size_t i = 1; i < cpu_rows.size(); ++i) {
    if (cpu_rows[i - 1] >= cpu_rows[i]) {
      is_strict_sorted = false;
      break;
    }
  }
  phi::SelectedRows tmp_grad_merge;
  const phi::SelectedRows* grad_merge_ptr;
  if (is_strict_sorted) {
    grad_merge_ptr = &grad;
  } else {
    // merge duplicated rows if any.
    // The rows of grad_merge have been sorted inside MergeAdd functor
    phi::funcs::scatter::MergeAdd<Context, T> merge_func;
    merge_func(dev_ctx, grad, &tmp_grad_merge, true);
    grad_merge_ptr = &tmp_grad_merge;
  }
  auto& grad_merge = *grad_merge_ptr;
  auto& grad_tensor = grad_merge.value();
  const T* grad_data = grad_tensor.template data<T>();
  // Device-visible copy of the (sorted) row index list.
  auto* grad_merge_rows = &grad_merge.rows();
  phi::MixVector<int64_t> mixv_grad_merge_rows(grad_merge_rows);
  const int64_t* rows = mixv_grad_merge_rows.Data(dev_ctx.GetPlace());
  auto row_numel = grad_tensor.numel() / grad_merge.rows().size();
  if (beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) {
    // Beta pows are host-resident: pass them by value into the kernel and
    // advance them on the CPU afterwards.
    int threads = 512;
    int ndim = param.numel();
    int blocks = (ndim + threads - 1) / threads;
    SparseAdamCUDAKernelREG<T, MPDType>
        <<<blocks, threads, 0, dev_ctx.stream()>>>(
            beta1_,
            beta2_,
            epsilon_,
            *beta1_pow.data<MPDType>(),
            *beta2_pow.data<MPDType>(),
            moment1.data<MPDType>(),
            dev_ctx.template Alloc<MPDType>(moment1_out),
            moment2.data<MPDType>(),
            dev_ctx.template Alloc<MPDType>(moment2_out),
            learning_rate.data<MPDType>(),
            grad_data,
            param.data<T>(),
            dev_ctx.template Alloc<T>(param_out),
            master_in_data,
            master_out_data,
            rows,
            row_numel,
            grad_merge.rows().size(),
            lazy_mode,
            ndim);
    if (!use_global_beta_pow) {
      // Update with cpu
      dev_ctx.template HostAlloc<MPDType>(beta1_pow_out)[0] =
          beta1_ * beta1_pow.data<MPDType>()[0];
      dev_ctx.template HostAlloc<MPDType>(beta2_pow_out)[0] =
          beta2_ * beta2_pow.data<MPDType>()[0];
    }
  } else {
    // Beta pows are device-resident: the functor dereferences them on the
    // GPU, and a follow-up kernel advances them in place.
    funcs::SparseAdamFunctor<T, funcs::GPUAdam, MPDType> functor(
        beta1_,
        beta2_,
        epsilon_,
        beta1_pow.data<MPDType>(),
        beta2_pow.data<MPDType>(),
        moment1.data<MPDType>(),
        dev_ctx.template Alloc<MPDType>(moment1_out),
        moment2.data<MPDType>(),
        dev_ctx.template Alloc<MPDType>(moment2_out),
        learning_rate.data<MPDType>(),
        grad_data,
        param.data<T>(),
        dev_ctx.template Alloc<T>(param_out),
        master_in_data,
        master_out_data,
        rows,
        row_numel,
        grad_merge.rows().size(),
        lazy_mode);
    // FIXME(minqiyang): remove BinarySearch in GPU later
    funcs::ForRange<Context> for_range(dev_ctx, param.numel());
    for_range(functor);
    if (!use_global_beta_pow) {
      // update beta1 and beta2
      UpdateBetaPow<MPDType><<<1, 32, 0, dev_ctx.stream()>>>(
          beta1_,
          beta2_,
          beta1_pow.data<MPDType>(),
          beta2_pow.data<MPDType>(),
          dev_ctx.template Alloc<MPDType>(beta1_pow_out),
          dev_ctx.template Alloc<MPDType>(beta2_pow_out));
    }
  }
}
} // namespace sr
} // namespace phi
// Register the GPU kernel for float/double/float16. For fp16 parameters the
// moment, beta-pow and master-param outputs stay fp32 (multi-precision).
PD_REGISTER_KERNEL(adam_dense_param_sparse_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::sr::AdamDenseParamSparseGradKernel,
                   float,
                   double,
                   phi::dtype::float16) {
  // Skip beta1_pow, beta2_pow, skip_update data transform
  kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(8).SetBackend(phi::Backend::ALL_BACKEND);
  if (kernel_key.dtype() == phi::DataType::FLOAT16) {
    kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
    kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
    kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
    kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
    kernel->OutputAt(5).SetDataType(phi::DataType::FLOAT32);
  }
  // Beta-pow outputs may live on CPU or GPU; leave their backend undecided.
  kernel->OutputAt(3).SetBackend(phi::Backend::UNDEFINED);
  kernel->OutputAt(4).SetBackend(phi::Backend::UNDEFINED);
}
|
2066702f94c8a8ed86186ba2c3eba64832bfd38c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstddef>
#include "limits.cuh"
// assume column major here
// Column-major key comparison: true iff rows idx1 and idx2 hold identical
// values in every key column. Layout: column c, row r lives at
// key_columns[c * num_key_rows + r].
template <typename T> __host__ __device__
bool keyEqualCM(const T* key_columns, size_t idx1, size_t idx2, size_t num_key_rows, size_t num_key_columns)
{
    const T* a = key_columns + idx1;
    const T* b = key_columns + idx2;
    // Walk both rows column by column; bail out on the first mismatch.
    for (size_t col = 0; col < num_key_columns; ++col, a += num_key_rows, b += num_key_rows) {
        if (*a != *b)
            return false;
    }
    return true;
}
// assume row major here
// Row-major key comparison: true iff rows idx1 and idx2 hold identical values
// in every key column. Layout: row r, column c lives at
// key_columns[r * num_key_columns + c].
// BUG FIX: the original indexed with `i + num_key_rows * idx`, i.e. it used
// the row COUNT as the row stride; for a row-major matrix the stride is the
// column count, so the old code compared wrong cells (and read out of bounds)
// whenever num_key_rows != num_key_columns.
template <typename T> __host__ __device__
bool keyEqualRM(const T* key_columns, size_t idx1, size_t idx2, size_t num_key_rows, size_t num_key_columns)
{
    for (size_t i = 0; i < num_key_columns; ++i) {
        if (key_columns[idx1 * num_key_columns + i] != key_columns[idx2 * num_key_columns + i])
            return false;
    }
    return true;
}
// hashKey generating
// Polynomial (base-31) hash of row `idx` across all key columns,
// column-major layout: column c of row r lives at key_columns[c*rows + r].
template <typename T> __host__ __device__
size_t HashKey(size_t idx, T* key_columns, size_t num_key_rows, size_t num_key_columns) {
    size_t acc = 0;
    const T* cell = key_columns + idx;  // start of this row in column 0
    for (size_t col = 0; col < num_key_columns; ++col) {
        acc = acc * 31 + cell[col * num_key_rows];
    }
    return acc;
}
// Accumulate one input row (or a pre-reduced partial carrying weight `count`)
// into hash-table slot `hashPos` for every requested reduction op.
// `ops_c` is the constant-memory opcode array; `hash_results` is laid out
// op-major: op i occupies [i*len_hash_table, (i+1)*len_hash_table).
// All updates are atomic so concurrent threads may hit the same slot.
template <typename Tval> __device__
void updateEntry(Tval* value_columns,
                 size_t num_val_rows,
                 size_t num_ops,
                 size_t idx,
                 size_t hashPos,
                 Tval* hash_results,
                 int* countPtr,
                 size_t len_hash_table,
                 int count=1)
{
    // update count
    atomicAdd(countPtr, count);
    // update each item
    for (size_t i = 0; i < num_ops; ++i) {
        Tval value = value_columns[i * num_val_rows + idx];
        size_t val_idx = i * len_hash_table + hashPos;
        switch(ops_c[i]) {
            case rmin:
                atomicMin(&(hash_results[val_idx]), value);
                break;
            case rmax:
                atomicMax(&(hash_results[val_idx]), value);
                break;
            case rcount:
                // count reductions accumulate the weight, not the value
                atomicAdd(&(hash_results[val_idx]), count);
                break;
            case rmean: // fall-thru
            case rsum:
                // mean is finalized later by dividing the sum by the count
                atomicAdd(&(hash_results[val_idx]), value);
                break;
        }
    }
}
// Build the group-by hash table over all input rows using open addressing
// with linear probing. Each slot stores the index of the first input row
// inserted with its key (hash_key_idx, -1 = empty), a group count and one
// accumulator per reduction op. Slots are claimed with atomicCAS; a probe
// sequence gives up (and raises overflow_flag) after visiting ~75% of the
// table without finding a home.
template <typename Tkey, typename Tval> __global__
void fillTable(Tkey* key_columns,
               size_t num_key_rows,
               size_t num_key_cols,
               Tval* value_columns,
               size_t num_val_rows,
               size_t num_val_cols,
               int* hash_key_idx,
               int* hash_count,
               Tval* hash_results,
               size_t len_hash_table,
               size_t num_ops,
               int* overflow_flag
               )
{
    size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    size_t offset = gridDim.x * blockDim.x;
    // Grid-stride loop over input rows.
    for (size_t i = idx; i < num_key_rows; i += offset) {
        // try inserting, assume there is enough space
        size_t curPos = HashKey(i, key_columns, num_key_rows, num_key_cols) % len_hash_table;
        unsigned int collisionCount = 0;
        bool isInserted = false;
        while (!isInserted) {
            // first read the value out
            int old = hash_key_idx[curPos];
            // if it is -1, try update, else don't
            if (old == -1)
                old = atomicCAS(&(hash_key_idx[curPos]), -1, i);
            // now old contains either -1 or a new address, if it is a new address meaning other thread claimed it
            if (old != -1) {
                // note: old should not contain -1 now, safe to cast to size_t
                if (!keyEqualCM<Tkey>(key_columns, (size_t)old, i, num_key_rows, num_key_cols)) {
                    // collision
                    curPos = (curPos + 1) % len_hash_table; // linear probing
                    if (++collisionCount >= len_hash_table * 0.75)
                        break; // break the loop if it looped over the hash table and still failed
                    continue;
                }
            }
            // now it is safe to update the entry
            isInserted = true;
            updateEntry<Tval>(value_columns, num_val_rows, num_ops, i, curPos, hash_results, &(hash_count[curPos]), len_hash_table);
        }
        if (!isInserted) {
            // Do sth in the case of overflowing hash table
            overflow_flag[0] = 1;
            //printf("Overflow happened at %d \n", len_hash_table);
        }
    }
}
// Privatized variant of fillTable: each block first aggregates into a small
// hash table in shared memory, then flushes its partial results into the
// global table. Three phases:
//   1. initialize the per-block shared table,
//   2. insert every assigned row into the shared table, spilling directly to
//      the global table when the shared one is ~80% full or probing fails,
//   3. merge each occupied shared slot into the global table (pre-reduced,
//      using the shared slot's count as the weight).
// Dynamic shared memory layout: [key idx | counts | per-op accumulators].
template <typename Tkey, typename Tval> __global__
void fillTable_privatization(Tkey* key_columns,
                             size_t num_key_rows,
                             size_t num_key_cols,
                             Tval* value_columns,
                             size_t num_val_rows,
                             size_t num_val_cols,
                             int* hash_key_idx,
                             int* hash_count,
                             Tval* hash_results,
                             size_t len_hash_table,
                             size_t len_shared_hash_table,
                             size_t num_ops)
{
    size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    size_t offset = gridDim.x * blockDim.x;
    __shared__ unsigned int filled_hash_table_shared;
    extern __shared__ char hash_table_shared[];
    // Carve the dynamic shared buffer into the three table sections.
    int* s_hash_key_idx = (int*)hash_table_shared;
    int* s_hash_count = (int*)&(hash_table_shared[len_shared_hash_table*sizeof(int)]);
    // Round the accumulator section start up to a Tval-aligned offset.
    size_t s_offset = (2*len_shared_hash_table*sizeof(int) + sizeof(Tval) - 1) / sizeof(Tval);
    Tval* s_hash_results = (Tval*)&(hash_table_shared[s_offset*sizeof(Tval)]);
    // initialization
    // Phase 1: empty keys (-1), zero counts, per-op identity elements.
    for (size_t i = threadIdx.x; i < len_shared_hash_table; i += blockDim.x) {
        s_hash_key_idx[i] = -1;
        s_hash_count[i] = 0;
        for (size_t j = 0; j < num_ops; ++j) {
            // replace following with specialized limit template in the future
            if (ops_c[j] == rmin) {
                s_hash_results[j * len_shared_hash_table + i] = cuda_custom::limits<Tval>::max();
            } else if (ops_c[j] == rmax) {
                s_hash_results[j * len_shared_hash_table + i] = cuda_custom::limits<Tval>::lowest();
            } else {
                s_hash_results[j * len_shared_hash_table + i] = 0;
            }
        }
    }
    if (threadIdx.x == 0) filled_hash_table_shared = 0;
    __syncthreads();
    // Phase 2: grid-stride insertion into the shared table.
    for (size_t i = idx; i < num_key_rows; i += offset) {
        // try inserting, assume there is enough space
        size_t curPos = HashKey(i, key_columns, num_key_rows, num_key_cols) % len_shared_hash_table;
        unsigned int collisionCount = 0;
        bool isInserted = false;
        while (!isInserted) {
            // quit if shared hash table is 80% full
            if (filled_hash_table_shared >= ( 8 * len_shared_hash_table / 10)) break;
            int old = s_hash_key_idx[curPos];
            // if it is -1, try update, else don't
            if (old == -1)
                old = atomicCAS(&(s_hash_key_idx[curPos]), -1, i);
            // now old contains either -1 or a new address, if it is a new address meaning other thread claimed it
            if (old != -1) {
                // note: old should not contain -1 now, safe to cast to size_t
                if (!keyEqualCM<Tkey>(key_columns, (size_t)old, i, num_key_rows, num_key_cols)) {
                    // collision
                    curPos = (curPos + 1) % len_shared_hash_table; // linear probing
                    if (++collisionCount == len_shared_hash_table)
                        break; // break the loop if it looped over the hash table and still failed
                    continue;
                }
            } else {
                // This thread claimed a fresh slot: bump the fill level.
                atomicAdd(&filled_hash_table_shared, 1);
            }
            // now it is safe to update the entry
            isInserted = true;
            updateEntry<Tval>(value_columns, num_val_rows, num_ops, i, curPos, s_hash_results, &(s_hash_count[curPos]), len_shared_hash_table);
        }
        // if current column not inserted, insert to global one
        curPos = HashKey(i, key_columns, num_key_rows, num_key_cols) % len_hash_table;
        collisionCount = 0;
        while (!isInserted) {
            // first read the value out
            int old = hash_key_idx[curPos];
            // if it is -1, try update, else don't
            if (old == -1)
                old = atomicCAS(&(hash_key_idx[curPos]), -1, i);
            // now old contains either -1 or a new address, if it is a new address meaning other thread claimed it
            if (old != -1) {
                // note: old should not contain -1 now, safe to cast to size_t
                if (!keyEqualCM<Tkey>(key_columns, (size_t)old, i, num_key_rows, num_key_cols)) {
                    // collision
                    curPos = (curPos + 1) % len_hash_table; // linear probing
                    if (++collisionCount == len_hash_table)
                        break; // break the loop if it looped over the hash table and still failed
                    continue;
                }
            }
            // now it is safe to update the entry
            isInserted = true;
            updateEntry<Tval>(value_columns, num_val_rows, num_ops, i, curPos, hash_results, &(hash_count[curPos]), len_hash_table);
        }
        if (!isInserted) {
            // Do sth in the case of overflowing hash table
            // NOTE(review): overflow is silently dropped here, unlike
            // fillTable which sets a flag — confirm this is acceptable.
        }
    }
    __syncthreads();
    // Phase 3: flush every occupied shared slot into the global table,
    // passing the shared slot's count so the merge is weight-correct.
    for (size_t i = threadIdx.x; i < len_shared_hash_table; i += blockDim.x) {
        int real_idx = s_hash_key_idx[i];
        if (real_idx != -1) {
            size_t curPos = HashKey(real_idx, key_columns, num_key_rows, num_key_cols) % len_hash_table;
            int collisionCount = 0;
            bool isInserted = false;
            while (!isInserted) {
                // first read the value out
                int old = hash_key_idx[curPos];
                // if it is -1, try update, else don't
                if (old == -1)
                    old = atomicCAS(&(hash_key_idx[curPos]), -1, real_idx);
                // now old contains either -1 or a new address, if it is a new address meaning other thread claimed it
                if (old != -1) {
                    // note: old should not contain -1 now, safe to cast to size_t
                    if (!keyEqualCM<Tkey>(key_columns, (size_t)old, real_idx, num_key_rows, num_key_cols)) {
                        // collision
                        curPos = (curPos + 1) % len_hash_table; // linear probing
                        if (++collisionCount == len_hash_table)
                            break; // break the loop if it looped over the hash table and still failed
                        continue;
                    }
                }
                // now it is safe to update the entry
                isInserted = true;
                updateEntry<Tval>(s_hash_results, len_shared_hash_table, num_ops,
                                  i, curPos, hash_results, &(hash_count[curPos]), len_hash_table, s_hash_count[i]);
            }
            if (!isInserted) {
                // Do sth in the case of overflowing hash table
            }
        }
    }
}
// Reset the global hash table: every slot gets the "empty" key marker (-1),
// a zero count, and the identity element of each reduction op — limits::max()
// for min, limits::lowest() for max, and 0 for sum/count/mean.
template <typename Tval> __global__
void initializeVariable(int* hash_key_idx,
                        int* hash_count,
                        Tval* hash_results,
                        size_t len_hash_table,
                        size_t num_ops)
{
    // each thread responsible for one entry (with thread coarsening)
    size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    size_t offset = gridDim.x * blockDim.x;
    for (size_t i = idx; i < len_hash_table; i += offset) {
        hash_key_idx[i] = -1;
        hash_count[i] = 0;
        for (size_t j = 0; j < num_ops; ++j) {
            // replace following with specialized limit template in the future
            if (ops_c[j] == rmin) {
                hash_results[j * len_hash_table + i] = cuda_custom::limits<Tval>::max();
            } else if (ops_c[j] == rmax) {
                hash_results[j * len_hash_table + i] = cuda_custom::limits<Tval>::lowest();
            } else {
                hash_results[j * len_hash_table + i] = 0;
            }
        }
    }
}
// Gather the unique key rows out of the original (column-major) key matrix
// into the compacted, column-major output.
//   hashTable_idxs_d[out] -> occupied slot in the hash table
//   hash_key_idx_d[slot]  -> row index of that key in key_columns_d
template <typename Tval> __global__
void copyUnique(
    int *hashTable_idxs_d,
    int *hash_key_idx_d,
    Tval* key_columns_d,
    Tval* output_key_columns_d,
    int num_output_rows,
    int num_key_columns,
    int num_key_rows)
{
    // Grid-stride loop over the compacted output rows.
    for (int out = threadIdx.x + blockIdx.x * blockDim.x;
         out < num_output_rows;
         out += gridDim.x * blockDim.x) {
        int src_row = hash_key_idx_d[hashTable_idxs_d[out]];
        for (int col = 0; col < num_key_columns; ++col) {
            output_key_columns_d[out + num_output_rows * col] =
                key_columns_d[src_row + num_key_rows * col];
        }
    }
}
// Finalize and scatter the reduction results into the dense, column-major
// output matrix. For rmean the accumulated sum is divided by the group count;
// every other op's accumulator is already final and is copied through.
template <typename Tval> __global__
void copyValues(
    int *hashTable_idxs_d,
    Tval* hash_results_d,
    int *hash_count_d,
    Tval* value_columns_d,
    Tval* output_value_columns_d,
    int num_output_rows,
    int num_value_columns,
    int num_value_rows,
    size_t num_ops,
    size_t len_hash_table
    )
{
    // Grid-stride loop over the compacted output rows.
    for (int out = threadIdx.x + blockIdx.x * blockDim.x;
         out < num_output_rows;
         out += gridDim.x * blockDim.x) {
        int slot = hashTable_idxs_d[out];
        for (size_t op = 0; op < num_ops; ++op) {
            Tval acc = hash_results_d[op * len_hash_table + slot];
            switch (ops_c[op]) {
                case rmean:
                    // finalize the mean: divide the sum by the group count
                    output_value_columns_d[out + num_output_rows * op] = acc / hash_count_d[slot];
                    break;
                case rmin:   // fall-thru: these accumulators are already final
                case rmax:
                case rcount:
                case rsum:
                    output_value_columns_d[out + num_output_rows * op] = acc;
                    break;
            }
        }
    }
}
// Predicate functor: true for non-negative values (used e.g. with
// thrust-style stream compaction over hash-slot indices).
struct is_pos
{
    __host__ __device__
    bool operator()(const int x)
    {
        return !(x < 0);
    }
};
extern std::mt19937 gen;
// Estimate a hash-table length on the CPU by sampling random row pairs and
// measuring how often two random rows carry an equal key. Returns roughly
// 2.6 * (sampled pairs / equal pairs); falls back to num_key_rows / 4 when
// fewer than two equal pairs were observed.
template <typename T> __host__
unsigned int predictTableLength_CPU(const T* key_columns,
                                    size_t num_key_rows,
                                    size_t num_key_columns)
{
    // Guard degenerate input: an empty key set would make the distribution
    // range below invalid (0 .. num_key_rows-1 underflows).
    if (num_key_rows == 0)
        return 1;
    // Predict Hash Table length based on 2 state transfer matrix
    unsigned int numEqual = 0;
    unsigned int numTotal = 0;
    std::uniform_int_distribution<unsigned int> keyRange(0, num_key_rows-1);
    // max try 1% of key_rows
    for (size_t i=0; i < num_key_rows/100; ++i) {
        size_t idx1 = keyRange(gen);
        size_t idx2 = keyRange(gen);
        bool result = keyEqualCM(key_columns, idx1, idx2, num_key_rows, num_key_columns);
        if (result)
            ++numEqual;
        ++numTotal;
        if (numEqual == 10)  // enough evidence; stop sampling early
            break;
    }
    if (numEqual < 2) // very few sample, return 1/4 of original
        return num_key_rows / 4;
    // BUG FIX: the original wrote `(unsigned int) 2.6f * (...)`, which binds
    // the cast to the literal only and truncates the factor to 2 before the
    // multiply. Parenthesize so the full product is computed in float first.
    return (unsigned int)(2.6f * ((float)(numTotal) / numEqual));
}
// Seed one RNG state per thread: all states share `seed` but use the global
// thread id as the sequence number, so the per-thread streams are independent.
// NOTE(review): assumes `state` holds at least gridDim.x*blockDim.x entries —
// confirm at the launch site.
__global__
void fillCURANDState(hiprandState_t* state, unsigned long seed)
{
    size_t idx = threadIdx.x + blockDim.x * blockIdx.x;
    hiprand_init(seed, idx, 0, &state[idx]);
}
// Sample random row triples on the GPU and classify each triple as
// "all 3 keys equal" (tally 0), "some pair equal" (tally 1) or
// "all different" (tally 2); the three tallies are accumulated per thread in
// shared memory and tree-reduced per block into count[0..2].
// NOTE(review): BLOCKSIZE must equal blockDim.x, and blockDim.x must be a
// power of two for the reduction to be correct — confirm at the launch site.
// NOTE(review): count[] is stored unconditionally by each block's thread 0
// (no atomics), so this appears to assume a single-block launch — verify.
template <typename T> __global__
void predictTableLength_GPU(const T* key_columns,
                            size_t num_key_rows,
                            size_t num_key_columns,
                            size_t iterations,
                            unsigned int* count,
                            hiprandState_t* state)
{
#ifdef DEBUG
    constexpr unsigned int BLOCKSIZE = 512;
#else
    constexpr unsigned int BLOCKSIZE = 1024;
#endif
    // Three tally arrays, one BLOCKSIZE-wide section per outcome class.
    __shared__ unsigned int count_shared[3*BLOCKSIZE];
    size_t idx = threadIdx.x + blockDim.x * blockIdx.x;
    // initial shared memory
    for (size_t i = 0; i < 3; ++i) {
        count_shared[i*BLOCKSIZE + threadIdx.x] = 0;
    }
    for (size_t i = 0; i < iterations; ++i) {
        unsigned int test_idx[3];
        bool result[3];
        // Draw three random row indices in [0, num_key_rows).
        for (size_t j = 0; j < 3; ++j)
            test_idx[j] = floorf(hiprand_uniform(&state[idx]) * num_key_rows);
        // compare keys
        // result[j] compares rows j and (j+1)%3 pairwise.
        for (size_t j = 0; j < 3; ++j)
            result[j] = keyEqualCM(key_columns, test_idx[j],
                                   test_idx[(j+1)%3], num_key_rows,
                                   num_key_columns);
        if (result[0] && result[1]) // any two is true then 3 are equal
            count_shared[threadIdx.x] += 1;
        else if (result[0] || result[1] || result[2]) // any one is true then 2 are equal
            count_shared[BLOCKSIZE + threadIdx.x] += 1;
        else // three are different
            count_shared[BLOCKSIZE*2 + threadIdx.x] += 1;
    }
    __syncthreads();
    // reduction
    // Tree reduction of all three tallies simultaneously.
    for (size_t stride = (blockDim.x >> 1);
         stride >= 1;
         stride >>= 1) {
        if (threadIdx.x < stride) {
            for (size_t i = 0; i < 3; ++i) {
                count_shared[threadIdx.x + BLOCKSIZE*i]
                    += count_shared[threadIdx.x + BLOCKSIZE*i + stride];
            }
        }
        __syncthreads();
    }
    if (threadIdx.x == 0)
        for (size_t i = 0; i < 3; ++i) {
            count[i] = count_shared[BLOCKSIZE*i];
        }
}
| 2066702f94c8a8ed86186ba2c3eba64832bfd38c.cu | #include <cstddef>
#include "limits.cuh"
// assume column major here
// Column-major key comparison: true iff rows idx1 and idx2 hold identical
// values in every key column. Layout: column c, row r lives at
// key_columns[c * num_key_rows + r].
template <typename T> __host__ __device__
bool keyEqualCM(const T* key_columns, size_t idx1, size_t idx2, size_t num_key_rows, size_t num_key_columns)
{
    const T* a = key_columns + idx1;
    const T* b = key_columns + idx2;
    // Walk both rows column by column; bail out on the first mismatch.
    for (size_t col = 0; col < num_key_columns; ++col, a += num_key_rows, b += num_key_rows) {
        if (*a != *b)
            return false;
    }
    return true;
}
// assume row major here
// Row-major key comparison: true iff rows idx1 and idx2 hold identical values
// in every key column. Layout: row r, column c lives at
// key_columns[r * num_key_columns + c].
// BUG FIX: the original indexed with `i + num_key_rows * idx`, using the row
// COUNT as the row stride; for a row-major matrix the stride is the column
// count, so the old code compared wrong cells (and read out of bounds)
// whenever num_key_rows != num_key_columns.
template <typename T> __host__ __device__
bool keyEqualRM(const T* key_columns, size_t idx1, size_t idx2, size_t num_key_rows, size_t num_key_columns)
{
    for (size_t i = 0; i < num_key_columns; ++i) {
        if (key_columns[idx1 * num_key_columns + i] != key_columns[idx2 * num_key_columns + i])
            return false;
    }
    return true;
}
// hashKey generating
// Polynomial (base-31) hash of row `idx` across all key columns,
// column-major layout: column c of row r lives at key_columns[c*rows + r].
template <typename T> __host__ __device__
size_t HashKey(size_t idx, T* key_columns, size_t num_key_rows, size_t num_key_columns) {
    size_t acc = 0;
    const T* cell = key_columns + idx;  // start of this row in column 0
    for (size_t col = 0; col < num_key_columns; ++col) {
        acc = acc * 31 + cell[col * num_key_rows];
    }
    return acc;
}
// Accumulate one input row (or a pre-reduced partial carrying weight `count`)
// into hash-table slot `hashPos` for every requested reduction op.
// `ops_c` is the constant-memory opcode array; `hash_results` is laid out
// op-major: op i occupies [i*len_hash_table, (i+1)*len_hash_table).
// All updates are atomic so concurrent threads may hit the same slot.
template <typename Tval> __device__
void updateEntry(Tval* value_columns,
                 size_t num_val_rows,
                 size_t num_ops,
                 size_t idx,
                 size_t hashPos,
                 Tval* hash_results,
                 int* countPtr,
                 size_t len_hash_table,
                 int count=1)
{
    // update count
    atomicAdd(countPtr, count);
    // update each item
    for (size_t i = 0; i < num_ops; ++i) {
        Tval value = value_columns[i * num_val_rows + idx];
        size_t val_idx = i * len_hash_table + hashPos;
        switch(ops_c[i]) {
            case rmin:
                atomicMin(&(hash_results[val_idx]), value);
                break;
            case rmax:
                atomicMax(&(hash_results[val_idx]), value);
                break;
            case rcount:
                // count reductions accumulate the weight, not the value
                atomicAdd(&(hash_results[val_idx]), count);
                break;
            case rmean: // fall-thru
            case rsum:
                // mean is finalized later by dividing the sum by the count
                atomicAdd(&(hash_results[val_idx]), value);
                break;
        }
    }
}
// Builds the group-by hash table directly in global memory.
// One thread per input row (grid-stride loop): hash the row's key, claim a
// slot with atomicCAS + linear probing, then atomically fold the row's values
// in via updateEntry. hash_key_idx must be pre-initialized to -1 (empty) and
// hash_count/hash_results to their identities (see initializeVariable).
// Sets overflow_flag[0] = 1 if probing gives up (table ~75% traversed).
template <typename Tkey, typename Tval> __global__
void fillTable(Tkey* key_columns,
               size_t num_key_rows,
               size_t num_key_cols,
               Tval* value_columns,
               size_t num_val_rows,
               size_t num_val_cols,
               int* hash_key_idx,
               int* hash_count,
               Tval* hash_results,
               size_t len_hash_table,
               size_t num_ops,
               int* overflow_flag
               )
{
    size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    size_t offset = gridDim.x * blockDim.x; // grid-stride step
    for (size_t i = idx; i < num_key_rows; i += offset) {
        // try inserting, assume there is enough space
        size_t curPos = HashKey(i, key_columns, num_key_rows, num_key_cols) % len_hash_table;
        unsigned int collisionCount = 0;
        bool isInserted = false;
        while (!isInserted) {
            // first read the value out
            int old = hash_key_idx[curPos];
            // if it is -1, try update, else don't
            if (old == -1)
                old = atomicCAS(&(hash_key_idx[curPos]), -1, i);
            // now old contains either -1 or a new address, if it is a new address meaning other thread claimed it
            if (old != -1) {
                // note: old should not contain -1 now, safe to cast to size_t
                if (!keyEqualCM<Tkey>(key_columns, (size_t)old, i, num_key_rows, num_key_cols)) {
                    // collision
                    curPos = (curPos + 1) % len_hash_table; // linear probing
                    if (++collisionCount >= len_hash_table * 0.75)
                        break; // break the loop if it looped over the hash table and still failed
                    continue;
                }
            }
            // now it is safe to update the entry: slot is ours or holds an equal key
            isInserted = true;
            updateEntry<Tval>(value_columns, num_val_rows, num_ops, i, curPos, hash_results, &(hash_count[curPos]), len_hash_table);
        }
        if (!isInserted) {
            // Do sth in the case of overflowing hash table
            // (plain store is fine: all writers store the same value 1)
            overflow_flag[0] = 1;
            //printf("Overflow happened at %d \n", len_hash_table);
        }
    }
}
// Privatized variant of fillTable: each block first aggregates into a small
// per-block hash table in dynamic shared memory, then flushes its partial
// results into the global table. Rows that do not fit in the shared table
// (it is capped at 80% full, or probing wraps) are inserted straight into
// global memory. Dynamic shared memory must be sized by the host to hold
// 2*len_shared_hash_table ints plus num_ops*len_shared_hash_table Tvals
// (plus Tval alignment padding) — TODO confirm against the launching code,
// which is outside this file's visible portion.
template <typename Tkey, typename Tval> __global__
void fillTable_privatization(Tkey* key_columns,
                             size_t num_key_rows,
                             size_t num_key_cols,
                             Tval* value_columns,
                             size_t num_val_rows,
                             size_t num_val_cols,
                             int* hash_key_idx,
                             int* hash_count,
                             Tval* hash_results,
                             size_t len_hash_table,
                             size_t len_shared_hash_table,
                             size_t num_ops)
{
    size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    size_t offset = gridDim.x * blockDim.x;
    __shared__ unsigned int filled_hash_table_shared; // occupied-slot counter for this block
    extern __shared__ char hash_table_shared[];
    // Carve the raw shared buffer into: key indices | counts | accumulators.
    int* s_hash_key_idx = (int*)hash_table_shared;
    int* s_hash_count = (int*)&(hash_table_shared[len_shared_hash_table*sizeof(int)]);
    // s_offset rounds the int region up to a Tval-aligned byte offset.
    size_t s_offset = (2*len_shared_hash_table*sizeof(int) + sizeof(Tval) - 1) / sizeof(Tval);
    Tval* s_hash_results = (Tval*)&(hash_table_shared[s_offset*sizeof(Tval)]);
    // initialization of the per-block table (cooperative, strided by blockDim)
    for (size_t i = threadIdx.x; i < len_shared_hash_table; i += blockDim.x) {
        s_hash_key_idx[i] = -1;
        s_hash_count[i] = 0;
        for (size_t j = 0; j < num_ops; ++j) {
            // replace following with specialized limit template in the future
            if (ops_c[j] == rmin) {
                s_hash_results[j * len_shared_hash_table + i] = cuda_custom::limits<Tval>::max();
            } else if (ops_c[j] == rmax) {
                s_hash_results[j * len_shared_hash_table + i] = cuda_custom::limits<Tval>::lowest();
            } else {
                s_hash_results[j * len_shared_hash_table + i] = 0;
            }
        }
    }
    if (threadIdx.x == 0) filled_hash_table_shared = 0;
    __syncthreads();
    // Phase 1: insert rows into the shared (per-block) table.
    for (size_t i = idx; i < num_key_rows; i += offset) {
        // try inserting, assume there is enough space
        size_t curPos = HashKey(i, key_columns, num_key_rows, num_key_cols) % len_shared_hash_table;
        unsigned int collisionCount = 0;
        bool isInserted = false;
        while (!isInserted) {
            // quit if shared hash table is 80% full
            if (filled_hash_table_shared >= ( 8 * len_shared_hash_table / 10)) break;
            int old = s_hash_key_idx[curPos];
            // if it is -1, try update, else don't
            if (old == -1)
                old = atomicCAS(&(s_hash_key_idx[curPos]), -1, i);
            // now old contains either -1 or a new address, if it is a new address meaning other thread claimed it
            if (old != -1) {
                // note: old should not contain -1 now, safe to cast to size_t
                if (!keyEqualCM<Tkey>(key_columns, (size_t)old, i, num_key_rows, num_key_cols)) {
                    // collision
                    curPos = (curPos + 1) % len_shared_hash_table; // linear probing
                    if (++collisionCount == len_shared_hash_table)
                        break; // break the loop if it looped over the hash table and still failed
                    continue;
                }
            } else {
                // this thread claimed a fresh slot: bump the occupancy counter
                atomicAdd(&filled_hash_table_shared, 1);
            }
            // now it is safe to update the entry
            isInserted = true;
            updateEntry<Tval>(value_columns, num_val_rows, num_ops, i, curPos, s_hash_results, &(s_hash_count[curPos]), len_shared_hash_table);
        }
        // if current column not inserted, insert to global one (fallback path)
        curPos = HashKey(i, key_columns, num_key_rows, num_key_cols) % len_hash_table;
        collisionCount = 0;
        while (!isInserted) {
            // first read the value out
            int old = hash_key_idx[curPos];
            // if it is -1, try update, else don't
            if (old == -1)
                old = atomicCAS(&(hash_key_idx[curPos]), -1, i);
            // now old contains either -1 or a new address, if it is a new address meaning other thread claimed it
            if (old != -1) {
                // note: old should not contain -1 now, safe to cast to size_t
                if (!keyEqualCM<Tkey>(key_columns, (size_t)old, i, num_key_rows, num_key_cols)) {
                    // collision
                    curPos = (curPos + 1) % len_hash_table; // linear probing
                    if (++collisionCount == len_hash_table)
                        break; // break the loop if it looped over the hash table and still failed
                    continue;
                }
            }
            // now it is safe to update the entry
            isInserted = true;
            updateEntry<Tval>(value_columns, num_val_rows, num_ops, i, curPos, hash_results, &(hash_count[curPos]), len_hash_table);
        }
        if (!isInserted) {
            // Do sth in the case of overflowing hash table
            // NOTE(review): unlike fillTable, no overflow flag is raised here — confirm intended
        }
    }
    __syncthreads();
    // Phase 2: flush the per-block table into the global one. Each occupied
    // shared slot is merged via updateEntry with the accumulated s_hash_count.
    for (size_t i = threadIdx.x; i < len_shared_hash_table; i += blockDim.x) {
        int real_idx = s_hash_key_idx[i];
        if (real_idx != -1) {
            size_t curPos = HashKey(real_idx, key_columns, num_key_rows, num_key_cols) % len_hash_table;
            int collisionCount = 0;
            bool isInserted = false;
            while (!isInserted) {
                // first read the value out
                int old = hash_key_idx[curPos];
                // if it is -1, try update, else don't
                if (old == -1)
                    old = atomicCAS(&(hash_key_idx[curPos]), -1, real_idx);
                // now old contains either -1 or a new address, if it is a new address meaning other thread claimed it
                if (old != -1) {
                    // note: old should not contain -1 now, safe to cast to size_t
                    if (!keyEqualCM<Tkey>(key_columns, (size_t)old, real_idx, num_key_rows, num_key_cols)) {
                        // collision
                        curPos = (curPos + 1) % len_hash_table; // linear probing
                        if (++collisionCount == len_hash_table)
                            break; // break the loop if it looped over the hash table and still failed
                        continue;
                    }
                }
                // now it is safe to update the entry: fold the shared partials in.
                // The shared accumulators are read as "values" with the shared
                // table length acting as the row stride.
                isInserted = true;
                updateEntry<Tval>(s_hash_results, len_shared_hash_table, num_ops,
                                  i, curPos, hash_results, &(hash_count[curPos]), len_hash_table, s_hash_count[i]);
            }
            if (!isInserted) {
                // Do sth in the case of overflowing hash table
            }
        }
    }
}
// Resets the global hash table: every slot's key index becomes -1 (empty),
// its count 0, and each op's accumulator its reduction identity
// (+inf-like max for rmin, lowest for rmax, 0 otherwise). Must run before
// fillTable / fillTable_privatization.
template <typename Tval> __global__
void initializeVariable(int* hash_key_idx,
                        int* hash_count,
                        Tval* hash_results,
                        size_t len_hash_table,
                        size_t num_ops)
{
    // each thread responsible for one entry (with thread coarsening)
    size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
    size_t offset = gridDim.x * blockDim.x;
    for (size_t i = idx; i < len_hash_table; i += offset) {
        hash_key_idx[i] = -1;
        hash_count[i] = 0;
        for (size_t j = 0; j < num_ops; ++j) {
            // replace following with specialized limit template in the future
            if (ops_c[j] == rmin) {
                hash_results[j * len_hash_table + i] = cuda_custom::limits<Tval>::max();
            } else if (ops_c[j] == rmax) {
                hash_results[j * len_hash_table + i] = cuda_custom::limits<Tval>::lowest();
            } else {
                hash_results[j * len_hash_table + i] = 0;
            }
        }
    }
}
// Gathers the distinct keys out of the original (column-major) key matrix
// into the compacted output matrix. hashTable_idxs_d lists the occupied hash
// slots; hash_key_idx_d maps each slot back to a row of key_columns_d.
template <typename Tval> __global__
void copyUnique(
    int *hashTable_idxs_d, //where key resides in hash vector
    int *hash_key_idx_d, //where key resides in original key matrix
    Tval* key_columns_d,
    Tval* output_key_columns_d,
    int num_output_rows,
    int num_key_columns,
    int num_key_rows)
{
    const int stride = gridDim.x * blockDim.x;
    // Grid-stride loop: each thread handles one output row at a time.
    for (int row = threadIdx.x + blockIdx.x * blockDim.x;
         row < num_output_rows;
         row += stride) {
        const int srcRow = hash_key_idx_d[hashTable_idxs_d[row]];
        for (int col = 0; col < num_key_columns; ++col) {
            // copy the original key entry, column by column (both matrices column-major)
            output_key_columns_d[row + num_output_rows * col] =
                key_columns_d[srcRow + num_key_rows * col];
        }
    }
}
// Copies the per-slot aggregation results into the compacted output matrix.
// For rmean the accumulated sum is divided by the slot's count; every other
// op's accumulator is copied through unchanged. Ops come from the
// __constant__ array ops_c; an unrecognized op leaves the output untouched.
template <typename Tval> __global__
void copyValues(
    int *hashTable_idxs_d,
    Tval* hash_results_d,
    int *hash_count_d,
    Tval* value_columns_d,
    Tval* output_value_columns_d,
    int num_output_rows,
    int num_value_columns,
    int num_value_rows,
    size_t num_ops,
    size_t len_hash_table
    )
{
    const int stride = gridDim.x * blockDim.x;
    // Grid-stride loop over output rows.
    for (int row = threadIdx.x + blockIdx.x * blockDim.x;
         row < num_output_rows;
         row += stride) {
        const int slot = hashTable_idxs_d[row];
        for (size_t op = 0; op < num_ops; ++op) {
            const size_t src = op * len_hash_table + slot;
            switch (ops_c[op]) {
            case rmean:
                // mean = accumulated sum / group count
                output_value_columns_d[row + num_output_rows * op] =
                    hash_results_d[src] / hash_count_d[slot];
                break;
            case rmin:  // all remaining ops copy the accumulator verbatim
            case rmax:
            case rcount:
            case rsum:
                output_value_columns_d[row + num_output_rows * op] = hash_results_d[src];
                break;
            }
        }
    }
}
// Predicate functor: true for non-negative integers (x >= 0).
struct is_pos
{
    __host__ __device__
    bool operator()(const int x)
    {
        return !(x < 0);
    }
};
extern std::mt19937 gen;
// Estimates a hash-table length on the host by sampling random row pairs and
// measuring how often their keys collide (a crude duplicate-rate estimate).
// Samples at most 1% of the rows, stopping early after 10 equal pairs.
// Falls back to num_key_rows/4 when too few equal pairs are observed.
// Uses the file-global std::mt19937 `gen` for sampling.
template <typename T> __host__
unsigned int predictTableLength_CPU(const T* key_columns,
                                    size_t num_key_rows,
                                    size_t num_key_columns)
{
    // Predict Hash Table length based on 2 state transfer matrix
    unsigned int numEqual = 0;
    unsigned int numTotal = 0;
    std::uniform_int_distribution<unsigned int> keyRange(0, num_key_rows-1);
    // max try 1% of key_rows
    for (size_t i=0; i < num_key_rows/100; ++i) {
        size_t idx1 = keyRange(gen);
        size_t idx2 = keyRange(gen);
        bool result = keyEqualCM(key_columns, idx1, idx2, num_key_rows, num_key_columns);
        if (result)
            ++numEqual;
        ++numTotal;
        if (numEqual == 10)
            break;
    }
    if (numEqual < 2) // very few sample, return 1/4 of original
        return num_key_rows / 4;
    // BUG FIX: the original wrote `(unsigned int) 2.6f * (ratio)`, where the
    // cast binds only to the literal, yielding 2u * ratio. Parenthesize so the
    // whole product is scaled by 2.6 before truncation.
    return (unsigned int)(2.6f * ((float)(numTotal) / numEqual));
}
// Seeds one cuRAND generator state per thread: same seed for all threads,
// with the flat thread id used as the sequence number so streams are
// statistically independent.
__global__
void fillCURANDState(curandState* state, unsigned long seed)
{
    const size_t tid = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, tid, 0, &state[tid]);
}
// GPU duplicate-rate sampler: each thread repeatedly draws three random row
// indices and classifies the triple as "all equal" / "exactly two equal" /
// "all different", accumulating the three tallies in shared memory, then the
// block tree-reduces them and thread 0 writes the totals to count[0..2].
// NOTE(review): the shared-memory layout assumes blockDim.x == BLOCKSIZE and
// the tree reduction assumes blockDim.x is a power of two; the final write is
// a plain (non-atomic) store, so this presumably expects a single-block
// launch — confirm against the host-side launch code.
template <typename T> __global__
void predictTableLength_GPU(const T* key_columns,
                            size_t num_key_rows,
                            size_t num_key_columns,
                            size_t iterations,
                            unsigned int* count,
                            curandState* state)
{
#ifdef DEBUG
    constexpr unsigned int BLOCKSIZE = 512;
#else
    constexpr unsigned int BLOCKSIZE = 1024;
#endif
    // three tallies per thread: [0]=all three equal, [1]=two equal, [2]=all different
    __shared__ unsigned int count_shared[3*BLOCKSIZE];
    size_t idx = threadIdx.x + blockDim.x * blockIdx.x;
    // initial shared memory
    for (size_t i = 0; i < 3; ++i) {
        count_shared[i*BLOCKSIZE + threadIdx.x] = 0;
    }
    for (size_t i = 0; i < iterations; ++i) {
        unsigned int test_idx[3];
        bool result[3];
        for (size_t j = 0; j < 3; ++j)
            test_idx[j] = floorf(curand_uniform(&state[idx]) * num_key_rows);
        // compare keys pairwise around the triple: (0,1), (1,2), (2,0)
        for (size_t j = 0; j < 3; ++j)
            result[j] = keyEqualCM(key_columns, test_idx[j],
                                   test_idx[(j+1)%3], num_key_rows,
                                   num_key_columns);
        if (result[0] && result[1]) // any two is true then 3 are equal
            count_shared[threadIdx.x] += 1;
        else if (result[0] || result[1] || result[2]) // any one is true then 2 are equal
            count_shared[BLOCKSIZE + threadIdx.x] += 1;
        else // three are different
            count_shared[BLOCKSIZE*2 + threadIdx.x] += 1;
    }
    __syncthreads();
    // reduction (tree reduce over the block; requires power-of-two blockDim.x)
    for (size_t stride = (blockDim.x >> 1);
         stride >= 1;
         stride >>= 1) {
        if (threadIdx.x < stride) {
            for (size_t i = 0; i < 3; ++i) {
                count_shared[threadIdx.x + BLOCKSIZE*i]
                    += count_shared[threadIdx.x + BLOCKSIZE*i + stride];
            }
        }
        __syncthreads();
    }
    if (threadIdx.x == 0)
        for (size_t i = 0; i < 3; ++i) {
            count[i] = count_shared[BLOCKSIZE*i];
        }
}
|
d7697f9f2b38b68cccaa4f801f0dd2a6d0a2896b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// Indexing uses only threadIdx.x, so this expects a single-block launch
// with at least `size` threads (see the launch in addWithCuda).
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
    printf("y"); // debug marker: one 'y' per thread (device printf is slow)
}
// Driver for the vector-add sample: adds two fixed 5-element vectors on the
// GPU via addWithCuda, prints the result, and resets the device.
// (This file is hipify-generated; the CUDA original appears later in the dump.)
int main()
{
    printf(""); // no-op; hipify replaced a mojibake literal from the CUDA source
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };
    // Add vectors in parallel.
    hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);
    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA (via HIP) to add vectors in parallel:
// allocates three device buffers, copies a and b up, launches addKernel with
// a single block of `size` threads, and copies the sum back into c.
// On any failure it prints a message and jumps to the shared cleanup label;
// hipFree(NULL) is safe for buffers that were never allocated.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    hipError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output) .
    cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
    cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }
    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
Error:
    // Shared cleanup for both success and failure paths.
    hipFree(dev_c);
    hipFree(dev_a);
    hipFree(dev_b);
    return cudaStatus;
}
| d7697f9f2b38b68cccaa4f801f0dd2a6d0a2896b.cu |
#include "cuda_runtime.h"
#include <cstdio>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector add: c[i] = a[i] + b[i].
// One thread per element; expects a single block with >= n threads
// (only threadIdx.x is used for indexing).
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int tid = threadIdx.x;
    c[tid] = a[tid] + b[tid];
    printf("y"); // debug marker: one 'y' per thread
}
// Adds two fixed 5-element vectors on the GPU and prints the result.
// Returns 1 on any CUDA failure, 0 on success.
int main()
{
    printf("°’Υά"); // NOTE(review): mojibake literal, preserved byte-for-byte
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Run the addition on the device.
    cudaError_t status = addWithCuda(c, a, b, arraySize);
    if (status != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);

    // cudaDeviceReset must be called before exiting so profiling/tracing
    // tools (Nsight, Visual Profiler) see complete traces.
    status = cudaDeviceReset();
    if (status != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Host helper: allocates three device buffers, copies the inputs up, launches
// addKernel with one thread per element in a single block, and copies the
// result back into c. On any failure it prints a message and falls through to
// the shared cleanup (cudaFree(NULL) is a no-op for never-allocated buffers).
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;
    const size_t bytes = size * sizeof(int);

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for the output and both inputs (same order as
    // before: c, a, b — preserves which allocation fails first).
    {
        int** devPtrs[3] = { &dev_c, &dev_a, &dev_b };
        for (int i = 0; i < 3; ++i) {
            cudaStatus = cudaMalloc((void**)devPtrs[i], bytes);
            if (cudaStatus != cudaSuccess) {
                fprintf(stderr, "cudaMalloc failed!");
                goto Error;
            }
        }
    }

    // Copy input vectors from host memory to GPU buffers.
    {
        const int* hostSrc[2] = { a, b };
        int* devDst[2] = { dev_a, dev_b };
        for (int i = 0; i < 2; ++i) {
            cudaStatus = cudaMemcpy(devDst[i], hostSrc[i], bytes, cudaMemcpyHostToDevice);
            if (cudaStatus != cudaSuccess) {
                fprintf(stderr, "cudaMemcpy failed!");
                goto Error;
            }
        }
    }

    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);

    // Launch-configuration errors surface via cudaGetLastError().
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // Wait for the kernel and surface any asynchronous execution errors.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
|
fee53fe4f8ef48cdaa8398e31b16f9888cb162ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <contract.h>
#include <constants.h>
#include <device_opts_inline.h>
#include <utils.h>
#include <stdio.h>
using namespace contract;
// =========== Constant memory references ================//
// Device __constant__ tables of non-zero spin index combinations and their
// coefficients for the pipi contraction diagrams (~14 Kb total, well under
// the 64 Kb constant-memory budget). Filled once from the host arrays
// declared in the contract headers.
__constant__ short int c_spinIndices_pipi_square[256][6]; // 3 Kb
__constant__ float c_coef_pipi_square[256][2]; // 2 Kb
__constant__ short int c_spinIndices_pipi_doubleTriangle[256][6]; // 3 Kb
__constant__ float c_coef_pipi_doubleTriangle[256][2]; // 2 Kb
__constant__ short int c_spinIndices_pipi_doubleTriangle_hor[256][4]; // 2Kb
__constant__ float c_coef_pipi_doubleTriangle_hor[256][2]; // 2Kb
// 14 Kb total
// ======================================================//
// Guard checked by the host drivers before launching any kernels.
// NOTE(review): nothing in this visible chunk sets it to true — presumably
// the (unseen) public init entry point calls copy_constants_pipi and then
// flips this flag; confirm.
bool isConstantPiPiPiPiOn = false;
// Uploads all six host-side tables into the __constant__ arrays above.
static void copy_constants_pipi(){
  hipMemcpyToSymbol(c_spinIndices_pipi_square, spinIndices_pipi_square, 256*6*sizeof(short int));
  hipMemcpyToSymbol(c_spinIndices_pipi_doubleTriangle, spinIndices_pipi_doubleTriangle, 256*6*sizeof(short int));
  hipMemcpyToSymbol(c_spinIndices_pipi_doubleTriangle_hor, spinIndices_pipi_doubleTriangle_hor, 256*4*sizeof(short int));
  hipMemcpyToSymbol(c_coef_pipi_square, coef_pipi_square, 256*2*sizeof(float));
  hipMemcpyToSymbol(c_coef_pipi_doubleTriangle, coef_pipi_doubleTriangle, 256*2*sizeof(float));
  hipMemcpyToSymbol(c_coef_pipi_doubleTriangle_hor, coef_pipi_doubleTriangle_hor, 256*2*sizeof(float));
  CHECK_CUDA_ERROR();
}
//=======================================================//
// !!!!!!! for now the code will work only with 100 eigenVectors
// !!!!!!! for now the code will work only with submatrix side 25 ==> 25x25=625 threads
#define BLOCK_SIZE 25
#define NSIZE 100
//=====================================================//
//=====================================================//
// Thin float/double instantiations of the pipi contraction kernels.
// Each kernel body lives in a shared *_core.h header that is written against
// the FLOAT/FLOAT2/FETCH_FLOAT2 macros, so every wrapper just sets the macros
// for its precision, includes the core, and cleans the macros up again.
// All kernels read propagator/momentum data through texture objects and use
// tmp1/tmp2 as scratch; grid/block layout is chosen by the host drivers
// (BLOCK_SIZE x BLOCK_SIZE threads per block).
__global__ void calculate_pipi_doubleTriangleHor_kernel_float(float2* out, hipTextureObject_t texProp, hipTextureObject_t texMomP1, hipTextureObject_t texMomP2, hipTextureObject_t texMomP3, hipTextureObject_t texMomP4, int tf, float2* tmp1, float2* tmp2){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <calculate_pipi_doubleTriangleHor_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//==================================================//
//=====================================================//
__global__ void calculate_pipi_doubleTriangleHor_kernel_double(double2* out, hipTextureObject_t texProp, hipTextureObject_t texMomP1, hipTextureObject_t texMomP2, hipTextureObject_t texMomP3, hipTextureObject_t texMomP4, int tf, double2* tmp1, double2* tmp2){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <calculate_pipi_doubleTriangleHor_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//==================================================//
//=====================================================//
// Square diagram (uses the diagonal propagator texture as well).
__global__ void calculate_pipi_square_kernel_float(float2* out, hipTextureObject_t texProp, hipTextureObject_t texPropDiag, hipTextureObject_t texMomP1, hipTextureObject_t texMomP2, hipTextureObject_t texMomP3, hipTextureObject_t texMomP4, int tf, float2* tmp1, float2* tmp2){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <calculate_pipi_square_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//==================================================//
//=====================================================//
__global__ void calculate_pipi_square_kernel_double(double2* out, hipTextureObject_t texProp, hipTextureObject_t texPropDiag, hipTextureObject_t texMomP1, hipTextureObject_t texMomP2, hipTextureObject_t texMomP3, hipTextureObject_t texMomP4, int tf, double2* tmp1, double2* tmp2){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <calculate_pipi_square_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//=====================================================//
// Double-triangle diagram.
__global__ void calculate_pipi_doubleTriangle_kernel_float(float2* out, hipTextureObject_t texProp, hipTextureObject_t texPropDiag, hipTextureObject_t texMomP1, hipTextureObject_t texMomP2, hipTextureObject_t texMomP3, hipTextureObject_t texMomP4, int tf, float2* tmp1, float2* tmp2){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <calculate_pipi_doubleTriangle_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//==================================================//
//=====================================================//
__global__ void calculate_pipi_doubleTriangle_kernel_double(double2* out, hipTextureObject_t texProp, hipTextureObject_t texPropDiag, hipTextureObject_t texMomP1, hipTextureObject_t texMomP2, hipTextureObject_t texMomP3, hipTextureObject_t texMomP4, int tf, double2* tmp1, double2* tmp2){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <calculate_pipi_doubleTriangle_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
// =================================================//
// Star/fish diagrams share one core: the host driver swaps the momentum
// textures (P3 <-> P4) to get the second diagram from the same kernel.
__global__ void calculate_pipi_starfish_kernel_float(float2* out, hipTextureObject_t texProp, hipTextureObject_t texMomP1, hipTextureObject_t texMomP2, hipTextureObject_t texMomP3, hipTextureObject_t texMomP4, int tf, float2* tmp1, float2* tmp2){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <calculate_pipi_starfish_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//==================================================//
// =================================================//
__global__ void calculate_pipi_starfish_kernel_double(double2* out, hipTextureObject_t texProp, hipTextureObject_t texMomP1, hipTextureObject_t texMomP2, hipTextureObject_t texMomP3, hipTextureObject_t texMomP4, int tf, double2* tmp1, double2* tmp2){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <calculate_pipi_starfish_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//==================================================//
// Host driver for the I=1 pipi correlator: launches the square,
// double-triangle, star and fish diagram kernels (float2 or double2
// instantiation chosen from the template parameter), reduces the per-spin
// partial results on the host, combines the star/fish trace pairs into
// Tr*Tr products, and writes 5 diagram channels per timeslice into `corr`
// (layout: corr[t*5*2 + diagram*2 + {re,im}]; the 5th channel is zeroed —
// I=1 only has 4 diagrams). Requires copy_constants_pipi to have run
// (isConstantPiPiPiPiOn guard).
template<typename Float2, typename Float>
static void calculate_pipi_kernel(hipTextureObject_t texProp, hipTextureObject_t texPropDiag, hipTextureObject_t texMomP1, hipTextureObject_t texMomP2, hipTextureObject_t texMomP3, hipTextureObject_t texMomP4, int tf, int Nt, Float* corr){
  if(!isConstantPiPiPiPiOn)
    ABORT("Error: You need to initialize device constants before calling Kernels\n");
  int numBlocks_square = Nt * 256; // 256 non-zero spin combinations
  int numBlocks_doubleTriangle = Nt * 256;
  int numBlocks_star = Nt * 16; // 16 non-zero spin combinations
  int numBlocks_fish = Nt * 16;
  dim3 blockDim(BLOCK_SIZE,BLOCK_SIZE,1); // 625 threads
  dim3 gridDim_square(numBlocks_square,1,1);
  dim3 gridDim_doubleTriangle(numBlocks_doubleTriangle,1,1);
  dim3 gridDim_star(numBlocks_star,1,1);
  dim3 gridDim_fish(numBlocks_fish,1,1);
  // Host-side result buffers: one complex number per (timeslice, spin combo);
  // star/fish store two traces each (hence the extra factor of 2).
  Float *h_square = NULL;
  Float *h_doubleTriangle = NULL;
  Float *h_star = NULL;
  Float *h_fish = NULL;
  h_square = (Float*) malloc(numBlocks_square*2*sizeof(Float));
  h_doubleTriangle = (Float*) malloc(numBlocks_doubleTriangle*2*sizeof(Float));
  h_star = (Float*) malloc(numBlocks_star*2*2*sizeof(Float)); // two traces to store
  h_fish = (Float*) malloc(numBlocks_fish*2*2*sizeof(Float)); // two traces to store
  if(h_square == NULL || h_doubleTriangle == NULL || h_star == NULL || h_fish == NULL)
    ABORT("Error allocating memory\n");
  Float *d_square = NULL;
  Float *d_doubleTriangle = NULL;
  Float *d_star = NULL;
  Float *d_fish = NULL;
  hipMalloc((void**)&d_square, numBlocks_square*2*sizeof(Float));
  hipMalloc((void**)&d_doubleTriangle, numBlocks_doubleTriangle*2*sizeof(Float));
  hipMalloc((void**)&d_star, numBlocks_star*2*2*sizeof(Float));
  hipMalloc((void**)&d_fish, numBlocks_fish*2*2*sizeof(Float));
  CHECK_CUDA_ERROR();
  // Scratch matrices shared by all kernels (sized for the largest launch).
  Float *tmp1 = NULL;
  Float *tmp2 = NULL;
  hipMalloc((void**)&tmp1, numBlocks_square*NSIZE*NSIZE*2*sizeof(Float));
  CHECK_CUDA_ERROR();
  hipMalloc((void**)&tmp2, numBlocks_square*NSIZE*NSIZE*2*sizeof(Float));
  CHECK_CUDA_ERROR();
  //++++
  // Dispatch on precision; the fish diagram reuses the starfish kernel with
  // the P3/P4 momentum textures swapped.
  if( typeid(Float2) == typeid(float2) ){
    hipLaunchKernelGGL(( calculate_pipi_square_kernel_float), dim3(gridDim_square),dim3(blockDim), 0, 0, (float2*) d_square, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_doubleTriangle_kernel_float), dim3(gridDim_doubleTriangle),dim3(blockDim), 0, 0, (float2*) d_doubleTriangle, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_starfish_kernel_float), dim3(gridDim_star),dim3(blockDim), 0, 0, (float2*) d_star, texProp, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_starfish_kernel_float), dim3(gridDim_fish),dim3(blockDim), 0, 0, (float2*) d_fish, texProp, texMomP1, texMomP2, texMomP4, texMomP3, tf, (float2*) tmp1, (float2*) tmp2);
  }
  else if ( typeid(Float2) == typeid(double2) ){
    hipLaunchKernelGGL(( calculate_pipi_square_kernel_double), dim3(gridDim_square),dim3(blockDim), 0, 0, (double2*) d_square, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_doubleTriangle_kernel_double), dim3(gridDim_doubleTriangle),dim3(blockDim), 0, 0, (double2*) d_doubleTriangle, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_starfish_kernel_double), dim3(gridDim_star),dim3(blockDim), 0, 0, (double2*) d_star, texProp, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_starfish_kernel_double), dim3(gridDim_fish),dim3(blockDim), 0, 0, (double2*) d_fish, texProp, texMomP1, texMomP2, texMomP4, texMomP3, tf, (double2*) tmp1, (double2*) tmp2);
  }
  else
    ABORT("Something fishy is happening\n");
  //++++
  // Blocking copies double as synchronization with the kernels above.
  hipMemcpy(h_square, d_square, numBlocks_square*2*sizeof(Float), hipMemcpyDeviceToHost);
  hipMemcpy(h_doubleTriangle, d_doubleTriangle, numBlocks_doubleTriangle*2*sizeof(Float), hipMemcpyDeviceToHost);
  hipMemcpy(h_star, d_star, numBlocks_star*2*2*sizeof(Float), hipMemcpyDeviceToHost);
  hipMemcpy(h_fish, d_fish, numBlocks_fish*2*2*sizeof(Float), hipMemcpyDeviceToHost);
  CHECK_CUDA_ERROR();
  // Host reduction over the 256 spin combinations per timeslice.
  Float *h_square_reduce = (Float*) calloc(Nt*2,sizeof(Float));
  Float *h_doubleTriangle_reduce = (Float*) calloc(Nt*2,sizeof(Float));
  if(h_square_reduce == NULL || h_doubleTriangle_reduce == NULL)ABORT("Error allocating memory for reduction\n");
  for(int ti = 0 ; ti < Nt ; ti++)
    for(int is = 0 ; is < 256 ; is++){
      h_square_reduce[ti*2 + 0] += h_square[ti*256*2 + is*2 + 0];
      h_square_reduce[ti*2 + 1] += h_square[ti*256*2 + is*2 + 1];
      h_doubleTriangle_reduce[ti*2 + 0] += h_doubleTriangle[ti*256*2 + is*2 + 0];
      h_doubleTriangle_reduce[ti*2 + 1] += h_doubleTriangle[ti*256*2 + is*2 + 1];
    }
  // Star/fish: reduce each of the two traces over 16 spin combinations ...
  Float *h_star_reduce = (Float*) calloc(2*Nt*2,sizeof(Float));
  Float *h_fish_reduce = (Float*) calloc(2*Nt*2,sizeof(Float));
  if(h_star_reduce == NULL || h_fish_reduce == NULL) ABORT("Error allocating memory for reduction\n");
  // NOTE(review): the two calloc results below are not NULL-checked like the others.
  Float *h_star_trtr = (Float*) calloc(Nt*2,sizeof(Float));
  Float *h_fish_trtr = (Float*) calloc(Nt*2,sizeof(Float));
  for(int ti = 0 ; ti < Nt ; ti++)
    for(int is = 0 ; is < 16 ; is++)
      for(int tr = 0 ; tr < 2 ; tr++){
        h_star_reduce[tr*Nt*2 + ti*2 + 0] += h_star[tr*Nt*16*2 + ti*16*2 + is*2 + 0];
        h_star_reduce[tr*Nt*2 + ti*2 + 1] += h_star[tr*Nt*16*2 + ti*16*2 + is*2 + 1];
        h_fish_reduce[tr*Nt*2 + ti*2 + 0] += h_fish[tr*Nt*16*2 + ti*16*2 + is*2 + 0];
        h_fish_reduce[tr*Nt*2 + ti*2 + 1] += h_fish[tr*Nt*16*2 + ti*16*2 + is*2 + 1];
      }
  // ... then form the complex product of the two traces (Tr x Tr).
  for(int ti = 0 ; ti < Nt ; ti++){
    h_star_trtr[ti*2+0] = h_star_reduce[0*Nt*2 + ti*2 + 0]*h_star_reduce[1*Nt*2 + ti*2 + 0] - h_star_reduce[0*Nt*2 + ti*2 + 1]*h_star_reduce[1*Nt*2 + ti*2 + 1];
    h_star_trtr[ti*2+1] = h_star_reduce[0*Nt*2 + ti*2 + 0]*h_star_reduce[1*Nt*2 + ti*2 + 1] + h_star_reduce[0*Nt*2 + ti*2 + 1]*h_star_reduce[1*Nt*2 + ti*2 + 0];
    h_fish_trtr[ti*2+0] = h_fish_reduce[0*Nt*2 + ti*2 + 0]*h_fish_reduce[1*Nt*2 + ti*2 + 0] - h_fish_reduce[0*Nt*2 + ti*2 + 1]*h_fish_reduce[1*Nt*2 + ti*2 + 1];
    h_fish_trtr[ti*2+1] = h_fish_reduce[0*Nt*2 + ti*2 + 0]*h_fish_reduce[1*Nt*2 + ti*2 + 1] + h_fish_reduce[0*Nt*2 + ti*2 + 1]*h_fish_reduce[1*Nt*2 + ti*2 + 0];
  }
  memset(corr, 0, Nt*5*2*sizeof(Float)); // 5 because we have 5 diagrams
  /*
    for(int ti = 0 ; ti < Nt ; ti++){
    corr[ti*2+0] = - (2.*h_square_reduce[ti*2 + 0] - 2.*h_doubleTriangle_reduce[ti*2 + 0] + h_star_trtr[ti*2 + 0] - h_fish_trtr[ti*2 + 0]);
    corr[ti*2+1] = - (2.*h_square_reduce[ti*2 + 1] - 2.*h_doubleTriangle_reduce[ti*2 + 1] + h_star_trtr[ti*2 + 1] - h_fish_trtr[ti*2 + 1]);
    }
  */
  // Write each diagram into its own channel (signs per the I=1 Wick contractions).
  for(int ti = 0 ; ti < Nt ; ti++){
    corr[ti*5*2 + 0*2 +0] = -2.*h_square_reduce[ti*2 + 0];
    corr[ti*5*2 + 0*2 +1] = -2.*h_square_reduce[ti*2 + 1];
    corr[ti*5*2 + 1*2 +0] = 2.*h_doubleTriangle_reduce[ti*2 + 0];
    corr[ti*5*2 + 1*2 +1] = 2.*h_doubleTriangle_reduce[ti*2 + 1];
    corr[ti*5*2 + 2*2 +0] = -h_star_trtr[ti*2 + 0];
    corr[ti*5*2 + 2*2 +1] = -h_star_trtr[ti*2 + 1];
    corr[ti*5*2 + 3*2 +0] = h_fish_trtr[ti*2 + 0];
    corr[ti*5*2 + 3*2 +1] = h_fish_trtr[ti*2 + 1];
    corr[ti*5*2 + 4*2 +0] = 0.; // for I=1 there are only 4 diagrams
    corr[ti*5*2 + 4*2 +1] = 0.;
  }
  // Release all host and device scratch.
  free(h_star_trtr);
  free(h_fish_trtr);
  free(h_star_reduce);
  free(h_fish_reduce);
  free(h_square_reduce);
  free(h_doubleTriangle_reduce);
  hipFree(tmp1);
  hipFree(tmp2);
  hipFree(d_square);
  hipFree(d_doubleTriangle);
  hipFree(d_star);
  hipFree(d_fish);
  CHECK_CUDA_ERROR();
  free(h_square);
  free(h_doubleTriangle);
  free(h_star);
  free(h_fish);
}
//=========================================================//
template<typename Float2, typename Float>
// Host driver for the I=0 pi-pi contraction (HIP build).
// Launches the square, vertical/horizontal double-triangle and star/fish
// diagram kernels for all Nt sink times, copies the per-spin-combination
// partial sums back to the host, reduces them, forms the trace*trace
// products for star and fish, and writes the five diagrams into
// corr[t*5*2 + diagram*2 + re/im].
// Preconditions: device constants must have been copied first
// (run_CopyConstantsPiPi); corr must hold Nt*5*2 Floats.
static void calculate_pipi_kernel_I0(hipTextureObject_t texProp, hipTextureObject_t texPropDiag, hipTextureObject_t texMomP1, hipTextureObject_t texMomP2, hipTextureObject_t texMomP3, hipTextureObject_t texMomP4, int tf, int Nt, Float* corr){
  if(!isConstantPiPiPiPiOn)
    ABORT("Error: You need to initialize device constants before calling Kernels\n");
  // One block per (timeslice, spin combination) pair.
  int numBlocks_square = Nt * 256; // 256 non-zero spin combinations
  int numBlocks_doubleTriangle = Nt * 256;
  int numBlocks_doubleTriangle_hor = Nt * 256;
  int numBlocks_star = Nt * 16; // 16 non-zero spin combinations
  int numBlocks_fish = Nt * 16;
  dim3 blockDim(BLOCK_SIZE,BLOCK_SIZE,1); // 625 threads
  dim3 gridDim_square(numBlocks_square,1,1);
  dim3 gridDim_doubleTriangle(numBlocks_doubleTriangle,1,1);
  dim3 gridDim_doubleTriangle_hor(numBlocks_doubleTriangle_hor,1,1);
  dim3 gridDim_star(numBlocks_star,1,1);
  dim3 gridDim_fish(numBlocks_fish,1,1);
  // Host-side result buffers (one complex number per block; star/fish keep
  // two separate traces, hence the extra factor of 2).
  Float *h_square = NULL;
  Float *h_doubleTriangle = NULL;
  Float *h_doubleTriangle_hor = NULL;
  Float *h_star = NULL;
  Float *h_fish = NULL;
  h_square = (Float*) malloc(numBlocks_square*2*sizeof(Float));
  h_doubleTriangle = (Float*) malloc(numBlocks_doubleTriangle*2*sizeof(Float));
  h_doubleTriangle_hor = (Float*) malloc(numBlocks_doubleTriangle_hor*2*sizeof(Float));
  h_star = (Float*) malloc(numBlocks_star*2*2*sizeof(Float)); // two traces to store
  h_fish = (Float*) malloc(numBlocks_fish*2*2*sizeof(Float)); // two traces to store
  if(h_square == NULL || h_doubleTriangle == NULL || h_doubleTriangle_hor == NULL || h_star == NULL || h_fish == NULL)
    ABORT("Error allocating memory\n");
  // Matching device-side output buffers.
  Float *d_square = NULL;
  Float *d_doubleTriangle = NULL;
  Float *d_doubleTriangle_hor = NULL;
  Float *d_star = NULL;
  Float *d_fish = NULL;
  hipMalloc((void**)&d_square, numBlocks_square*2*sizeof(Float));
  hipMalloc((void**)&d_doubleTriangle, numBlocks_doubleTriangle*2*sizeof(Float));
  hipMalloc((void**)&d_doubleTriangle_hor, numBlocks_doubleTriangle_hor*2*sizeof(Float));
  hipMalloc((void**)&d_star, numBlocks_star*2*2*sizeof(Float));
  hipMalloc((void**)&d_fish, numBlocks_fish*2*2*sizeof(Float));
  CHECK_CUDA_ERROR();
  // Device scratch buffers shared by all kernels; sized for the largest grid.
  Float *tmp1 = NULL;
  Float *tmp2 = NULL;
  hipMalloc((void**)&tmp1, numBlocks_square*NSIZE*NSIZE*2*sizeof(Float));
  CHECK_CUDA_ERROR();
  hipMalloc((void**)&tmp2, numBlocks_square*NSIZE*NSIZE*2*sizeof(Float));
  CHECK_CUDA_ERROR();
  //++++
  // Dispatch on precision; the fish diagram reuses the starfish kernel with
  // momenta P3 and P4 swapped.
  if( typeid(Float2) == typeid(float2) ){
    hipLaunchKernelGGL(( calculate_pipi_square_kernel_float), dim3(gridDim_square),dim3(blockDim), 0, 0, (float2*) d_square, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_doubleTriangle_kernel_float), dim3(gridDim_doubleTriangle),dim3(blockDim), 0, 0, (float2*) d_doubleTriangle, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_doubleTriangleHor_kernel_float), dim3(gridDim_doubleTriangle_hor),dim3(blockDim), 0, 0, (float2*) d_doubleTriangle_hor, texProp, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_starfish_kernel_float), dim3(gridDim_star),dim3(blockDim), 0, 0, (float2*) d_star, texProp, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_starfish_kernel_float), dim3(gridDim_fish),dim3(blockDim), 0, 0, (float2*) d_fish, texProp, texMomP1, texMomP2, texMomP4, texMomP3, tf, (float2*) tmp1, (float2*) tmp2);
  }
  else if ( typeid(Float2) == typeid(double2) ){
    hipLaunchKernelGGL(( calculate_pipi_square_kernel_double), dim3(gridDim_square),dim3(blockDim), 0, 0, (double2*) d_square, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_doubleTriangle_kernel_double), dim3(gridDim_doubleTriangle),dim3(blockDim), 0, 0, (double2*) d_doubleTriangle, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_doubleTriangleHor_kernel_double), dim3(gridDim_doubleTriangle_hor),dim3(blockDim), 0, 0, (double2*) d_doubleTriangle_hor, texProp, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_starfish_kernel_double), dim3(gridDim_star),dim3(blockDim), 0, 0, (double2*) d_star, texProp, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    hipLaunchKernelGGL(( calculate_pipi_starfish_kernel_double), dim3(gridDim_fish),dim3(blockDim), 0, 0, (double2*) d_fish, texProp, texMomP1, texMomP2, texMomP4, texMomP3, tf, (double2*) tmp1, (double2*) tmp2);
  }
  else
    ABORT("Something fishy is happening\n");
  //++++
  // Synchronous copies: these also act as the barrier for the kernel launches.
  hipMemcpy(h_square, d_square, numBlocks_square*2*sizeof(Float), hipMemcpyDeviceToHost);
  hipMemcpy(h_doubleTriangle, d_doubleTriangle, numBlocks_doubleTriangle*2*sizeof(Float), hipMemcpyDeviceToHost);
  hipMemcpy(h_doubleTriangle_hor, d_doubleTriangle_hor, numBlocks_doubleTriangle_hor*2*sizeof(Float), hipMemcpyDeviceToHost);
  hipMemcpy(h_star, d_star, numBlocks_star*2*2*sizeof(Float), hipMemcpyDeviceToHost);
  hipMemcpy(h_fish, d_fish, numBlocks_fish*2*2*sizeof(Float), hipMemcpyDeviceToHost);
  CHECK_CUDA_ERROR();
  // Reduce over the 256 spin combinations per timeslice.
  Float *h_square_reduce = (Float*) calloc(Nt*2,sizeof(Float));
  Float *h_doubleTriangle_reduce = (Float*) calloc(Nt*2,sizeof(Float));
  Float *h_doubleTriangle_hor_reduce = (Float*) calloc(Nt*2,sizeof(Float));
  if(h_square_reduce == NULL || h_doubleTriangle_reduce == NULL || h_doubleTriangle_hor_reduce == NULL)ABORT("Error allocating memory for reduction\n");
  for(int ti = 0 ; ti < Nt ; ti++)
    for(int is = 0 ; is < 256 ; is++){
      h_square_reduce[ti*2 + 0] += h_square[ti*256*2 + is*2 + 0];
      h_square_reduce[ti*2 + 1] += h_square[ti*256*2 + is*2 + 1];
      h_doubleTriangle_reduce[ti*2 + 0] += h_doubleTriangle[ti*256*2 + is*2 + 0];
      h_doubleTriangle_reduce[ti*2 + 1] += h_doubleTriangle[ti*256*2 + is*2 + 1];
      h_doubleTriangle_hor_reduce[ti*2 + 0] += h_doubleTriangle_hor[ti*256*2 + is*2 + 0];
      h_doubleTriangle_hor_reduce[ti*2 + 1] += h_doubleTriangle_hor[ti*256*2 + is*2 + 1];
    }
  // Reduce the two star/fish traces over the 16 spin combinations.
  Float *h_star_reduce = (Float*) calloc(2*Nt*2,sizeof(Float));
  Float *h_fish_reduce = (Float*) calloc(2*Nt*2,sizeof(Float));
  if(h_star_reduce == NULL || h_fish_reduce == NULL) ABORT("Error allocating memory for reduction\n");
  Float *h_star_trtr = (Float*) calloc(Nt*2,sizeof(Float));
  Float *h_fish_trtr = (Float*) calloc(Nt*2,sizeof(Float));
  // Fix: these two allocations were previously used without a NULL check.
  if(h_star_trtr == NULL || h_fish_trtr == NULL) ABORT("Error allocating memory for reduction\n");
  for(int ti = 0 ; ti < Nt ; ti++)
    for(int is = 0 ; is < 16 ; is++)
      for(int tr = 0 ; tr < 2 ; tr++){
        h_star_reduce[tr*Nt*2 + ti*2 + 0] += h_star[tr*Nt*16*2 + ti*16*2 + is*2 + 0];
        h_star_reduce[tr*Nt*2 + ti*2 + 1] += h_star[tr*Nt*16*2 + ti*16*2 + is*2 + 1];
        h_fish_reduce[tr*Nt*2 + ti*2 + 0] += h_fish[tr*Nt*16*2 + ti*16*2 + is*2 + 0];
        h_fish_reduce[tr*Nt*2 + ti*2 + 1] += h_fish[tr*Nt*16*2 + ti*16*2 + is*2 + 1];
      }
  // Complex product trace0 * trace1 for star and fish.
  for(int ti = 0 ; ti < Nt ; ti++){
    h_star_trtr[ti*2+0] = h_star_reduce[0*Nt*2 + ti*2 + 0]*h_star_reduce[1*Nt*2 + ti*2 + 0] - h_star_reduce[0*Nt*2 + ti*2 + 1]*h_star_reduce[1*Nt*2 + ti*2 + 1];
    h_star_trtr[ti*2+1] = h_star_reduce[0*Nt*2 + ti*2 + 0]*h_star_reduce[1*Nt*2 + ti*2 + 1] + h_star_reduce[0*Nt*2 + ti*2 + 1]*h_star_reduce[1*Nt*2 + ti*2 + 0];
    h_fish_trtr[ti*2+0] = h_fish_reduce[0*Nt*2 + ti*2 + 0]*h_fish_reduce[1*Nt*2 + ti*2 + 0] - h_fish_reduce[0*Nt*2 + ti*2 + 1]*h_fish_reduce[1*Nt*2 + ti*2 + 1];
    h_fish_trtr[ti*2+1] = h_fish_reduce[0*Nt*2 + ti*2 + 0]*h_fish_reduce[1*Nt*2 + ti*2 + 1] + h_fish_reduce[0*Nt*2 + ti*2 + 1]*h_fish_reduce[1*Nt*2 + ti*2 + 0];
  }
  memset(corr, 0, Nt*5*2*sizeof(Float));
  /*
  for(int ti = 0 ; ti < Nt ; ti++){
    corr[ti*2+0] = (-1./3.)*h_square_reduce[ti*2 + 0] - (1./3.)*h_doubleTriangle_reduce[ti*2 + 0] - (5./3.)*h_doubleTriangle_hor_reduce[ti*2 + 0] + h_star_trtr[ti*2 + 0] + h_fish_trtr[ti*2 + 0];
    corr[ti*2+1] = (-1./3.)*h_square_reduce[ti*2 + 1] - (1./3.)*h_doubleTriangle_reduce[ti*2 + 1] - (5./3.)*h_doubleTriangle_hor_reduce[ti*2 + 1] + h_star_trtr[ti*2 + 1] + h_fish_trtr[ti*2 + 1];
  }
  */
  // Store each diagram separately (I=0 Clebsch-Gordan factors applied here).
  for(int ti = 0 ; ti < Nt ; ti++){
    corr[ti*5*2 + 0*2 +0] = (-1./3.)*h_square_reduce[ti*2 + 0];
    corr[ti*5*2 + 0*2 +1] = (-1./3.)*h_square_reduce[ti*2 + 1];
    corr[ti*5*2 + 1*2 +0] = -(1./3.)*h_doubleTriangle_reduce[ti*2 + 0];
    corr[ti*5*2 + 1*2 +1] = -(1./3.)*h_doubleTriangle_reduce[ti*2 + 1];
    corr[ti*5*2 + 2*2 +0] = h_star_trtr[ti*2 + 0];
    corr[ti*5*2 + 2*2 +1] = h_star_trtr[ti*2 + 1];
    corr[ti*5*2 + 3*2 +0] = h_fish_trtr[ti*2 + 0];
    corr[ti*5*2 + 3*2 +1] = h_fish_trtr[ti*2 + 1];
    corr[ti*5*2 + 4*2 +0] = -(5./3.)*h_doubleTriangle_hor_reduce[ti*2 + 0];
    corr[ti*5*2 + 4*2 +1] = -(5./3.)*h_doubleTriangle_hor_reduce[ti*2 + 1];
  }
  // Release host and device resources.
  free(h_star_trtr);
  free(h_fish_trtr);
  free(h_star_reduce);
  free(h_fish_reduce);
  free(h_square_reduce);
  free(h_doubleTriangle_reduce);
  free(h_doubleTriangle_hor_reduce);
  hipFree(tmp1);
  hipFree(tmp2);
  hipFree(d_square);
  hipFree(d_doubleTriangle);
  hipFree(d_doubleTriangle_hor);
  hipFree(d_star);
  hipFree(d_fish);
  CHECK_CUDA_ERROR();
  free(h_square);
  free(h_doubleTriangle);
  free(h_doubleTriangle_hor);
  free(h_star);
  free(h_fish);
}
//===================================================//
// Public entry point for the I=0 pi-pi contraction (HIP build).
// Dispatches to the single- or double-precision driver based on prec;
// corr must point to Nt*5*2 floats or doubles accordingly.
void contract::run_ContractPiPi_I0(hipTextureObject_t texProp, hipTextureObject_t texPropDiag, hipTextureObject_t texMomP1, hipTextureObject_t texMomP2, hipTextureObject_t texMomP3, hipTextureObject_t texMomP4, int tf, int Nt, void* corr, PRECISION prec){
  if(prec == SINGLE){
    calculate_pipi_kernel_I0<float2,float>(texProp,texPropDiag,texMomP1,texMomP2,texMomP3,texMomP4,tf,Nt,(float*) corr);
  }
  else if (prec == DOUBLE){
    calculate_pipi_kernel_I0<double2,double>(texProp,texPropDiag,texMomP1,texMomP2,texMomP3,texMomP4,tf,Nt,(double*) corr);
  }
  else{
    // Fixed typo in the original message ("in not" -> "is not").
    ABORT("Error: this precision is not implemented");
  }
}
//===================================================//
// Public entry point for the I=1 pi-pi contraction (HIP build).
// Dispatches to the single- or double-precision driver based on prec;
// corr must point to Nt*5*2 floats or doubles accordingly.
void contract::run_ContractPiPi(hipTextureObject_t texProp, hipTextureObject_t texPropDiag, hipTextureObject_t texMomP1, hipTextureObject_t texMomP2, hipTextureObject_t texMomP3, hipTextureObject_t texMomP4, int tf, int Nt, void* corr, PRECISION prec){
  if(prec == SINGLE){
    calculate_pipi_kernel<float2,float>(texProp,texPropDiag,texMomP1,texMomP2,texMomP3,texMomP4,tf,Nt,(float*) corr);
  }
  else if (prec == DOUBLE){
    calculate_pipi_kernel<double2,double>(texProp,texPropDiag,texMomP1,texMomP2,texMomP3,texMomP4,tf,Nt,(double*) corr);
  }
  else{
    // Fixed typo in the original message ("in not" -> "is not").
    ABORT("Error: this precision is not implemented");
  }
}
//===================================================//
// Copies the pi-pi spin-index tables and coefficients into device constant
// memory. Idempotent: after the first successful copy, subsequent calls are
// skipped with a warning.
void contract::run_CopyConstantsPiPi(){
  if(!isConstantPiPiPiPiOn){
    copy_constants_pipi();
    isConstantPiPiPiPiOn = true;
  }
  else{
    WARNING("Warning: Copy constants for pi-pi again will be skipped\n");
  }
}
//==================================================//
| fee53fe4f8ef48cdaa8398e31b16f9888cb162ec.cu | #include <contract.h>
#include <constants.h>
#include <device_opts_inline.h>
#include <utils.h>
#include <stdio.h>
using namespace contract;
// =========== Constant memory references ================//
__constant__ short int c_spinIndices_pipi_square[256][6]; // 3 Kb
__constant__ float c_coef_pipi_square[256][2]; // 2 Kb
__constant__ short int c_spinIndices_pipi_doubleTriangle[256][6]; // 3 Kb
__constant__ float c_coef_pipi_doubleTriangle[256][2]; // 2 Kb
__constant__ short int c_spinIndices_pipi_doubleTriangle_hor[256][4]; // 2Kb
__constant__ float c_coef_pipi_doubleTriangle_hor[256][2]; // 2Kb
// 14 Kb total
// ======================================================//
bool isConstantPiPiPiPiOn = false;
// Uploads the host-side pi-pi spin-index tables and coefficient tables
// (declared in constants.h) into the __constant__ memory symbols above.
// Called once via run_CopyConstantsPiPi().
static void copy_constants_pipi(){
cudaMemcpyToSymbol(c_spinIndices_pipi_square, spinIndices_pipi_square, 256*6*sizeof(short int));
cudaMemcpyToSymbol(c_spinIndices_pipi_doubleTriangle, spinIndices_pipi_doubleTriangle, 256*6*sizeof(short int));
cudaMemcpyToSymbol(c_spinIndices_pipi_doubleTriangle_hor, spinIndices_pipi_doubleTriangle_hor, 256*4*sizeof(short int));
cudaMemcpyToSymbol(c_coef_pipi_square, coef_pipi_square, 256*2*sizeof(float));
cudaMemcpyToSymbol(c_coef_pipi_doubleTriangle, coef_pipi_doubleTriangle, 256*2*sizeof(float));
cudaMemcpyToSymbol(c_coef_pipi_doubleTriangle_hor, coef_pipi_doubleTriangle_hor, 256*2*sizeof(float));
CHECK_CUDA_ERROR();
}
//=======================================================//
// !!!!!!! for now the code will work only with 100 eigenVectors
// !!!!!!! for now the code will work only with submatrix side 25 ==> 25x25=625 threads
#define BLOCK_SIZE 25
#define NSIZE 100
//=====================================================//
//=====================================================//
// Horizontal double-triangle diagram kernel, single precision.
// The body is instantiated from calculate_pipi_doubleTriangleHor_core.h via
// the FLOAT/FLOAT2/FETCH_FLOAT2 macros. Launched by the host drivers below
// with Nt*256 blocks of BLOCK_SIZE x BLOCK_SIZE threads; tmp1/tmp2 are
// device scratch buffers.
__global__ void calculate_pipi_doubleTriangleHor_kernel_float(float2* out, cudaTextureObject_t texProp, cudaTextureObject_t texMomP1, cudaTextureObject_t texMomP2, cudaTextureObject_t texMomP3, cudaTextureObject_t texMomP4, int tf, float2* tmp1, float2* tmp2){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <calculate_pipi_doubleTriangleHor_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//==================================================//
//=====================================================//
// Horizontal double-triangle diagram kernel, double precision.
// Same core as the float variant, instantiated with double/double2 macros.
__global__ void calculate_pipi_doubleTriangleHor_kernel_double(double2* out, cudaTextureObject_t texProp, cudaTextureObject_t texMomP1, cudaTextureObject_t texMomP2, cudaTextureObject_t texMomP3, cudaTextureObject_t texMomP4, int tf, double2* tmp1, double2* tmp2){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <calculate_pipi_doubleTriangleHor_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//==================================================//
//=====================================================//
// Square diagram kernel, single precision.
// Body instantiated from calculate_pipi_square_core.h via the
// FLOAT/FLOAT2/FETCH_FLOAT2 macros. Launched with Nt*256 blocks of
// BLOCK_SIZE x BLOCK_SIZE threads (see the host drivers below).
__global__ void calculate_pipi_square_kernel_float(float2* out, cudaTextureObject_t texProp, cudaTextureObject_t texPropDiag, cudaTextureObject_t texMomP1, cudaTextureObject_t texMomP2, cudaTextureObject_t texMomP3, cudaTextureObject_t texMomP4, int tf, float2* tmp1, float2* tmp2){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <calculate_pipi_square_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//==================================================//
//=====================================================//
// Square diagram kernel, double precision.
// Same core as the float variant, instantiated with double/double2 macros.
__global__ void calculate_pipi_square_kernel_double(double2* out, cudaTextureObject_t texProp, cudaTextureObject_t texPropDiag, cudaTextureObject_t texMomP1, cudaTextureObject_t texMomP2, cudaTextureObject_t texMomP3, cudaTextureObject_t texMomP4, int tf, double2* tmp1, double2* tmp2){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <calculate_pipi_square_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//=====================================================//
// (Vertical) double-triangle diagram kernel, single precision.
// Body instantiated from calculate_pipi_doubleTriangle_core.h via the
// FLOAT/FLOAT2/FETCH_FLOAT2 macros. Launched with Nt*256 blocks of
// BLOCK_SIZE x BLOCK_SIZE threads.
__global__ void calculate_pipi_doubleTriangle_kernel_float(float2* out, cudaTextureObject_t texProp, cudaTextureObject_t texPropDiag, cudaTextureObject_t texMomP1, cudaTextureObject_t texMomP2, cudaTextureObject_t texMomP3, cudaTextureObject_t texMomP4, int tf, float2* tmp1, float2* tmp2){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <calculate_pipi_doubleTriangle_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//==================================================//
//=====================================================//
// (Vertical) double-triangle diagram kernel, double precision.
// Same core as the float variant, instantiated with double/double2 macros.
__global__ void calculate_pipi_doubleTriangle_kernel_double(double2* out, cudaTextureObject_t texProp, cudaTextureObject_t texPropDiag, cudaTextureObject_t texMomP1, cudaTextureObject_t texMomP2, cudaTextureObject_t texMomP3, cudaTextureObject_t texMomP4, int tf, double2* tmp1, double2* tmp2){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <calculate_pipi_doubleTriangle_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
// =================================================//
// Star/fish diagram kernel, single precision.
// Body instantiated from calculate_pipi_starfish_core.h. Launched with Nt*16
// blocks of BLOCK_SIZE x BLOCK_SIZE threads; the host drivers call it twice,
// with texMomP3/texMomP4 swapped to obtain the fish diagram.
__global__ void calculate_pipi_starfish_kernel_float(float2* out, cudaTextureObject_t texProp, cudaTextureObject_t texMomP1, cudaTextureObject_t texMomP2, cudaTextureObject_t texMomP3, cudaTextureObject_t texMomP4, int tf, float2* tmp1, float2* tmp2){
#define FLOAT2 float2
#define FLOAT float
#define FETCH_FLOAT2 fetch_float2
#include <calculate_pipi_starfish_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//==================================================//
// =================================================//
// Star/fish diagram kernel, double precision.
// Same core as the float variant, instantiated with double/double2 macros.
__global__ void calculate_pipi_starfish_kernel_double(double2* out, cudaTextureObject_t texProp, cudaTextureObject_t texMomP1, cudaTextureObject_t texMomP2, cudaTextureObject_t texMomP3, cudaTextureObject_t texMomP4, int tf, double2* tmp1, double2* tmp2){
#define FLOAT2 double2
#define FLOAT double
#define FETCH_FLOAT2 fetch_double2
#include <calculate_pipi_starfish_core.h>
#undef FLOAT2
#undef FLOAT
#undef FETCH_FLOAT2
}
//==================================================//
template<typename Float2, typename Float>
// Host driver for the I=1 pi-pi contraction (CUDA build).
// Launches the square, double-triangle and star/fish diagram kernels for all
// Nt sink times, reduces the per-spin-combination partial sums on the host,
// forms the trace*trace products for star and fish, and writes four diagrams
// (plus a zero fifth slot) into corr[t*5*2 + diagram*2 + re/im].
// Preconditions: run_CopyConstantsPiPi() must have been called; corr must
// hold Nt*5*2 Floats.
static void calculate_pipi_kernel(cudaTextureObject_t texProp, cudaTextureObject_t texPropDiag, cudaTextureObject_t texMomP1, cudaTextureObject_t texMomP2, cudaTextureObject_t texMomP3, cudaTextureObject_t texMomP4, int tf, int Nt, Float* corr){
  if(!isConstantPiPiPiPiOn)
    ABORT("Error: You need to initialize device constants before calling Kernels\n");
  // One block per (timeslice, spin combination) pair.
  int numBlocks_square = Nt * 256; // 256 non-zero spin combinations
  int numBlocks_doubleTriangle = Nt * 256;
  int numBlocks_star = Nt * 16; // 16 non-zero spin combinations
  int numBlocks_fish = Nt * 16;
  dim3 blockDim(BLOCK_SIZE,BLOCK_SIZE,1); // 625 threads
  dim3 gridDim_square(numBlocks_square,1,1);
  dim3 gridDim_doubleTriangle(numBlocks_doubleTriangle,1,1);
  dim3 gridDim_star(numBlocks_star,1,1);
  dim3 gridDim_fish(numBlocks_fish,1,1);
  // Host-side result buffers (one complex number per block; star/fish keep
  // two separate traces, hence the extra factor of 2).
  Float *h_square = NULL;
  Float *h_doubleTriangle = NULL;
  Float *h_star = NULL;
  Float *h_fish = NULL;
  h_square = (Float*) malloc(numBlocks_square*2*sizeof(Float));
  h_doubleTriangle = (Float*) malloc(numBlocks_doubleTriangle*2*sizeof(Float));
  h_star = (Float*) malloc(numBlocks_star*2*2*sizeof(Float)); // two traces to store
  h_fish = (Float*) malloc(numBlocks_fish*2*2*sizeof(Float)); // two traces to store
  if(h_square == NULL || h_doubleTriangle == NULL || h_star == NULL || h_fish == NULL)
    ABORT("Error allocating memory\n");
  // Matching device-side output buffers.
  Float *d_square = NULL;
  Float *d_doubleTriangle = NULL;
  Float *d_star = NULL;
  Float *d_fish = NULL;
  cudaMalloc((void**)&d_square, numBlocks_square*2*sizeof(Float));
  cudaMalloc((void**)&d_doubleTriangle, numBlocks_doubleTriangle*2*sizeof(Float));
  cudaMalloc((void**)&d_star, numBlocks_star*2*2*sizeof(Float));
  cudaMalloc((void**)&d_fish, numBlocks_fish*2*2*sizeof(Float));
  CHECK_CUDA_ERROR();
  // Device scratch buffers shared by all kernels; sized for the largest grid.
  Float *tmp1 = NULL;
  Float *tmp2 = NULL;
  cudaMalloc((void**)&tmp1, numBlocks_square*NSIZE*NSIZE*2*sizeof(Float));
  CHECK_CUDA_ERROR();
  cudaMalloc((void**)&tmp2, numBlocks_square*NSIZE*NSIZE*2*sizeof(Float));
  CHECK_CUDA_ERROR();
  //++++
  // Dispatch on precision; the fish diagram reuses the starfish kernel with
  // momenta P3 and P4 swapped.
  if( typeid(Float2) == typeid(float2) ){
    calculate_pipi_square_kernel_float<<<gridDim_square,blockDim>>>((float2*) d_square, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    calculate_pipi_doubleTriangle_kernel_float<<<gridDim_doubleTriangle,blockDim>>>((float2*) d_doubleTriangle, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    calculate_pipi_starfish_kernel_float<<<gridDim_star,blockDim>>>((float2*) d_star, texProp, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    calculate_pipi_starfish_kernel_float<<<gridDim_fish,blockDim>>>((float2*) d_fish, texProp, texMomP1, texMomP2, texMomP4, texMomP3, tf, (float2*) tmp1, (float2*) tmp2);
  }
  else if ( typeid(Float2) == typeid(double2) ){
    calculate_pipi_square_kernel_double<<<gridDim_square,blockDim>>>((double2*) d_square, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    calculate_pipi_doubleTriangle_kernel_double<<<gridDim_doubleTriangle,blockDim>>>((double2*) d_doubleTriangle, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    calculate_pipi_starfish_kernel_double<<<gridDim_star,blockDim>>>((double2*) d_star, texProp, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    calculate_pipi_starfish_kernel_double<<<gridDim_fish,blockDim>>>((double2*) d_fish, texProp, texMomP1, texMomP2, texMomP4, texMomP3, tf, (double2*) tmp1, (double2*) tmp2);
  }
  else
    ABORT("Something fishy is happening\n");
  //++++
  // Synchronous copies: these also act as the barrier for the kernel launches.
  cudaMemcpy(h_square, d_square, numBlocks_square*2*sizeof(Float), cudaMemcpyDeviceToHost);
  cudaMemcpy(h_doubleTriangle, d_doubleTriangle, numBlocks_doubleTriangle*2*sizeof(Float), cudaMemcpyDeviceToHost);
  cudaMemcpy(h_star, d_star, numBlocks_star*2*2*sizeof(Float), cudaMemcpyDeviceToHost);
  cudaMemcpy(h_fish, d_fish, numBlocks_fish*2*2*sizeof(Float), cudaMemcpyDeviceToHost);
  CHECK_CUDA_ERROR();
  // Reduce over the 256 spin combinations per timeslice.
  Float *h_square_reduce = (Float*) calloc(Nt*2,sizeof(Float));
  Float *h_doubleTriangle_reduce = (Float*) calloc(Nt*2,sizeof(Float));
  if(h_square_reduce == NULL || h_doubleTriangle_reduce == NULL)ABORT("Error allocating memory for reduction\n");
  for(int ti = 0 ; ti < Nt ; ti++)
    for(int is = 0 ; is < 256 ; is++){
      h_square_reduce[ti*2 + 0] += h_square[ti*256*2 + is*2 + 0];
      h_square_reduce[ti*2 + 1] += h_square[ti*256*2 + is*2 + 1];
      h_doubleTriangle_reduce[ti*2 + 0] += h_doubleTriangle[ti*256*2 + is*2 + 0];
      h_doubleTriangle_reduce[ti*2 + 1] += h_doubleTriangle[ti*256*2 + is*2 + 1];
    }
  // Reduce the two star/fish traces over the 16 spin combinations.
  Float *h_star_reduce = (Float*) calloc(2*Nt*2,sizeof(Float));
  Float *h_fish_reduce = (Float*) calloc(2*Nt*2,sizeof(Float));
  if(h_star_reduce == NULL || h_fish_reduce == NULL) ABORT("Error allocating memory for reduction\n");
  Float *h_star_trtr = (Float*) calloc(Nt*2,sizeof(Float));
  Float *h_fish_trtr = (Float*) calloc(Nt*2,sizeof(Float));
  // Fix: these two allocations were previously used without a NULL check.
  if(h_star_trtr == NULL || h_fish_trtr == NULL) ABORT("Error allocating memory for reduction\n");
  for(int ti = 0 ; ti < Nt ; ti++)
    for(int is = 0 ; is < 16 ; is++)
      for(int tr = 0 ; tr < 2 ; tr++){
        h_star_reduce[tr*Nt*2 + ti*2 + 0] += h_star[tr*Nt*16*2 + ti*16*2 + is*2 + 0];
        h_star_reduce[tr*Nt*2 + ti*2 + 1] += h_star[tr*Nt*16*2 + ti*16*2 + is*2 + 1];
        h_fish_reduce[tr*Nt*2 + ti*2 + 0] += h_fish[tr*Nt*16*2 + ti*16*2 + is*2 + 0];
        h_fish_reduce[tr*Nt*2 + ti*2 + 1] += h_fish[tr*Nt*16*2 + ti*16*2 + is*2 + 1];
      }
  // Complex product trace0 * trace1 for star and fish.
  for(int ti = 0 ; ti < Nt ; ti++){
    h_star_trtr[ti*2+0] = h_star_reduce[0*Nt*2 + ti*2 + 0]*h_star_reduce[1*Nt*2 + ti*2 + 0] - h_star_reduce[0*Nt*2 + ti*2 + 1]*h_star_reduce[1*Nt*2 + ti*2 + 1];
    h_star_trtr[ti*2+1] = h_star_reduce[0*Nt*2 + ti*2 + 0]*h_star_reduce[1*Nt*2 + ti*2 + 1] + h_star_reduce[0*Nt*2 + ti*2 + 1]*h_star_reduce[1*Nt*2 + ti*2 + 0];
    h_fish_trtr[ti*2+0] = h_fish_reduce[0*Nt*2 + ti*2 + 0]*h_fish_reduce[1*Nt*2 + ti*2 + 0] - h_fish_reduce[0*Nt*2 + ti*2 + 1]*h_fish_reduce[1*Nt*2 + ti*2 + 1];
    h_fish_trtr[ti*2+1] = h_fish_reduce[0*Nt*2 + ti*2 + 0]*h_fish_reduce[1*Nt*2 + ti*2 + 1] + h_fish_reduce[0*Nt*2 + ti*2 + 1]*h_fish_reduce[1*Nt*2 + ti*2 + 0];
  }
  memset(corr, 0, Nt*5*2*sizeof(Float)); // 5 because we have 5 diagrams
  /*
  for(int ti = 0 ; ti < Nt ; ti++){
    corr[ti*2+0] = - (2.*h_square_reduce[ti*2 + 0] - 2.*h_doubleTriangle_reduce[ti*2 + 0] + h_star_trtr[ti*2 + 0] - h_fish_trtr[ti*2 + 0]);
    corr[ti*2+1] = - (2.*h_square_reduce[ti*2 + 1] - 2.*h_doubleTriangle_reduce[ti*2 + 1] + h_star_trtr[ti*2 + 1] - h_fish_trtr[ti*2 + 1]);
  }
  */
  // Store each diagram separately.
  for(int ti = 0 ; ti < Nt ; ti++){
    corr[ti*5*2 + 0*2 +0] = -2.*h_square_reduce[ti*2 + 0];
    corr[ti*5*2 + 0*2 +1] = -2.*h_square_reduce[ti*2 + 1];
    corr[ti*5*2 + 1*2 +0] = 2.*h_doubleTriangle_reduce[ti*2 + 0];
    corr[ti*5*2 + 1*2 +1] = 2.*h_doubleTriangle_reduce[ti*2 + 1];
    corr[ti*5*2 + 2*2 +0] = -h_star_trtr[ti*2 + 0];
    corr[ti*5*2 + 2*2 +1] = -h_star_trtr[ti*2 + 1];
    corr[ti*5*2 + 3*2 +0] = h_fish_trtr[ti*2 + 0];
    corr[ti*5*2 + 3*2 +1] = h_fish_trtr[ti*2 + 1];
    corr[ti*5*2 + 4*2 +0] = 0.; // for I=1 there are only 4 diagrams
    corr[ti*5*2 + 4*2 +1] = 0.;
  }
  // Release host and device resources.
  free(h_star_trtr);
  free(h_fish_trtr);
  free(h_star_reduce);
  free(h_fish_reduce);
  free(h_square_reduce);
  free(h_doubleTriangle_reduce);
  cudaFree(tmp1);
  cudaFree(tmp2);
  cudaFree(d_square);
  cudaFree(d_doubleTriangle);
  cudaFree(d_star);
  cudaFree(d_fish);
  CHECK_CUDA_ERROR();
  free(h_square);
  free(h_doubleTriangle);
  free(h_star);
  free(h_fish);
}
//=========================================================//
template<typename Float2, typename Float>
// Host driver for the I=0 pi-pi contraction (CUDA build).
// Launches the square, vertical/horizontal double-triangle and star/fish
// diagram kernels for all Nt sink times, reduces the per-spin-combination
// partial sums on the host, forms the trace*trace products for star and
// fish, and writes the five diagrams into corr[t*5*2 + diagram*2 + re/im].
// Preconditions: run_CopyConstantsPiPi() must have been called; corr must
// hold Nt*5*2 Floats. Works only for NSIZE=100 eigenvectors (see #define).
static void calculate_pipi_kernel_I0(cudaTextureObject_t texProp, cudaTextureObject_t texPropDiag, cudaTextureObject_t texMomP1, cudaTextureObject_t texMomP2, cudaTextureObject_t texMomP3, cudaTextureObject_t texMomP4, int tf, int Nt, Float* corr){
  if(!isConstantPiPiPiPiOn)
    ABORT("Error: You need to initialize device constants before calling Kernels\n");
  // One block per (timeslice, spin combination) pair.
  int numBlocks_square = Nt * 256; // 256 non-zero spin combinations
  int numBlocks_doubleTriangle = Nt * 256;
  int numBlocks_doubleTriangle_hor = Nt * 256;
  int numBlocks_star = Nt * 16; // 16 non-zero spin combinations
  int numBlocks_fish = Nt * 16;
  dim3 blockDim(BLOCK_SIZE,BLOCK_SIZE,1); // 625 threads
  dim3 gridDim_square(numBlocks_square,1,1);
  dim3 gridDim_doubleTriangle(numBlocks_doubleTriangle,1,1);
  dim3 gridDim_doubleTriangle_hor(numBlocks_doubleTriangle_hor,1,1);
  dim3 gridDim_star(numBlocks_star,1,1);
  dim3 gridDim_fish(numBlocks_fish,1,1);
  // Host-side result buffers (one complex number per block; star/fish keep
  // two separate traces, hence the extra factor of 2).
  Float *h_square = NULL;
  Float *h_doubleTriangle = NULL;
  Float *h_doubleTriangle_hor = NULL;
  Float *h_star = NULL;
  Float *h_fish = NULL;
  h_square = (Float*) malloc(numBlocks_square*2*sizeof(Float));
  h_doubleTriangle = (Float*) malloc(numBlocks_doubleTriangle*2*sizeof(Float));
  h_doubleTriangle_hor = (Float*) malloc(numBlocks_doubleTriangle_hor*2*sizeof(Float));
  h_star = (Float*) malloc(numBlocks_star*2*2*sizeof(Float)); // two traces to store
  h_fish = (Float*) malloc(numBlocks_fish*2*2*sizeof(Float)); // two traces to store
  if(h_square == NULL || h_doubleTriangle == NULL || h_doubleTriangle_hor == NULL || h_star == NULL || h_fish == NULL)
    ABORT("Error allocating memory\n");
  // Matching device-side output buffers.
  Float *d_square = NULL;
  Float *d_doubleTriangle = NULL;
  Float *d_doubleTriangle_hor = NULL;
  Float *d_star = NULL;
  Float *d_fish = NULL;
  cudaMalloc((void**)&d_square, numBlocks_square*2*sizeof(Float));
  cudaMalloc((void**)&d_doubleTriangle, numBlocks_doubleTriangle*2*sizeof(Float));
  cudaMalloc((void**)&d_doubleTriangle_hor, numBlocks_doubleTriangle_hor*2*sizeof(Float));
  cudaMalloc((void**)&d_star, numBlocks_star*2*2*sizeof(Float));
  cudaMalloc((void**)&d_fish, numBlocks_fish*2*2*sizeof(Float));
  CHECK_CUDA_ERROR();
  // Device scratch buffers shared by all kernels; sized for the largest grid.
  Float *tmp1 = NULL;
  Float *tmp2 = NULL;
  cudaMalloc((void**)&tmp1, numBlocks_square*NSIZE*NSIZE*2*sizeof(Float));
  CHECK_CUDA_ERROR();
  cudaMalloc((void**)&tmp2, numBlocks_square*NSIZE*NSIZE*2*sizeof(Float));
  CHECK_CUDA_ERROR();
  //++++
  // Dispatch on precision; the fish diagram reuses the starfish kernel with
  // momenta P3 and P4 swapped.
  if( typeid(Float2) == typeid(float2) ){
    calculate_pipi_square_kernel_float<<<gridDim_square,blockDim>>>((float2*) d_square, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    calculate_pipi_doubleTriangle_kernel_float<<<gridDim_doubleTriangle,blockDim>>>((float2*) d_doubleTriangle, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    calculate_pipi_doubleTriangleHor_kernel_float<<<gridDim_doubleTriangle_hor,blockDim>>>((float2*) d_doubleTriangle_hor, texProp, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    calculate_pipi_starfish_kernel_float<<<gridDim_star,blockDim>>>((float2*) d_star, texProp, texMomP1, texMomP2, texMomP3, texMomP4, tf, (float2*) tmp1, (float2*) tmp2);
    calculate_pipi_starfish_kernel_float<<<gridDim_fish,blockDim>>>((float2*) d_fish, texProp, texMomP1, texMomP2, texMomP4, texMomP3, tf, (float2*) tmp1, (float2*) tmp2);
  }
  else if ( typeid(Float2) == typeid(double2) ){
    calculate_pipi_square_kernel_double<<<gridDim_square,blockDim>>>((double2*) d_square, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    calculate_pipi_doubleTriangle_kernel_double<<<gridDim_doubleTriangle,blockDim>>>((double2*) d_doubleTriangle, texProp, texPropDiag, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    calculate_pipi_doubleTriangleHor_kernel_double<<<gridDim_doubleTriangle_hor,blockDim>>>((double2*) d_doubleTriangle_hor, texProp, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    calculate_pipi_starfish_kernel_double<<<gridDim_star,blockDim>>>((double2*) d_star, texProp, texMomP1, texMomP2, texMomP3, texMomP4, tf, (double2*) tmp1, (double2*) tmp2);
    calculate_pipi_starfish_kernel_double<<<gridDim_fish,blockDim>>>((double2*) d_fish, texProp, texMomP1, texMomP2, texMomP4, texMomP3, tf, (double2*) tmp1, (double2*) tmp2);
  }
  else
    ABORT("Something fishy is happening\n");
  //++++
  // Synchronous copies: these also act as the barrier for the kernel launches.
  cudaMemcpy(h_square, d_square, numBlocks_square*2*sizeof(Float), cudaMemcpyDeviceToHost);
  cudaMemcpy(h_doubleTriangle, d_doubleTriangle, numBlocks_doubleTriangle*2*sizeof(Float), cudaMemcpyDeviceToHost);
  cudaMemcpy(h_doubleTriangle_hor, d_doubleTriangle_hor, numBlocks_doubleTriangle_hor*2*sizeof(Float), cudaMemcpyDeviceToHost);
  cudaMemcpy(h_star, d_star, numBlocks_star*2*2*sizeof(Float), cudaMemcpyDeviceToHost);
  cudaMemcpy(h_fish, d_fish, numBlocks_fish*2*2*sizeof(Float), cudaMemcpyDeviceToHost);
  CHECK_CUDA_ERROR();
  // Reduce over the 256 spin combinations per timeslice.
  Float *h_square_reduce = (Float*) calloc(Nt*2,sizeof(Float));
  Float *h_doubleTriangle_reduce = (Float*) calloc(Nt*2,sizeof(Float));
  Float *h_doubleTriangle_hor_reduce = (Float*) calloc(Nt*2,sizeof(Float));
  if(h_square_reduce == NULL || h_doubleTriangle_reduce == NULL || h_doubleTriangle_hor_reduce == NULL)ABORT("Error allocating memory for reduction\n");
  for(int ti = 0 ; ti < Nt ; ti++)
    for(int is = 0 ; is < 256 ; is++){
      h_square_reduce[ti*2 + 0] += h_square[ti*256*2 + is*2 + 0];
      h_square_reduce[ti*2 + 1] += h_square[ti*256*2 + is*2 + 1];
      h_doubleTriangle_reduce[ti*2 + 0] += h_doubleTriangle[ti*256*2 + is*2 + 0];
      h_doubleTriangle_reduce[ti*2 + 1] += h_doubleTriangle[ti*256*2 + is*2 + 1];
      h_doubleTriangle_hor_reduce[ti*2 + 0] += h_doubleTriangle_hor[ti*256*2 + is*2 + 0];
      h_doubleTriangle_hor_reduce[ti*2 + 1] += h_doubleTriangle_hor[ti*256*2 + is*2 + 1];
    }
  // Reduce the two star/fish traces over the 16 spin combinations.
  Float *h_star_reduce = (Float*) calloc(2*Nt*2,sizeof(Float));
  Float *h_fish_reduce = (Float*) calloc(2*Nt*2,sizeof(Float));
  if(h_star_reduce == NULL || h_fish_reduce == NULL) ABORT("Error allocating memory for reduction\n");
  Float *h_star_trtr = (Float*) calloc(Nt*2,sizeof(Float));
  Float *h_fish_trtr = (Float*) calloc(Nt*2,sizeof(Float));
  // Fix: these two allocations were previously used without a NULL check.
  if(h_star_trtr == NULL || h_fish_trtr == NULL) ABORT("Error allocating memory for reduction\n");
  for(int ti = 0 ; ti < Nt ; ti++)
    for(int is = 0 ; is < 16 ; is++)
      for(int tr = 0 ; tr < 2 ; tr++){
        h_star_reduce[tr*Nt*2 + ti*2 + 0] += h_star[tr*Nt*16*2 + ti*16*2 + is*2 + 0];
        h_star_reduce[tr*Nt*2 + ti*2 + 1] += h_star[tr*Nt*16*2 + ti*16*2 + is*2 + 1];
        h_fish_reduce[tr*Nt*2 + ti*2 + 0] += h_fish[tr*Nt*16*2 + ti*16*2 + is*2 + 0];
        h_fish_reduce[tr*Nt*2 + ti*2 + 1] += h_fish[tr*Nt*16*2 + ti*16*2 + is*2 + 1];
      }
  // Complex product trace0 * trace1 for star and fish.
  for(int ti = 0 ; ti < Nt ; ti++){
    h_star_trtr[ti*2+0] = h_star_reduce[0*Nt*2 + ti*2 + 0]*h_star_reduce[1*Nt*2 + ti*2 + 0] - h_star_reduce[0*Nt*2 + ti*2 + 1]*h_star_reduce[1*Nt*2 + ti*2 + 1];
    h_star_trtr[ti*2+1] = h_star_reduce[0*Nt*2 + ti*2 + 0]*h_star_reduce[1*Nt*2 + ti*2 + 1] + h_star_reduce[0*Nt*2 + ti*2 + 1]*h_star_reduce[1*Nt*2 + ti*2 + 0];
    h_fish_trtr[ti*2+0] = h_fish_reduce[0*Nt*2 + ti*2 + 0]*h_fish_reduce[1*Nt*2 + ti*2 + 0] - h_fish_reduce[0*Nt*2 + ti*2 + 1]*h_fish_reduce[1*Nt*2 + ti*2 + 1];
    h_fish_trtr[ti*2+1] = h_fish_reduce[0*Nt*2 + ti*2 + 0]*h_fish_reduce[1*Nt*2 + ti*2 + 1] + h_fish_reduce[0*Nt*2 + ti*2 + 1]*h_fish_reduce[1*Nt*2 + ti*2 + 0];
  }
  memset(corr, 0, Nt*5*2*sizeof(Float));
  /*
  for(int ti = 0 ; ti < Nt ; ti++){
    corr[ti*2+0] = (-1./3.)*h_square_reduce[ti*2 + 0] - (1./3.)*h_doubleTriangle_reduce[ti*2 + 0] - (5./3.)*h_doubleTriangle_hor_reduce[ti*2 + 0] + h_star_trtr[ti*2 + 0] + h_fish_trtr[ti*2 + 0];
    corr[ti*2+1] = (-1./3.)*h_square_reduce[ti*2 + 1] - (1./3.)*h_doubleTriangle_reduce[ti*2 + 1] - (5./3.)*h_doubleTriangle_hor_reduce[ti*2 + 1] + h_star_trtr[ti*2 + 1] + h_fish_trtr[ti*2 + 1];
  }
  */
  // Store each diagram separately (I=0 Clebsch-Gordan factors applied here).
  for(int ti = 0 ; ti < Nt ; ti++){
    corr[ti*5*2 + 0*2 +0] = (-1./3.)*h_square_reduce[ti*2 + 0];
    corr[ti*5*2 + 0*2 +1] = (-1./3.)*h_square_reduce[ti*2 + 1];
    corr[ti*5*2 + 1*2 +0] = -(1./3.)*h_doubleTriangle_reduce[ti*2 + 0];
    corr[ti*5*2 + 1*2 +1] = -(1./3.)*h_doubleTriangle_reduce[ti*2 + 1];
    corr[ti*5*2 + 2*2 +0] = h_star_trtr[ti*2 + 0];
    corr[ti*5*2 + 2*2 +1] = h_star_trtr[ti*2 + 1];
    corr[ti*5*2 + 3*2 +0] = h_fish_trtr[ti*2 + 0];
    corr[ti*5*2 + 3*2 +1] = h_fish_trtr[ti*2 + 1];
    corr[ti*5*2 + 4*2 +0] = -(5./3.)*h_doubleTriangle_hor_reduce[ti*2 + 0];
    corr[ti*5*2 + 4*2 +1] = -(5./3.)*h_doubleTriangle_hor_reduce[ti*2 + 1];
  }
  // Release host and device resources.
  free(h_star_trtr);
  free(h_fish_trtr);
  free(h_star_reduce);
  free(h_fish_reduce);
  free(h_square_reduce);
  free(h_doubleTriangle_reduce);
  free(h_doubleTriangle_hor_reduce);
  cudaFree(tmp1);
  cudaFree(tmp2);
  cudaFree(d_square);
  cudaFree(d_doubleTriangle);
  cudaFree(d_doubleTriangle_hor);
  cudaFree(d_star);
  cudaFree(d_fish);
  CHECK_CUDA_ERROR();
  free(h_square);
  free(h_doubleTriangle);
  free(h_doubleTriangle_hor);
  free(h_star);
  free(h_fish);
}
//===================================================//
// Dispatches the I=0 pi-pi contraction to the kernel instantiation that
// matches the requested floating-point precision (SINGLE or DOUBLE).
void contract::run_ContractPiPi_I0(cudaTextureObject_t texProp, cudaTextureObject_t texPropDiag, cudaTextureObject_t texMomP1, cudaTextureObject_t texMomP2, cudaTextureObject_t texMomP3, cudaTextureObject_t texMomP4, int tf, int Nt, void* corr, PRECISION prec){
  switch(prec){
  case SINGLE:
    calculate_pipi_kernel_I0<float2,float>(texProp,texPropDiag,texMomP1,texMomP2,texMomP3,texMomP4,tf,Nt,(float*) corr);
    break;
  case DOUBLE:
    calculate_pipi_kernel_I0<double2,double>(texProp,texPropDiag,texMomP1,texMomP2,texMomP3,texMomP4,tf,Nt,(double*) corr);
    break;
  default:
    ABORT("Error: this precision in not implemented");
  }
}
//===================================================//
// Dispatches the pi-pi contraction to the kernel instantiation that
// matches the requested floating-point precision (SINGLE or DOUBLE).
void contract::run_ContractPiPi(cudaTextureObject_t texProp, cudaTextureObject_t texPropDiag, cudaTextureObject_t texMomP1, cudaTextureObject_t texMomP2, cudaTextureObject_t texMomP3, cudaTextureObject_t texMomP4, int tf, int Nt, void* corr, PRECISION prec){
  switch(prec){
  case SINGLE:
    calculate_pipi_kernel<float2,float>(texProp,texPropDiag,texMomP1,texMomP2,texMomP3,texMomP4,tf,Nt,(float*) corr);
    break;
  case DOUBLE:
    calculate_pipi_kernel<double2,double>(texProp,texPropDiag,texMomP1,texMomP2,texMomP3,texMomP4,tf,Nt,(double*) corr);
    break;
  default:
    ABORT("Error: this precision in not implemented");
  }
}
//===================================================//
// Copies the pi-pi constants to the device exactly once; repeated calls
// only emit a warning and do nothing.
void contract::run_CopyConstantsPiPi(){
  if(!isConstantPiPiPiPiOn){
    copy_constants_pipi();
    isConstantPiPiPiPiOn = true;
    return;
  }
  WARNING("Warning: Copy constants for pi-pi again will be skipped\n");
}
//==================================================//
|
4325425cb4503432569176bf053eb185abe9ab0b.hip | // !!! This is a file automatically generated by hipify!!!
#include <errno.h>
#include <error.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define DEVICE_NUMBER (0)
//#define USE_SHARED
#define SHARED_SIZE (8192) // 32kbytes with int
#define SPIN_DURATION (500000000)
// Experiment configuration plus all host- and device-side buffers and
// results. "target*" members are device pointers, "host*" members live
// on the host.
typedef struct {
uint32_t nofThreads; // threads per block used for the launch (set to 1 in main)
uint32_t nofBlocks; // blocks in the launch grid (set to 1 in main)
int32_t nof_repetitions; // measured repetitions (warm-up iteration excluded)
size_t data_size; // working-set size in bytes
size_t buffer_length; // data_size / sizeof(uint32_t)
uint32_t *targetMeasOH; // device: measured per-iteration timing overhead
uint32_t hostMeasOH; // host copy of *targetMeasOH
uint32_t *hostBuffer; // host: shuffled pointer-chasing array
uint32_t *targetBuffer; // device copy of hostBuffer
uint64_t *target_realSum; // device: checksum accumulated by the walk
uint64_t host_realSum; // host copy of *target_realSum
clock_t *target_times; // device: per-repetition average access times
clock_t *host_times; // host copy of target_times
FILE *fd; // output JSON file handle
} param_t;
// Prints a message and returns zero if the given value is not hipSuccess
#define CheckCUDAError(val) (InternalCheckCUDAError((val), #val, __FILE__, __LINE__))
// Called internally by CheckCUDAError
// Reports a CUDA/HIP error with its source location; returns 0 on
// success and -1 on any error so callers can test the result directly.
static int InternalCheckCUDAError(hipError_t result, const char *fn,
const char *file, int line) {
  if (result != hipSuccess) {
    printf("CUDA error %d in %s, line %d (%s): %s\n", (int) result, file, line,
fn, hipGetErrorString(result));
    return -1;
  }
  return 0;
}
#ifdef USE_SHARED
// Reads the device's global nanosecond timer via the PTX %globaltimer
// special register (NVPTX inline asm; only compiled under USE_SHARED).
static __device__ inline uint64_t GlobalTimer64(void) {
volatile uint64_t val;
asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(val));
// Fix: previously returned the undeclared identifier 'first_reading',
// which would not compile when USE_SHARED is defined. Return the value
// actually read from the timer register.
return val;
}
#endif
/*! \brief Create random 32bit number
* \return returns random 32bit number
* Uses rand() function to create a random 32 bit number using two calls
*/
/*! \brief Create random 32bit number from two rand() calls
 * \return returns random 32bit number
 * rand() yields at least 15 random bits (RAND_MAX >= 32767), so two
 * calls are combined to cover 32 bits.
 */
static uint32_t random32(void){
// Fix: the old expression rand() ^ (rand() << 15) shifted a signed int
// (up to RAND_MAX, 2^31-1 on glibc) left by 15 bits, which is signed
// overflow (undefined behavior). It also left the evaluation order of
// the two rand() calls unspecified, harming reproducibility. Evaluate
// in a defined order and shift an unsigned value instead.
uint32_t lo = (uint32_t)rand();
uint32_t hi = (uint32_t)rand();
return lo ^ (hi << 15);
}
/*! \brief Create a randomized array for random walks
* \param buffer Pointer to allocated memory segment
* \param nofElem Number of elements in array
* \return returns error
*/
static int createShuffledArray(uint32_t * buffer, size_t nofElem){
// Seed the C RNG from the wall clock so each run gets a new permutation.
srand(time(0));
// Build the identity cycle: every element points at its successor and
// the last element wraps back to index 0.
for(uint32_t i = 0; i< nofElem-1; i++){
buffer[i] = i+1;
}
buffer[nofElem-1] = 0;
// Randomize the walk by rewiring three links at a time.
// NOTE(review): this presumably keeps the array a single cycle (so the
// pointer chase in randomWalk() visits every element) — verify; the
// two 'continue' guards skip degenerate picks that would break a link.
for (uint32_t i = 0; i<nofElem;i++){
uint32_t rndi, tmp1, tmp2, tmp3;
rndi = random32()%nofElem;
if (rndi == i) continue;
tmp1 = buffer[i];
tmp2 = buffer[rndi];
tmp3 = buffer[tmp2];
if (i== tmp2) continue;
// Reassign links
buffer[i] = tmp2;
buffer[rndi] = tmp3;
buffer[tmp2] = tmp1;
}
return 0;
}
// Measures the average per-iteration cost of the timing loop itself
// (clock64 + loop/add arithmetic) so randomWalk() can subtract it from
// its measurements. Launched <<<1,1>>>; writes the per-iteration
// overhead to params.targetMeasOH and the loop checksum to
// params.target_realSum.
static __global__ void getMeasurementOverhead(param_t params) {
long long int start, stop;
// Fix: 'sum' was accumulated below without ever being initialized,
// which is undefined behavior and made *params.target_realSum garbage.
uint64_t sum = 0;
start = clock64();
for(int j = 0; j < params.buffer_length; j++){
sum += j;
}
stop = clock64();
*params.targetMeasOH = ((unsigned int)(stop-start))/params.buffer_length;
*params.target_realSum = sum;
}
#ifdef USE_SHARED
// Uses 8192 bytes of statically-defined shared memory.
// Touches a statically allocated shared-memory array so the kernel
// occupies SHARED_SIZE*4 bytes of the SM's shared memory. Each thread
// writes only its own contiguous slice, so no barrier is needed here.
// NOTE(review): assumes blockDim.x divides SHARED_SIZE evenly; any
// remainder elements are never written — confirm at launch sites.
static __device__ uint32_t UseSharedMemory(void) {
__shared__ uint32_t shared_mem_arr[SHARED_SIZE];
uint32_t num_threads, elts_per_thread, i;
num_threads = blockDim.x;
elts_per_thread = SHARED_SIZE / num_threads;
for (i = 0; i < elts_per_thread; i++) {
shared_mem_arr[threadIdx.x * elts_per_thread + i] = threadIdx.x;
}
return shared_mem_arr[threadIdx.x * elts_per_thread];
}
// Spins on the device until spin_duration nanoseconds have elapsed.
// Busy-waits for spin_duration nanoseconds while holding shared memory,
// acting as a resource contender for the measurement kernel.
// shared_mem_res is never read afterwards; presumably it exists so the
// compiler cannot eliminate the shared-memory allocation — verify.
static __global__ void spinSHM(uint64_t spin_duration) {
uint32_t shared_mem_res;
uint64_t start_time = GlobalTimer64();
shared_mem_res = UseSharedMemory();
while ((GlobalTimer64() - start_time) < spin_duration) {
continue;
}
}
#endif
// Pointer-chasing random walk over params.targetBuffer. Only thread
// (0,0) runs, so each memory access is fully serialized and its latency
// is measurable. For every repetition the average access time (minus
// the pre-measured loop overhead) is written to params.target_times;
// the chase checksum goes to params.target_realSum.
static __global__ void randomWalk(param_t params) {
uint32_t current;
long long int time_start, time_end;
unsigned int time_acc;
// Fix: 'sum' was read by the warm-up loop below without being
// initialized (undefined behavior); start the checksum at zero.
uint64_t sum = 0;
unsigned int oh = *params.targetMeasOH;
// Only a single thread participates; everyone else exits immediately.
if (blockIdx.x != 0) return;
if (threadIdx.x != 0) return;
#ifdef USE_SHARED
uint32_t shared_mem_res;
shared_mem_res = UseSharedMemory();
#endif
// Warm up data cache
for(int i = 0; i < params.buffer_length; i++){
sum += params.targetBuffer[i];
}
// Run experiment multiple times. First iteration (-1) is to warm up icache
for (int i = -1; i < params.nof_repetitions; i++){
sum = 0;
time_acc = 0;
current = 0;
time_start = clock64();
// Follow the shuffled links; each load depends on the previous one.
for(int j = 0; j < params.buffer_length; j++){
current = params.targetBuffer[current];
sum += current;
}
time_end = clock64();
// Truncation to 32 bits keeps the low bits of the cycle delta.
time_acc = (unsigned int) time_end - time_start;
*params.target_realSum = sum;
// Do not write time for warm up iteration
if (i>=0){
// Average cycles per element, minus the measured loop overhead.
params.target_times[i] = (time_acc/(params.buffer_length))-oh;
}
}
}
// Allocates and fills every host and device buffer described by *params
// and uploads the shuffled walk array to the device.
// Returns 0 on success and -1 on failure (callers test for < 0).
static int initializeTest(param_t *params){
//allocate buffer
params->hostBuffer = NULL;
params->hostBuffer = (uint32_t *) malloc(params->buffer_length*sizeof(uint32_t));
if (!params->hostBuffer) {
perror("Failed allocating host buffer: ");
return -1;
}
// Fix: this path used to return EXIT_FAILURE (1), which the caller's
// '< 0' check silently ignored; report failure as -1 like every other
// error path in this function.
if (createShuffledArray(params->hostBuffer, params->buffer_length) != 0) return -1;
//allocate device random buffer
if (CheckCUDAError(hipMalloc(&params->targetBuffer, \
params->buffer_length*sizeof(uint32_t)))) return -1;
if (CheckCUDAError(hipMemcpy(params->targetBuffer, \
params->hostBuffer, \
params->buffer_length*sizeof(uint32_t), \
hipMemcpyHostToDevice))) return -1;
//allocate device times
if (CheckCUDAError(hipMalloc(&params->target_times, \
params->nof_repetitions*sizeof(clock_t)))) return -1;
// Allocate device accumulator
if (CheckCUDAError(hipMalloc(&params->target_realSum, \
sizeof(uint64_t)))) return -1;
// Allocate device measOH
if (CheckCUDAError(hipMalloc(&params->targetMeasOH, \
sizeof(uint32_t)))) return -1;
//allocate host times
params->host_times = NULL;
params->host_times = (clock_t *) malloc(params->nof_repetitions*sizeof(clock_t));
if (!params->host_times) {
perror("Failed allocating host_times buffer: ");
return -1;
}
memset(params->host_times,0, params->nof_repetitions*sizeof(clock_t));
return 0;
}
// Runs the measurement: the overhead kernel first, then the random-walk
// kernel (optionally alongside shared-memory spinner kernels when
// USE_SHARED is defined), then copies all results back to the host.
// Returns 0 on success, -1 on any CUDA error.
static int runTest(param_t *params){
hipProfilerStart();
// Measure the timing-loop overhead first; randomWalk subtracts it.
hipLaunchKernelGGL(( getMeasurementOverhead), dim3(1),dim3(1), 0, 0, *params);
#ifdef USE_SHARED
hipStream_t stream[5];
for (int i = 0; i < 5; ++i)
hipStreamCreate(&stream[i]);
// Wait for the overhead kernel before launching the contenders.
if (CheckCUDAError(hipDeviceSynchronize())) return -1;
// Launch spinners concurrently with the walk so they compete for
// shared memory on the SM.
hipLaunchKernelGGL(( spinSHM), dim3(1),dim3(1),0,stream[1], SPIN_DURATION);
hipLaunchKernelGGL(( spinSHM), dim3(1),dim3(1),0,stream[2], SPIN_DURATION);
hipLaunchKernelGGL(( spinSHM), dim3(1),dim3(1),0,stream[3], SPIN_DURATION);
//spinSHM<<<1,1,0,stream[4]>>>(SPIN_DURATION);
hipLaunchKernelGGL(( randomWalk), dim3(1),dim3(1),0,stream[0], *params);
#else
hipLaunchKernelGGL(( randomWalk), dim3(1),dim3(1), 0, 0, *params);
#endif
// Synchronize with device
if (CheckCUDAError(hipDeviceSynchronize())) return -1;
#ifdef USE_SHARED
for (int i = 0; i < 5; ++i)
hipStreamDestroy(stream[i]);
#endif
// Copyback times
if (CheckCUDAError(hipMemcpy(params->host_times, \
params->target_times, \
params->nof_repetitions*sizeof(clock_t), \
hipMemcpyDeviceToHost))) return -1;
// Copyback sum
params->host_realSum=0;
if (CheckCUDAError(hipMemcpy(&params->host_realSum, \
params->target_realSum, \
sizeof(uint64_t), \
hipMemcpyDeviceToHost))) return -1;
// Copyback target meas oh
params->hostMeasOH=0;
if (CheckCUDAError(hipMemcpy(&params->hostMeasOH, \
params->targetMeasOH, \
sizeof(uint32_t), \
hipMemcpyDeviceToHost))) return -1;
hipProfilerStop();
return 0;
}
// Serializes device properties, the run configuration and the
// per-repetition access times as a JSON object to params->fd, then
// closes the file. Returns 0 on success, -1 on any write/CUDA error.
static int writeResults(param_t *params){
if (fprintf(params->fd,"{\n") < 0 ) return -1;
// Device/driver information, for reproducibility of the measurement.
hipDeviceProp_t deviceProp;
if (CheckCUDAError(hipGetDeviceProperties(&deviceProp, DEVICE_NUMBER))) return -1;
int driverVersion = 0;
if (CheckCUDAError(hipDriverGetVersion(&driverVersion))) return -1;
int runtimeVersion = 0;
if (CheckCUDAError(hipRuntimeGetVersion(&runtimeVersion))) return -1;
if (fprintf(params->fd,"\"driverVer\": \"%d\",\n", driverVersion) < 0 ) return -1;
if (fprintf(params->fd,"\"runTimeVer\": \"%d\",\n", runtimeVersion) < 0 ) return -1;
if (fprintf(params->fd,"\"clockRate\": \"%d\",\n", deviceProp.clockRate) < 0 ) return -1;
if (fprintf(params->fd,"\"globalL1CacheSupported\": \"%d\",\n", deviceProp.globalL1CacheSupported) < 0 ) return -1;
if (fprintf(params->fd,"\"localL1CacheSupported\": \"%d\",\n", deviceProp.localL1CacheSupported) < 0 ) return -1;
if (fprintf(params->fd,"\"l2CacheSize\": \"%d\",\n", deviceProp.l2CacheSize) < 0 ) return -1;
if (fprintf(params->fd,"\"memoryBusWidth\": \"%d\",\n", deviceProp.memoryBusWidth) < 0 ) return -1;
if (fprintf(params->fd,"\"memoryClockRate\": \"%d\",\n", deviceProp.memoryClockRate) < 0 ) return -1;
if (fprintf(params->fd,"\"multiProcessorCount\": \"%d\",\n", deviceProp.multiProcessorCount) < 0 ) return -1;
if (fprintf(params->fd,"\"regsPerBlock\": \"%d\",\n", deviceProp.regsPerBlock) < 0 ) return -1;
if (fprintf(params->fd,"\"regsPerMultiprocessor\": \"%d\",\n", deviceProp.regsPerMultiprocessor) < 0 ) return -1;
if (fprintf(params->fd,"\"sharedMemPerBlock\": \"%zu\",\n", deviceProp.sharedMemPerBlock) < 0 ) return -1;
if (fprintf(params->fd,"\"sharedMemPerMultiprocessor\": \"%zu\",\n", deviceProp.sharedMemPerMultiprocessor) < 0 ) return -1;
if (fprintf(params->fd,"\"warpSize\": \"%d\",\n", deviceProp.warpSize) < 0 ) return -1;
hipFuncCache_t config;
if (CheckCUDAError(hipDeviceGetCacheConfig ( &config ) )) return -1;
if (fprintf(params->fd,"\"cacheConfig\": \"%d\",\n", config) < 0 ) return -1;
// Experiment configuration and sanity values. exp_sum is the expected
// checksum of a full walk: sum of 0..buffer_length-1.
if (fprintf(params->fd,"\"nofThreads\": \"%u\",\n", params->nofThreads) < 0 ) return -1;
if (fprintf(params->fd,"\"nofBlocks\": \"%u\",\n", params->nofBlocks) < 0 ) return -1;
if (fprintf(params->fd,"\"nof_repetitions\": \"%d\",\n", params->nof_repetitions) < 0 ) return -1;
if (fprintf(params->fd,"\"data_size\": \"%zu\",\n", params->data_size) < 0 ) return -1;
if (fprintf(params->fd,"\"buffer_length\": \"%zu\",\n", params->buffer_length) < 0 ) return -1;
if (fprintf(params->fd,"\"real_sum\": \"%llu\",\n", (unsigned long long)params->host_realSum) < 0 ) return -1;
if (fprintf(params->fd,"\"exp_sum\": \"%lu\",\n", ((params->buffer_length-1)*params->buffer_length)/2) < 0 ) return -1;
if (fprintf(params->fd,"\"measOH\": \"%u\",\n", params->hostMeasOH) < 0 ) return -1;
// Per-repetition times as a JSON array; the last element closes it.
if (fprintf(params->fd,"\"times\":[\n") < 0 ) return -1;
for (int32_t i = 0; i < params->nof_repetitions-1; i++){
if (fprintf(params->fd,"\"%Lf\",\n",(long double)params->host_times[i]) < 0 ) return -1;
}
if (fprintf(params->fd,"\"%Lf\"]\n}", (long double)params->host_times[params->nof_repetitions-1]) < 0 ) return -1;
if (fclose(params->fd) < 0) return -1;
return 0;
}
// Releases every buffer allocated by initializeTest(); always succeeds.
static int cleanUp(param_t *params){
// Host-side allocations first.
free(params->hostBuffer);
free(params->host_times);
// Then the device-side allocations.
hipFree(params->targetBuffer);
hipFree(params->target_times);
hipFree(params->target_realSum);
hipFree(params->targetMeasOH);
return 0;
}
// Prints command-line usage. Argument order must match main():
// repetitions, buffer size in KB, cache mode, output file.
static void PrintUsage(const char *name) {
// Fix: the two adjacent string literals were concatenated without a
// separating space, and argv[1] is a repetition count (see main), not
// an interval count.
printf("Usage: %s <# of repetitions> <size in KB> <cache mode> "
"<output JSON file name>\n", name);
}
// Entry point: parses <repetitions> <size KB> <cache mode> <output file>,
// configures the device cache, runs the pointer-chasing benchmark and
// writes a JSON report. Returns 0 on success, nonzero on failure.
int main(int argc, char **argv) {
if (argc != 5) {
PrintUsage(argv[0]);
return 1;
}
param_t params;
// Parse input parameter
int nof_repetitions = atoi(argv[1]);
if (nof_repetitions <= 0) {
// Fix: the message echoed argv[2]; repetitions come from argv[1].
printf("More than 0 repetitions need to be used. Got %s repetitions\n", argv[1]);
return EXIT_FAILURE;
}
int data_size = atoi(argv[2]);
if (data_size <= 0) {
// Fix: the message echoed argv[3]; the size comes from argv[2].
printf("The buffer must be 1 or more KB. Got %s KB\n", argv[2]);
return EXIT_FAILURE;
}
/*
https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html#group__CUDART__DEVICE_1g6c9cc78ca80490386cf593b4baa35a15
hipFuncCachePreferNone: no preference for shared memory or L1 (default)
hipFuncCachePreferShared: prefer larger shared memory and smaller L1 cache
hipFuncCachePreferL1: prefer larger L1 cache and smaller shared memory
hipFuncCachePreferEqual: prefer equal size L1 cache and shared memory
*/
hipFuncCache_t cacheMode = (hipFuncCache_t)atoi(argv[3]);
if (cacheMode < hipFuncCachePreferNone || cacheMode > hipFuncCachePreferEqual) {
printf("cacheMode must be between 0 and 3. Got %s\n", argv[3]);
return EXIT_FAILURE;
}
params.nof_repetitions = nof_repetitions;
params.data_size = data_size*1024;
params.buffer_length = data_size*1024/sizeof(uint32_t);
params.nofBlocks = 1;
params.nofThreads = 1;
// Open the report file up front so configuration errors fail fast.
// writeResults() closes it on success; on error paths process exit
// releases it.
params.fd = NULL;
params.fd = fopen(argv[4],"w");
if (params.fd == NULL) {
perror("Error opening output file:");
return EXIT_FAILURE;
}
// Set CUDA device
if (CheckCUDAError(hipSetDevice(DEVICE_NUMBER))) {
return EXIT_FAILURE;
}
// Set cache mode (device-wide preference plus per-kernel preference)
if (CheckCUDAError(hipDeviceSetCacheConfig(cacheMode))) {
return EXIT_FAILURE;
}
if (CheckCUDAError(hipFuncSetCacheConfig(randomWalk, cacheMode))) {
return EXIT_FAILURE;
}
// Initialize parameters
if (initializeTest(&params) < 0) return EXIT_FAILURE;
// Run test
if (runTest(&params) < 0) return EXIT_FAILURE;
// Write results
if (writeResults(&params) < 0){
// Fix: typo in the user-facing message ("outpufile").
perror("Error while writing output file: ");
return EXIT_FAILURE;
}
// Clean up
if (cleanUp(&params) < 0) return EXIT_FAILURE;
printf("Finished testrun\n");
hipDeviceReset();
return 0;
}
| 4325425cb4503432569176bf053eb185abe9ab0b.cu | #include <errno.h>
#include <error.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#define DEVICE_NUMBER (0)
//#define USE_SHARED
#define SHARED_SIZE (8192) // 32kbytes with int
#define SPIN_DURATION (500000000)
// Experiment configuration plus all host- and device-side buffers and
// results. "target*" members are device pointers, "host*" members live
// on the host.
typedef struct {
uint32_t nofThreads; // threads per block used for the launch (set to 1 in main)
uint32_t nofBlocks; // blocks in the launch grid (set to 1 in main)
int32_t nof_repetitions; // measured repetitions (warm-up iteration excluded)
size_t data_size; // working-set size in bytes
size_t buffer_length; // data_size / sizeof(uint32_t)
uint32_t *targetMeasOH; // device: measured per-iteration timing overhead
uint32_t hostMeasOH; // host copy of *targetMeasOH
uint32_t *hostBuffer; // host: shuffled pointer-chasing array
uint32_t *targetBuffer; // device copy of hostBuffer
uint64_t *target_realSum; // device: checksum accumulated by the walk
uint64_t host_realSum; // host copy of *target_realSum
clock_t *target_times; // device: per-repetition average access times
clock_t *host_times; // host copy of target_times
FILE *fd; // output JSON file handle
} param_t;
// Prints a message and returns zero if the given value is not cudaSuccess
#define CheckCUDAError(val) (InternalCheckCUDAError((val), #val, __FILE__, __LINE__))
// Called internally by CheckCUDAError
// Reports a CUDA error with its source location; returns 0 on success
// and -1 on any error so callers can test the result directly.
static int InternalCheckCUDAError(cudaError_t result, const char *fn,
const char *file, int line) {
  if (result != cudaSuccess) {
    printf("CUDA error %d in %s, line %d (%s): %s\n", (int) result, file, line,
fn, cudaGetErrorString(result));
    return -1;
  }
  return 0;
}
#ifdef USE_SHARED
// Reads the device's global nanosecond timer via the PTX %globaltimer
// special register (only compiled under USE_SHARED).
static __device__ inline uint64_t GlobalTimer64(void) {
volatile uint64_t val;
asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(val));
// Fix: previously returned the undeclared identifier 'first_reading',
// which would not compile when USE_SHARED is defined. Return the value
// actually read from the timer register.
return val;
}
#endif
/*! \brief Create random 32bit number
* \return returns random 32bit number
* Uses rand() function to create a random 32 bit number using two calls
*/
/*! \brief Create random 32bit number from two rand() calls
 * \return returns random 32bit number
 * rand() yields at least 15 random bits (RAND_MAX >= 32767), so two
 * calls are combined to cover 32 bits.
 */
static uint32_t random32(void){
// Fix: the old expression rand() ^ (rand() << 15) shifted a signed int
// (up to RAND_MAX, 2^31-1 on glibc) left by 15 bits, which is signed
// overflow (undefined behavior). It also left the evaluation order of
// the two rand() calls unspecified, harming reproducibility. Evaluate
// in a defined order and shift an unsigned value instead.
uint32_t lo = (uint32_t)rand();
uint32_t hi = (uint32_t)rand();
return lo ^ (hi << 15);
}
/*! \brief Create a randomized array for random walks
* \param buffer Pointer to allocated memory segment
* \param nofElem Number of elements in array
* \return returns error
*/
static int createShuffledArray(uint32_t * buffer, size_t nofElem){
// Seed the C RNG from the wall clock so each run gets a new permutation.
srand(time(0));
// Build the identity cycle: every element points at its successor and
// the last element wraps back to index 0.
for(uint32_t i = 0; i< nofElem-1; i++){
buffer[i] = i+1;
}
buffer[nofElem-1] = 0;
// Randomize the walk by rewiring three links at a time.
// NOTE(review): this presumably keeps the array a single cycle (so the
// pointer chase in randomWalk() visits every element) — verify; the
// two 'continue' guards skip degenerate picks that would break a link.
for (uint32_t i = 0; i<nofElem;i++){
uint32_t rndi, tmp1, tmp2, tmp3;
rndi = random32()%nofElem;
if (rndi == i) continue;
tmp1 = buffer[i];
tmp2 = buffer[rndi];
tmp3 = buffer[tmp2];
if (i== tmp2) continue;
// Reassign links
buffer[i] = tmp2;
buffer[rndi] = tmp3;
buffer[tmp2] = tmp1;
}
return 0;
}
// Measures the average per-iteration cost of the timing loop itself
// (clock64 + loop/add arithmetic) so randomWalk() can subtract it from
// its measurements. Launched <<<1,1>>>; writes the per-iteration
// overhead to params.targetMeasOH and the loop checksum to
// params.target_realSum.
static __global__ void getMeasurementOverhead(param_t params) {
long long int start, stop;
// Fix: 'sum' was accumulated below without ever being initialized,
// which is undefined behavior and made *params.target_realSum garbage.
uint64_t sum = 0;
start = clock64();
for(int j = 0; j < params.buffer_length; j++){
sum += j;
}
stop = clock64();
*params.targetMeasOH = ((unsigned int)(stop-start))/params.buffer_length;
*params.target_realSum = sum;
}
#ifdef USE_SHARED
// Uses 8192 bytes of statically-defined shared memory.
// Touches a statically allocated shared-memory array so the kernel
// occupies SHARED_SIZE*4 bytes of the SM's shared memory. Each thread
// writes only its own contiguous slice, so no barrier is needed here.
// NOTE(review): assumes blockDim.x divides SHARED_SIZE evenly; any
// remainder elements are never written — confirm at launch sites.
static __device__ uint32_t UseSharedMemory(void) {
__shared__ uint32_t shared_mem_arr[SHARED_SIZE];
uint32_t num_threads, elts_per_thread, i;
num_threads = blockDim.x;
elts_per_thread = SHARED_SIZE / num_threads;
for (i = 0; i < elts_per_thread; i++) {
shared_mem_arr[threadIdx.x * elts_per_thread + i] = threadIdx.x;
}
return shared_mem_arr[threadIdx.x * elts_per_thread];
}
// Spins on the device until spin_duration nanoseconds have elapsed.
// Busy-waits for spin_duration nanoseconds while holding shared memory,
// acting as a resource contender for the measurement kernel.
// shared_mem_res is never read afterwards; presumably it exists so the
// compiler cannot eliminate the shared-memory allocation — verify.
static __global__ void spinSHM(uint64_t spin_duration) {
uint32_t shared_mem_res;
uint64_t start_time = GlobalTimer64();
shared_mem_res = UseSharedMemory();
while ((GlobalTimer64() - start_time) < spin_duration) {
continue;
}
}
#endif
// Pointer-chasing random walk over params.targetBuffer. Only thread
// (0,0) runs, so each memory access is fully serialized and its latency
// is measurable. For every repetition the average access time (minus
// the pre-measured loop overhead) is written to params.target_times;
// the chase checksum goes to params.target_realSum.
static __global__ void randomWalk(param_t params) {
uint32_t current;
long long int time_start, time_end;
unsigned int time_acc;
// Fix: 'sum' was read by the warm-up loop below without being
// initialized (undefined behavior); start the checksum at zero.
uint64_t sum = 0;
unsigned int oh = *params.targetMeasOH;
// Only a single thread participates; everyone else exits immediately.
if (blockIdx.x != 0) return;
if (threadIdx.x != 0) return;
#ifdef USE_SHARED
uint32_t shared_mem_res;
shared_mem_res = UseSharedMemory();
#endif
// Warm up data cache
for(int i = 0; i < params.buffer_length; i++){
sum += params.targetBuffer[i];
}
// Run experiment multiple times. First iteration (-1) is to warm up icache
for (int i = -1; i < params.nof_repetitions; i++){
sum = 0;
time_acc = 0;
current = 0;
time_start = clock64();
// Follow the shuffled links; each load depends on the previous one.
for(int j = 0; j < params.buffer_length; j++){
current = params.targetBuffer[current];
sum += current;
}
time_end = clock64();
// Truncation to 32 bits keeps the low bits of the cycle delta.
time_acc = (unsigned int) time_end - time_start;
*params.target_realSum = sum;
// Do not write time for warm up iteration
if (i>=0){
// Average cycles per element, minus the measured loop overhead.
params.target_times[i] = (time_acc/(params.buffer_length))-oh;
}
}
}
// Allocates and fills every host and device buffer described by *params
// and uploads the shuffled walk array to the device.
// Returns 0 on success and -1 on failure (callers test for < 0).
static int initializeTest(param_t *params){
//allocate buffer
params->hostBuffer = NULL;
params->hostBuffer = (uint32_t *) malloc(params->buffer_length*sizeof(uint32_t));
if (!params->hostBuffer) {
perror("Failed allocating host buffer: ");
return -1;
}
// Fix: this path used to return EXIT_FAILURE (1), which the caller's
// '< 0' check silently ignored; report failure as -1 like every other
// error path in this function.
if (createShuffledArray(params->hostBuffer, params->buffer_length) != 0) return -1;
//allocate device random buffer
if (CheckCUDAError(cudaMalloc(&params->targetBuffer, \
params->buffer_length*sizeof(uint32_t)))) return -1;
if (CheckCUDAError(cudaMemcpy(params->targetBuffer, \
params->hostBuffer, \
params->buffer_length*sizeof(uint32_t), \
cudaMemcpyHostToDevice))) return -1;
//allocate device times
if (CheckCUDAError(cudaMalloc(&params->target_times, \
params->nof_repetitions*sizeof(clock_t)))) return -1;
// Allocate device accumulator
if (CheckCUDAError(cudaMalloc(&params->target_realSum, \
sizeof(uint64_t)))) return -1;
// Allocate device measOH
if (CheckCUDAError(cudaMalloc(&params->targetMeasOH, \
sizeof(uint32_t)))) return -1;
//allocate host times
params->host_times = NULL;
params->host_times = (clock_t *) malloc(params->nof_repetitions*sizeof(clock_t));
if (!params->host_times) {
perror("Failed allocating host_times buffer: ");
return -1;
}
memset(params->host_times,0, params->nof_repetitions*sizeof(clock_t));
return 0;
}
// Runs the measurement: the overhead kernel first, then the random-walk
// kernel (optionally alongside shared-memory spinner kernels when
// USE_SHARED is defined), then copies all results back to the host.
// Returns 0 on success, -1 on any CUDA error.
static int runTest(param_t *params){
cudaProfilerStart();
// Measure the timing-loop overhead first; randomWalk subtracts it.
getMeasurementOverhead<<<1,1>>>(*params);
#ifdef USE_SHARED
cudaStream_t stream[5];
for (int i = 0; i < 5; ++i)
cudaStreamCreate(&stream[i]);
// Wait for the overhead kernel before launching the contenders.
if (CheckCUDAError(cudaDeviceSynchronize())) return -1;
// Launch spinners concurrently with the walk so they compete for
// shared memory on the SM.
spinSHM<<<1,1,0,stream[1]>>>(SPIN_DURATION);
spinSHM<<<1,1,0,stream[2]>>>(SPIN_DURATION);
spinSHM<<<1,1,0,stream[3]>>>(SPIN_DURATION);
//spinSHM<<<1,1,0,stream[4]>>>(SPIN_DURATION);
randomWalk<<<1,1,0,stream[0]>>>(*params);
#else
randomWalk<<<1,1>>>(*params);
#endif
// Synchronize with device
if (CheckCUDAError(cudaDeviceSynchronize())) return -1;
#ifdef USE_SHARED
for (int i = 0; i < 5; ++i)
cudaStreamDestroy(stream[i]);
#endif
// Copyback times
if (CheckCUDAError(cudaMemcpy(params->host_times, \
params->target_times, \
params->nof_repetitions*sizeof(clock_t), \
cudaMemcpyDeviceToHost))) return -1;
// Copyback sum
params->host_realSum=0;
if (CheckCUDAError(cudaMemcpy(&params->host_realSum, \
params->target_realSum, \
sizeof(uint64_t), \
cudaMemcpyDeviceToHost))) return -1;
// Copyback target meas oh
params->hostMeasOH=0;
if (CheckCUDAError(cudaMemcpy(&params->hostMeasOH, \
params->targetMeasOH, \
sizeof(uint32_t), \
cudaMemcpyDeviceToHost))) return -1;
cudaProfilerStop();
return 0;
}
// Serializes device properties, the run configuration and the
// per-repetition access times as a JSON object to params->fd, then
// closes the file. Returns 0 on success, -1 on any write/CUDA error.
static int writeResults(param_t *params){
if (fprintf(params->fd,"{\n") < 0 ) return -1;
// Device/driver information, for reproducibility of the measurement.
cudaDeviceProp deviceProp;
if (CheckCUDAError(cudaGetDeviceProperties(&deviceProp, DEVICE_NUMBER))) return -1;
int driverVersion = 0;
if (CheckCUDAError(cudaDriverGetVersion(&driverVersion))) return -1;
int runtimeVersion = 0;
if (CheckCUDAError(cudaRuntimeGetVersion(&runtimeVersion))) return -1;
if (fprintf(params->fd,"\"driverVer\": \"%d\",\n", driverVersion) < 0 ) return -1;
if (fprintf(params->fd,"\"runTimeVer\": \"%d\",\n", runtimeVersion) < 0 ) return -1;
if (fprintf(params->fd,"\"clockRate\": \"%d\",\n", deviceProp.clockRate) < 0 ) return -1;
if (fprintf(params->fd,"\"globalL1CacheSupported\": \"%d\",\n", deviceProp.globalL1CacheSupported) < 0 ) return -1;
if (fprintf(params->fd,"\"localL1CacheSupported\": \"%d\",\n", deviceProp.localL1CacheSupported) < 0 ) return -1;
if (fprintf(params->fd,"\"l2CacheSize\": \"%d\",\n", deviceProp.l2CacheSize) < 0 ) return -1;
if (fprintf(params->fd,"\"memoryBusWidth\": \"%d\",\n", deviceProp.memoryBusWidth) < 0 ) return -1;
if (fprintf(params->fd,"\"memoryClockRate\": \"%d\",\n", deviceProp.memoryClockRate) < 0 ) return -1;
if (fprintf(params->fd,"\"multiProcessorCount\": \"%d\",\n", deviceProp.multiProcessorCount) < 0 ) return -1;
if (fprintf(params->fd,"\"regsPerBlock\": \"%d\",\n", deviceProp.regsPerBlock) < 0 ) return -1;
if (fprintf(params->fd,"\"regsPerMultiprocessor\": \"%d\",\n", deviceProp.regsPerMultiprocessor) < 0 ) return -1;
if (fprintf(params->fd,"\"sharedMemPerBlock\": \"%zu\",\n", deviceProp.sharedMemPerBlock) < 0 ) return -1;
if (fprintf(params->fd,"\"sharedMemPerMultiprocessor\": \"%zu\",\n", deviceProp.sharedMemPerMultiprocessor) < 0 ) return -1;
if (fprintf(params->fd,"\"warpSize\": \"%d\",\n", deviceProp.warpSize) < 0 ) return -1;
cudaFuncCache config;
if (CheckCUDAError(cudaDeviceGetCacheConfig ( &config ) )) return -1;
if (fprintf(params->fd,"\"cacheConfig\": \"%d\",\n", config) < 0 ) return -1;
// Experiment configuration and sanity values. exp_sum is the expected
// checksum of a full walk: sum of 0..buffer_length-1.
if (fprintf(params->fd,"\"nofThreads\": \"%u\",\n", params->nofThreads) < 0 ) return -1;
if (fprintf(params->fd,"\"nofBlocks\": \"%u\",\n", params->nofBlocks) < 0 ) return -1;
if (fprintf(params->fd,"\"nof_repetitions\": \"%d\",\n", params->nof_repetitions) < 0 ) return -1;
if (fprintf(params->fd,"\"data_size\": \"%zu\",\n", params->data_size) < 0 ) return -1;
if (fprintf(params->fd,"\"buffer_length\": \"%zu\",\n", params->buffer_length) < 0 ) return -1;
if (fprintf(params->fd,"\"real_sum\": \"%llu\",\n", (unsigned long long)params->host_realSum) < 0 ) return -1;
if (fprintf(params->fd,"\"exp_sum\": \"%lu\",\n", ((params->buffer_length-1)*params->buffer_length)/2) < 0 ) return -1;
if (fprintf(params->fd,"\"measOH\": \"%u\",\n", params->hostMeasOH) < 0 ) return -1;
// Per-repetition times as a JSON array; the last element closes it.
if (fprintf(params->fd,"\"times\":[\n") < 0 ) return -1;
for (int32_t i = 0; i < params->nof_repetitions-1; i++){
if (fprintf(params->fd,"\"%Lf\",\n",(long double)params->host_times[i]) < 0 ) return -1;
}
if (fprintf(params->fd,"\"%Lf\"]\n}", (long double)params->host_times[params->nof_repetitions-1]) < 0 ) return -1;
if (fclose(params->fd) < 0) return -1;
return 0;
}
// Releases every buffer allocated by initializeTest(); always succeeds.
static int cleanUp(param_t *params){
// Host-side allocations first.
free(params->hostBuffer);
free(params->host_times);
// Then the device-side allocations.
cudaFree(params->targetBuffer);
cudaFree(params->target_times);
cudaFree(params->target_realSum);
cudaFree(params->targetMeasOH);
return 0;
}
// Prints command-line usage. Argument order must match main():
// repetitions, buffer size in KB, cache mode, output file.
static void PrintUsage(const char *name) {
// Fix: the two adjacent string literals were concatenated without a
// separating space, and argv[1] is a repetition count (see main), not
// an interval count.
printf("Usage: %s <# of repetitions> <size in KB> <cache mode> "
"<output JSON file name>\n", name);
}
// Entry point: parses <repetitions> <size KB> <cache mode> <output file>,
// configures the device cache, runs the pointer-chasing benchmark and
// writes a JSON report. Returns 0 on success, nonzero on failure.
int main(int argc, char **argv) {
if (argc != 5) {
PrintUsage(argv[0]);
return 1;
}
param_t params;
// Parse input parameter
int nof_repetitions = atoi(argv[1]);
if (nof_repetitions <= 0) {
// Fix: the message echoed argv[2]; repetitions come from argv[1].
printf("More than 0 repetitions need to be used. Got %s repetitions\n", argv[1]);
return EXIT_FAILURE;
}
int data_size = atoi(argv[2]);
if (data_size <= 0) {
// Fix: the message echoed argv[3]; the size comes from argv[2].
printf("The buffer must be 1 or more KB. Got %s KB\n", argv[2]);
return EXIT_FAILURE;
}
/*
https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html#group__CUDART__DEVICE_1g6c9cc78ca80490386cf593b4baa35a15
cudaFuncCachePreferNone: no preference for shared memory or L1 (default)
cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache
cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory
cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory
*/
cudaFuncCache cacheMode = (cudaFuncCache)atoi(argv[3]);
if (cacheMode < cudaFuncCachePreferNone || cacheMode > cudaFuncCachePreferEqual) {
printf("cacheMode must be between 0 and 3. Got %s\n", argv[3]);
return EXIT_FAILURE;
}
params.nof_repetitions = nof_repetitions;
params.data_size = data_size*1024;
params.buffer_length = data_size*1024/sizeof(uint32_t);
params.nofBlocks = 1;
params.nofThreads = 1;
// Open the report file up front so configuration errors fail fast.
// writeResults() closes it on success; on error paths process exit
// releases it.
params.fd = NULL;
params.fd = fopen(argv[4],"w");
if (params.fd == NULL) {
perror("Error opening output file:");
return EXIT_FAILURE;
}
// Set CUDA device
if (CheckCUDAError(cudaSetDevice(DEVICE_NUMBER))) {
return EXIT_FAILURE;
}
// Set cache mode (device-wide preference plus per-kernel preference)
if (CheckCUDAError(cudaDeviceSetCacheConfig(cacheMode))) {
return EXIT_FAILURE;
}
if (CheckCUDAError(cudaFuncSetCacheConfig(randomWalk, cacheMode))) {
return EXIT_FAILURE;
}
// Initialize parameters
if (initializeTest(&params) < 0) return EXIT_FAILURE;
// Run test
if (runTest(&params) < 0) return EXIT_FAILURE;
// Write results
if (writeResults(&params) < 0){
// Fix: typo in the user-facing message ("outpufile").
perror("Error while writing output file: ");
return EXIT_FAILURE;
}
// Clean up
if (cleanUp(&params) < 0) return EXIT_FAILURE;
printf("Finished testrun\n");
cudaDeviceReset();
return 0;
}
|
991778fe54a64f1a54cda7ea8a50d54188a68846.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "conv1DKernel.h"
#include "conv1DKernelHeaders.cuh"
#define MAX_MASK_SIZE 33
#define TILE_SIZE 512
// Dynamic allocation of constant memory is not allowed in CUDA.
__constant__ double myMask_d[MAX_MASK_SIZE];
// Simple 1D Convolution
// Naive 1D convolution: one output element per thread, every operand
// read straight from global memory. The mask lives in constant memory
// (myMask_d) and is treated as symmetric: myMask_d[half_mask_size] is
// the centre tap and myMask_d[half_mask_size + j] multiplies both the
// left and right neighbour at distance j. Out-of-range neighbours are
// treated as zero (ghost cells).
__global__ void conv1DKernel_basic(const double *input, double *output,
int length, int half_mask_size) {
int tid = threadIdx.x;
int i = tid + blockIdx.x * blockDim.x;
double p{0};
if (i < length) {
p = myMask_d[half_mask_size] * input[i];
for (int j = 1; j <= half_mask_size; j++) {
p += myMask_d[half_mask_size + j] *
(((i - j < 0) ? 0 : input[i - j]) +
((i + j >= length) ? 0 : input[i + j]));
}
// Number of global mem accesses per thread : 1 + halfMaskSize*2 + 1 +
// halfMaskSize = 2 + 3*halfMaskSize So global nb of accesses :
// (2+3*halfMaskSize)*n
output[i] = p;
}
}
// Tiled 1D Convolution
// Tiled 1D convolution: each block stages TILE_SIZE inputs plus a halo
// of 2*half_mask_size elements into static shared memory, then
// convolves entirely from the tile.
// NOTE(review): indexing assumes blockDim.x == TILE_SIZE — confirm at
// the launch site.
__global__ void conv1DKernel_tiled(const double *input, double *output,
int length, int half_mask_size) {
int tid = threadIdx.x;
int i = tid + blockIdx.x * blockDim.x;
// Thread tid loads element (i - half_mask_size), so the tile starts
// with the block's left ghost cells (zero before index 0).
int relative_i = i - half_mask_size;
__shared__ double input_shared[TILE_SIZE + (MAX_MASK_SIZE / 2) * 2];
if (i < length) {
input_shared[tid] = relative_i < 0 ? 0 : input[relative_i];
// Fill the remaining 2*half_mask_size (at the end) using first 2*half_size
// elements
if (tid < 2 * half_mask_size) {
// temp is the size of the input portion within the block
// it's TILE_SIZE (blockDim.x) except for the 'last' portion of the
// input array which will be <=TILE_SIZE
int temp = blockDim.x * (blockIdx.x + 1) < length
? TILE_SIZE
: length - blockDim.x * blockIdx.x;
input_shared[temp + tid] =
temp + tid + blockIdx.x * blockDim.x - half_mask_size > length - 1
? 0
: input[temp + tid + blockIdx.x * blockDim.x - half_mask_size];
}
}
// The whole tile (body + halo) must be staged before any thread reads.
__syncthreads();
double p{0};
if (i < length) {
// Centre tap then symmetric pairs, all served from shared memory.
p = myMask_d[half_mask_size] * input_shared[tid + half_mask_size];
for (int j = 1; j < half_mask_size + 1; j++) {
p += myMask_d[half_mask_size + j] *
(input_shared[tid + half_mask_size - j] +
input_shared[tid + half_mask_size + j]);
}
output[i] = p;
}
}
// Simply Tiled 1D Convolution
// The idea is to load only internal cells per TILE in the scratch memory
// Ghost values can be accessed directly from input which is HOPEFULLY still
// IN THE L2 CACHE (not the DRAM ofc)
// "Simply tiled" 1D convolution: only the block's own TILE_SIZE
// elements are staged in shared memory; halo (ghost) neighbours that
// fall outside the tile are re-read from global memory, relying on them
// still being resident in the L2 cache.
__global__ void conv1DKernel_simply_tiled(const double *input, double *output,
int length, int half_mask_size) {
int tid = threadIdx.x;
int i = tid + blockIdx.x * blockDim.x;
__shared__ double input_shared[TILE_SIZE];
if (i < length) {
input_shared[tid] = input[i];
}
__syncthreads();
double p{0};
if (i < length) {
// temp = number of valid elements staged by this block (< TILE_SIZE
// only for the last, partially filled block).
int temp = blockDim.x * (blockIdx.x + 1) < length
? TILE_SIZE
: length - blockDim.x * blockIdx.x;
p = myMask_d[half_mask_size] * input_shared[tid];
for (int j = 1; j < half_mask_size + 1; j++) {
// Interior threads read both neighbours from the tile; threads near
// either tile edge fall back to global memory (zero outside [0,length)).
if (tid > half_mask_size) {
if (tid < temp - half_mask_size) {
p += myMask_d[half_mask_size + j] *
(input_shared[tid - j] + input_shared[tid + j]);
} else {
p += myMask_d[half_mask_size + j] *
(input_shared[tid - j] + ((i + j) >= length ? 0 : input[i + j]));
}
} else {
p += myMask_d[half_mask_size + j] *
(input_shared[tid + j] + ((i - j) < 0 ? 0 : input[i - j]));
}
}
output[i] = p;
}
}
// Tiled 1D Convolution with dynamic shared memory
// Same tiling scheme as conv1DKernel_tiled but with dynamically sized
// shared memory (extern __shared__). The launcher presumably passes
// (TILE_SIZE + 2*half_mask_size)*sizeof(double) as the dynamic
// shared-memory size — the launch site is not visible here; verify.
__global__ void conv1DKernel_tiled_dynamic_shared(const double *input,
double *output, int length,
int half_mask_size) {
int tid = threadIdx.x;
int i = tid + blockIdx.x * blockDim.x;
// Thread tid loads element (i - half_mask_size): tile starts with the
// block's left ghost cells (zero before index 0).
int relative_i = i - half_mask_size;
extern __shared__ double input_shared[];
if (i < length) {
input_shared[tid] = relative_i < 0 ? 0 : input[relative_i];
// Fill the remaining 2*half_mask_size (at the end) using first 2*half_size
// elements
if (tid < 2 * half_mask_size) {
// temp is the size of the input portion within the block
// it's TILE_SIZE (blockDim.x) except for the 'last' portion of the
// input array which will be <=TILE_SIZE
int temp = blockDim.x * (blockIdx.x + 1) < length
? TILE_SIZE
: length - blockDim.x * blockIdx.x;
input_shared[temp + tid] =
temp + tid + blockIdx.x * blockDim.x - half_mask_size > length - 1
? 0
: input[temp + tid + blockIdx.x * blockDim.x - half_mask_size];
}
}
// The whole tile (body + halo) must be staged before any thread reads.
__syncthreads();
double p{0};
if (i < length) {
p = myMask_d[half_mask_size] * input_shared[tid + half_mask_size];
for (int j = 1; j < half_mask_size + 1; j++) {
p += myMask_d[half_mask_size + j] *
(input_shared[tid + half_mask_size - j] +
input_shared[tid + half_mask_size + j]);
}
output[i] = p;
}
}
// Nested Loop implementation (More Work per thread) 1D Convolution
// Grid-stride 1D convolution: each thread computes several output
// elements, striding by the total thread count, so the launch grid can
// be smaller than the data. Mask semantics match conv1DKernel_basic
// (symmetric taps in constant memory, zero ghost cells).
__global__ void conv1DKernel_Loop(const double *input, double *output,
                                  int length, int half_mask_size) {
  const int stride = gridDim.x * blockDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < length;
       idx += stride) {
    double acc = myMask_d[half_mask_size] * input[idx];
    for (int j = 1; j <= half_mask_size; j++) {
      double left = (idx - j < 0) ? 0 : input[idx - j];
      double right = (idx + j >= length) ? 0 : input[idx + j];
      acc += myMask_d[half_mask_size + j] * (left + right);
    }
    output[idx] = acc;
  }
}
//=======================================================================================
//=======================================================================================
//=================================== WRAPPERS
//==========================================
//=======================================================================================
//=======================================================================================
// Sequential (CPU) reference implementation of the symmetric 1D convolution:
//   output[i] = input[i] * myMask[half_mask_size]
//             + sum_{j=1..half_mask_size}
//                 (input[i-j] + input[i+j]) * myMask[half_mask_size + j]
// Out-of-range neighbours contribute zero. `myMask` must hold at least
// 2*half_mask_size + 1 coefficients; only the upper (symmetric) half is read.
void conv1DSequentialLauncher(const double *input, double *output,
                              double *myMask, int half_mask_size, int length) {
  // Use signed indices throughout: the previous version mixed size_t loop
  // counters with the signed `length`/`half_mask_size`, which triggered
  // sign-conversion comparisons and misbehaved for non-positive arguments.
  for (int i = 0; i < length; i++) {
    output[i] = input[i] * myMask[half_mask_size];
    for (int j = 1; j <= half_mask_size; j++) {
      double left = (i - j) < 0 ? 0 : input[i - j];
      double right = (i + j) >= length ? 0 : input[i + j];
      output[i] += (left + right) * myMask[half_mask_size + j];
    }
  }
}
// Wrapper around the grid-stride (loop) 1D conv kernel.
// Allocates device buffers, uploads `input` and the mask (constant memory
// myMask_d, MAX_MASK_SIZE doubles expected), launches the kernel on a grid
// sized from the multiprocessor count, copies the result back into `output`,
// and reports the kernel-only elapsed time (ms, via HIP events) through
// `time`. Host<->device transfers are not included in the timing.
void conv1DKernelLoopLauncher(const double *input, double *output,
                              double *myMask, int half_mask_size, int N,
                              float *time) {
  double *input_d = nullptr;
  double *output_d = nullptr;
  // Keep size computations in size_t: the previous (int)(N * sizeof(double))
  // cast could overflow for large N.
  hipMalloc((void **)&input_d, N * sizeof(double));
  hipMalloc((void **)&output_d, N * sizeof(double));
  hipMemcpy(input_d, input, N * sizeof(double), hipMemcpyHostToDevice);
  // Mask lives in constant memory for broadcast reads inside the kernel.
  hipMemcpyToSymbol(myMask_d, myMask, MAX_MASK_SIZE * sizeof(double));
  dim3 blockDim(TILE_SIZE, 1, 1);
  hipSetDevice(0);
  hipDeviceProp_t cudaProp;
  hipGetDeviceProperties(&cudaProp, 0);
  int nbMultiProcess = cudaProp.multiProcessorCount;
  // Oversubscribe each SM so the grid-stride loop has work to stride over.
  dim3 gridDim(100 * nbMultiProcess, 1, 1);
  // Time only the kernel with device events (memcopies omitted).
  hipEvent_t start, end;
  hipEventCreate(&start);
  hipEventCreate(&end);
  hipEventRecord(start);
  hipLaunchKernelGGL(( conv1DKernel_Loop), dim3(gridDim), dim3(blockDim), 0, 0, input_d, output_d, N,
                     half_mask_size);
  printf("Loop kernel Launched :\n");
  printf("Block Dim = (%d, %d, %d) \n", blockDim.x, blockDim.y, blockDim.z);
  printf("Grid Dim = (%d, %d, %d) \n", gridDim.x, gridDim.y, gridDim.z);
  hipEventRecord(end);
  hipMemcpy(output, output_d, N * sizeof(double), hipMemcpyDeviceToHost);
  // Writing elapsed time to (float*) time argument
  hipEventSynchronize(end);
  hipEventElapsedTime(time, start, end);
  hipEventDestroy(start);
  hipEventDestroy(end);
  hipFree(input_d);
  hipFree(output_d);
}
// Wrapper arround basic 1D conv kernel
// Host driver for conv1DKernel_basic: allocates device buffers, uploads
// `input` and the mask (constant memory), launches one thread per element,
// copies the result back and returns kernel-only elapsed time (ms) in `time`.
// `myMask` must hold MAX_MASK_SIZE doubles.
// NOTE(review): hip* return codes are unchecked, and
// (int)(N * sizeof(double)) can overflow for large N — verify callers.
void conv1DKernelBasicLauncher(const double *input, double *output,
                               double *myMask, int half_mask_size, int N,
                               float *time) {
  double *input_d = nullptr;
  double *output_d = nullptr;
  // hipError_t cudaStatus;
  hipMalloc((void **)&input_d, (int)(N * sizeof(double)));
  hipMalloc((void **)&output_d, (int)(N * sizeof(double)));
  hipMemcpy(input_d, input, N * sizeof(double), hipMemcpyHostToDevice);
  // Cte memory copy
  hipMemcpyToSymbol(myMask_d, myMask, MAX_MASK_SIZE * sizeof(double));
  dim3 blockDim(TILE_SIZE, 1, 1);
  dim3 gridDim(ceil((float)N / TILE_SIZE), 1, 1);  // ceil-div grid
  // Computing kernel execution time (memcopy omitted for now)
  hipEvent_t start, end;
  hipEventCreate(&start);
  hipEventCreate(&end);
  hipEventRecord(start);
  // Kernel Starts Here
  hipLaunchKernelGGL(( conv1DKernel_basic), dim3(gridDim), dim3(blockDim), 0, 0, input_d, output_d, N,
                     half_mask_size);
  printf("Basic kernel Launched :\n");
  printf("Block Dim = (%d, %d, %d) \n", blockDim.x, blockDim.y, blockDim.z);
  printf("Grid Dim = (%d, %d, %d) \n", gridDim.x, gridDim.y, gridDim.z);
  // Kernel Ends Here
  hipEventRecord(end);
  hipMemcpy(output, output_d, N * sizeof(double), hipMemcpyDeviceToHost);
  // Writing elapsed time to (float*) time argument
  hipEventSynchronize(end);
  hipEventElapsedTime(time, start, end);
  hipEventDestroy(start);
  hipEventDestroy(end);
  hipFree(input_d);
  hipFree(output_d);
}
// Wrapper arround tiled 1D conv kernel
// Host driver for conv1DKernel_tiled (static shared-memory tile + halo).
// Same flow as the basic launcher; returns kernel-only elapsed time (ms).
// NOTE(review): hip* return codes are unchecked, and
// (int)(N * sizeof(double)) can overflow for large N.
void conv1DKernelTiledLauncher(const double *input, double *output,
                               double *myMask, int half_mask_size, int N,
                               float *time) {
  double *input_d = nullptr;
  double *output_d = nullptr;
  // hipError_t cudaStatus;
  hipMalloc((void **)&input_d, (int)(N * sizeof(double)));
  hipMalloc((void **)&output_d, (int)(N * sizeof(double)));
  hipMemcpy(input_d, input, N * sizeof(double), hipMemcpyHostToDevice);
  // Mask goes to constant memory for broadcast reads.
  hipMemcpyToSymbol(myMask_d, myMask, MAX_MASK_SIZE * sizeof(double));
  dim3 blockDim(TILE_SIZE, 1, 1);
  dim3 gridDim(ceil((float)N / TILE_SIZE), 1, 1);  // ceil-div grid
  // Computing kernel execution time (memcopy omitted for now)
  hipEvent_t start, end;
  hipEventCreate(&start);
  hipEventCreate(&end);
  hipEventRecord(start);
  // Kernel Starts Here
  hipLaunchKernelGGL(( conv1DKernel_tiled), dim3(gridDim), dim3(blockDim), 0, 0, input_d, output_d, N,
                     half_mask_size);
  printf("Tiled kernel Launched :\n");
  printf("Block Dim = (%d, %d, %d) \n", blockDim.x, blockDim.y, blockDim.z);
  printf("Grid Dim = (%d, %d, %d) \n", gridDim.x, gridDim.y, gridDim.z);
  // Kernel Ends Here
  hipEventRecord(end);
  hipMemcpy(output, output_d, N * sizeof(double), hipMemcpyDeviceToHost);
  // Writing elapsed time to (float*) time argument
  hipEventSynchronize(end);
  hipEventElapsedTime(time, start, end);
  hipEventDestroy(start);
  hipEventDestroy(end);
  hipFree(input_d);
  hipFree(output_d);
}
// Wrapper arround simplified tiled 1D conv kernel
// Host driver for conv1DKernel_simply_tiled (tile in shared memory, halo
// values read straight from global/L2). Returns kernel-only elapsed time
// (ms) in `time`.
// NOTE(review): hip* return codes are unchecked, and
// (int)(N * sizeof(double)) can overflow for large N.
void conv1DKernelSimplyTiledLauncher(const double *input, double *output,
                                     double *myMask, int half_mask_size, int N,
                                     float *time) {
  double *input_d = nullptr;
  double *output_d = nullptr;
  // hipError_t cudaStatus;
  hipMalloc((void **)&input_d, (int)(N * sizeof(double)));
  hipMalloc((void **)&output_d, (int)(N * sizeof(double)));
  hipMemcpy(input_d, input, N * sizeof(double), hipMemcpyHostToDevice);
  // Mask goes to constant memory for broadcast reads.
  hipMemcpyToSymbol(myMask_d, myMask, MAX_MASK_SIZE * sizeof(double));
  dim3 blockDim(TILE_SIZE, 1, 1);
  dim3 gridDim(ceil((float)N / TILE_SIZE), 1, 1);  // ceil-div grid
  // Computing kernel execution time (memcopy omitted for now)
  hipEvent_t start, end;
  hipEventCreate(&start);
  hipEventCreate(&end);
  hipEventRecord(start);
  // Kernel Starts Here
  hipLaunchKernelGGL(( conv1DKernel_simply_tiled), dim3(gridDim), dim3(blockDim), 0, 0, input_d, output_d, N,
                     half_mask_size);
  printf("Simply Tiled kernel Launched :\n");
  printf("Block Dim = (%d, %d, %d) \n", blockDim.x, blockDim.y, blockDim.z);
  printf("Grid Dim = (%d, %d, %d) \n", gridDim.x, gridDim.y, gridDim.z);
  // Kernel Ends Here
  hipEventRecord(end);
  hipMemcpy(output, output_d, N * sizeof(double), hipMemcpyDeviceToHost);
  // Writing elapsed time to (float*) time argument
  hipEventSynchronize(end);
  hipEventElapsedTime(time, start, end);
  hipEventDestroy(start);
  hipEventDestroy(end);
  hipFree(input_d);
  hipFree(output_d);
}
// Wrapper arround tiled 1D conv kernel with dynamic shared memory
// Host driver for conv1DKernel_tiled_dynamic_shared. Passes the per-block
// dynamic shared-memory size, (TILE_SIZE + 2*half_mask_size)*sizeof(double),
// as the launch's shared-memory argument, so half_mask_size may be chosen at
// runtime. Returns kernel-only elapsed time (ms) in `time`.
// NOTE(review): hip* return codes are unchecked, and
// (int)(N * sizeof(double)) can overflow for large N.
void conv1DKernelTiledDynamicSharedLauncher(const double *input, double *output,
                                            double *myMask, int half_mask_size,
                                            int N, float *time) {
  double *input_d = nullptr;
  double *output_d = nullptr;
  // hipError_t cudaStatus;
  hipMalloc((void **)&input_d, (int)(N * sizeof(double)));
  hipMalloc((void **)&output_d, (int)(N * sizeof(double)));
  hipMemcpy(input_d, input, N * sizeof(double), hipMemcpyHostToDevice);
  // Mask goes to constant memory for broadcast reads.
  hipMemcpyToSymbol(myMask_d, myMask, MAX_MASK_SIZE * sizeof(double));
  dim3 blockDim(TILE_SIZE, 1, 1);
  dim3 gridDim(ceil((float)N / TILE_SIZE), 1, 1);  // ceil-div grid
  // Computing kernel execution time (memcopy omitted for now)
  hipEvent_t start, end;
  hipEventCreate(&start);
  hipEventCreate(&end);
  hipEventRecord(start);
  // Kernel Starts Here
  // Here hal_mask_size can be decided at runtime so shared mem allocation is
  // dynamic
  hipLaunchKernelGGL(( conv1DKernel_tiled_dynamic_shared),
      dim3(gridDim), dim3(blockDim), (TILE_SIZE + 2 * half_mask_size) * sizeof(double), 0,
      input_d, output_d, N, half_mask_size);
  printf("Tiled kernel with dynamic Shared memory Launched :\n");
  printf("Block Dim = (%d, %d, %d) \n", blockDim.x, blockDim.y, blockDim.z);
  printf("Grid Dim = (%d, %d, %d) \n", gridDim.x, gridDim.y, gridDim.z);
  // Kernel Ends Here
  hipEventRecord(end);
  hipMemcpy(output, output_d, N * sizeof(double), hipMemcpyDeviceToHost);
  // Writing elapsed time to (float*) time argument
  hipEventSynchronize(end);
  hipEventElapsedTime(time, start, end);
  hipEventDestroy(start);
  hipEventDestroy(end);
  hipFree(input_d);
  hipFree(output_d);
} | 991778fe54a64f1a54cda7ea8a50d54188a68846.cu | #include <cuda_runtime.h>
#include <helper_cuda.h>
#include "conv1DKernel.h"
#include "conv1DKernelHeaders.cuh"
#define MAX_MASK_SIZE 33
#define TILE_SIZE 512
// Dynamic allocation of constant memory is not allowed in CUDA.
__constant__ double myMask_d[MAX_MASK_SIZE];
// Simple 1D Convolution
// Naive kernel: one thread per output element, all reads from global memory.
// Mask coefficients come from constant memory (myMask_d); neighbours outside
// [0, length) contribute zero.
__global__ void conv1DKernel_basic(const double *input, double *output,
                                   int length, int half_mask_size) {
  int tid = threadIdx.x;
  int i = tid + blockIdx.x * blockDim.x;  // global output index
  double p{0};
  if (i < length) {  // guard the ragged last block
    p = myMask_d[half_mask_size] * input[i];
    for (int j = 1; j <= half_mask_size; j++) {
      p += myMask_d[half_mask_size + j] *
           (((i - j < 0) ? 0 : input[i - j]) +
            ((i + j >= length) ? 0 : input[i + j]));
    }
    // Number of global mem accesses per thread : 1 + halfMaskSize*2 + 1 +
    // halfMaskSize = 2 + 3*halfMaskSize So global nb of accesses :
    // (2+3*halfMaskSize)*n
    output[i] = p;
  }
}
// Tiled 1D Convolution
// Stages the block's tile plus a halo into a statically sized shared array
// (worst-case halo of 2*(MAX_MASK_SIZE/2) doubles), then convolves from the
// staged copy. Launch contract: blockDim.x == TILE_SIZE. Mask coefficients
// come from constant memory (myMask_d); out-of-range neighbours read as zero.
__global__ void conv1DKernel_tiled(const double *input, double *output,
                                   int length, int half_mask_size) {
  int tid = threadIdx.x;
  int i = tid + blockIdx.x * blockDim.x;
  int relative_i = i - half_mask_size;  // input index shifted left by the halo
  __shared__ double input_shared[TILE_SIZE + (MAX_MASK_SIZE / 2) * 2];
  if (i < length) {
    // Body + left-halo load; indices before the array start read as zero.
    input_shared[tid] = relative_i < 0 ? 0 : input[relative_i];
    // Fill the remaining 2*half_mask_size (at the end) using first 2*half_size
    // elements
    if (tid < 2 * half_mask_size) {
      // temp is the size of the input portion within the block
      // it's TILE_SIZE (blockDim.x) except for the 'last' portion of the
      // input array which will be <=TILE_SIZE
      int temp = blockDim.x * (blockIdx.x + 1) < length
                     ? TILE_SIZE
                     : length - blockDim.x * blockIdx.x;
      input_shared[temp + tid] =
          temp + tid + blockIdx.x * blockDim.x - half_mask_size > length - 1
              ? 0
              : input[temp + tid + blockIdx.x * blockDim.x - half_mask_size];
    }
  }
  __syncthreads();  // all staging complete before any thread reads the tile
  double p{0};
  if (i < length) {
    // tid + half_mask_size is this element's position inside the staged tile.
    p = myMask_d[half_mask_size] * input_shared[tid + half_mask_size];
    for (int j = 1; j < half_mask_size + 1; j++) {
      p += myMask_d[half_mask_size + j] *
           (input_shared[tid + half_mask_size - j] +
            input_shared[tid + half_mask_size + j]);
    }
    output[i] = p;
  }
}
// Simply Tiled 1D Convolution
// The idea is to load only internal cells per TILE in the scratch memory
// Ghost values can be accessed directly from input which is HOPEFULLY still
// IN THE L2 CACHE (not the DRAM ofc)
// Launch contract: blockDim.x == TILE_SIZE. Out-of-range neighbours read as
// zero; mask coefficients come from constant memory (myMask_d).
__global__ void conv1DKernel_simply_tiled(const double *input, double *output,
                                          int length, int half_mask_size) {
  int tid = threadIdx.x;
  int i = tid + blockIdx.x * blockDim.x;
  __shared__ double input_shared[TILE_SIZE];  // tile only, no halo
  if (i < length) {
    input_shared[tid] = input[i];
  }
  __syncthreads();  // tile fully staged before any cross-thread reads
  double p{0};
  if (i < length) {
    // temp = number of valid elements staged by this block (TILE_SIZE except
    // for the ragged last block).
    int temp = blockDim.x * (blockIdx.x + 1) < length
                   ? TILE_SIZE
                   : length - blockDim.x * blockIdx.x;
    p = myMask_d[half_mask_size] * input_shared[tid];
    for (int j = 1; j < half_mask_size + 1; j++) {
      if (tid > half_mask_size) {
        if (tid < temp - half_mask_size) {
          // Interior of the tile: both neighbours are in shared memory.
          p += myMask_d[half_mask_size + j] *
               (input_shared[tid - j] + input_shared[tid + j]);
        } else {
          // Right edge: left neighbour staged, right neighbour from global.
          p += myMask_d[half_mask_size + j] *
               (input_shared[tid - j] + ((i + j) >= length ? 0 : input[i + j]));
        }
      } else {
        // Left edge: right neighbour staged, left neighbour from global.
        p += myMask_d[half_mask_size + j] *
             (input_shared[tid + j] + ((i - j) < 0 ? 0 : input[i - j]));
      }
    }
    output[i] = p;
  }
}
// Tiled 1D Convolution with dynamic shared memory
// Same algorithm as conv1DKernel_tiled, but the tile-plus-halo scratch array
// is dynamically sized: the launch must pass
// (TILE_SIZE + 2*half_mask_size) * sizeof(double) as the shared-memory size,
// and blockDim.x must equal TILE_SIZE. Mask coefficients come from constant
// memory (myMask_d); out-of-range neighbours read as zero.
__global__ void conv1DKernel_tiled_dynamic_shared(const double *input,
                                                  double *output, int length,
                                                  int half_mask_size) {
  int tid = threadIdx.x;
  int i = tid + blockIdx.x * blockDim.x;
  int relative_i = i - half_mask_size;     // input index shifted left by halo
  extern __shared__ double input_shared[]; // sized by the launch's 3rd arg
  if (i < length) {
    // Body + left-halo load; indices before the array start read as zero.
    input_shared[tid] = relative_i < 0 ? 0 : input[relative_i];
    // Fill the remaining 2*half_mask_size (at the end) using first 2*half_size
    // elements
    if (tid < 2 * half_mask_size) {
      // temp is the size of the input portion within the block
      // it's TILE_SIZE (blockDim.x) except for the 'last' portion of the
      // input array which will be <=TILE_SIZE
      int temp = blockDim.x * (blockIdx.x + 1) < length
                     ? TILE_SIZE
                     : length - blockDim.x * blockIdx.x;
      input_shared[temp + tid] =
          temp + tid + blockIdx.x * blockDim.x - half_mask_size > length - 1
              ? 0
              : input[temp + tid + blockIdx.x * blockDim.x - half_mask_size];
    }
  }
  __syncthreads();  // all staging complete before any thread reads the tile
  double p{0};
  if (i < length) {
    // tid + half_mask_size is this element's position inside the staged tile.
    p = myMask_d[half_mask_size] * input_shared[tid + half_mask_size];
    for (int j = 1; j < half_mask_size + 1; j++) {
      p += myMask_d[half_mask_size + j] *
           (input_shared[tid + half_mask_size - j] +
            input_shared[tid + half_mask_size + j]);
    }
    output[i] = p;
  }
}
// Nested Loop implementation (More Work per thread) 1D Convolution
// Grid-stride loop: each thread computes every (gridDim.x * blockDim.x)-th
// output element, so correctness does not depend on the launch grid size.
// Mask coefficients come from constant memory (myMask_d); out-of-range
// neighbours contribute zero.
__global__ void conv1DKernel_Loop(const double *input, double *output,
                                  int length, int half_mask_size) {
  int tid = threadIdx.x;
  int i = tid + blockIdx.x * blockDim.x;
  double p = 0;
  while (i < length) {
    p = myMask_d[half_mask_size] * input[i];
    for (int j = 1; j <= half_mask_size; j++) {
      p += myMask_d[half_mask_size + j] *
           (((i - j < 0) ? 0 : input[i - j]) +
            ((i + j >= length) ? 0 : input[i + j]));
    }
    output[i] = p;
    i += gridDim.x * blockDim.x;  // grid-stride advance
  }
}
//=======================================================================================
//=======================================================================================
//=================================== WRAPPERS
//==========================================
//=======================================================================================
//=======================================================================================
// Sequential Implementation
// CPU reference for the symmetric 1D convolution: the centre coefficient is
// myMask[half_mask_size] and each pair of neighbours at distance j shares
// coefficient myMask[half_mask_size + j]; out-of-range neighbours read as 0.
void conv1DSequentialLauncher(const double *input, double *output,
                              double *myMask, int half_mask_size, int length) {
  const double center = myMask[half_mask_size];
  for (size_t i = 0; i < length; i++) {
    double acc = input[i] * center;
    for (size_t j = 1; j <= half_mask_size; j++) {
      const double left = ((int)i - (int)j) < 0 ? 0 : input[i - j];
      const double right = (i + j) >= length ? 0 : input[i + j];
      acc += (left + right) * myMask[half_mask_size + j];
    }
    output[i] = acc;
  }
}
// Wrapper arround loop 1D conv kernel
// Host driver for conv1DKernel_Loop: uploads input + mask (constant memory),
// launches a grid sized from the SM count (grid-stride kernel), copies the
// result back and returns kernel-only elapsed time (ms) in `time`.
// NOTE(review): cuda* return codes are unchecked, and
// (int)(N * sizeof(double)) can overflow for large N.
void conv1DKernelLoopLauncher(const double *input, double *output,
                              double *myMask, int half_mask_size, int N,
                              float *time) {
  double *input_d = nullptr;
  double *output_d = nullptr;
  // cudaError_t cudaStatus;
  cudaMalloc((void **)&input_d, (int)(N * sizeof(double)));
  cudaMalloc((void **)&output_d, (int)(N * sizeof(double)));
  cudaMemcpy(input_d, input, N * sizeof(double), cudaMemcpyHostToDevice);
  // Cte memory copy
  cudaMemcpyToSymbol(myMask_d, myMask, MAX_MASK_SIZE * sizeof(double));
  dim3 blockDim(TILE_SIZE, 1, 1);
  cudaSetDevice(0);
  cudaDeviceProp cudaProp;
  cudaGetDeviceProperties(&cudaProp, 0);
  int nbMultiProcess = cudaProp.multiProcessorCount;
  // Oversubscribe each SM so the grid-stride loop has work to stride over.
  dim3 gridDim(100 * nbMultiProcess, 1, 1);
  // Computing kernel execution time (memcopy omitted for now)
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start);
  // Kernel Starts Here
  conv1DKernel_Loop<<<gridDim, blockDim>>>(input_d, output_d, N,
                                           half_mask_size);
  printf("Loop kernel Launched :\n");
  printf("Block Dim = (%d, %d, %d) \n", blockDim.x, blockDim.y, blockDim.z);
  printf("Grid Dim = (%d, %d, %d) \n", gridDim.x, gridDim.y, gridDim.z);
  // Kernel Ends Here
  cudaEventRecord(end);
  cudaMemcpy(output, output_d, N * sizeof(double), cudaMemcpyDeviceToHost);
  // Writing elapsed time to (float*) time argument
  cudaEventSynchronize(end);
  cudaEventElapsedTime(time, start, end);
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  cudaFree(input_d);
  cudaFree(output_d);
}
// Wrapper arround basic 1D conv kernel
// Host driver for conv1DKernel_basic: one thread per element; returns
// kernel-only elapsed time (ms) in `time`. `myMask` must hold MAX_MASK_SIZE
// doubles. NOTE(review): cuda* return codes are unchecked, and
// (int)(N * sizeof(double)) can overflow for large N.
void conv1DKernelBasicLauncher(const double *input, double *output,
                               double *myMask, int half_mask_size, int N,
                               float *time) {
  double *input_d = nullptr;
  double *output_d = nullptr;
  // cudaError_t cudaStatus;
  cudaMalloc((void **)&input_d, (int)(N * sizeof(double)));
  cudaMalloc((void **)&output_d, (int)(N * sizeof(double)));
  cudaMemcpy(input_d, input, N * sizeof(double), cudaMemcpyHostToDevice);
  // Cte memory copy
  cudaMemcpyToSymbol(myMask_d, myMask, MAX_MASK_SIZE * sizeof(double));
  dim3 blockDim(TILE_SIZE, 1, 1);
  dim3 gridDim(ceil((float)N / TILE_SIZE), 1, 1);  // ceil-div grid
  // Computing kernel execution time (memcopy omitted for now)
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start);
  // Kernel Starts Here
  conv1DKernel_basic<<<gridDim, blockDim>>>(input_d, output_d, N,
                                            half_mask_size);
  printf("Basic kernel Launched :\n");
  printf("Block Dim = (%d, %d, %d) \n", blockDim.x, blockDim.y, blockDim.z);
  printf("Grid Dim = (%d, %d, %d) \n", gridDim.x, gridDim.y, gridDim.z);
  // Kernel Ends Here
  cudaEventRecord(end);
  cudaMemcpy(output, output_d, N * sizeof(double), cudaMemcpyDeviceToHost);
  // Writing elapsed time to (float*) time argument
  cudaEventSynchronize(end);
  cudaEventElapsedTime(time, start, end);
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  cudaFree(input_d);
  cudaFree(output_d);
}
// Wrapper arround tiled 1D conv kernel
// Host driver for conv1DKernel_tiled (static shared-memory tile + halo);
// returns kernel-only elapsed time (ms) in `time`.
// NOTE(review): cuda* return codes are unchecked, and
// (int)(N * sizeof(double)) can overflow for large N.
void conv1DKernelTiledLauncher(const double *input, double *output,
                               double *myMask, int half_mask_size, int N,
                               float *time) {
  double *input_d = nullptr;
  double *output_d = nullptr;
  // cudaError_t cudaStatus;
  cudaMalloc((void **)&input_d, (int)(N * sizeof(double)));
  cudaMalloc((void **)&output_d, (int)(N * sizeof(double)));
  cudaMemcpy(input_d, input, N * sizeof(double), cudaMemcpyHostToDevice);
  // Mask goes to constant memory for broadcast reads.
  cudaMemcpyToSymbol(myMask_d, myMask, MAX_MASK_SIZE * sizeof(double));
  dim3 blockDim(TILE_SIZE, 1, 1);
  dim3 gridDim(ceil((float)N / TILE_SIZE), 1, 1);  // ceil-div grid
  // Computing kernel execution time (memcopy omitted for now)
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start);
  // Kernel Starts Here
  conv1DKernel_tiled<<<gridDim, blockDim>>>(input_d, output_d, N,
                                            half_mask_size);
  printf("Tiled kernel Launched :\n");
  printf("Block Dim = (%d, %d, %d) \n", blockDim.x, blockDim.y, blockDim.z);
  printf("Grid Dim = (%d, %d, %d) \n", gridDim.x, gridDim.y, gridDim.z);
  // Kernel Ends Here
  cudaEventRecord(end);
  cudaMemcpy(output, output_d, N * sizeof(double), cudaMemcpyDeviceToHost);
  // Writing elapsed time to (float*) time argument
  cudaEventSynchronize(end);
  cudaEventElapsedTime(time, start, end);
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  cudaFree(input_d);
  cudaFree(output_d);
}
// Wrapper arround simplified tiled 1D conv kernel
// Host driver for conv1DKernel_simply_tiled (tile only in shared memory,
// halo from global/L2); returns kernel-only elapsed time (ms) in `time`.
// NOTE(review): cuda* return codes are unchecked, and
// (int)(N * sizeof(double)) can overflow for large N.
void conv1DKernelSimplyTiledLauncher(const double *input, double *output,
                                     double *myMask, int half_mask_size, int N,
                                     float *time) {
  double *input_d = nullptr;
  double *output_d = nullptr;
  // cudaError_t cudaStatus;
  cudaMalloc((void **)&input_d, (int)(N * sizeof(double)));
  cudaMalloc((void **)&output_d, (int)(N * sizeof(double)));
  cudaMemcpy(input_d, input, N * sizeof(double), cudaMemcpyHostToDevice);
  // Mask goes to constant memory for broadcast reads.
  cudaMemcpyToSymbol(myMask_d, myMask, MAX_MASK_SIZE * sizeof(double));
  dim3 blockDim(TILE_SIZE, 1, 1);
  dim3 gridDim(ceil((float)N / TILE_SIZE), 1, 1);  // ceil-div grid
  // Computing kernel execution time (memcopy omitted for now)
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start);
  // Kernel Starts Here
  conv1DKernel_simply_tiled<<<gridDim, blockDim>>>(input_d, output_d, N,
                                                   half_mask_size);
  printf("Simply Tiled kernel Launched :\n");
  printf("Block Dim = (%d, %d, %d) \n", blockDim.x, blockDim.y, blockDim.z);
  printf("Grid Dim = (%d, %d, %d) \n", gridDim.x, gridDim.y, gridDim.z);
  // Kernel Ends Here
  cudaEventRecord(end);
  cudaMemcpy(output, output_d, N * sizeof(double), cudaMemcpyDeviceToHost);
  // Writing elapsed time to (float*) time argument
  cudaEventSynchronize(end);
  cudaEventElapsedTime(time, start, end);
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  cudaFree(input_d);
  cudaFree(output_d);
}
// Wrapper arround tiled 1D conv kernel with dynamic shared memory
// Host driver for conv1DKernel_tiled_dynamic_shared: passes
// (TILE_SIZE + 2*half_mask_size)*sizeof(double) as the launch's dynamic
// shared-memory size so half_mask_size can be chosen at runtime. Returns
// kernel-only elapsed time (ms) in `time`.
// NOTE(review): cuda* return codes are unchecked, and
// (int)(N * sizeof(double)) can overflow for large N.
void conv1DKernelTiledDynamicSharedLauncher(const double *input, double *output,
                                            double *myMask, int half_mask_size,
                                            int N, float *time) {
  double *input_d = nullptr;
  double *output_d = nullptr;
  // cudaError_t cudaStatus;
  cudaMalloc((void **)&input_d, (int)(N * sizeof(double)));
  cudaMalloc((void **)&output_d, (int)(N * sizeof(double)));
  cudaMemcpy(input_d, input, N * sizeof(double), cudaMemcpyHostToDevice);
  // Mask goes to constant memory for broadcast reads.
  cudaMemcpyToSymbol(myMask_d, myMask, MAX_MASK_SIZE * sizeof(double));
  dim3 blockDim(TILE_SIZE, 1, 1);
  dim3 gridDim(ceil((float)N / TILE_SIZE), 1, 1);  // ceil-div grid
  // Computing kernel execution time (memcopy omitted for now)
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start);
  // Kernel Starts Here
  // Here hal_mask_size can be decided at runtime so shared mem allocation is
  // dynamic
  conv1DKernel_tiled_dynamic_shared<<<
      gridDim, blockDim, (TILE_SIZE + 2 * half_mask_size) * sizeof(double)>>>(
      input_d, output_d, N, half_mask_size);
  printf("Tiled kernel with dynamic Shared memory Launched :\n");
  printf("Block Dim = (%d, %d, %d) \n", blockDim.x, blockDim.y, blockDim.z);
  printf("Grid Dim = (%d, %d, %d) \n", gridDim.x, gridDim.y, gridDim.z);
  // Kernel Ends Here
  cudaEventRecord(end);
  cudaMemcpy(output, output_d, N * sizeof(double), cudaMemcpyDeviceToHost);
  // Writing elapsed time to (float*) time argument
  cudaEventSynchronize(end);
  cudaEventElapsedTime(time, start, end);
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  cudaFree(input_d);
  cudaFree(output_d);
}
50f81218ed2cb789ee24f2c3f1a5d4df378b722f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sys/time.h>
#include <stdio.h>
// Wall-clock time in seconds (microsecond resolution) via gettimeofday;
// used for coarse host-side timing of kernel launches.
double cpuSecond() {
  struct timeval now;
  gettimeofday(&now, NULL);
  double seconds = (double)now.tv_sec;
  double micros = (double)now.tv_usec;
  return seconds + micros * 1e-6;
}
// Warm-up launch: identical branch structure to mathKernel2 (warp-uniform
// condition), run once before the timed kernels to absorb one-time startup
// costs. NOTE(review): no bounds check on tid — assumes the grid exactly
// covers c; verify when size is not a multiple of the block size.
__global__ void warmingup(float *c) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float a, b;
  a = b = 0.0f;
  // (tid / warpSize) % 2 is uniform within a warp -> no divergence.
  if ((tid / warpSize) % 2 == 0) {
    a = 100.0f;
  } else {
    b = 200.0f;
  }
  c[tid] = a + b;
}
// Worst-case warp-divergence demo: tid % 2 alternates within every warp, so
// each warp must execute both sides of the branch (even tid -> 100.0f,
// odd tid -> 200.0f). The divergent branch is intentional — do not
// "optimize" it away; this kernel exists to be timed against mathKernel2/3.
__global__ void mathKernel1(float *c) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float a, b;
  a = b = 0.0f;
  if (tid % 2 == 0) {  // alternates lane-by-lane -> full warp divergence
    a = 100.0f;
  } else {
    b = 200.0f;
  }
  c[tid] = a + b;
}
// Divergence-free variant of mathKernel1: the branch condition switches every
// warpSize threads, so it is uniform within each warp and no warp executes
// both paths. Same work per thread as mathKernel1 for a fair timing contrast.
__global__ void mathKernel2(float *c) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float a, b;
  a = b = 0.0f;
  // Warp-aligned condition: all 32 lanes of a warp take the same side.
  if ((tid / warpSize) % 2 == 0) {
    a = 100.0f;
  } else {
    b = 200.0f;
  }
  c[tid] = a + b;
}
// Predicate-based variant of mathKernel1: the condition is hoisted into a
// bool so the compiler can emit predicated instructions instead of divergent
// branches. Per-element results now match mathKernel1/mathKernel2
// (even tid -> 100.0f, odd tid -> 200.0f).
__global__ void mathKernel3(float *c) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float ia, ib;
  ia = ib = 0.0f;  // float literal (was the double literal 0.0)
  bool ipred = (tid % 2 == 0);
  if (ipred) {
    ia = 100.0f;
  }
  if (!ipred) {
    // Fixed: was 100.0f, inconsistent with mathKernel1/2 where odd threads
    // produce 200.0f — the three kernels are meant to compute the same data.
    ib = 200.0f;
  }
  c[tid] = ia + ib;
}
// Benchmarks the branch-divergence kernel variants (warmingup,
// mathKernel1..3) with a host-side wall-clock timer.
// argv[1] = block size (default 64), argv[2] = data size (default 64).
// NOTE(review): hip* return codes are unchecked, and the kernels write c[tid]
// without a bounds check — when size is not a multiple of blockSize the
// ragged last block writes past d_C; verify before changing the defaults.
int main(int argc, char **argv) {
  int dev = 0;
  hipDeviceProp_t deviceProp;
  hipGetDeviceProperties(&deviceProp, dev);
  printf("%s Using Device %d: %s\n", argv[0], dev, deviceProp.name);
  int size = 64;
  int blockSize = 64;
  if (argc > 1) blockSize = atoi(argv[1]);
  if (argc > 2) size = atoi(argv[2]);
  printf("Data size %d\n", size);
  dim3 block(blockSize, 1);
  dim3 grid((size + block.x - 1) / block.x, 1);  // ceil-div grid
  printf("Execution Configure (block %d grid %d)\n", block.x, grid.x);
  float *d_C;
  size_t nBytes = size * sizeof(float);
  hipMalloc((float**)&d_C, nBytes);
  double iStart, iElaps;
  // Sync before/after each launch so cpuSecond() brackets the kernel only.
  hipDeviceSynchronize();
  iStart = cpuSecond();
  hipLaunchKernelGGL(( warmingup), dim3(grid), dim3(block), 0, 0, d_C);
  hipDeviceSynchronize();
  iElaps = cpuSecond() - iStart;
  printf("warmup <<<%4d %4d>>> elaped %f sec\n", grid.x, block.x, iElaps);
  iStart = cpuSecond();
  hipLaunchKernelGGL(( mathKernel1), dim3(grid), dim3(block), 0, 0, d_C);
  hipDeviceSynchronize();
  iElaps = cpuSecond() - iStart;
  printf("mathKernel1 <<<%4d %4d>>> elaped %f sec\n", grid.x, block.x, iElaps);
  iStart = cpuSecond();
  hipLaunchKernelGGL(( mathKernel2), dim3(grid), dim3(block), 0, 0, d_C);
  hipDeviceSynchronize();
  iElaps = cpuSecond() - iStart;
  printf("mathKernel2 <<<%4d %4d>>> elaped %f sec\n", grid.x, block.x, iElaps);
  iStart = cpuSecond();
  hipLaunchKernelGGL(( mathKernel3), dim3(grid), dim3(block), 0, 0, d_C);
  hipDeviceSynchronize();
  iElaps = cpuSecond() - iStart;
  printf("mathKernel3 <<<%4d %4d>>> elaped %f sec\n", grid.x, block.x, iElaps);
  /*iStart = cpuSecond();
  mathKernel4<<<grid, block>>>(d_C);
  hipDeviceSynchronize();
  iElaps = cpuSecond() - iStart;
  printf("mathKernel4 <<<%4d %4d>>> elaped %f sec\n", grid.x, block.x, iElaps);*/
  hipFree(d_C);
  hipDeviceReset();
  return EXIT_SUCCESS;
} | 50f81218ed2cb789ee24f2c3f1a5d4df378b722f.cu | #include <sys/time.h>
#include <stdio.h>
// Wall-clock time in seconds (microsecond resolution) via gettimeofday;
// used for coarse host-side timing of the kernel launches below.
double cpuSecond() {
  struct timeval tp;
  gettimeofday(&tp, NULL);
  return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6);
}
// Warm-up launch: identical branch structure to mathKernel2 (warp-uniform
// condition), run once before the timed kernels to absorb one-time startup
// costs. NOTE(review): no bounds check on tid — assumes the grid exactly
// covers c.
__global__ void warmingup(float *c) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float a, b;
  a = b = 0.0f;
  // (tid / warpSize) % 2 is uniform within a warp -> no divergence.
  if ((tid / warpSize) % 2 == 0) {
    a = 100.0f;
  } else {
    b = 200.0f;
  }
  c[tid] = a + b;
}
// Worst-case warp-divergence demo: tid % 2 alternates within every warp, so
// each warp executes both branch paths (even tid -> 100.0f, odd -> 200.0f).
// The divergent branch is intentional — this kernel is timed against the
// divergence-free mathKernel2 and the predicated mathKernel3.
__global__ void mathKernel1(float *c) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float a, b;
  a = b = 0.0f;
  if (tid % 2 == 0) {  // alternates lane-by-lane -> full warp divergence
    a = 100.0f;
  } else {
    b = 200.0f;
  }
  c[tid] = a + b;
}
// Divergence-free variant of mathKernel1: the branch condition switches every
// warpSize threads, so it is uniform within each warp and no warp executes
// both paths.
__global__ void mathKernel2(float *c) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float a, b;
  a = b = 0.0f;
  // Warp-aligned condition: all 32 lanes of a warp take the same side.
  if ((tid / warpSize) % 2 == 0) {
    a = 100.0f;
  } else {
    b = 200.0f;
  }
  c[tid] = a + b;
}
// Predicate-based variant of mathKernel1: the condition is hoisted into a
// bool so the compiler can emit predicated instructions instead of divergent
// branches. Per-element results now match mathKernel1/mathKernel2
// (even tid -> 100.0f, odd tid -> 200.0f).
__global__ void mathKernel3(float *c) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float ia, ib;
  ia = ib = 0.0f;  // float literal (was the double literal 0.0)
  bool ipred = (tid % 2 == 0);
  if (ipred) {
    ia = 100.0f;
  }
  if (!ipred) {
    // Fixed: was 100.0f, inconsistent with mathKernel1/2 where odd threads
    // produce 200.0f — the three kernels are meant to compute the same data.
    ib = 200.0f;
  }
  c[tid] = ia + ib;
}
// Benchmarks the branch-divergence kernel variants (warmingup,
// mathKernel1..3) with a host-side wall-clock timer.
// argv[1] = block size (default 64), argv[2] = data size (default 64).
// NOTE(review): cuda* return codes are unchecked, and the kernels write
// c[tid] without a bounds check — when size is not a multiple of blockSize
// the ragged last block writes past d_C; verify before changing defaults.
int main(int argc, char **argv) {
  int dev = 0;
  cudaDeviceProp deviceProp;
  cudaGetDeviceProperties(&deviceProp, dev);
  printf("%s Using Device %d: %s\n", argv[0], dev, deviceProp.name);
  int size = 64;
  int blockSize = 64;
  if (argc > 1) blockSize = atoi(argv[1]);
  if (argc > 2) size = atoi(argv[2]);
  printf("Data size %d\n", size);
  dim3 block(blockSize, 1);
  dim3 grid((size + block.x - 1) / block.x, 1);  // ceil-div grid
  printf("Execution Configure (block %d grid %d)\n", block.x, grid.x);
  float *d_C;
  size_t nBytes = size * sizeof(float);
  cudaMalloc((float**)&d_C, nBytes);
  double iStart, iElaps;
  // Sync before/after each launch so cpuSecond() brackets the kernel only.
  cudaDeviceSynchronize();
  iStart = cpuSecond();
  warmingup<<<grid, block>>>(d_C);
  cudaDeviceSynchronize();
  iElaps = cpuSecond() - iStart;
  printf("warmup <<<%4d %4d>>> elaped %f sec\n", grid.x, block.x, iElaps);
  iStart = cpuSecond();
  mathKernel1<<<grid, block>>>(d_C);
  cudaDeviceSynchronize();
  iElaps = cpuSecond() - iStart;
  printf("mathKernel1 <<<%4d %4d>>> elaped %f sec\n", grid.x, block.x, iElaps);
  iStart = cpuSecond();
  mathKernel2<<<grid, block>>>(d_C);
  cudaDeviceSynchronize();
  iElaps = cpuSecond() - iStart;
  printf("mathKernel2 <<<%4d %4d>>> elaped %f sec\n", grid.x, block.x, iElaps);
  iStart = cpuSecond();
  mathKernel3<<<grid, block>>>(d_C);
  cudaDeviceSynchronize();
  iElaps = cpuSecond() - iStart;
  printf("mathKernel3 <<<%4d %4d>>> elaped %f sec\n", grid.x, block.x, iElaps);
  /*iStart = cpuSecond();
  mathKernel4<<<grid, block>>>(d_C);
  cudaDeviceSynchronize();
  iElaps = cpuSecond() - iStart;
  printf("mathKernel4 <<<%4d %4d>>> elaped %f sec\n", grid.x, block.x, iElaps);*/
  cudaFree(d_C);
  cudaDeviceReset();
  return EXIT_SUCCESS;
}
} |
5849b744170acd1494b99a8b652f158778670693.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "repeat0.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: for argv[1] matrix sizes x 20 block
// shapes, launches repeat0 once cold, 10x as warm-up, then times 1000
// launches and prints [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// Grid dims are padded up so each dimension divides evenly.
// NOTE(review): hipMalloc is given XSIZE*YSIZE BYTES (no * sizeof(float))
// and in/out are never freed across loop iterations — confirm repeat0's
// footprint and fix the leak before trusting larger configurations.
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
    int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
    float *in = NULL;
    hipMalloc(&in, XSIZE*YSIZE);
    float *out = NULL;
    hipMalloc(&out, XSIZE*YSIZE);
    int outStride0 = 2;
    int outStride1 = 2;
    int outScalarCount = 1;
    int iXSIZE= XSIZE;
    int iYSIZE= YSIZE;
    // Pad each dimension up to the next multiple of the block dimension.
    while(iXSIZE%BLOCKX!=0)
    {
        iXSIZE++;
    }
    while(iYSIZE%BLOCKY!=0)
    {
        iYSIZE++;
    }
    dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
    dim3 threadBlock(BLOCKX, BLOCKY);
    // Cold launch (after context init via hipFree(0)).
    hipFree(0);hipLaunchKernelGGL((
        repeat0), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,outStride0,outStride1,outScalarCount);
    hipDeviceSynchronize();
    // Warm-up launches (untimed).
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
        repeat0), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,outStride0,outStride1,outScalarCount);
    }
    // Timed launches; note there is no sync before reading the clock, so
    // this measures launch/enqueue throughput, not kernel completion.
    auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
        repeat0), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,outStride0,outStride1,outScalarCount);
    }
    auto end = steady_clock::now();
    auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
    cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
}} | 5849b744170acd1494b99a8b652f158778670693.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "repeat0.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: for argv[1] matrix sizes x 20 block
// shapes, launches repeat0 once cold, 10x as warm-up, then times 1000
// launches and prints [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// Grid dims are padded up so each dimension divides evenly.
// NOTE(review): cudaMalloc is given XSIZE*YSIZE BYTES (no * sizeof(float))
// and in/out are never freed across loop iterations — confirm repeat0's
// footprint and fix the leak before trusting larger configurations.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
    int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
    float *in = NULL;
    cudaMalloc(&in, XSIZE*YSIZE);
    float *out = NULL;
    cudaMalloc(&out, XSIZE*YSIZE);
    int outStride0 = 2;
    int outStride1 = 2;
    int outScalarCount = 1;
    int iXSIZE= XSIZE;
    int iYSIZE= YSIZE;
    // Pad each dimension up to the next multiple of the block dimension.
    while(iXSIZE%BLOCKX!=0)
    {
        iXSIZE++;
    }
    while(iYSIZE%BLOCKY!=0)
    {
        iYSIZE++;
    }
    dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
    dim3 threadBlock(BLOCKX, BLOCKY);
    // Cold launch (after context init via cudaFree(0)).
    cudaFree(0);
    repeat0<<<gridBlock,threadBlock>>>(in,out,outStride0,outStride1,outScalarCount);
    cudaDeviceSynchronize();
    // Warm-up launches (untimed).
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        repeat0<<<gridBlock,threadBlock>>>(in,out,outStride0,outStride1,outScalarCount);
    }
    // Timed launches; note there is no sync before reading the clock, so
    // this measures launch/enqueue throughput, not kernel completion.
    auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        repeat0<<<gridBlock,threadBlock>>>(in,out,outStride0,outStride1,outScalarCount);
    }
    auto end = steady_clock::now();
    auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
    cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
    }}
98b57bb11fa0acc4b280d4c1a2d2678914265f8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Numerically-stable softmax over n strided elements:
//   output[i*stride] = expf((input[i*stride] - max)/temp) / sum(...)
// temp is the softmax temperature; the max is subtracted only to keep expf
// from overflowing (it cancels mathematically).
__device__ void softmax_device_new_api(float *input, int n, float temp, int stride, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    for (i = 0; i < n; ++i) {
        // Fixed: `val` was declared int, truncating the inputs and thereby
        // defeating the max-subtraction that guards expf against overflow.
        float val = input[i*stride];
        largest = (val>largest) ? val : largest;
    }
    for (i = 0; i < n; ++i) {
        float e = expf(input[i*stride] / temp - largest / temp);
        sum += e;
        output[i*stride] = e;
    }
    for (i = 0; i < n; ++i) {
        output[i*stride] /= sum;
    }
}
// Numerically-stable softmax over n contiguous elements (stride-1 variant of
// softmax_device_new_api). temp is the softmax temperature.
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    for(i = 0; i < n; ++i){
        // Fixed: `val` was declared int, truncating the inputs when locating
        // the maximum (the max is subtracted purely for numerical stability,
        // so exp() could overflow for large inputs).
        float val = input[i];
        largest = (val>largest) ? val : largest;
    }
    for(i = 0; i < n; ++i){
        float e = exp(input[i]/temp - largest/temp);
        sum += e;
        output[i] = e;
    }
    for(i = 0; i < n; ++i){
        output[i] /= sum;
    }
}
// One thread per (spatial position, group, batch) triple: applies
// softmax_device_new_api to that group's channel slice. group_size[g] /
// group_offset[g] describe the softmax-tree partition of the channel
// dimension; `stride` is the per-batch element stride.
__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= spatial*batch*groups) return;  // guard the ragged grid tail
    // Decompose the flat id into (s, g, b).
    int s = id % spatial;
    id = id / spatial;
    int g = id % groups;
    int b = id / groups;
    int goff = group_offset[g] * spatial;  // channel offset of this group
    int boff = b*stride;                   // offset of this batch item
    softmax_device_new_api(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
} | 98b57bb11fa0acc4b280d4c1a2d2678914265f8f.cu | #include "includes.h"
// Numerically stable softmax over n elements read/written with the given stride:
//   output[i] = exp(input[i]/temp - max/temp) / sum_j exp(input[j]/temp - max/temp)
// input/output may alias distinct strided views; temp is the softmax temperature.
__device__ void softmax_device_new_api(float *input, int n, float temp, int stride, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    // Pass 1: find the maximum for the stability shift.
    // FIX: was `int val = input[i*stride];` — truncating the float to int made the
    // computed maximum wrong and weakened overflow protection in expf below.
    for (i = 0; i < n; ++i) {
        float val = input[i*stride];
        largest = (val>largest) ? val : largest;
    }
    // Pass 2: exponentiate shifted values and accumulate the normalizer.
    for (i = 0; i < n; ++i) {
        float e = expf(input[i*stride] / temp - largest / temp);
        sum += e;
        output[i*stride] = e;
    }
    // Pass 3: normalize in place.
    for (i = 0; i < n; ++i) {
        output[i*stride] /= sum;
    }
}
// Contiguous (stride-1) variant of the stable softmax: writes softmax(input/temp)
// into output. Kept separate from softmax_device_new_api for legacy callers.
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    // FIX: was `int val = input[i];` — float truncated to int corrupted the max.
    for(i = 0; i < n; ++i){
        float val = input[i];
        largest = (val>largest) ? val : largest;
    }
    // FIX: use the single-precision expf (was double `exp`), consistent with
    // softmax_device_new_api and avoiding a silent double-precision round trip.
    for(i = 0; i < n; ++i){
        float e = expf(input[i]/temp - largest/temp);
        sum += e;
        output[i] = e;
    }
    for(i = 0; i < n; ++i){
        output[i] /= sum;
    }
}
// Softmax over a class-hierarchy tree: each (spatial position, group, batch)
// triple runs one independent strided softmax over that group's classes.
// One thread per triple; 2-D grid of 1-D blocks covers spatial*batch*groups items.
__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset)
{
    const int tid = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (tid >= spatial*batch*groups) return;
    // Decode tid as (s, g, b): s fastest, then group, then batch item.
    const int s = tid % spatial;
    const int rest = tid / spatial;
    const int g = rest % groups;
    const int b = rest / groups;
    // Element offset of this group's first class within this batch item,
    // at this spatial position.
    const int base = group_offset[g] * spatial + b*stride + s;
    softmax_device_new_api(input + base, group_size[g], temp, spatial, output + base);
}
b9e69c125327f5b618203a416fb11b692c706c78.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "multiply.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate (blockDim.x, blockDim.y) launch configurations to sweep.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate (XSIZE, YSIZE) problem sizes to sweep.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: for each problem size x block shape, launch
// the `multiply` kernel 1000 times and print [elapsed_us,(BX,BY),(X,Y)].
// NOTE(review): argv[1] (number of matrix sizes to test) is read unchecked —
// running with no argument dereferences a null argv entry.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *left = NULL;
// NOTE(review): hipMalloc takes a byte count; XSIZE*YSIZE bytes is 4x too small
// if `multiply` indexes these as XSIZE*YSIZE floats — verify the kernel's access
// pattern. Allocations are also never freed across loop iterations (leak).
hipMalloc(&left, XSIZE*YSIZE);
float *right = NULL;
hipMalloc(&right, XSIZE*YSIZE);
float *res = NULL;
hipMalloc(&res, XSIZE*YSIZE);
int dim = 2;
// Round the problem size up so the grid tiles it exactly.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces context initialization; then one untimed launch.
hipFree(0);hipLaunchKernelGGL((
multiply), dim3(gridBlock),dim3(threadBlock), 0, 0, left,right,res,dim);
hipDeviceSynchronize();
// 10 warm-up launches (not synchronized individually).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
multiply), dim3(gridBlock),dim3(threadBlock), 0, 0, left,right,res,dim);
}
auto start = steady_clock::now();
// NOTE(review): launches are asynchronous and there is no synchronize before
// `end`, so this times enqueue cost plus whatever execution overlaps the loop,
// not full kernel completion.
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
multiply), dim3(gridBlock),dim3(threadBlock), 0, 0, left,right,res,dim);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b9e69c125327f5b618203a416fb11b692c706c78.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "multiply.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate (blockDim.x, blockDim.y) launch configurations to sweep.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate (XSIZE, YSIZE) problem sizes to sweep.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: for each problem size x block shape, launch
// the `multiply` kernel 1000 times and print [elapsed_us,(BX,BY),(X,Y)].
// NOTE(review): argv[1] (number of matrix sizes to test) is read unchecked —
// running with no argument dereferences a null argv entry.
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *left = NULL;
// NOTE(review): cudaMalloc takes a byte count; XSIZE*YSIZE bytes is 4x too small
// if `multiply` indexes these as XSIZE*YSIZE floats — verify the kernel's access
// pattern. Allocations are also never freed across loop iterations (leak).
cudaMalloc(&left, XSIZE*YSIZE);
float *right = NULL;
cudaMalloc(&right, XSIZE*YSIZE);
float *res = NULL;
cudaMalloc(&res, XSIZE*YSIZE);
int dim = 2;
// Round the problem size up so the grid tiles it exactly.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// cudaFree(0) forces context initialization; then one untimed launch.
cudaFree(0);
multiply<<<gridBlock,threadBlock>>>(left,right,res,dim);
cudaDeviceSynchronize();
// 10 warm-up launches (not synchronized individually).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
multiply<<<gridBlock,threadBlock>>>(left,right,res,dim);
}
auto start = steady_clock::now();
// NOTE(review): launches are asynchronous and there is no synchronize before
// `end`, so this times enqueue cost plus whatever execution overlaps the loop,
// not full kernel completion.
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
multiply<<<gridBlock,threadBlock>>>(left,right,res,dim);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a822c7cabc7a00392dc319a9a08830218b4f69ae.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdexcept>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/remove.h>
#include <thrust/sequence.h>
#include <thrust/gather.h>
#include <thrust/count.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../util.h"
// takes scattered pointers to custring_view objects and
// initializes a new NVStringsImpl
// Deep-copies `count` device-side custring_view pointers (possibly scattered or
// aliased) into pImpl's own contiguous memory: (1) measure per-string sizes,
// (2) allocate one buffer, (3) exclusive-scan sizes into offsets, (4) clone
// each string into its slot. Null entries stay null.
void NVStrings_init_from_custrings( NVStringsImpl* pImpl, custring_view_array d_strings, unsigned int count )
{
auto execpol = rmm::exec_policy(0);
// get individual sizes
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_sizes[idx] = ALIGN_SIZE(dstr->alloc_size());
});
// create output object
char* d_buffer = pImpl->createMemoryFor(d_sizes);
// Null buffer means there is nothing to copy (all strings null/empty).
if( d_buffer==0 )
return; // this is valid
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// finally, copy the strings
custring_view_array d_results = pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = custring_view::create_from(buffer,*dstr);
});
//
}
// create a new instance containing only the strings at the specified positions
// position values can be in any order and can even be repeated
// Gathers the strings at the given `elements` index positions into a new
// NVStrings instance. Positions may repeat and appear in any order; throws
// std::out_of_range if any position falls outside [0, size()).
// `bdevmem` says whether `pos` already lives in device memory.
NVStrings* NVStrings::gather( const int* pos, unsigned int elements, bool bdevmem )
{
unsigned int count = size();
if( count==0 || elements==0 || pos==0 )
return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
const int* d_pos = pos;
if( !bdevmem )
{ // copy indexes to device memory
d_pos = const_cast<const int*>(device_alloc<int>(elements,0));
CUDA_TRY(hipMemcpyAsync((void*)d_pos,pos,elements*sizeof(int),hipMemcpyHostToDevice))
}
// create working memory
rmm::device_vector<custring_view*> results(elements,nullptr);
auto d_results = results.data().get();
// flags[idx] is set when pos[idx] is out of range.
rmm::device_vector<bool> flags(elements,false);
auto d_flags = flags.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
// do the gather
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elements,
[d_strings, d_pos, count, d_results, d_flags] __device__(unsigned int idx){
int pos = d_pos[idx];
// NOTE(review): signed/unsigned comparison — pos (int) vs count (unsigned).
if( (pos < 0) || (pos >= count) )
d_flags[idx] = true;
else
d_results[idx] = d_strings[pos];
});
// check for invalid position values
if( thrust::count(execpol->on(0), flags.begin(), flags.end(), true) )
{
// Free the temporary index copy before throwing.
if( !bdevmem )
RMM_FREE((void*)d_pos,0);
throw std::out_of_range("gather position value out of range");
}
// build resulting instance
NVStrings* rtn = new NVStrings(elements);
NVStrings_init_from_custrings(rtn->pImpl, d_results, elements);
if( !bdevmem )
RMM_FREE((void*)d_pos,0);
return rtn;
}
// create a new instance containing only the strings where the corresponding mask value is true
// Gathers the strings whose mask entry is true into a new instance.
// `mask` must have size() entries; builds an index list with copy_if and
// delegates to gather(pos, elements, true).
NVStrings* NVStrings::gather( const bool* mask, bool bdevmem )
{
size_t count = size();
if( count==0 || mask==nullptr )
return new NVStrings(0);
// copy mask array to device memory if necessary
auto execpol = rmm::exec_policy(0);
const bool* d_mask = mask;
if( !bdevmem )
{
d_mask = const_cast<const bool*>(device_alloc<bool>(count,0));
CUDA_TRY(hipMemcpyAsync((void*)d_mask,mask,count*sizeof(mask[0]),hipMemcpyHostToDevice,0))
}
// create list of index positions from the mask array
rmm::device_vector<int> indexes(count);
auto d_indexes = indexes.data().get();
// NOTE(review): count (size_t) is narrowed to int for the counting iterator.
auto d_indexes_end = thrust::copy_if(execpol->on(0), thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(count),
d_indexes, [d_mask] __device__ (int idx) { return d_mask[idx]; });
// done with the mask
if( !bdevmem )
RMM_FREE((void*)d_mask,0);
count = d_indexes_end - d_indexes;
return gather( d_indexes, count, true );
}
//
// s1 = ['a','b,'c','d']
// s2 = ['e','f']
// pos = [1,3] -- must be the same length as s2
// s3 = s1.scatter(s2,pos)
// ['a','e','c','f']
//
// Returns a new instance equal to *this with strs' strings scattered into the
// positions listed in `pos` (pos must have strs.size() entries); see the
// example above. Throws std::invalid_argument on a null position array.
NVStrings* NVStrings::scatter( NVStrings& strs, const int* pos, bool bdevmem )
{
unsigned int count = size();
unsigned int elements = strs.size();
if( pos==0 )
throw std::invalid_argument("position parameter cannot be null");
auto execpol = rmm::exec_policy(0);
const int* d_pos = pos;
if( !bdevmem )
{ // copy indexes to device memory
d_pos = const_cast<const int*>(device_alloc<int>(elements,0));
CUDA_TRY(hipMemcpyAsync((void*)d_pos,pos,elements*sizeof(int),hipMemcpyHostToDevice))
}
// The most efficient method here is to build pointer array
// applying the parameters to the specified positions and
// then build a new instance from the resulting pointers.
rmm::device_vector<custring_view*> results(count,nullptr);
auto d_results = results.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
custring_view_array d_new_strings = strs.pImpl->getStringsPtr();
// Start from this instance's pointers, then overwrite the listed positions.
thrust::copy( execpol->on(0), d_strings, d_strings+count, d_results );
thrust::scatter( execpol->on(0), d_new_strings, d_new_strings+elements, d_pos, d_results );
// build resulting instance
NVStrings* rtn = new NVStrings(count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, count);
if( !bdevmem )
RMM_FREE((void*)d_pos,0);
return rtn;
}
//
// s1 = ['a','b,'c','d']
// pos = [1,3]
// s3 = s1.scatter('e',pos,2)
// ['a','e','c','e']
//
// Returns a new instance equal to *this with the single host string `str`
// scattered into each of the `elements` positions in `pos`. Out-of-range
// positions are silently ignored (guarded in the device lambda below).
NVStrings* NVStrings::scatter( const char* str, const int* pos, unsigned int elements, bool bdevmem )
{
unsigned int count = size();
if( pos==nullptr )
throw std::invalid_argument("parameter cannot be null");
auto execpol = rmm::exec_policy(0);
// copy string to device
custring_view* d_repl = custring_from_host(str);
const int* d_pos = pos;
if( !bdevmem )
{ // copy indexes to device memory
d_pos = const_cast<const int*>(device_alloc<int>(elements,0));
CUDA_TRY(hipMemcpyAsync((void*)d_pos,pos,elements*sizeof(int),hipMemcpyHostToDevice))
}
// create result output array
rmm::device_vector<custring_view*> results(count,nullptr);
auto d_results = results.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
// Start from this instance's pointers, then overwrite the listed positions
// with the (shared) replacement string pointer.
thrust::copy( execpol->on(0), d_strings, d_strings+count, d_results );
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elements,
[d_pos, count, d_repl, d_results] __device__ (unsigned int idx) {
int pos = d_pos[idx];
if( (pos >= 0) && (pos < count) )
d_results[pos] = d_repl;
});
// build resulting instance
NVStrings* rtn = new NVStrings(count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, count);
if( !bdevmem )
RMM_FREE((void*)d_pos,0);
// The replacement string was deep-copied by init; free the device temp.
RMM_FREE((void*)d_repl,0);
return rtn;
}
// Python-like slice: returns the strings in [start, end) taken every `step`
// elements (step may be negative; 0 is treated as 1). Clamps start/end to
// size(), generates the index sequence on device and delegates to gather().
NVStrings* NVStrings::sublist( unsigned int start, unsigned int end, int step )
{
unsigned int count = size();
if( end > count )
end = count;
if( start > count )
start = count;
if( step==0 )
step = 1;
if( start == end )
return new NVStrings(0);
// Empty result when the step direction cannot reach end from start.
if( ((step > 0) && (start > end)) ||
((step < 0) && (start < end)) )
return new NVStrings(0);
unsigned int elems = (unsigned int)std::abs((int)(end-start));
unsigned int abs_step = (unsigned int)std::abs(step);
elems = (elems + abs_step -1)/abs_step; // adjust for steps
auto execpol = rmm::exec_policy(0);
rmm::device_vector<int> indexes(elems);
thrust::sequence(execpol->on(0),indexes.begin(),indexes.end(),(int)start,step);
return gather(indexes.data().get(),elems,true);
}
// remove the specified strings and return a new instance
// Returns a new instance with the strings at the given positions removed.
// Pipeline: copy positions to device, sort descending, dedupe, mark removed
// slots -1 in an identity index array, compact it, then gather the survivors.
// Returns nullptr (with an stderr message) if more unique positions than
// strings are supplied.
NVStrings* NVStrings::remove_strings( const int* pos, unsigned int elements, bool bdevmem )
{
unsigned int count = size();
if( count==0 )
return new NVStrings(0);
// No positions means nothing to remove: return a plain copy.
if( elements==0 || pos==0 )
return copy();
auto execpol = rmm::exec_policy(0);
int* dpos = device_alloc<int>(elements,0);
if( bdevmem )
CUDA_TRY( hipMemcpyAsync((void*)dpos,pos,elements*sizeof(unsigned int),hipMemcpyDeviceToDevice))
else
CUDA_TRY( hipMemcpyAsync((void*)dpos,pos,elements*sizeof(unsigned int),hipMemcpyHostToDevice))
// sort the position values
thrust::sort(execpol->on(0),dpos,dpos+elements,thrust::greater<int>());
// also should remove duplicates
int* nend = thrust::unique(execpol->on(0),dpos,dpos+elements,thrust::equal_to<int>());
elements = (unsigned int)(nend - dpos);
if( count < elements )
{
RMM_FREE(dpos,0);
fprintf(stderr,"remove_strings: more positions (%u) specified than the number of strings (%u)\n",elements,count);
return nullptr;
}
// build array to hold positions which are not to be removed by marking deleted positions with -1
rmm::device_vector<int> dnpos(count);
thrust::sequence(execpol->on(0),dnpos.begin(),dnpos.end());
int* d_npos = dnpos.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elements,
[dpos, d_npos, count] __device__ (unsigned int idx) {
int pos = dpos[idx];
// Out-of-range positions are silently ignored.
if( (pos >= 0) && (pos < count) )
d_npos[pos] = -1;
});
// now remove the positions marked with -1
int* dend = thrust::remove_if(execpol->on(0),d_npos,d_npos+count,[] __device__ (int val) { return val < 0; });
unsigned int new_count = (unsigned int)(dend-d_npos);
// gather string pointers based on indexes in dnpos (new-positions)
custring_view** d_strings = pImpl->getStringsPtr();
rmm::device_vector<custring_view*> results(new_count,nullptr);
custring_view_array d_results = results.data().get();
thrust::gather(execpol->on(0),d_npos,d_npos+new_count,d_strings,d_results);
// create output object from results pointers
NVStrings* rtn = new NVStrings(new_count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, new_count);
RMM_FREE(dpos,0);
return rtn;
}
// this sorts the strings into a new instance;
// a sorted strings list can improve performance by reducing divergence
// Returns a new, sorted instance. `stype` is a bitmask selecting sort keys:
// length first (when set), then lexicographic name as tie-breaker (when set).
// `ascending` flips the comparison; `nullfirst` chooses where nulls collate.
NVStrings* NVStrings::sort( sorttype stype, bool ascending, bool nullfirst )
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
// copy the pointers so we can sort them
rmm::device_vector<custring_view*> results(count,nullptr);
custring_view_array d_results = results.data().get();
thrust::copy( execpol->on(0), d_strings, d_strings+count, d_results );
thrust::sort(execpol->on(0), d_results, d_results+count,
[stype, ascending, nullfirst] __device__( custring_view*& lhs, custring_view*& rhs ) {
if( lhs==0 || rhs==0 )
return (nullfirst ? rhs!=0 : lhs!=0); // null < non-null
// allow sorting by name and length
int diff = 0;
if( stype & NVStrings::length )
diff = lhs->size() - rhs->size();
if( diff==0 && (stype & NVStrings::name) )
diff = lhs->compare(*rhs);
return (ascending ? (diff < 0) : (diff > 0));
});
// build new instance from the sorted pointers
NVStrings* rtn = new NVStrings(count);
NVStrings_init_from_custrings( rtn->pImpl, d_results, count );
return rtn;
}
// just provide the index order and leave the strings intact
// Argsort: fills `indexes` (size() entries) with the ordering that would sort
// the strings, leaving the strings themselves untouched. Same key/direction/
// null semantics as sort(). `todevice` says whether `indexes` is device memory;
// otherwise a device temp is used and copied back to the host. Returns 0.
int NVStrings::order( sorttype stype, bool ascending, unsigned int* indexes, bool nullfirst, bool todevice )
{
unsigned int count = size();
unsigned int* d_indexes = indexes;
auto execpol = rmm::exec_policy(0);
if( !todevice )
d_indexes = device_alloc<unsigned int>(count,0);
// Start from the identity permutation 0..count-1.
thrust::sequence(execpol->on(0), d_indexes, d_indexes+count);
//
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::sort(execpol->on(0), d_indexes, d_indexes+count,
[d_strings, stype, ascending, nullfirst] __device__( unsigned int& lidx, unsigned int& ridx ) {
custring_view* lhs = d_strings[lidx];
custring_view* rhs = d_strings[ridx];
if( lhs==0 || rhs==0 )
return (nullfirst ? rhs!=0 : lhs!=0);
// allow sorting by name and length
int diff = 0;
if( stype & NVStrings::length )
diff = lhs->size() - rhs->size();
if( diff==0 && (stype & NVStrings::name) )
diff = lhs->compare(*rhs);
return (ascending ? (diff < 0) : (diff > 0));
});
//
if( !todevice )
{
CUDA_TRY(hipMemcpyAsync(indexes,d_indexes,count*sizeof(unsigned int),hipMemcpyDeviceToHost))
RMM_FREE(d_indexes,0);
}
return 0;
}
| a822c7cabc7a00392dc319a9a08830218b4f69ae.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdexcept>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/remove.h>
#include <thrust/sequence.h>
#include <thrust/gather.h>
#include <thrust/count.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../util.h"
// takes scattered pointers to custring_view objects and
// initializes a new NVStringsImpl
// Deep-copies `count` device-side custring_view pointers (possibly scattered or
// aliased) into pImpl's own contiguous memory: (1) measure per-string sizes,
// (2) allocate one buffer, (3) exclusive-scan sizes into offsets, (4) clone
// each string into its slot. Null entries stay null.
void NVStrings_init_from_custrings( NVStringsImpl* pImpl, custring_view_array d_strings, unsigned int count )
{
auto execpol = rmm::exec_policy(0);
// get individual sizes
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_sizes[idx] = ALIGN_SIZE(dstr->alloc_size());
});
// create output object
char* d_buffer = pImpl->createMemoryFor(d_sizes);
// Null buffer means there is nothing to copy (all strings null/empty).
if( d_buffer==0 )
return; // this is valid
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin());
// finally, copy the strings
custring_view_array d_results = pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
d_results[idx] = custring_view::create_from(buffer,*dstr);
});
//
}
// create a new instance containing only the strings at the specified positions
// position values can be in any order and can even be repeated
// Gathers the strings at the given `elements` index positions into a new
// NVStrings instance. Positions may repeat and appear in any order; throws
// std::out_of_range if any position falls outside [0, size()).
// `bdevmem` says whether `pos` already lives in device memory.
NVStrings* NVStrings::gather( const int* pos, unsigned int elements, bool bdevmem )
{
unsigned int count = size();
if( count==0 || elements==0 || pos==0 )
return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
const int* d_pos = pos;
if( !bdevmem )
{ // copy indexes to device memory
d_pos = const_cast<const int*>(device_alloc<int>(elements,0));
CUDA_TRY(cudaMemcpyAsync((void*)d_pos,pos,elements*sizeof(int),cudaMemcpyHostToDevice))
}
// create working memory
rmm::device_vector<custring_view*> results(elements,nullptr);
auto d_results = results.data().get();
// flags[idx] is set when pos[idx] is out of range.
rmm::device_vector<bool> flags(elements,false);
auto d_flags = flags.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
// do the gather
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elements,
[d_strings, d_pos, count, d_results, d_flags] __device__(unsigned int idx){
int pos = d_pos[idx];
// NOTE(review): signed/unsigned comparison — pos (int) vs count (unsigned).
if( (pos < 0) || (pos >= count) )
d_flags[idx] = true;
else
d_results[idx] = d_strings[pos];
});
// check for invalid position values
if( thrust::count(execpol->on(0), flags.begin(), flags.end(), true) )
{
// Free the temporary index copy before throwing.
if( !bdevmem )
RMM_FREE((void*)d_pos,0);
throw std::out_of_range("gather position value out of range");
}
// build resulting instance
NVStrings* rtn = new NVStrings(elements);
NVStrings_init_from_custrings(rtn->pImpl, d_results, elements);
if( !bdevmem )
RMM_FREE((void*)d_pos,0);
return rtn;
}
// create a new instance containing only the strings where the corresponding mask value is true
// Gathers the strings whose mask entry is true into a new instance.
// `mask` must have size() entries; builds an index list with copy_if and
// delegates to gather(pos, elements, true).
NVStrings* NVStrings::gather( const bool* mask, bool bdevmem )
{
size_t count = size();
if( count==0 || mask==nullptr )
return new NVStrings(0);
// copy mask array to device memory if necessary
auto execpol = rmm::exec_policy(0);
const bool* d_mask = mask;
if( !bdevmem )
{
d_mask = const_cast<const bool*>(device_alloc<bool>(count,0));
CUDA_TRY(cudaMemcpyAsync((void*)d_mask,mask,count*sizeof(mask[0]),cudaMemcpyHostToDevice,0))
}
// create list of index positions from the mask array
rmm::device_vector<int> indexes(count);
auto d_indexes = indexes.data().get();
// NOTE(review): count (size_t) is narrowed to int for the counting iterator.
auto d_indexes_end = thrust::copy_if(execpol->on(0), thrust::make_counting_iterator<int>(0), thrust::make_counting_iterator<int>(count),
d_indexes, [d_mask] __device__ (int idx) { return d_mask[idx]; });
// done with the mask
if( !bdevmem )
RMM_FREE((void*)d_mask,0);
count = d_indexes_end - d_indexes;
return gather( d_indexes, count, true );
}
//
// s1 = ['a','b,'c','d']
// s2 = ['e','f']
// pos = [1,3] -- must be the same length as s2
// s3 = s1.scatter(s2,pos)
// ['a','e','c','f']
//
// Returns a new instance equal to *this with strs' strings scattered into the
// positions listed in `pos` (pos must have strs.size() entries); see the
// example above. Throws std::invalid_argument on a null position array.
NVStrings* NVStrings::scatter( NVStrings& strs, const int* pos, bool bdevmem )
{
unsigned int count = size();
unsigned int elements = strs.size();
if( pos==0 )
throw std::invalid_argument("position parameter cannot be null");
auto execpol = rmm::exec_policy(0);
const int* d_pos = pos;
if( !bdevmem )
{ // copy indexes to device memory
d_pos = const_cast<const int*>(device_alloc<int>(elements,0));
CUDA_TRY(cudaMemcpyAsync((void*)d_pos,pos,elements*sizeof(int),cudaMemcpyHostToDevice))
}
// The most efficient method here is to build pointer array
// applying the parameters to the specified positions and
// then build a new instance from the resulting pointers.
rmm::device_vector<custring_view*> results(count,nullptr);
auto d_results = results.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
custring_view_array d_new_strings = strs.pImpl->getStringsPtr();
// Start from this instance's pointers, then overwrite the listed positions.
thrust::copy( execpol->on(0), d_strings, d_strings+count, d_results );
thrust::scatter( execpol->on(0), d_new_strings, d_new_strings+elements, d_pos, d_results );
// build resulting instance
NVStrings* rtn = new NVStrings(count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, count);
if( !bdevmem )
RMM_FREE((void*)d_pos,0);
return rtn;
}
//
// s1 = ['a','b,'c','d']
// pos = [1,3]
// s3 = s1.scatter('e',pos,2)
// ['a','e','c','e']
//
// Returns a new instance equal to *this with the single host string `str`
// scattered into each of the `elements` positions in `pos`. Out-of-range
// positions are silently ignored (guarded in the device lambda below).
NVStrings* NVStrings::scatter( const char* str, const int* pos, unsigned int elements, bool bdevmem )
{
unsigned int count = size();
if( pos==nullptr )
throw std::invalid_argument("parameter cannot be null");
auto execpol = rmm::exec_policy(0);
// copy string to device
custring_view* d_repl = custring_from_host(str);
const int* d_pos = pos;
if( !bdevmem )
{ // copy indexes to device memory
d_pos = const_cast<const int*>(device_alloc<int>(elements,0));
CUDA_TRY(cudaMemcpyAsync((void*)d_pos,pos,elements*sizeof(int),cudaMemcpyHostToDevice))
}
// create result output array
rmm::device_vector<custring_view*> results(count,nullptr);
auto d_results = results.data().get();
custring_view_array d_strings = pImpl->getStringsPtr();
// Start from this instance's pointers, then overwrite the listed positions
// with the (shared) replacement string pointer.
thrust::copy( execpol->on(0), d_strings, d_strings+count, d_results );
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elements,
[d_pos, count, d_repl, d_results] __device__ (unsigned int idx) {
int pos = d_pos[idx];
if( (pos >= 0) && (pos < count) )
d_results[pos] = d_repl;
});
// build resulting instance
NVStrings* rtn = new NVStrings(count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, count);
if( !bdevmem )
RMM_FREE((void*)d_pos,0);
// The replacement string was deep-copied by init; free the device temp.
RMM_FREE((void*)d_repl,0);
return rtn;
}
// Python-like slice: returns the strings in [start, end) taken every `step`
// elements (step may be negative; 0 is treated as 1). Clamps start/end to
// size(), generates the index sequence on device and delegates to gather().
NVStrings* NVStrings::sublist( unsigned int start, unsigned int end, int step )
{
unsigned int count = size();
if( end > count )
end = count;
if( start > count )
start = count;
if( step==0 )
step = 1;
if( start == end )
return new NVStrings(0);
// Empty result when the step direction cannot reach end from start.
if( ((step > 0) && (start > end)) ||
((step < 0) && (start < end)) )
return new NVStrings(0);
unsigned int elems = (unsigned int)std::abs((int)(end-start));
unsigned int abs_step = (unsigned int)std::abs(step);
elems = (elems + abs_step -1)/abs_step; // adjust for steps
auto execpol = rmm::exec_policy(0);
rmm::device_vector<int> indexes(elems);
thrust::sequence(execpol->on(0),indexes.begin(),indexes.end(),(int)start,step);
return gather(indexes.data().get(),elems,true);
}
// remove the specified strings and return a new instance
// Returns a new instance with the strings at the given positions removed.
// Pipeline: copy positions to device, sort descending, dedupe, mark removed
// slots -1 in an identity index array, compact it, then gather the survivors.
// Returns nullptr (with an stderr message) if more unique positions than
// strings are supplied.
NVStrings* NVStrings::remove_strings( const int* pos, unsigned int elements, bool bdevmem )
{
unsigned int count = size();
if( count==0 )
return new NVStrings(0);
// No positions means nothing to remove: return a plain copy.
if( elements==0 || pos==0 )
return copy();
auto execpol = rmm::exec_policy(0);
int* dpos = device_alloc<int>(elements,0);
if( bdevmem )
CUDA_TRY( cudaMemcpyAsync((void*)dpos,pos,elements*sizeof(unsigned int),cudaMemcpyDeviceToDevice))
else
CUDA_TRY( cudaMemcpyAsync((void*)dpos,pos,elements*sizeof(unsigned int),cudaMemcpyHostToDevice))
// sort the position values
thrust::sort(execpol->on(0),dpos,dpos+elements,thrust::greater<int>());
// also should remove duplicates
int* nend = thrust::unique(execpol->on(0),dpos,dpos+elements,thrust::equal_to<int>());
elements = (unsigned int)(nend - dpos);
if( count < elements )
{
RMM_FREE(dpos,0);
fprintf(stderr,"remove_strings: more positions (%u) specified than the number of strings (%u)\n",elements,count);
return nullptr;
}
// build array to hold positions which are not to be removed by marking deleted positions with -1
rmm::device_vector<int> dnpos(count);
thrust::sequence(execpol->on(0),dnpos.begin(),dnpos.end());
int* d_npos = dnpos.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elements,
[dpos, d_npos, count] __device__ (unsigned int idx) {
int pos = dpos[idx];
// Out-of-range positions are silently ignored.
if( (pos >= 0) && (pos < count) )
d_npos[pos] = -1;
});
// now remove the positions marked with -1
int* dend = thrust::remove_if(execpol->on(0),d_npos,d_npos+count,[] __device__ (int val) { return val < 0; });
unsigned int new_count = (unsigned int)(dend-d_npos);
// gather string pointers based on indexes in dnpos (new-positions)
custring_view** d_strings = pImpl->getStringsPtr();
rmm::device_vector<custring_view*> results(new_count,nullptr);
custring_view_array d_results = results.data().get();
thrust::gather(execpol->on(0),d_npos,d_npos+new_count,d_strings,d_results);
// create output object from results pointers
NVStrings* rtn = new NVStrings(new_count);
NVStrings_init_from_custrings(rtn->pImpl, d_results, new_count);
RMM_FREE(dpos,0);
return rtn;
}
// this sorts the strings into a new instance;
// a sorted strings list can improve performance by reducing divergence
// Returns a new, sorted instance. `stype` is a bitmask selecting sort keys:
// length first (when set), then lexicographic name as tie-breaker (when set).
// `ascending` flips the comparison; `nullfirst` chooses where nulls collate.
NVStrings* NVStrings::sort( sorttype stype, bool ascending, bool nullfirst )
{
unsigned int count = size();
custring_view_array d_strings = pImpl->getStringsPtr();
auto execpol = rmm::exec_policy(0);
// copy the pointers so we can sort them
rmm::device_vector<custring_view*> results(count,nullptr);
custring_view_array d_results = results.data().get();
thrust::copy( execpol->on(0), d_strings, d_strings+count, d_results );
thrust::sort(execpol->on(0), d_results, d_results+count,
[stype, ascending, nullfirst] __device__( custring_view*& lhs, custring_view*& rhs ) {
if( lhs==0 || rhs==0 )
return (nullfirst ? rhs!=0 : lhs!=0); // null < non-null
// allow sorting by name and length
int diff = 0;
if( stype & NVStrings::length )
diff = lhs->size() - rhs->size();
if( diff==0 && (stype & NVStrings::name) )
diff = lhs->compare(*rhs);
return (ascending ? (diff < 0) : (diff > 0));
});
// build new instance from the sorted pointers
NVStrings* rtn = new NVStrings(count);
NVStrings_init_from_custrings( rtn->pImpl, d_results, count );
return rtn;
}
// just provide the index order and leave the strings intact
// Argsort: fills `indexes` (size() entries) with the ordering that would sort
// the strings, leaving the strings themselves untouched. Same key/direction/
// null semantics as sort(). `todevice` says whether `indexes` is device memory;
// otherwise a device temp is used and copied back to the host. Returns 0.
int NVStrings::order( sorttype stype, bool ascending, unsigned int* indexes, bool nullfirst, bool todevice )
{
unsigned int count = size();
unsigned int* d_indexes = indexes;
auto execpol = rmm::exec_policy(0);
if( !todevice )
d_indexes = device_alloc<unsigned int>(count,0);
// Start from the identity permutation 0..count-1.
thrust::sequence(execpol->on(0), d_indexes, d_indexes+count);
//
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::sort(execpol->on(0), d_indexes, d_indexes+count,
[d_strings, stype, ascending, nullfirst] __device__( unsigned int& lidx, unsigned int& ridx ) {
custring_view* lhs = d_strings[lidx];
custring_view* rhs = d_strings[ridx];
if( lhs==0 || rhs==0 )
return (nullfirst ? rhs!=0 : lhs!=0);
// allow sorting by name and length
int diff = 0;
if( stype & NVStrings::length )
diff = lhs->size() - rhs->size();
if( diff==0 && (stype & NVStrings::name) )
diff = lhs->compare(*rhs);
return (ascending ? (diff < 0) : (diff > 0));
});
//
if( !todevice )
{
CUDA_TRY(cudaMemcpyAsync(indexes,d_indexes,count*sizeof(unsigned int),cudaMemcpyDeviceToHost))
RMM_FREE(d_indexes,0);
}
return 0;
}
|
aae980fb266d0a55d888a3f7353004a09ff29fdc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* reach_Sequential_GPU.cpp
*
* Created on: 18-April-2015
* Author: amit
*/
#include "core_system/Reachability/GPU_Reach/reach_Sequential_GPU.cuh"
#include "core_system/math/Gimplex/simplex.cuh"
#include "core_system/math/Bulk_LP_Solver/bulk_LP_Solver.h"
#include "boost/timer/timer.hpp"
#include <list>
//Correct implementation for All sizes of Block_LPs
// Splits tot_lp LPs (one objective row per LP in list_obj_funs, all sharing the
// constraint system constraint_matrix * x <= boundValue) into blocks of at most
// no_lps_possible LPs -- the GPU-memory limit computed by the caller -- solves
// each block with the GPU Simplex solver, and concatenates the optima into res
// in the original row order.
//
// constraint_matrix : common LHS of every LP.
// boundValue        : common RHS bounds.
// list_obj_funs     : size1() LPs x size2() objective coefficients.
// number_of_streams : CUDA streams the Simplex solver may use per block.
// no_lps_possible   : maximum LPs that fit in device memory at once.
// res               : output; res[i] is the optimum of LP row i.
void bulk_Solver(math::matrix<double> constraint_matrix,
		std::vector<double> boundValue, math::matrix<float> list_obj_funs,
		unsigned int number_of_streams, unsigned int no_lps_possible,
		std::vector<float> &res) {
	unsigned int tot_lp = list_obj_funs.size1();
	std::cout << "Total LPs " << tot_lp << std::endl;
	unsigned int lp_block_size = no_lps_possible;
	unsigned int number_of_blocks;
	bool equalBlockSize;
	if (tot_lp % lp_block_size == 0) {
		number_of_blocks = tot_lp / lp_block_size;
		equalBlockSize = true;
	} else {
		number_of_blocks = (tot_lp / lp_block_size) + 1; //last block holds the remainder (fewer LPs)
		equalBlockSize = false;
	}
	std::cout << "Total Blocks " << number_of_blocks << std::endl;
	std::list<block_lp> bulk_lps; //sub-divisions of the LP batch
	struct block_lp myLPList;
	myLPList.block_obj_coeff.resize(lp_block_size, list_obj_funs.size2());
	if (equalBlockSize) { //every block has exactly lp_block_size LPs
		for (unsigned int b = 0; b < number_of_blocks; b++) {
			bulk_lps.push_back(myLPList);
		} //list is fixed from here on, so concurrent read-only iteration is safe
		//each OMP thread copies one LP row into every block (disjoint writes)
#pragma omp parallel for
		for (unsigned int lp_number = 0; lp_number < lp_block_size;
				lp_number++) {
			unsigned int index = 0; //block number: selects the slice of list_obj_funs
			for (std::list<block_lp>::iterator it = bulk_lps.begin();
					it != bulk_lps.end(); it++) {
				for (unsigned int i = 0; i < list_obj_funs.size2(); i++) {
					(*it).block_obj_coeff(lp_number, i) = list_obj_funs(
							lp_number + index * lp_block_size, i);
				}
				index++;
			}
		} //end of all LPs
	} else { //unequal: fill the (number_of_blocks - 1) full blocks, then the short last block
		for (unsigned int b = 0; b < (number_of_blocks - 1); b++) {
			bulk_lps.push_back(myLPList);
		} //iterators ready for all full-size blocks (the last block is pushed later)
#pragma omp parallel for
		for (unsigned int lp_number = 0; lp_number < lp_block_size;
				lp_number++) {
			unsigned int index = 0;
			for (std::list<block_lp>::iterator it = bulk_lps.begin();
					it != bulk_lps.end(); it++) { //walks only the full blocks
				for (unsigned int i = 0; i < list_obj_funs.size2(); i++) {
					(*it).block_obj_coeff(lp_number, i) = list_obj_funs(
							lp_number + index * lp_block_size, i);
				}
				index++;
			}
		} //end of all LPs
		//last block: the remaining tot_lp % lp_block_size LPs skipped above
		struct block_lp myLPList2;
		unsigned int last_block_size = tot_lp % lp_block_size;
		myLPList2.block_obj_coeff.resize(last_block_size,
				list_obj_funs.size2());
		unsigned int first_lp = (number_of_blocks - 1) * lp_block_size; //first LP of the last block
#pragma omp parallel for
		for (unsigned int lp_left = first_lp; lp_left < tot_lp; lp_left++) {
			//fix: row index is now per-iteration (private); it was a single
			//shared variable before, a data race under the parallel for
			unsigned int index = lp_left - first_lp;
			for (unsigned int i = 0; i < list_obj_funs.size2(); i++) {
				myLPList2.block_obj_coeff(index, i) = list_obj_funs(lp_left, i);
			}
		}
		bulk_lps.push_back(myLPList2); //append the short last block
	} //end of unequal_block_size
	//solve every block on the GPU and keep its result vector
	std::list<block_lp_result> bulk_result;
	struct block_lp_result eachBlock;
	for (std::list<block_lp>::iterator it = bulk_lps.begin();
			it != bulk_lps.end(); it++) {
		unsigned int each_bulk_size = (*it).block_obj_coeff.size1();
		eachBlock.results.resize(each_bulk_size);
		Simplex lp_problem(each_bulk_size); //GPU computation
		lp_problem.setConstratint(constraint_matrix, boundValue);
		lp_problem.ComputeLP((*it).block_obj_coeff, number_of_streams); //actual GPU computation
		eachBlock.results = lp_problem.getResultAll();
		bulk_result.push_back(eachBlock);
	}
	//flatten per-block results back into a single vector in LP order
	res.resize(tot_lp);
	unsigned int index_res = 0;
	for (std::list<block_lp_result>::iterator it = bulk_result.begin();
			it != bulk_result.end(); it++) {
		unsigned int block_result_size = (*it).results.size();
		for (unsigned int i = 0; i < block_result_size; i++) {
			res[index_res] = (*it).results[i];
			index_res++;
		}
	}
}
// Block-wise GPU solving scheme identical to bulk_Solver(), but each block is
// solved with the UnitBall-overloaded Simplex interface, which returns both the
// LP optimum (result_X) and the unit-ball support-function value
// (result_UnitBall) for every objective row -- saving a second kernel pass.
//
// UnitBall          : tag forwarded to the overloaded Simplex methods.
// constraint_matrix : common LHS of every LP.
// boundValue        : common RHS bounds.
// list_obj_funs     : size1() LPs x size2() objective coefficients.
// number_of_streams : CUDA streams the Simplex solver may use per block.
// no_lps_possible   : maximum LPs that fit in device memory at once.
// result_X          : output; optimum of LP row i.
// result_UnitBall   : output; unit-ball support value for LP row i.
void bulk_Solver_With_UnitBall(int UnitBall,
		math::matrix<double> constraint_matrix, std::vector<double> boundValue,
		math::matrix<float> list_obj_funs, unsigned int number_of_streams,
		unsigned int no_lps_possible, std::vector<float> &result_X,
		std::vector<float> &result_UnitBall) {
	unsigned int tot_lp = list_obj_funs.size1();
	std::cout << "Total LPs " << tot_lp << std::endl;
	unsigned int lp_block_size = no_lps_possible;
	unsigned int number_of_blocks;
	bool equalBlockSize;
	if (tot_lp % lp_block_size == 0) {
		number_of_blocks = tot_lp / lp_block_size;
		equalBlockSize = true;
	} else {
		number_of_blocks = (tot_lp / lp_block_size) + 1; //last block holds the remainder (fewer LPs)
		equalBlockSize = false;
	}
	std::cout << "Total Blocks " << number_of_blocks << std::endl;
	std::list<block_lp> bulk_lps; //sub-divisions of the LP batch
	struct block_lp myLPList;
	myLPList.block_obj_coeff.resize(lp_block_size, list_obj_funs.size2());
	if (equalBlockSize) { //every block has exactly lp_block_size LPs
		for (unsigned int b = 0; b < number_of_blocks; b++) {
			bulk_lps.push_back(myLPList);
		} //list is fixed from here on, so concurrent read-only iteration is safe
		//each OMP thread copies one LP row into every block (disjoint writes)
#pragma omp parallel for
		for (unsigned int lp_number = 0; lp_number < lp_block_size;
				lp_number++) {
			unsigned int index = 0; //block number: selects the slice of list_obj_funs
			for (std::list<block_lp>::iterator it = bulk_lps.begin();
					it != bulk_lps.end(); it++) {
				for (unsigned int i = 0; i < list_obj_funs.size2(); i++) {
					(*it).block_obj_coeff(lp_number, i) = list_obj_funs(
							lp_number + index * lp_block_size, i);
				}
				index++;
			}
		} //end of all LPs
	} else { //unequal: fill the (number_of_blocks - 1) full blocks, then the short last block
		for (unsigned int b = 0; b < (number_of_blocks - 1); b++) {
			bulk_lps.push_back(myLPList);
		} //iterators ready for all full-size blocks (the last block is pushed later)
#pragma omp parallel for
		for (unsigned int lp_number = 0; lp_number < lp_block_size;
				lp_number++) {
			unsigned int index = 0;
			for (std::list<block_lp>::iterator it = bulk_lps.begin();
					it != bulk_lps.end(); it++) { //walks only the full blocks
				for (unsigned int i = 0; i < list_obj_funs.size2(); i++) {
					(*it).block_obj_coeff(lp_number, i) = list_obj_funs(
							lp_number + index * lp_block_size, i);
				}
				index++;
			}
		} //end of all LPs
		//last block: the remaining tot_lp % lp_block_size LPs skipped above
		struct block_lp myLPList2;
		unsigned int last_block_size = tot_lp % lp_block_size;
		myLPList2.block_obj_coeff.resize(last_block_size,
				list_obj_funs.size2());
		unsigned int first_lp = (number_of_blocks - 1) * lp_block_size; //first LP of the last block
#pragma omp parallel for
		for (unsigned int lp_left = first_lp; lp_left < tot_lp; lp_left++) {
			//fix: row index is now per-iteration (private); it was a single
			//shared variable before, a data race under the parallel for
			unsigned int index = lp_left - first_lp;
			for (unsigned int i = 0; i < list_obj_funs.size2(); i++) {
				myLPList2.block_obj_coeff(index, i) = list_obj_funs(lp_left, i);
			}
		}
		bulk_lps.push_back(myLPList2); //append the short last block
	} //end of unequal_block_size
	//solve every block on the GPU, collecting both result vectors
	std::list<block_lp_result> bulk_result;
	struct block_lp_result eachBlock;
	for (std::list<block_lp>::iterator it = bulk_lps.begin();
			it != bulk_lps.end(); it++) {
		unsigned int each_bulk_size = (*it).block_obj_coeff.size1();
		eachBlock.results.resize(each_bulk_size);
		Simplex Solver(UnitBall, each_bulk_size); //GPU computation
		Solver.setConstratint(constraint_matrix, boundValue, UnitBall);
		Solver.ComputeLP((*it).block_obj_coeff, UnitBall, number_of_streams); //actual GPU computation
		Solver.getResult_X(eachBlock.results);
		Solver.getResult_UnitBall(eachBlock.results_UnitBall);
		bulk_result.push_back(eachBlock);
	}
	//flatten per-block results back into two vectors in LP order
	result_X.resize(tot_lp);
	result_UnitBall.resize(tot_lp);
	unsigned int index_res = 0;
	for (std::list<block_lp_result>::iterator it = bulk_result.begin();
			it != bulk_result.end(); it++) {
		unsigned int block_result_size = (*it).results.size();
		for (unsigned int i = 0; i < block_result_size; i++) {
			result_X[index_res] = (*it).results[i];
			result_UnitBall[index_res] = (*it).results_UnitBall[i];
			index_res++;
		}
	}
}
/*
 * After optimising the duplicate Support Function computation
 */
// Support-function based reachability for one continuous mode, with the LP
// solving offloaded to the GPU (Gimplex) or run on the CPU (GLPK/Gurobi) for
// profiling comparison. Fills `reachableRegion` with a template polyhedron
// whose (direction, iteration) entries are the computed support values,
// truncated by the invariant bounds when an invariant exists.
//
// boundedTotIteration    : iteration bound derived from the invariant.
// SystemDynamics         : flow dynamics; U is the input set, C the constant term.
// Initial                : initial set X0 provider (unused directly here; X0 is
//                          read from ReachParameters).
// ReachParameters        : template directions, time step, alfa/beta bloating terms.
// invariant/isInvariantExist : location invariant and whether to apply it.
// lp_solver_type_choosen : kept for interface compatibility (not read here).
// number_of_streams      : CUDA streams for the GPU solver.
// Solver_GLPK_Gurobi_GPU : 1 = GLPK, 2 = Gurobi, 3 = GPU Gimplex.
// reachableRegion        : output template polyhedron.
void reachabilitySequential_GPU(unsigned int boundedTotIteration, Dynamics& SystemDynamics,
		supportFunctionProvider::ptr Initial,
		ReachabilityParameters& ReachParameters, polytope::ptr invariant,
		bool isInvariantExist, int lp_solver_type_choosen,
		unsigned int number_of_streams, int Solver_GLPK_Gurobi_GPU,
		template_polyhedra::ptr & reachableRegion) {
	typedef typename boost::numeric::ublas::matrix<double>::size_type size_type;
	unsigned int NewTotalIteration = ReachParameters.Iterations;
	bool U_empty = false;
	int num_inv = invariant->getColumnVector().size(); //number of Invariant's constraints
	std::vector<double> inv_bounds = invariant->getColumnVector();
	math::matrix<double> inv_directions = invariant->getCoeffMatrix();
	if (isInvariantExist == true) { //invariant exists: iterate only while inside it
		std::cout << "Yes Invariant Exist!!!";
		NewTotalIteration = boundedTotIteration;
		std::cout << "NewTotalIteration = " << NewTotalIteration << std::endl;
	} //End of Invariant Directions
	if (NewTotalIteration <= 1) {
		template_polyhedra::ptr poly_empty;
		reachableRegion = poly_empty; //empty result
		//fix: return here -- previously execution fell through and the empty
		//result was overwritten below despite "NO need to proceed Algorithm further"
		return;
	}
	if (SystemDynamics.U->getIsEmpty()) { //polytope U can be empty set
		U_empty = true;
	}
	int Solver = Solver_GLPK_Gurobi_GPU; //1 for CPU solver(GLPK); 2 for CPU solver(Gurobi); 3 for GPU solver(Gimplex)
	// ************* Generation of Directions Begins ***************
	unsigned int numVectors = ReachParameters.Directions.size1();
	unsigned int totalDirList1 = numVectors * (NewTotalIteration + 1); //1 extra direction for loop1
	math::matrix<float> List_for_X0(totalDirList1,
			ReachParameters.Directions.size2());
	unsigned int totalDirList2 = numVectors * NewTotalIteration; //'n' dirs for each of the 'n' loops
	math::matrix<float> List_for_U(totalDirList2,
			ReachParameters.Directions.size2());
	std::list<std::vector<double> > List_X0; //direction lists in the form the GLPK path consumes
	std::list<std::vector<double> > List_U;
	boost::timer::cpu_timer DirectionsGenerate_time;
	DirectionsGenerate_time.start();
	if (Solver == 3) {
		//CUDA does not support OMP directly, so library "lgomp" is linked; build-stage -Xcompiler -fopenmp
		int numCoresAvail = omp_get_num_procs(); //get the number of cores
		getDirectionList_X0_and_U(numCoresAvail, ReachParameters, NewTotalIteration,
				List_for_X0, List_for_U, U_empty, SystemDynamics); //single pass builds both lists
	} else {
		//only for profiling GLPK solver time, for comparison with the GPU implementation
		getDirectionList_X0_and_U_OnlyForGLPK(ReachParameters,
				NewTotalIteration, List_X0, List_U, U_empty, SystemDynamics);
	}
	DirectionsGenerate_time.stop();
	double wall_clock1 = DirectionsGenerate_time.elapsed().wall / 1000000; //nanoseconds to milliseconds
	double return_Time1 = wall_clock1 / (double) 1000;
	std::cout
			<< "\nDirections Generation(parallel): Boost Time taken:Wall (in Seconds) = "
			<< return_Time1 << std::endl;
	// ************* Generation of Directions Ends ***************
	size_type row = numVectors, col = NewTotalIteration;
	math::matrix<double> MatrixValue(row, col); //support-function matrix: one row per direction
	std::vector<float> supp_func_X0, supp_func_U, supp_func_UnitBall,
			result_dotProduct;
	int device;
	hipDeviceProp_t props;
	hipGetDevice(&device);
	hipGetDeviceProperties(&props, device);
	double MemorySize = props.totalGlobalMem / 1024; //global memory converted into KiloBytes
	unsigned int no_lps_possible; //how many LPs fit in device memory at once
	if (Solver == 3) { // ************ GRAPHIC PROCESSING UNIT Computations ********
		bool IsBoundedLPSolver = true; //Remember to make this false when testing General Gimplex
		double eachLP_Size;
		if (IsBoundedLPSolver) {
			/*
			 * ToDo::recompute the LP size based on NEW Implementation of Single Kernel
			 * For bounded LP Solver:: each lp size is just objective-function size
			 * per LP plus a constant factor: the size of the bound vector.
			 */
			eachLP_Size = ReachParameters.Directions.size2() * sizeof(float);
			eachLP_Size = eachLP_Size / (double) 1024; //converted into KiloBytes
			unsigned int boundValueSize =
					ReachParameters.X0->getColumnVector().size()
							* sizeof(float);
			no_lps_possible = (MemorySize - boundValueSize) / eachLP_Size; //integer cast rounds down: safe
			std::cout << "Number of LP per bulk Possible is " << no_lps_possible
					<< std::endl;
		} else {
			/*
			 * General Gimplex tableau model:
			 * MaxSize per LP = row * (col + row + row_artificial + 4) * sizeof(float)
			 */
			eachLP_Size = (ReachParameters.X0->getCoeffMatrix().size1() + 1)
					* (2 * ReachParameters.X0->getCoeffMatrix().size2()
							+ ReachParameters.X0->getCoeffMatrix().size1() + 4)
					* sizeof(float);
			eachLP_Size = eachLP_Size / (double) 1024; //converted into KiloBytes
			no_lps_possible = MemorySize / eachLP_Size; //integer cast rounds down: safe
			std::cout << "Number of LP per bulk Possible = " << no_lps_possible
					<< std::endl;
		}
		std::cout << "totalDirList1 LP = " << totalDirList1 << std::endl;
		bool single_bulk = true; //false if the batch must be split across multiple device-sized bulks
		if (totalDirList1 > no_lps_possible) { //totalDirList1 is the maximum batch
			single_bulk = false;
		}
		boost::timer::cpu_timer onlyGimplex_time;
		onlyGimplex_time.start();
		if (single_bulk) { //whole batch fits: one solver call, fewer memory transfers
			if (IsBoundedLPSolver) {
				//solve X0 and the unit-ball support in one kernel call; U separately
				if (!U_empty) { //polytope U can be empty set
					std::cout << "totalDirList2 LP = " << totalDirList2
							<< std::endl;
					Simplex simplex_for_U(totalDirList2);
					simplex_for_U.setConstratint(
							SystemDynamics.U->getCoeffMatrix(),
							SystemDynamics.U->getColumnVector());
					simplex_for_U.ComputeLP(List_for_U, number_of_streams);
					supp_func_U = simplex_for_U.getResultAll();
				}
				int UnitBall = 1; //tag selecting the overloaded Simplex interface
				Simplex solver(UnitBall, totalDirList1);
				solver.setConstratint(ReachParameters.X0->getCoeffMatrix(),
						ReachParameters.X0->getColumnVector(), UnitBall);
				std::cout << "New imple with dotProduction:: Started\n";
				//also computes dot products with the constant dynamics term C
				solver.ComputeLP(List_for_X0, UnitBall, number_of_streams,
						SystemDynamics.C);
				solver.getResult_X(supp_func_X0); //results returned through the arguments
				solver.getResult_UnitBall(supp_func_UnitBall);
				solver.getResult_dotProduct(result_dotProduct);
				std::cout << "New imple with dotProduction:: Ended\n";
			} else { //OLD implementation: separate kernels for X0 and U
				//TODO::this path does not produce supp_func_UnitBall
				Simplex simplex_for_X0(totalDirList1), simplex_for_U(
						totalDirList2);
				simplex_for_X0.setConstratint(
						ReachParameters.X0->getCoeffMatrix(),
						ReachParameters.X0->getColumnVector());
				if (!U_empty) { //polytope U can be empty set
					simplex_for_U.setConstratint(
							SystemDynamics.U->getCoeffMatrix(),
							SystemDynamics.U->getColumnVector());
				}
				simplex_for_X0.ComputeLP(List_for_X0, number_of_streams);
				supp_func_X0 = simplex_for_X0.getResultAll();
				if (!U_empty) {
					simplex_for_U.ComputeLP(List_for_U, number_of_streams);
					supp_func_U = simplex_for_U.getResultAll();
				}
			}
			std::cout << "Single Bulk";
		} else { //batch exceeds device memory: solve in device-sized bulks
			if (IsBoundedLPSolver) {
				int UnitBall = 1; //tag selecting the overloaded Simplex interface
				bulk_Solver_With_UnitBall(UnitBall,
						ReachParameters.X0->getCoeffMatrix(),
						ReachParameters.X0->getColumnVector(), List_for_X0,
						number_of_streams, no_lps_possible, supp_func_X0,
						supp_func_UnitBall); //also returns the unit-ball results
				if (!U_empty) { //plain bulk solver suffices for U
					bulk_Solver(SystemDynamics.U->getCoeffMatrix(),
							SystemDynamics.U->getColumnVector(), List_for_U,
							number_of_streams, no_lps_possible, supp_func_U);
				}
			} else {
				//OLD implementation (separate kernels) is not provided for the bulk path
			}
		}
		onlyGimplex_time.stop();
		double wall_clock = onlyGimplex_time.elapsed().wall / 1000000; //nanoseconds to milliseconds
		double return_Time = wall_clock / (double) 1000;
		std::cout
				<< "\nGPU-simplex Solver: Boost Time taken:Wall (in Seconds) = "
				<< return_Time << std::endl;
		// *********************** GPU computation Over *********************
	}
	if (Solver >= 1 && Solver < 3) { // ************ CPU Solver ****************
		//Todo::implement the UnitBall_infinity_norm solver here if the final
		//reach loop below is to be used with the CPU path
		boost::timer::cpu_timer onlyGimplex_time;
		onlyGimplex_time.start();
		bulk_lp_solver simplex_for_X0(Solver), simplex_for_U(Solver); //Solver = 1 for GLPK; 2 for Gurobi
		bool U_empty = false;
		simplex_for_X0.setMaxMin(2); //2 for Maximum
		simplex_for_X0.setConstratint(ReachParameters.X0->getCoeffMatrix(),
				ReachParameters.X0->getColumnVector());
		if (!SystemDynamics.U->getIsEmpty()) { //polytope U can be empty set
			simplex_for_U.setMaxMin(2); //2 for Maximum
			simplex_for_U.setConstratint(SystemDynamics.U->getCoeffMatrix(),
					SystemDynamics.U->getColumnVector());
		} else {
			U_empty = true;
		}
		simplex_for_X0.ComputeLP_ListVector(List_X0); //only for GLPK comparison
		supp_func_X0 = simplex_for_X0.getResultAll();
		if (!U_empty) {
			simplex_for_U.ComputeLP_ListVector(List_U); //only for GLPK comparison
			supp_func_U = simplex_for_U.getResultAll();
		}
		onlyGimplex_time.stop();
		double wall_clock = onlyGimplex_time.elapsed().wall / 1000000; //nanoseconds to milliseconds
		double return_Time = wall_clock / (double) 1000;
		std::cout
				<< "\nCPU(GLPK/Gurobi) Solver: Boost Time taken:Wall (in Seconds) = "
				<< return_Time << std::endl;
	} // ************ CPU Solver Over ****************
	std::cout << "\n Before Final Reach Algorithm ";
	std::cout << std::endl;
	boost::timer::cpu_timer reachLoop_time;
	reachLoop_time.start();
	//each direction owns one row of MatrixValue, so iterations are independent
#pragma omp parallel for
	for (unsigned int eachDirection = 0; eachDirection < numVectors;
			eachDirection++) {
		unsigned int index_X0, index_U; //per-thread cursors into the flat result vectors
		//X0's list holds one extra direction for the very first iteration,
		//so later directions are offset by eachDirection
		if (eachDirection == 0) { //only the first direction starts at 0
			index_X0 = eachDirection * NewTotalIteration;
		} else {
			index_X0 = eachDirection * NewTotalIteration + eachDirection;
		}
		if (!U_empty) {
			index_U = eachDirection * NewTotalIteration;
		}
		double res1;
		double term1, term2, term3, term3a, term3b, res2, term3c = 0.0;
		double zIInitial = 0.0, zI = 0.0, zV = 0.0;
		double sVariable = 0.0, s1Variable; //initialize s0
		unsigned int loopIteration = 0;
		// ************** Omega Function (first iteration) ********************
		res1 = supp_func_X0[index_X0]; //X0->SF(direction)
		term3b = (double) supp_func_UnitBall[index_X0];
		if (!SystemDynamics.isEmptyC) { //constant-dynamics contribution
			term3c = ReachParameters.time_step * result_dotProduct[index_X0];
		}
		index_X0++;
		term1 = supp_func_X0[index_X0]; //X0->SF(phi_trans_dir)
		index_X0++;
		if (!U_empty) {
			term2 = ReachParameters.time_step * supp_func_U[index_U]; //U->SF(Btrans_dir)
			index_U++;
		} else
			term2 = 0;
		term3a = ReachParameters.result_alfa; //alfa bloating factor
		term3 = term3a * term3b;
		res2 = term1 + term2 + term3 + term3c;
		if (res1 > res2)
			zIInitial = res1;
		else
			zIInitial = res2;
		// ************** Omega Function ********************
		MatrixValue(eachDirection, loopIteration) = zIInitial;
		loopIteration++;
		for (; loopIteration < NewTotalIteration;) { //stopping condition is NewTotalIteration only
			// ************** W_Support Function ********************
			double result;
			if (!U_empty) {
				res1 = ReachParameters.time_step * supp_func_U[index_U - 1]; //reuse previous U value
			} else {
				res1 = 0;
			}
			double beta = ReachParameters.result_beta; //beta bloating factor
			double res_beta = beta * term3b; //term3b reused from the previous step
			result = res1 + res_beta + term3c;
			zV = result;
			// ************** W_Support Function ********************
			s1Variable = sVariable + zV;
			// ************** Omega Function ********************
			res1 = supp_func_X0[index_X0 - 1]; //reuse previous value: X0->SF(direction)
			double term1, term2, term3, term3a, res2;
			term1 = supp_func_X0[index_X0]; //X0->SF(phi_trans_dir)
			index_X0++;
			if (!U_empty) {
				term2 = ReachParameters.time_step * supp_func_U[index_U]; //U->SF(Btrans_dir)
				index_U++;
			} else {
				term2 = 0;
			}
			term3a = ReachParameters.result_alfa;
			//the first direction's extra entry shifts the UnitBall/dotProduct index by one
			if (loopIteration == 1) {
				term3b = (double) supp_func_UnitBall[index_X0 - 2];
				if (!SystemDynamics.isEmptyC) {
					term3c = ReachParameters.time_step
							* result_dotProduct[index_X0 - 2];
				}
			} else {
				term3b = (double) supp_func_UnitBall[index_X0 - 1];
				if (!SystemDynamics.isEmptyC) {
					term3c = ReachParameters.time_step
							* result_dotProduct[index_X0 - 1];
				}
			}
			term3 = term3a * term3b;
			res2 = term1 + term2 + term3 + term3c;
			if (res1 > res2)
				zI = res1;
			else
				zI = res2;
			// ************** Omega Function ********************
			double TempOmega = zI + s1Variable; //Y1
			MatrixValue(eachDirection, loopIteration) = TempOmega; //Y1
			sVariable = s1Variable;
			loopIteration++; //next Omega-iteration / time step
		} //end of all iterations of each direction
	} //end of for each direction
	reachLoop_time.stop();
	double wall_clock = reachLoop_time.elapsed().wall / 1000000; //nanoseconds to milliseconds
	double reach_Time = wall_clock / (double) 1000;
	std::cout << "\nFinal Reach Loop Time:Wall (in Seconds) = " << reach_Time
			<< std::endl;
	/** Appending invariant directions and invariant constraints/bounds(alfa)
	 ** Goal : To truncate the reachable region within the Invariant region */
	if (isInvariantExist == true) { //invariant exists: attach its bounds per iteration
		math::matrix<double> inv_sfm;
		inv_sfm.resize(num_inv, NewTotalIteration);
		for (int eachInvDirection = 0; eachInvDirection < num_inv;
				eachInvDirection++) {
			for (unsigned int i = 0; i < NewTotalIteration; i++) {
				inv_sfm(eachInvDirection, i) = inv_bounds[eachInvDirection];
			}
		}
		reachableRegion = template_polyhedra::ptr(new template_polyhedra());
		reachableRegion->setTemplateDirections(ReachParameters.Directions);
		reachableRegion->setMatrix_InvariantBound(inv_sfm);
		reachableRegion->setInvariantDirections(inv_directions);
		reachableRegion->setMatrixSupportFunction(MatrixValue);
	} else {
		reachableRegion = template_polyhedra::ptr(new template_polyhedra());
		reachableRegion->setMatrixSupportFunction(MatrixValue);
		reachableRegion->setTemplateDirections(ReachParameters.Directions);
	}
}
| aae980fb266d0a55d888a3f7353004a09ff29fdc.cu | /*
* reach_Sequential_GPU.cpp
*
* Created on: 18-April-2015
* Author: amit
*/
#include "core_system/Reachability/GPU_Reach/reach_Sequential_GPU.cuh"
#include "core_system/math/Gimplex/simplex.cuh"
#include "core_system/math/Bulk_LP_Solver/bulk_LP_Solver.h"
#include "boost/timer/timer.hpp"
#include <list>
//Correct implementation for All sizes of Block_LPs
/*
 * Solves a large batch of LPs (one objective function per row of
 * list_obj_funs, all sharing the constraint system A x <= b given by
 * constraint_matrix / boundValue) on the GPU, splitting the batch into
 * blocks of at most no_lps_possible LPs so each block fits in GPU memory.
 *
 * number_of_streams : CUDA streams used by the GPU Simplex solver
 * no_lps_possible   : maximum LPs solvable per GPU call; must be > 0
 *                     (it is used as a divisor below)
 * res               : output; resized to list_obj_funs.size1(), res[k] is
 *                     the optimum of the k-th objective, in input order
 */
void bulk_Solver(math::matrix<double> constraint_matrix,
		std::vector<double> boundValue, math::matrix<float> list_obj_funs,
		unsigned int number_of_streams, unsigned int no_lps_possible,
		std::vector<float> &res) {
	unsigned int tot_lp = list_obj_funs.size1();
	std::cout << "Total LPs " << tot_lp << std::endl;
	unsigned int lp_block_size = no_lps_possible; //how many LPs are solved per GPU call
	unsigned int number_of_blocks;
	bool equalBlockSize = true;
	if (tot_lp % lp_block_size == 0) {
		number_of_blocks = tot_lp / lp_block_size;
	} else {
		number_of_blocks = (tot_lp / lp_block_size) + 1; //the last block has fewer LPs; handled separately below
		equalBlockSize = false;
	}
	std::cout << "Total Blocks " << number_of_blocks << std::endl;
	std::list<block_lp> bulk_lps; //sub-division of the LPs into blocks
	struct block_lp myLPList;
	myLPList.block_obj_coeff.resize(lp_block_size, list_obj_funs.size2());
	if (equalBlockSize) { //all blocks hold exactly lp_block_size LPs
		for (unsigned int b = 0; b < number_of_blocks; b++) {
			bulk_lps.push_back(myLPList);
		} //iterator is ready now
		//Each thread fills row lp_number of every block; rows are disjoint
		//across threads, so the writes are race-free.
#pragma omp parallel for
		for (unsigned int lp_number = 0; lp_number < lp_block_size;
				lp_number++) {
			int block_no = 0; //thread-private block counter
			for (std::list<block_lp>::iterator it = bulk_lps.begin();
					it != bulk_lps.end(); it++) {
				for (unsigned int i = 0; i < list_obj_funs.size2(); i++) {
					(*it).block_obj_coeff(lp_number, i) = list_obj_funs(
							lp_number + block_no * lp_block_size, i);
				}
				block_no++;
			}
		} //end of all LPs
	} else { //unequal block size: fill the (number_of_blocks - 1) full blocks first
		for (unsigned int b = 0; b < (number_of_blocks - 1); b++) {
			bulk_lps.push_back(myLPList);
		} //iterator ready for all equal-sized blocks except the last one
#pragma omp parallel for
		for (unsigned int lp_number = 0; lp_number < lp_block_size;
				lp_number++) {
			int block_no = 0; //thread-private
			for (std::list<block_lp>::iterator it = bulk_lps.begin();
					it != bulk_lps.end(); it++) { //the last (short) block is not in the list yet
				for (unsigned int i = 0; i < list_obj_funs.size2(); i++) {
					(*it).block_obj_coeff(lp_number, i) = list_obj_funs(
							lp_number + block_no * lp_block_size, i);
				}
				block_no++;
			}
		} //end of all LPs of the full blocks
		//Now the remaining LAST block, which has fewer LPs.
		struct block_lp myLPList2;
		unsigned int last_block_size = tot_lp % lp_block_size; //remainder LPs
		myLPList2.block_obj_coeff.resize(last_block_size,
				list_obj_funs.size2());
		unsigned int first_lp = (number_of_blocks - 1) * lp_block_size; //index of the first LP of the last block
#pragma omp parallel for
		for (unsigned int lp_left = first_lp; lp_left < tot_lp; lp_left++) {
			//fix: 'index' must be thread-private; it was previously a single
			//shared variable written by every thread (a data race)
			unsigned int index = lp_left - first_lp; //0 .. last_block_size-1
			for (unsigned int i = 0; i < list_obj_funs.size2(); i++) {
				myLPList2.block_obj_coeff(index, i) = list_obj_funs(lp_left, i);
			}
		}
		bulk_lps.push_back(myLPList2); //append the last block
	} //end of block construction
	//Solve every block on the GPU, keeping results in block order.
	std::list<block_lp_result> bulk_result;
	struct block_lp_result eachBlock;
	for (std::list<block_lp>::iterator it = bulk_lps.begin();
			it != bulk_lps.end(); it++) {
		unsigned int each_bulk_size = (*it).block_obj_coeff.size1();
		eachBlock.results.resize(each_bulk_size);
		Simplex lp_problem(each_bulk_size); //GPU computation
		lp_problem.setConstratint(constraint_matrix, boundValue);
		lp_problem.ComputeLP((*it).block_obj_coeff, number_of_streams); //actual GPU computation
		eachBlock.results = lp_problem.getResultAll();
		bulk_result.push_back(eachBlock);
	}
	//Concatenate the per-block results back into a single flat vector.
	res.resize(tot_lp);
	unsigned int index_res = 0;
	for (std::list<block_lp_result>::iterator it = bulk_result.begin();
			it != bulk_result.end(); it++) {
		unsigned int block_result_size = (*it).results.size();
		for (unsigned int i = 0; i < block_result_size; i++) {
			res[index_res] = (*it).results[i];
			index_res++;
		}
	}
}
/*
 * Same block-wise bulk LP solving as bulk_Solver(), but uses the overloaded
 * Simplex interface that additionally computes, for every objective
 * direction, the support function of the unit ball (UnitBall is only a tag
 * selecting those overloads).
 *
 * result_X        : output, optimum of each LP over X0 (input order)
 * result_UnitBall : output, matching unit-ball support value per LP
 * no_lps_possible : maximum LPs solvable per GPU call; must be > 0
 */
void bulk_Solver_With_UnitBall(int UnitBall,
		math::matrix<double> constraint_matrix, std::vector<double> boundValue,
		math::matrix<float> list_obj_funs, unsigned int number_of_streams,
		unsigned int no_lps_possible, std::vector<float> &result_X,
		std::vector<float> &result_UnitBall) {
	unsigned int tot_lp = list_obj_funs.size1();
	std::cout << "Total LPs " << tot_lp << std::endl;
	unsigned int lp_block_size = no_lps_possible; //LPs solved per GPU call
	unsigned int number_of_blocks;
	bool equalBlockSize = true;
	if (tot_lp % lp_block_size == 0) {
		number_of_blocks = tot_lp / lp_block_size;
	} else {
		number_of_blocks = (tot_lp / lp_block_size) + 1; //last block has fewer LPs; handled separately
		equalBlockSize = false;
	}
	std::cout << "Total Blocks " << number_of_blocks << std::endl;
	std::list<block_lp> bulk_lps; //sub-division of the LPs into blocks
	struct block_lp myLPList;
	myLPList.block_obj_coeff.resize(lp_block_size, list_obj_funs.size2());
	if (equalBlockSize) { //all blocks hold exactly lp_block_size LPs
		for (unsigned int b = 0; b < number_of_blocks; b++) {
			bulk_lps.push_back(myLPList);
		} //iterator is ready now
		//Each thread fills row lp_number of every block; rows are disjoint,
		//so the writes are race-free.
#pragma omp parallel for
		for (unsigned int lp_number = 0; lp_number < lp_block_size;
				lp_number++) {
			int block_no = 0; //thread-private block counter
			for (std::list<block_lp>::iterator it = bulk_lps.begin();
					it != bulk_lps.end(); it++) {
				for (unsigned int i = 0; i < list_obj_funs.size2(); i++) {
					(*it).block_obj_coeff(lp_number, i) = list_obj_funs(
							lp_number + block_no * lp_block_size, i);
				}
				block_no++;
			}
		} //end of all LPs
	} else { //unequal block size: fill the (number_of_blocks - 1) full blocks first
		for (unsigned int b = 0; b < (number_of_blocks - 1); b++) {
			bulk_lps.push_back(myLPList);
		} //iterator ready for all full blocks; last (short) block added below
#pragma omp parallel for
		for (unsigned int lp_number = 0; lp_number < lp_block_size;
				lp_number++) {
			int block_no = 0; //thread-private
			for (std::list<block_lp>::iterator it = bulk_lps.begin();
					it != bulk_lps.end(); it++) { //short last block not in the list yet
				for (unsigned int i = 0; i < list_obj_funs.size2(); i++) {
					(*it).block_obj_coeff(lp_number, i) = list_obj_funs(
							lp_number + block_no * lp_block_size, i);
				}
				block_no++;
			}
		} //end of all LPs of the full blocks
		//Now the remaining LAST block with fewer LPs.
		struct block_lp myLPList2;
		unsigned int last_block_size = tot_lp % lp_block_size; //remainder LPs
		myLPList2.block_obj_coeff.resize(last_block_size,
				list_obj_funs.size2());
		unsigned int first_lp = (number_of_blocks - 1) * lp_block_size; //first LP of the last block
#pragma omp parallel for
		for (unsigned int lp_left = first_lp; lp_left < tot_lp; lp_left++) {
			//fix: 'index' must be thread-private; it was previously a single
			//shared variable written by every thread (a data race)
			unsigned int index = lp_left - first_lp; //0 .. last_block_size-1
			for (unsigned int i = 0; i < list_obj_funs.size2(); i++) {
				myLPList2.block_obj_coeff(index, i) = list_obj_funs(lp_left, i);
			}
		}
		bulk_lps.push_back(myLPList2); //append the last block
	} //end of block construction
	//Solve every block on the GPU with the UnitBall-aware Simplex interface.
	std::list<block_lp_result> bulk_result;
	struct block_lp_result eachBlock;
	for (std::list<block_lp>::iterator it = bulk_lps.begin();
			it != bulk_lps.end(); it++) {
		unsigned int each_bulk_size = (*it).block_obj_coeff.size1();
		eachBlock.results.resize(each_bulk_size);
		Simplex Solver(UnitBall, each_bulk_size); //GPU computation
		Solver.setConstratint(constraint_matrix, boundValue, UnitBall);
		Solver.ComputeLP((*it).block_obj_coeff, UnitBall, number_of_streams); //actual GPU computation
		Solver.getResult_X(eachBlock.results);
		Solver.getResult_UnitBall(eachBlock.results_UnitBall);
		bulk_result.push_back(eachBlock);
	}
	//Concatenate the per-block results back into flat output vectors.
	result_X.resize(tot_lp);
	result_UnitBall.resize(tot_lp);
	unsigned int index_res = 0;
	for (std::list<block_lp_result>::iterator it = bulk_result.begin();
			it != bulk_result.end(); it++) {
		unsigned int block_result_size = (*it).results.size();
		for (unsigned int i = 0; i < block_result_size; i++) {
			result_X[index_res] = (*it).results[i];
			result_UnitBall[index_res] = (*it).results_UnitBall[i];
			index_res++;
		}
	}
}
/*
* After optimising the duplicate Support Function computation
*/
/*
 * Computes the reachable region (flow-pipe) with the support-function
 * algorithm, solving all LPs of the sampling phase in bulk either on the
 * GPU (Gimplex, Solver_GLPK_Gurobi_GPU == 3) or on a CPU LP solver
 * (1 == GLPK, 2 == Gurobi).  The result is written to reachableRegion.
 *
 * boundedTotIteration : iteration bound derived from the invariant; used
 *                       only when isInvariantExist is true
 * number_of_streams   : CUDA streams used by the GPU solver
 * NOTE(review): lp_solver_type_choosen is currently unused here; it is kept
 * for interface compatibility with callers.
 */
void reachabilitySequential_GPU(unsigned int boundedTotIteration, Dynamics& SystemDynamics,
		supportFunctionProvider::ptr Initial,
		ReachabilityParameters& ReachParameters, polytope::ptr invariant,
		bool isInvariantExist, int lp_solver_type_choosen,
		unsigned int number_of_streams, int Solver_GLPK_Gurobi_GPU,
		template_polyhedra::ptr & reachableRegion) {
	typedef typename boost::numeric::ublas::matrix<double>::size_type size_type;
	unsigned int NewTotalIteration = ReachParameters.Iterations;
	bool U_empty = false;
	int num_inv = invariant->getColumnVector().size(); //number of Invariant's constraints
	std::vector<double> inv_bounds;
	inv_bounds = invariant->getColumnVector();
	math::matrix<double> inv_directions;
	inv_directions = invariant->getCoeffMatrix();
	if (isInvariantExist == true) { //if an invariant exists, iterate only up to its bound
		std::cout << "Yes Invariant Exist!!!";
		NewTotalIteration = boundedTotIteration;
		std::cout << "NewTotalIteration = " << NewTotalIteration << std::endl;
	} //End of Invariant Directions
	if (NewTotalIteration <= 1) {
		template_polyhedra::ptr poly_empty;
		reachableRegion = poly_empty; //empty result
		//fix: previously the function fell through here and went on to index
		//a matrix with 0/1 columns; no need to proceed with the algorithm
		return;
	}
	if (SystemDynamics.U->getIsEmpty()) { //polytope U can be empty set
		U_empty = true;
	}
	int Solver = Solver_GLPK_Gurobi_GPU; //1 CPU solver(GLPK); 2 CPU solver(Gurobi); 3 GPU solver(Gimplex)
	// ************* Generation of Directions Begins ***************
	unsigned int numVectors = ReachParameters.Directions.size1();
	unsigned int totalDirList1 = numVectors * (NewTotalIteration + 1); //1 extra for loop1
	math::matrix<float> List_for_X0(totalDirList1,
			ReachParameters.Directions.size2());
	unsigned int totalDirList2 = numVectors * NewTotalIteration; //'n' dirs for each 'n' loops
	math::matrix<float> List_for_U(totalDirList2,
			ReachParameters.Directions.size2());
	std::list<std::vector<double> > List_X0;
	std::list<std::vector<double> > List_U;
	boost::timer::cpu_timer DirectionsGenerate_time;
	DirectionsGenerate_time.start();
	if (Solver == 3) {
		//for OMP --cuda not supporting OMP-- so added library "lgomp"; build-stage -Xcompiler -fopenmp
		int numCoresAvail = omp_get_num_procs(); //get the number of cores
		getDirectionList_X0_and_U(numCoresAvail, ReachParameters, NewTotalIteration,
				List_for_X0, List_for_U, U_empty, SystemDynamics); //Optimized into a single function the 2 Tasks
	} else {
		//Only for profiling GLPK solver Time for comparison with boundary value implementation
		getDirectionList_X0_and_U_OnlyForGLPK(ReachParameters,
				NewTotalIteration, List_X0, List_U, U_empty, SystemDynamics); //Optimized into a single function the 2 Tasks
	}
	DirectionsGenerate_time.stop();
	double wall_clock1;
	wall_clock1 = DirectionsGenerate_time.elapsed().wall / 1000000; //convert nanoseconds to milliseconds
	double return_Time1 = wall_clock1 / (double) 1000;
	std::cout
			<< "\nDirections Generation(parallel): Boost Time taken:Wall (in Seconds) = "
			<< return_Time1 << std::endl;
	// ************* Generation of Directions Ends ***************
	int dimension = Initial->getSystemDimension();
	size_type row = numVectors, col = NewTotalIteration;
	math::matrix<double> MatrixValue(row, col); //one support value per (direction, time-step)
	std::vector<float> supp_func_X0, supp_func_U, supp_func_UnitBall,
			result_dotProduct;
	int device;
	cudaDeviceProp props;
	cudaGetDevice(&device);
	cudaGetDeviceProperties(&props, device);
	double MemorySize = props.totalGlobalMem / 1024; //converting into KiloBytes
	unsigned int no_lps_possible;
	if (Solver == 3) { // ************ GRAPHIC PROCESSING UNIT Computations ********
		//result obtained from GPU interface // OMP can also be tried and compared
		bool IsBoundedLPSolver = true; //Remember to make this false when testing General Gimplex
		double eachLP_Size;
		if (IsBoundedLPSolver) {
			/*
			 * ToDo::recompute the LP size based on NEW Implementataion of Single Kernel
			 * For bounded LP Solver:: each lp size is just objective function size * number of LPs to be solved
			 * and a constant factor:: size of the bound vectors
			 */
			eachLP_Size = ReachParameters.Directions.size2() * sizeof(float);
			eachLP_Size = eachLP_Size / (double) 1024; //converted into KiloBytes
			unsigned int boundValueSize =
					ReachParameters.X0->getColumnVector().size()
							* sizeof(float);
			no_lps_possible = (MemorySize - boundValueSize) / eachLP_Size; //Taking less by integer_casting NO PROBLEM
			std::cout << "Number of LP per bulk Possible is " << no_lps_possible
					<< std::endl;
		} else {
			/*
			 * lp_block_size can be computed from the Constraint_matrix Model
			 MaxSize per LP = row * (col + row + row_artificial + 4) * sizeof(float)
			 */
			eachLP_Size = (ReachParameters.X0->getCoeffMatrix().size1() + 1)
					* (2 * ReachParameters.X0->getCoeffMatrix().size2()
							+ ReachParameters.X0->getCoeffMatrix().size1() + 4)
					* sizeof(float);
			eachLP_Size = eachLP_Size / (double) 1024; //converted into KiloBytes
			no_lps_possible = MemorySize / eachLP_Size; //Taking less by integer_casting NO PROBLEM
			std::cout << "Number of LP per bulk Possible = " << no_lps_possible
					<< std::endl;
		}
		std::cout << "totalDirList1 LP = " << totalDirList1 << std::endl;
		bool single_bulk = true; //false -- if multiple bulk is required to be processed/solved
		if (totalDirList1 > no_lps_possible) { //totalDirList1 is the Maximum
			single_bulk = false;
		}
		boost::timer::cpu_timer onlyGimplex_time;
		onlyGimplex_time.start();
		if (single_bulk) { //If single bulk is solved this code-section takes less time than else-part
			if (IsBoundedLPSolver) {
				//Solve in a single call for both X0 and supFunUnitBall to save multiple memory transfers, and separately for U
				if (!U_empty) { //polytope U can be empty set
					std::cout << "totalDirList2 LP = " << totalDirList2
							<< std::endl;
					Simplex simplex_for_U(totalDirList2);
					simplex_for_U.setConstratint(
							SystemDynamics.U->getCoeffMatrix(),
							SystemDynamics.U->getColumnVector());
					simplex_for_U.ComputeLP(List_for_U, number_of_streams);
					supp_func_U = simplex_for_U.getResultAll();
				} //working
				//compute only X0 and supp_unitBall in one kernel
				int UnitBall = 1; //just to have different signature for the overloaded functions of class Simplex
				Simplex solver(UnitBall, totalDirList1);
				solver.setConstratint(ReachParameters.X0->getCoeffMatrix(),
						ReachParameters.X0->getColumnVector(), UnitBall);
				std::cout << "New imple with dotProduction:: Started\n";
				solver.ComputeLP(List_for_X0, UnitBall, number_of_streams,
						SystemDynamics.C);
				//todo:: some memory issue exist here
				solver.getResult_X(supp_func_X0); //return the result as argument
				solver.getResult_UnitBall(supp_func_UnitBall);
				solver.getResult_dotProduct(result_dotProduct);
				std::cout << "New imple with dotProduction:: Ended\n";
			} else { //OLD implementation with single call of kernel for X0 and U
				//TODO::But now missing supp_fun_UnitBall_infinity_norm
				Simplex simplex_for_X0(totalDirList1), simplex_for_U(
						totalDirList2);
				simplex_for_X0.setConstratint(
						ReachParameters.X0->getCoeffMatrix(),
						ReachParameters.X0->getColumnVector());
				if (!U_empty) { //polytope U can be empty set
					simplex_for_U.setConstratint(
							SystemDynamics.U->getCoeffMatrix(),
							SystemDynamics.U->getColumnVector());
				}
				simplex_for_X0.ComputeLP(List_for_X0, number_of_streams);
				supp_func_X0 = simplex_for_X0.getResultAll();
				if (!U_empty) {
					simplex_for_U.ComputeLP(List_for_U, number_of_streams);
					supp_func_U = simplex_for_U.getResultAll();
				}
			}
			std::cout << "Single Bulk";
		} else { //IF SOLVING BY DIVISION IS REQUIRED
			if (IsBoundedLPSolver) {
				//Solve in a single call for both X0 and supFunUnitBall to save multiple memory transfers, and separately for U
				int UnitBall = 1; //just to have different signature for the overloaded functions of class Simplex
				bulk_Solver_With_UnitBall(UnitBall,
						ReachParameters.X0->getCoeffMatrix(),
						ReachParameters.X0->getColumnVector(), List_for_X0,
						number_of_streams, no_lps_possible, supp_func_X0,
						supp_func_UnitBall); //ONLY UnitBall result will extra
				if (!U_empty) { //polytope U can be empty set //NO CHANGE REQUIRED HERE IN BULK SOLVER
					bulk_Solver(SystemDynamics.U->getCoeffMatrix(),
							SystemDynamics.U->getColumnVector(), List_for_U,
							number_of_streams, no_lps_possible, supp_func_U);
				}
			} else {
				//OLD implementation with single call of kernel for X0 and U
				//TODO::But now missing supp_fun_UnitBall_infinity_norm
			}
		}
		onlyGimplex_time.stop();
		double wall_clock;
		wall_clock = onlyGimplex_time.elapsed().wall / 1000000; //convert nanoseconds to milliseconds
		double return_Time = wall_clock / (double) 1000;
		std::cout
				<< "\nGPU-simplex Solver: Boost Time taken:Wall (in Seconds) = "
				<< return_Time << std::endl;
		// *********************** GPU computation Over *********************
	}
	if (Solver >= 1 && Solver < 3) { // ************ CPU Solver ****************
		//Todo::Similarly i have to implement Solver for UnitBall_infinity_norm if i have to use Final loop for reachAlgorithm
		boost::timer::cpu_timer onlyGimplex_time;
		onlyGimplex_time.start();
		bulk_lp_solver simplex_for_X0(Solver), simplex_for_U(Solver); //Solver = 1 for GLPK; = 2 for Gurobi
		simplex_for_X0.setMaxMin(2); //2 for Maximum
		simplex_for_X0.setConstratint(ReachParameters.X0->getCoeffMatrix(),
				ReachParameters.X0->getColumnVector());
		//note: reuses the outer U_empty (previously re-declared here, shadowing it)
		if (!SystemDynamics.U->getIsEmpty()) { //polytope U can be empty set
			simplex_for_U.setMaxMin(2); //2 for Maximum
			simplex_for_U.setConstratint(SystemDynamics.U->getCoeffMatrix(),
					SystemDynamics.U->getColumnVector());
		} else {
			U_empty = true;
		}
		simplex_for_X0.ComputeLP_ListVector(List_X0); //only for GLPK comparison
		supp_func_X0 = simplex_for_X0.getResultAll();
		if (!U_empty) {
			simplex_for_U.ComputeLP_ListVector(List_U); //only for GLPK comparison
			supp_func_U = simplex_for_U.getResultAll();
		}
		onlyGimplex_time.stop();
		double wall_clock, user_clock, system_clock;
		wall_clock = onlyGimplex_time.elapsed().wall / 1000000; //convert nanoseconds to milliseconds
		double return_Time = wall_clock / (double) 1000;
		std::cout
				<< "\nCPU(GLPK/Gurobi) Solver: Boost Time taken:Wall (in Seconds) = "
				<< return_Time << std::endl;
	} // ************ CPU Solver Over ****************
	std::cout << "\n Before Final Reach Algorithm ";
	std::cout << std::endl;
	boost::timer::cpu_timer reachLoop_time;
	reachLoop_time.start();
	//Each direction row of MatrixValue is independent, so directions can be
	//processed in parallel; all per-direction state below is loop-local.
#pragma omp parallel for
	for (unsigned int eachDirection = 0; eachDirection < numVectors;
			eachDirection++) {
		unsigned int index_X0, index_U; //making the index suitable for parallelizing
		//here i have a list of result of Supp_fun_Of_UnitBall_infinity_norm
		if (eachDirection == 0) { //only starting loop begins with 0
			index_X0 = eachDirection * NewTotalIteration;
		} else {
			index_X0 = eachDirection * NewTotalIteration + eachDirection; //only X0(list_X0) has 2 directions for first-iteration
		}
		if (!U_empty) {
			index_U = eachDirection * NewTotalIteration;
		}
		double res1;
		double term1, term2, term3, term3a, term3b, res2, term3c = 0.0;
		double zIInitial = 0.0, zI = 0.0, zV = 0.0;
		double sVariable = 0.0, s1Variable; //initialize s0
		std::vector<double> rVariable(dimension), r1Variable(dimension);
		unsigned int loopIteration = 0;
		// ************** Omega Function ********************
		res1 = supp_func_X0[index_X0]; //X0->SF(direction) // 0
		term3b = (double) supp_func_UnitBall[index_X0]; // needed 0
		if (!SystemDynamics.isEmptyC) {
			term3c = ReachParameters.time_step * result_dotProduct[index_X0];
		}
		index_X0++; // made 1
		term1 = supp_func_X0[index_X0]; //X0->SF(phi_trans_dir) // 1
		index_X0++; // made 2
		if (!U_empty) {
			term2 = ReachParameters.time_step * supp_func_U[index_U]; //U->SF(Btrans_dir)
			index_U++;
		} else
			term2 = 0;
		term3a = ReachParameters.result_alfa; //compute_alfa(ReachParameters.time_step,system_dynamics,Initial_X0);
		term3 = term3a * term3b;
		res2 = term1 + term2 + term3 + term3c; //term3c Added
		if (res1 > res2)
			zIInitial = res1;
		else
			zIInitial = res2;
		// ************** Omega Function ********************
		MatrixValue(eachDirection, loopIteration) = zIInitial;
		loopIteration++;
		for (; loopIteration < NewTotalIteration;) { //Now stopping condition is only "shm_NewTotalIteration"
			// ************** W_Support Function ********************
			double result;
			if (!U_empty) {
				res1 = ReachParameters.time_step * supp_func_U[index_U - 1]; //replace previous value
			} else {
				res1 = 0;
			}
			double beta = ReachParameters.result_beta;
			double res_beta = beta * term3b; //term3b replaced from the previous step
			result = res1 + res_beta + term3c; //Added term3c
			zV = result;
			// ************** W_Support Function ********************
			s1Variable = sVariable + zV;
			// ************** Omega Function ********************
			res1 = supp_func_X0[index_X0 - 1]; //replace previous value.... X0->SF(direction) // (2 -1)=1
			double term1, term2, term3, term3a, res2;
			term1 = supp_func_X0[index_X0]; //X0->SF(phi_trans_dir) // 2
			index_X0++; // made 3
			if (!U_empty) {
				term2 = ReachParameters.time_step * supp_func_U[index_U]; //U->SF(Btrans_dir)
				index_U++;
			} else {
				term2 = 0;
			}
			term3a = ReachParameters.result_alfa; //compute_alfa(ReachParameters.time_step,system_dynamics,Initial_X0);
			if (loopIteration == 1) {
				term3b = (double) supp_func_UnitBall[index_X0 - 2]; //Compute here //needed 1
				if (!SystemDynamics.isEmptyC) {
					term3c = ReachParameters.time_step
							* result_dotProduct[index_X0 - 2];
				}
			} else {
				term3b = (double) supp_func_UnitBall[index_X0 - 1];
				if (!SystemDynamics.isEmptyC) {
					term3c = ReachParameters.time_step
							* result_dotProduct[index_X0 - 1];
				}
			}
			term3 = term3a * term3b;
			res2 = term1 + term2 + term3 + term3c;
			if (res1 > res2)
				zI = res1;
			else
				zI = res2;
			// ************** Omega Function ********************
			double TempOmega;
			TempOmega = zI + s1Variable; //Y1
			MatrixValue(eachDirection, loopIteration) = TempOmega; //Y1
			sVariable = s1Variable;
			loopIteration++; //for the next Omega-iteration or Time-bound
		} //end of all Iterations of each vector/direction
	} //end of for each vector/directions
	reachLoop_time.stop();
	double wall_clock;
	wall_clock = reachLoop_time.elapsed().wall / 1000000; //convert nanoseconds to milliseconds
	double reach_Time = wall_clock / (double) 1000;
	std::cout << "\nFinal Reach Loop Time:Wall (in Seconds) = " << reach_Time
			<< std::endl;
	/** Appending invariant directions and invariant constraints/bounds(alfa)
	 ** Goal : To truncate the reachable region within the Invariant region */
	if (isInvariantExist == true) { //if invariant exist. Computing
		math::matrix<double> inv_sfm;
		inv_sfm.resize(num_inv, NewTotalIteration);
		for (int eachInvDirection = 0; eachInvDirection < num_inv;
				eachInvDirection++) {
			for (unsigned int i = 0; i < NewTotalIteration; i++) {
				inv_sfm(eachInvDirection, i) = inv_bounds[eachInvDirection];
			}
		}
		reachableRegion = template_polyhedra::ptr(new template_polyhedra());
		reachableRegion->setTemplateDirections(ReachParameters.Directions);
		reachableRegion->setMatrix_InvariantBound(inv_sfm);
		reachableRegion->setInvariantDirections(inv_directions);
		reachableRegion->setMatrixSupportFunction(MatrixValue);
	} else {
		reachableRegion = template_polyhedra::ptr(new template_polyhedra());
		reachableRegion->setMatrixSupportFunction(MatrixValue);
		reachableRegion->setTemplateDirections(ReachParameters.Directions);
	}
}
|
ee34ce5a91a677e5bc9bc6dbb39d56bb263f67a5.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <rocblas.h>
#include <cusolverDn.h>
#include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h>
#include <helpers/logger.h>
#include "../cublasHelper.h"
#include "config.h"
#ifdef HAVE_CUDNN
#include <cudnn.h>
#endif
namespace sd {
std::mutex CublasHelper::_mutex;

// Creates a cuBLAS (hipBLAS) context for the current device. The handle is
// heap-allocated and returned as an opaque void* so the header does not need
// the BLAS library types.
static void* handle_() {
  auto _handle = new hipblasHandle_t();
  auto status = hipblasCreate(_handle);  // initialize CUBLAS context
  if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("cuBLAS handle creation failed !", status);
  return reinterpret_cast<void*>(_handle);
}

// Creates a cuSOLVER (hipSOLVER) dense-API handle for the current device.
static void* solver_() {
  auto cusolverH = new hipsolverDnHandle_t();
  auto status = hipsolverDnCreate(cusolverH);
  if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("cuSolver handle creation failed !", status);
  return cusolverH;
}

// Creates a cuDNN handle when compiled with cuDNN support; returns nullptr
// otherwise.
static void* cudnn_() {
#ifdef HAVE_CUDNN
  auto cudnnH = new cudnnHandle_t();
  auto status = cudnnCreate(cudnnH);
  if (status != CUDNN_STATUS_SUCCESS) throw cuda_exception::build("cuDNN handle creation failed !", status);
  return cudnnH;
#endif
  return nullptr;
}

// Destroys a cuBLAS context previously created by handle_() and frees it.
static void destroyHandle_(void* handle) {
  auto ch = reinterpret_cast<hipblasHandle_t*>(handle);
  auto status = hipblasDestroy(*ch);
  if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("cuBLAS handle destruction failed !", status);
  delete ch;
}

// Creates one cuBLAS/cuSOLVER/cuDNN handle per visible device, then restores
// the originally active device.
CublasHelper::CublasHelper() {
  auto numDevices = AffinityManager::numberOfDevices();
  auto currentDevice = AffinityManager::currentDeviceId();
  _cache.resize(numDevices);
  _solvers.resize(numDevices);
  _cudnn.resize(numDevices);
  for (int e = 0; e < numDevices; e++) {
    AffinityManager::setCurrentNativeDevice(e);
    _cache[e] = handle_();
    _solvers[e] = solver_();
    _cudnn[e] = cudnn_();
  }
  // don't forget to restore back original device
  AffinityManager::setCurrentNativeDevice(currentDevice);
}

// NOTE(review): only the cuBLAS handles are destroyed here; the cuSOLVER and
// cuDNN handles created in the constructor are leaked at process shutdown.
CublasHelper::~CublasHelper() {
  auto numDevices = AffinityManager::numberOfDevices();
  for (int e = 0; e < numDevices; e++) destroyHandle_(_cache[e]);
}

// Meyers-singleton accessor; construction is thread-safe in C++11.
CublasHelper& CublasHelper::getInstance() {
  static CublasHelper instance;
  return instance;
}

// Returns the cuDNN handle (as opaque void*) of the current device.
void* CublasHelper::cudnn() {
  auto deviceId = AffinityManager::currentDeviceId();
  // fix: valid indices are [0, size); the old check 'deviceId > size()' let
  // deviceId == size() through and indexed out of bounds
  if (deviceId < 0 || deviceId >= static_cast<int>(_cudnn.size()))
    throw cuda_exception::build("requested deviceId doesn't look valid", deviceId);
  return _cudnn[deviceId];
}

// Returns the cuBLAS handle of the current device.
void* CublasHelper::handle() {
  auto deviceId = AffinityManager::currentDeviceId();
  return handle(deviceId);
}

// Returns the cuSOLVER handle of the current device.
void* CublasHelper::solver() {
  auto deviceId = AffinityManager::currentDeviceId();
  if (deviceId < 0 || deviceId >= static_cast<int>(_solvers.size()))  // fix: was '>', an off-by-one
    throw cuda_exception::build("requested deviceId doesn't look valid", deviceId);
  return _solvers[deviceId];
}

// Returns the cuBLAS handle of an explicitly given device id.
void* CublasHelper::handle(int deviceId) {
  if (deviceId < 0 || deviceId >= static_cast<int>(_cache.size()))  // fix: was '>', an off-by-one
    throw cuda_exception::build("requested deviceId doesn't look valid", deviceId);
  return _cache[deviceId];
}
}  // namespace sd
| ee34ce5a91a677e5bc9bc6dbb39d56bb263f67a5.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <cublas_v2.h>
#include <cusolverDn.h>
#include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h>
#include <helpers/logger.h>
#include "../cublasHelper.h"
#include "config.h"
#ifdef HAVE_CUDNN
#include <cudnn.h>
#endif
namespace sd {
std::mutex CublasHelper::_mutex;
// Creates a fresh cuBLAS context on the currently selected device and hands
// it back as an opaque pointer (ownership transfers to the caller).
static void* handle_() {
  auto* h = new cublasHandle_t();
  const auto rc = cublasCreate_v2(h);  // initialize CUBLAS context
  if (rc != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("cuBLAS handle creation failed !", rc);
  return reinterpret_cast<void*>(h);
}
// Creates a cuSolver dense handle on the currently selected device and
// returns it as an opaque pointer (ownership transfers to the caller).
static void* solver_() {
  auto* h = new cusolverDnHandle_t();
  const auto rc = cusolverDnCreate(h);
  if (rc != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("cuSolver handle creation failed !", rc);
  return h;
}
// Creates a cuDNN handle when the build has cuDNN support; otherwise this
// degenerates to returning nullptr.
static void* cudnn_() {
#ifdef HAVE_CUDNN
  auto* h = new cudnnHandle_t();
  const auto rc = cudnnCreate(h);
  if (rc != CUDNN_STATUS_SUCCESS) throw cuda_exception::build("cuDNN handle creation failed !", rc);
  return h;
#endif
  return nullptr;
}
// Destroys a cuBLAS handle previously produced by handle_() and frees the
// heap storage wrapping it. The void* must actually point at a
// cublasHandle_t — there is no runtime type check.
// NOTE(review): this is called from ~CublasHelper(); an exception escaping a
// destructor path calls std::terminate — confirm the throw here is intended.
static void destroyHandle_(void* handle) {
  auto ch = reinterpret_cast<cublasHandle_t*>(handle);
  auto status = cublasDestroy_v2(*ch);
  if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("cuBLAS handle destruction failed !", status);
  delete ch;
}
// Eagerly creates one cuBLAS, one cuSolver and (optionally) one cuDNN handle
// per visible device. The native device is switched before each creation so
// every handle is bound to its own device context, then restored afterwards.
CublasHelper::CublasHelper() {
  auto numDevices = AffinityManager::numberOfDevices();
  auto currentDevice = AffinityManager::currentDeviceId();
  _cache.resize(numDevices);
  _solvers.resize(numDevices);
  _cudnn.resize(numDevices);
  for (int e = 0; e < numDevices; e++) {
    // Handle-creation calls below apply to the device selected here.
    AffinityManager::setCurrentNativeDevice(e);
    _cache[e] = handle_();
    _solvers[e] = solver_();
    _cudnn[e] = cudnn_();
  }
  // don't forget to restore back original device
  AffinityManager::setCurrentNativeDevice(currentDevice);
}
// Destroys the per-device cuBLAS handles created eagerly by the constructor.
// NOTE(review): only the _cache (cuBLAS) handles are destroyed here; the
// _solvers and _cudnn handles allocated alongside them are not freed in this
// destructor — confirm whether that is intentional or a leak.
CublasHelper::~CublasHelper() {
  auto numDevices = AffinityManager::numberOfDevices();
  for (int e = 0; e < numDevices; e++) destroyHandle_(_cache[e]);
}
// Accessor for the process-wide helper.
// Meyers singleton: constructed on first use; initialization is thread-safe
// under C++11 static-local semantics.
CublasHelper& CublasHelper::getInstance() {
  static CublasHelper singleton;
  return singleton;
}
// Returns the cuDNN handle bound to the calling thread's current device,
// as an opaque pointer (may be nullptr when built without HAVE_CUDNN).
void* CublasHelper::cudnn() {
  auto deviceId = AffinityManager::currentDeviceId();
  // Valid indices are [0, _cudnn.size()). The previous check used '>',
  // which let deviceId == _cudnn.size() slip through and read one element
  // past the end of the vector (operator[] performs no bounds checking).
  if (deviceId < 0 || deviceId >= static_cast<int>(_cudnn.size()))
    throw cuda_exception::build("requested deviceId doesn't look valid", deviceId);
  return _cudnn[deviceId];
}
// Convenience overload: resolves the calling thread's current device and
// delegates to handle(int).
void* CublasHelper::handle() {
  const auto currentDevice = AffinityManager::currentDeviceId();
  return handle(currentDevice);
}
// Returns the cuSolver handle bound to the calling thread's current device,
// as an opaque pointer.
void* CublasHelper::solver() {
  auto deviceId = AffinityManager::currentDeviceId();
  // Valid indices are [0, _solvers.size()). The previous check used '>',
  // which allowed deviceId == _solvers.size() and an out-of-bounds read.
  if (deviceId < 0 || deviceId >= static_cast<int>(_solvers.size()))
    throw cuda_exception::build("requested deviceId doesn't look valid", deviceId);
  return _solvers[deviceId];
}
// Returns the cuBLAS handle for an explicit device id, as an opaque pointer.
void* CublasHelper::handle(int deviceId) {
  // Valid indices are [0, _cache.size()). The previous check used '>',
  // which allowed deviceId == _cache.size() and an out-of-bounds read.
  if (deviceId < 0 || deviceId >= static_cast<int>(_cache.size()))
    throw cuda_exception::build("requested deviceId doesn't look valid", deviceId);
  return _cache[deviceId];
}
} // namespace sd
|
5ed0edede1f878a41176be20b6db7be20e1b1997.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using phi::PADDLE_CUDA_NUM_THREADS;
// Enumerate kernel: thread `index` (one per input element) emits `win_size`
// outputs — the run of inputs starting at `index` while it remains inside the
// same level-0 LoD sequence, padded with `pad_value` past the sequence end.
// `in_lod` holds `lod_len` cumulative offsets; in_lod[lod_len - 1] is the
// total element count, so the launch must cover at least that many threads.
template <typename T>
__global__ void CalcOutPut(const T* in_data,
                           const size_t* in_lod,
                           const size_t lod_len,
                           const int64_t win_size,
                           const int64_t pad_value,
                           T* out_data) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < in_lod[lod_len - 1]) {
    int end_idx = 0;
    // Get LoD interval of index: linear scan for the first offset beyond
    // `index`; that offset is the exclusive end of this element's sequence.
    for (int i = 1; i < lod_len; ++i) {
      if (index < in_lod[i]) {
        end_idx = in_lod[i];
        break;
      }
    }
    for (size_t i = 0; i < win_size; ++i) {
      int word_pos = index + i;
      // Inside the sequence -> copy the input value; past its end -> pad.
      out_data[index * win_size + i] =
          word_pos < end_idx ? in_data[word_pos] : pad_value;
    }
  }
}
// GPU kernel wrapper for the sequence_enumerate op: reads LoD tensor X,
// produces an (in_dims[0], win_size) enumeration of each element via the
// CalcOutPut kernel, and copies the input LoD onto the output.
template <typename T, typename DeviceContext>
class SequenceEnumerateOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* in = context.Input<phi::DenseTensor>("X");
    auto* out = context.Output<phi::DenseTensor>("Out");
    int win_size = context.Attr<int>("win_size");
    int pad_value = context.Attr<int>("pad_value");
    auto in_dims = in->dims();
    auto in_lod = in->lod();
    // Leading dim of the data must equal the total element count recorded
    // by the level-0 LoD, otherwise indexing below would be out of range.
    PADDLE_ENFORCE_EQ(
        static_cast<uint64_t>(in_dims[0]),
        in_lod[0].back(),
        platform::errors::InvalidArgument(
            "The actual input data's size mismatched with LoD information."
            "Received input data size is %d (actual) vs %d (loD information).",
            static_cast<uint64_t>(in_dims[0]),
            in_lod[0].back()));
    /* Generate enumerate sequence set */
    auto stream = context.cuda_device_context().stream();
    auto lod0 = in_lod[0];
    auto in_len = in->numel();
    auto in_data = in->data<T>();
    out->Resize({in_dims[0], win_size});
    auto out_data = out->mutable_data<T>(context.GetPlace());
    // Copy LoD to GPU (MixVector mirrors the host offsets into device memory)
    phi::MixVector<size_t> mixv_lod0(&lod0);
    const size_t* dev_in_lod_ptr = mixv_lod0.CUDAData(context.GetPlace());
    // Calc output tensor: one thread per input element, ceil-div grid size
    hipLaunchKernelGGL(( CalcOutPut), dim3((in_len - 1) / PADDLE_CUDA_NUM_THREADS + 1),
        dim3(PADDLE_CUDA_NUM_THREADS),
        0,
        stream,
        in_data, dev_in_lod_ptr, lod0.size(), win_size, pad_value, out_data);
    out->set_lod(in->lod());
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
PD_REGISTER_STRUCT_KERNEL(sequence_enumerate,
GPU,
ALL_LAYOUT,
ops::SequenceEnumerateOpCUDAKernel,
int32_t,
int64_t) {}
| 5ed0edede1f878a41176be20b6db7be20e1b1997.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using phi::PADDLE_CUDA_NUM_THREADS;
// Enumerate kernel: thread `index` (one per input element) emits `win_size`
// outputs — the run of inputs starting at `index` while it remains inside the
// same level-0 LoD sequence, padded with `pad_value` past the sequence end.
// `in_lod` holds `lod_len` cumulative offsets; in_lod[lod_len - 1] is the
// total element count, so the launch must cover at least that many threads.
template <typename T>
__global__ void CalcOutPut(const T* in_data,
                           const size_t* in_lod,
                           const size_t lod_len,
                           const int64_t win_size,
                           const int64_t pad_value,
                           T* out_data) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < in_lod[lod_len - 1]) {
    int end_idx = 0;
    // Get LoD interval of index: linear scan for the first offset beyond
    // `index`; that offset is the exclusive end of this element's sequence.
    for (int i = 1; i < lod_len; ++i) {
      if (index < in_lod[i]) {
        end_idx = in_lod[i];
        break;
      }
    }
    for (size_t i = 0; i < win_size; ++i) {
      int word_pos = index + i;
      // Inside the sequence -> copy the input value; past its end -> pad.
      out_data[index * win_size + i] =
          word_pos < end_idx ? in_data[word_pos] : pad_value;
    }
  }
}
// GPU kernel wrapper for the sequence_enumerate op: reads LoD tensor X,
// produces an (in_dims[0], win_size) enumeration of each element via the
// CalcOutPut kernel, and copies the input LoD onto the output.
template <typename T, typename DeviceContext>
class SequenceEnumerateOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* in = context.Input<phi::DenseTensor>("X");
    auto* out = context.Output<phi::DenseTensor>("Out");
    int win_size = context.Attr<int>("win_size");
    int pad_value = context.Attr<int>("pad_value");
    auto in_dims = in->dims();
    auto in_lod = in->lod();
    // Leading dim of the data must equal the total element count recorded
    // by the level-0 LoD, otherwise indexing below would be out of range.
    PADDLE_ENFORCE_EQ(
        static_cast<uint64_t>(in_dims[0]),
        in_lod[0].back(),
        platform::errors::InvalidArgument(
            "The actual input data's size mismatched with LoD information."
            "Received input data size is %d (actual) vs %d (loD information).",
            static_cast<uint64_t>(in_dims[0]),
            in_lod[0].back()));
    /* Generate enumerate sequence set */
    auto stream = context.cuda_device_context().stream();
    auto lod0 = in_lod[0];
    auto in_len = in->numel();
    auto in_data = in->data<T>();
    out->Resize({in_dims[0], win_size});
    auto out_data = out->mutable_data<T>(context.GetPlace());
    // Copy LoD to GPU (MixVector mirrors the host offsets into device memory)
    phi::MixVector<size_t> mixv_lod0(&lod0);
    const size_t* dev_in_lod_ptr = mixv_lod0.CUDAData(context.GetPlace());
    // Calc output tensor: one thread per input element, ceil-div grid size
    CalcOutPut<<<(in_len - 1) / PADDLE_CUDA_NUM_THREADS + 1,
                 PADDLE_CUDA_NUM_THREADS,
                 0,
                 stream>>>(
        in_data, dev_in_lod_ptr, lod0.size(), win_size, pad_value, out_data);
    out->set_lod(in->lod());
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
PD_REGISTER_STRUCT_KERNEL(sequence_enumerate,
GPU,
ALL_LAYOUT,
ops::SequenceEnumerateOpCUDAKernel,
int32_t,
int64_t) {}
|
610d23f0bf6f3a830da0eeadc42db712c030fe33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file
* Copyright (c) 2011-2019, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg_huffman_gpu_encoder.h"
#include <libgpujpeg/gpujpeg_util.h>
#define WARPS_NUM 8
/** Natural order in constant memory */
__constant__ int gpujpeg_huffman_gpu_encoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE];
/**
* Huffman coding tables in constant memory - each has 257 items (256 + 1 extra)
* There are are 4 of them - one after another, in following order:
* - luminance (Y) AC
* - luminance (Y) DC
* - chroma (cb/cr) AC
* - chroma (cb/cr) DC
*/
__device__ uint32_t gpujpeg_huffman_gpu_lut[(256 + 1) * 4];
/**
* Value decomposition in constant memory (input range from -4096 to 4095 ... both inclusive)
* Mapping from coefficient value into the code for the value ind its bit size.
*/
__device__ unsigned int gpujpeg_huffman_value_decomposition[8 * 1024];
/** Allocate huffman tables in constant memory */
__device__ struct gpujpeg_table_huffman_encoder gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_TYPE_COUNT][GPUJPEG_HUFFMAN_TYPE_COUNT];
struct gpujpeg_huffman_gpu_encoder
{
/** Size of occupied part of output buffer */
unsigned int * d_gpujpeg_huffman_output_byte_count;
};
/**
 * Initializes coefficient decomposition table in global memory. (CC >= 2.0)
 * Output table is a mapping from some value into its code and bit size.
 *
 * One thread fills one table entry, so the launch must cover exactly the
 * 8 * 1024 entries of gpujpeg_huffman_value_decomposition (input coefficient
 * range -4096 .. 4095, biased by +4096 into the table index).
 */
__global__ static void
gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel() {
    // fetch some value
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int value = tid - 4096;
    // decompose it
    unsigned int value_code = value;
    int absolute = value;
    if ( value < 0 ) {
        // value is now absolute value of input
        absolute = -absolute;
        // For a negative input, want temp2 = bitwise complement of abs(input)
        // This code assumes we are on a two's complement machine
        value_code--;
    }
    // Find the number of bits needed for the magnitude of the coefficient
    unsigned int value_nbits = 0;
    while ( absolute ) {
        value_nbits++;
        absolute >>= 1;
    }
    // save result packed into unsigned int (value bits are left aligned in MSBs and size is right aligned in LSBs)
    // NOTE(review): for value == 0, value_nbits is 0 and the shift amount
    // below is 32, which is undefined behavior in C++ — confirm the compiled
    // result is relied upon to be 0 here.
    gpujpeg_huffman_value_decomposition[tid] = value_nbits | (value_code << (32 - value_nbits));
}
#if __CUDA_ARCH__ >= 200
/**
 * Adds up to 32 bits at once into output buffer, applying byte stuffing.
 * Codeword value must be aligned to left (most significant bits). (CC >= 2.0)
 *
 * @param remaining_bits  accumulator of not-yet-flushed bits (left aligned)
 * @param byte_count      number of bytes already written to out_ptr
 * @param bit_count       number of valid bits currently in remaining_bits
 * @param out_ptr         output byte buffer (expected to be pre-zeroed, so
 *                        the stuffed 0x00 after 0xFF can simply be stored)
 * @param packed_code_word codeword bits in the 27 MSBs, bit length in 5 LSBs
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int & remaining_bits, int & byte_count, int & bit_count, uint8_t * const out_ptr, const unsigned int packed_code_word)
{
    // decompose packed codeword into the msb-aligned value and bit-length of the value
    const unsigned int code_word = packed_code_word & ~31;
    const unsigned int code_bit_size = packed_code_word & 31;
    // concatenate with remaining bits
    remaining_bits |= code_word >> bit_count;
    bit_count += code_bit_size;
    // flush some bytes if have more than 8 bits
    if (bit_count >= 8) {
        do {
            const unsigned int out_byte = remaining_bits >> 24;
            out_ptr[byte_count++] = out_byte;
            if(0xff == out_byte) {
                // keep zero byte after each 0xFF (buffer is expected to be zeroed)
                out_ptr[byte_count++] = 0;
            }
            remaining_bits <<= 8;
            bit_count -= 8;
        } while (bit_count >= 8);
        // keep only remaining bits in the buffer
        remaining_bits = code_word << (code_bit_size - bit_count);
        remaining_bits &= 0xfffffffe << (31 - bit_count);
    }
}
/**
 * Given some huffman table offset, RLE zero count and coefficient value,
 * this returns huffman codeword for the value (packed in 27 MSBs)
 * together with its bit size (in 5 LSBs). (CC >= 2.0)
 *
 * @param preceding_zero_count  run length of zeros before this coefficient
 * @param coefficient           value in -4096 .. 4095 (indexes the
 *                              precomputed decomposition table with bias 4096)
 * @param huffman_lut_offset    base offset into gpujpeg_huffman_gpu_lut
 *                              selecting the right AC/DC luma/chroma table
 */
__device__ static unsigned int
gpujpeg_huffman_gpu_encode_value(const int preceding_zero_count, const int coefficient,
                const int huffman_lut_offset)
{
    // value bits are in MSBs (left aligned) and bit size of the value is in LSBs (right aligned)
    const unsigned int packed_value = gpujpeg_huffman_value_decomposition[4096 + coefficient];
    // decompose value info into upshifted value and value's bit size
    // (the 0xf mask suffices because a magnitude up to 4096 needs at most 13 bits)
    const int value_nbits = packed_value & 0xf;
    const unsigned int value_code = packed_value & ~0xf;
    // find prefix of the codeword and size of the prefix
    const int huffman_lut_idx = huffman_lut_offset + preceding_zero_count * 16 + value_nbits;
    const unsigned int packed_prefix = gpujpeg_huffman_gpu_lut[huffman_lut_idx];
    const unsigned int prefix_nbits = packed_prefix & 31;
    // compose packed codeword with its size
    return (packed_prefix + value_nbits) | (value_code >> prefix_nbits);
}
/**
 * Flush remaining codewords from buffer in shared memory to global memory output buffer. (CC >= 2.0)
 *
 * Warp-cooperative: called by 32 threads (tid in [0, 32)); each thread stores
 * one uint4 (4 codewords), so up to 4 * 32 leftover codewords are handled.
 * Zero-sized padding codewords are appended so the serialization kernel can
 * always consume codewords four at a time.
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_flush_codewords(unsigned int * const s_out, unsigned int * &data_compressed, int & remaining_codewords, const int tid) {
    // this works for up to 4 * 32 remaining codewords
    if(remaining_codewords) {
        // pad remaining codewords with extra zero-sized codewords, not to have to use special case in serialization kernel, which saves 4 codewords at once
        s_out[remaining_codewords + tid] = 0;
        // save all remaining codewords at once (together with some zero sized padding codewords)
        *((uint4*)data_compressed) = ((uint4*)s_out)[tid];
        // update codeword counter
        data_compressed += remaining_codewords;
        remaining_codewords = 0;
    }
}
#ifndef FULL_MASK
#define FULL_MASK 0xffffffffu
#endif
// compat
#if CUDART_VERSION < 9000
#define __ballot_sync(set, pred) __ballot(pred)
#endif
/**
 * Encode one 8x8 block (CC >= 2.0)
 *
 * Warp-cooperative: must be called by all 32 threads of a warp (tid in
 * [0, 32)); thread `tid` handles the zigzag-ordered coefficient pair
 * (2*tid, 2*tid + 1). Codewords are staged in s_out (whose slot at
 * last_dc_idx holds the previous block's DC value for differential coding)
 * and flushed to data_compressed in 16-byte chunks when the staging buffer
 * fills past half; leftovers remain counted in remaining_codewords.
 *
 * @return 0 if succeeds, otherwise nonzero
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_encode_block(const int16_t * block, unsigned int * &data_compressed, unsigned int * const s_out,
                int & remaining_codewords, const int last_dc_idx, int tid, const int huffman_lut_offset)
{
    // each thread loads a pair of values (pair after zigzag reordering)
    const int load_idx = tid * 2;
    int in_even = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx]];
    const int in_odd = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx + 1]];
    // compute preceding zero count for even coefficient (actually compute the count multiplied by 16)
    const unsigned int nonzero_mask = (1 << tid) - 1;
    const unsigned int nonzero_bitmap_0 = 1 | __ballot_sync(FULL_MASK, in_even); // DC is always treated as nonzero
    const unsigned int nonzero_bitmap_1 = __ballot_sync(FULL_MASK, in_odd);
    const unsigned int nonzero_bitmap_pairs = nonzero_bitmap_0 | nonzero_bitmap_1;
    const int zero_pair_count = __clz(nonzero_bitmap_pairs & nonzero_mask);
    int zeros_before_even = 2 * (zero_pair_count + tid - 32);
    if((0x80000000 >> zero_pair_count) > (nonzero_bitmap_1 & nonzero_mask)) {
        zeros_before_even += 1;
    }
    // true if any nonzero pixel follows thread's odd pixel
    const bool nonzero_follows = nonzero_bitmap_pairs & ~nonzero_mask;
    // count of consecutive zeros before odd value (either one more than
    // even if even is zero or none if even value itself is nonzero)
    // (the count is actually multiplied by 16)
    int zeros_before_odd = in_even || !tid ? 0 : zeros_before_even + 1;
    // clear zero counts if no nonzero pixel follows (so that no 16-zero symbols will be emited)
    // otherwise only trim extra bits from the counts of following zeros
    const int zero_count_mask = nonzero_follows ? 0xF : 0;
    zeros_before_even &= zero_count_mask;
    zeros_before_odd &= zero_count_mask;
    // pointer to LUT for encoding thread's even value
    // (only thread #0 uses DC table, others use AC table)
    int even_lut_offset = huffman_lut_offset;
    // first thread handles special DC coefficient
    if(0 == tid) {
        // first thread uses DC part of the table for its even value
        even_lut_offset += 256 + 1;
        // update last DC coefficient (saved at the special place at the end of the shared bufer)
        const int original_in_even = in_even;
        in_even -= ((int*)s_out)[last_dc_idx];
        ((int*)s_out)[last_dc_idx] = original_in_even;
    }
    // last thread handles special block-termination symbol
    if(0 == ((tid ^ 31) | in_odd)) {
        // this causes selection of huffman symbol at index 256 (which contains the termination symbol)
        zeros_before_odd = 16;
    }
    // each thread gets codeword for its two pixels
    unsigned int even_code = gpujpeg_huffman_gpu_encode_value(zeros_before_even, in_even, even_lut_offset);
    unsigned int odd_code = gpujpeg_huffman_gpu_encode_value(zeros_before_odd, in_odd, huffman_lut_offset);
    // concatenate both codewords into one if they are short enough
    const unsigned int even_code_size = even_code & 31;
    const unsigned int odd_code_size = odd_code & 31;
    const unsigned int total_size = even_code_size + odd_code_size;
    if(total_size <= 27) {
        even_code = total_size | ((odd_code & ~31) >> even_code_size) | (even_code & ~31);
        odd_code = 0;
    }
    // each thread get number of preceding nonzero codewords and total number of nonzero codewords in this block
    const unsigned int even_codeword_presence = __ballot_sync(FULL_MASK, even_code);
    const unsigned int odd_codeword_presence = __ballot_sync(FULL_MASK, odd_code);
    const int codeword_offset = __popc(nonzero_mask & even_codeword_presence)
                              + __popc(nonzero_mask & odd_codeword_presence);
    // each thread saves its values into temporary shared buffer
    if(even_code) {
        s_out[remaining_codewords + codeword_offset] = even_code;
        if(odd_code) {
            s_out[remaining_codewords + codeword_offset + 1] = odd_code;
        }
    }
    // advance count of codewords in shared memory buffer
    remaining_codewords += __popc(odd_codeword_presence) + __popc(even_codeword_presence);
    // flush some codewords to global memory if there are too many of them in shared buffer
    const int flush_count = 32 * 4; // = half of the buffer
    if(remaining_codewords > flush_count) {
        // move first half of the buffer into output buffer in global memory and update output pointer
        *((uint4*)data_compressed) = ((uint4*)s_out)[tid];
        data_compressed += flush_count;
        // shift remaining codewords to begin of the buffer and update their count
        ((uint4*)s_out)[tid] = ((uint4*)s_out)[flush_count / 4 + tid]; // 4 for 4 uints in uint4
        remaining_codewords -= flush_count;
    }
    // nothing to fail here
    return 0;
}
#endif // #if __CUDA_ARCH__ >= 200
/**
 * Huffman encoder kernel (For compute capability >= 2.0)
 *
 * One warp encodes one segment (WARPS_NUM warps per threadblock). Each warp
 * writes its segment's codewords into the temp output buffer at the segment's
 * data_temp_index; the codeword count is left in data_compressed_size for the
 * serialization kernel that follows.
 *
 * @tparam CONTINUOUS_BLOCK_LIST true when a segment's 8x8 blocks are laid out
 *         consecutively for a single component; false when an explicit packed
 *         block list (d_block_list) must be walked.
 *
 * @return void
 */
template <bool CONTINUOUS_BLOCK_LIST>
#if __CUDA_ARCH__ >= 200
__launch_bounds__(WARPS_NUM * 32, 1024 / (WARPS_NUM * 32))
#endif
__global__ static void
gpujpeg_huffman_encoder_encode_kernel_warp(
    struct gpujpeg_segment* d_segment,
    int segment_count,
    uint8_t* d_data_compressed,
    const uint64_t* const d_block_list,
    int16_t* const d_data_quantized,
    struct gpujpeg_component* const d_component,
    const int comp_count,
    unsigned int * d_gpujpeg_huffman_output_byte_count
) {
#if __CUDA_ARCH__ >= 200
    int warpidx = threadIdx.x >> 5;
    int tid = threadIdx.x & 31;
    __shared__ uint4 s_out_all[(64 + 1) * WARPS_NUM];
    unsigned int * s_out = (unsigned int*)(s_out_all + warpidx * (64 + 1));
    // Number of remaining codewords in shared buffer
    int remaining_codewords = 0;
    // Select Segment
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    const int segment_index = block_idx * WARPS_NUM + warpidx;
    // first thread initializes compact output size for next kernel
    if(0 == tid && 0 == warpidx && 0 == block_idx) {
        *d_gpujpeg_huffman_output_byte_count = 0;
    }
    // stop if out of segment bounds
    if ( segment_index >= segment_count )
        return;
    struct gpujpeg_segment* segment = &d_segment[segment_index];
    // Initialize last DC coefficients
    if(tid < 3) {
        s_out[256 + tid] = 0;
    }
    // Prepare data pointers
    unsigned int * data_compressed = (unsigned int*)(d_data_compressed + segment->data_temp_index);
    unsigned int * data_compressed_start = data_compressed;
    // Pre-add thread ID to output pointer (it's always used only with it)
    data_compressed += (tid * 4);
    // Encode all block in segment
    if(CONTINUOUS_BLOCK_LIST) {
        // Get component for current scan
        const struct gpujpeg_component* component = &d_component[segment->scan_index];
        // mcu size of the component
        const int comp_mcu_size = component->mcu_size;
        // Get component data for MCU (first block)
        const int16_t* block = component->d_data_quantized + (segment->scan_segment_index * component->segment_mcu_count) * comp_mcu_size;
        // Get huffman table offset
        const int huffman_table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0 : (256 + 1) * 2; // possibly skips luminance tables
        // Encode MCUs in segment
        for (int block_count = segment->mcu_count; block_count--;) {
            // Encode 8x8 block
            gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, 256, tid, huffman_table_offset);
            // Advance to next block
            block += comp_mcu_size;
        }
    } else {
        // Pointer to segment's list of 8x8 blocks and their count
        const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin;
        // Encode all blocks
        for(int block_count = segment->block_count; block_count--;) {
            // Get pointer to next block input data and info about its color type
            const uint64_t packed_block_info = *(packed_block_info_ptr++);
            // Get coder parameters
            const int last_dc_idx = 256 + (packed_block_info & 0x7f);
            // Get offset to right part of huffman table
            const int huffman_table_offset = packed_block_info & 0x80 ? (256 + 1) * 2 : 0; // possibly skips luminance tables
            // Source data pointer
            int16_t* block = &d_data_quantized[packed_block_info >> 8];
            // Encode 8x8 block
            gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, last_dc_idx, tid, huffman_table_offset);
        }
    }
    // flush remaining codewords
    gpujpeg_huffman_gpu_encoder_flush_codewords(s_out, data_compressed, remaining_codewords, tid);
    // Set number of codewords.
    if (tid == 0 ) {
        segment->data_compressed_size = data_compressed - data_compressed_start;
    }
#endif // #if __CUDA_ARCH__ >= 200
}
#define SERIALIZATION_THREADS_PER_TBLOCK 192
/**
 * Codeword serialization kernel (CC >= 2.0).
 *
 * One thread serializes one whole segment: it reads the segment's packed
 * codewords (4 at a time, as produced by the encoding kernel), bit-packs
 * them with 0xFF byte stuffing into a 32-byte shared staging buffer, and
 * streams the result back over the same temp region in 16-byte chunks.
 * Each segment is terminated with a restart marker, and its final byte size
 * replaces the codeword count in data_compressed_size.
 *
 * @return void
 */
#if __CUDA_ARCH__ >= 200
__launch_bounds__(SERIALIZATION_THREADS_PER_TBLOCK, 1536 / SERIALIZATION_THREADS_PER_TBLOCK)
#endif
__global__ static void
gpujpeg_huffman_encoder_serialization_kernel(
    struct gpujpeg_segment* d_segment,
    int segment_count,
    const uint8_t* const d_src,
    uint8_t* const d_dest
) {
#if __CUDA_ARCH__ >= 200
    // Temp buffer for all threads of the threadblock
    __shared__ uint4 s_temp_all[2 * SERIALIZATION_THREADS_PER_TBLOCK];
    // Thread's 32 bytes in shared memory for output composition
    uint4 * const s_temp = s_temp_all + threadIdx.x * 2;
    // Select Segment
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    int segment_index = block_idx * SERIALIZATION_THREADS_PER_TBLOCK + threadIdx.x;
    if ( segment_index >= segment_count )
        return;
    // Thread's segment
    struct gpujpeg_segment* const segment = &d_segment[segment_index];
    // Input and output pointers
    const int data_offset = segment->data_temp_index;
    uint4 * const d_dest_stream_start = (uint4*)(d_dest + data_offset);
    uint4 * d_dest_stream = d_dest_stream_start;
    const uint4 * d_src_codewords = (uint4*)(d_src + data_offset);
    // number of bytes in the temp buffer, remaining bits and their count
    int byte_count = 0, bit_count = 0;
    unsigned int remaining_bits = 0;
    // "data_compressed_size" is now initialized to number of codewords to be serialized
    for(int cword_tuple_count = (segment->data_compressed_size + 3) >> 2; cword_tuple_count--; ) // reading 4 codewords at once
    {
        // read 4 codewords and advance input pointer to next ones
        const uint4 cwords = *(d_src_codewords++);
        // encode first pair of codewords
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.x);
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.y);
        // possibly flush output if have at least 16 bytes
        if(byte_count >= 16) {
            // write 16 bytes into destination buffer
            *(d_dest_stream++) = s_temp[0];
            // move remaining bytes to first half of the buffer
            s_temp[0] = s_temp[1];
            // update number of remaining bits
            byte_count -= 16;
        }
        // encode other two codewords
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.z);
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.w);
        // possibly flush output if have at least 16 bytes
        if(byte_count >= 16) {
            // write 16 bytes into destination buffer
            *(d_dest_stream++) = s_temp[0];
            // move remaining bytes to first half of the buffer
            s_temp[0] = s_temp[1];
            // update number of remaining bits
            byte_count -= 16;
        }
    }
    // Emit left bits (pad the last byte with seven 1-bits: value 0xfe left-aligned, size 7)
    gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, 0xfe000007);
    // Terminate codestream with restart marker
    ((uint8_t*)s_temp)[byte_count + 0] = 0xFF;
    ((uint8_t*)s_temp)[byte_count + 1] = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8);
    // flush remaining bytes
    d_dest_stream[0] = s_temp[0];
    d_dest_stream[1] = s_temp[1];
    // Set compressed size
    segment->data_compressed_size = (d_dest_stream - d_dest_stream_start) * 16 + byte_count + 2;
#endif // #if __CUDA_ARCH__ >= 200
}
/**
 * Huffman coder compact output allocation kernel - serially reserves
 * some space for compressed output of segments in output buffer.
 * (For CC 1.0 - a workaround for missing atomic operations.)
 *
 * Only single threadblock with 512 threads is launched.
 * Segment sizes are processed 512 at a time; thread #0 computes an exclusive
 * prefix sum over each batch (sizes rounded up to 16 B) and the offsets are
 * written back into data_compressed_index of each segment.
 */
__global__ static void
gpujpeg_huffman_encoder_allocation_kernel (
    struct gpujpeg_segment* const d_segment,
    const int segment_count,
    unsigned int * d_gpujpeg_huffman_output_byte_count
) {
    // offsets of segments
    __shared__ unsigned int s_segment_offsets[512];
    // cumulative sum of bytes of all segments
    // (only meaningful in thread #0 — the sole thread that accumulates it)
    unsigned int total_byte_count = 0;
    // iterate over all segments
    const unsigned int segment_idx_end = (segment_count + 511) & ~511;
    for(unsigned int segment_idx = threadIdx.x; segment_idx < segment_idx_end; segment_idx += 512) {
        // all threads load byte sizes of their segments (rounded up to next multiple of 16 B) into the shared array
        s_segment_offsets[threadIdx.x] = segment_idx < segment_count
                ? (d_segment[segment_idx].data_compressed_size + 15) & ~15
                : 0;
        // first thread runs a sort of serial prefix sum over the segment sizes to get their offsets
        __syncthreads();
        if(0 == threadIdx.x) {
            #pragma unroll 4
            for(int i = 0; i < 512; i++) {
                const unsigned int segment_size = s_segment_offsets[i];
                s_segment_offsets[i] = total_byte_count;
                total_byte_count += segment_size;
            }
        }
        __syncthreads();
        // all threads write offsets back into corresponding segment structures
        if(segment_idx < segment_count) {
            d_segment[segment_idx].data_compressed_index = s_segment_offsets[threadIdx.x];
        }
    }
    // first thread finally saves the total sum of bytes needed for compressed data
    if(threadIdx.x == 0) {
        *d_gpujpeg_huffman_output_byte_count = total_byte_count;
    }
}
/**
 * Huffman coder output compaction kernel.
 *
 * One warp copies one segment's serialized bytes from the sparse temp buffer
 * into the compact output buffer (threadblocks are 32 x N; threadIdx.y is the
 * warp index within the block). The destination offset is either precomputed
 * (CC 1.0) or reserved here with an atomicAdd on the global byte counter.
 * NOTE(review): s_out_ptrs is sized WARPS_NUM while it is indexed by
 * threadIdx.y — launches appear to assume blockDim.y <= WARPS_NUM; confirm.
 *
 * @return void
 */
__global__ static void
gpujpeg_huffman_encoder_compaction_kernel (
    struct gpujpeg_segment* const d_segment,
    const int segment_count,
    const uint8_t* const d_src,
    uint8_t* const d_dest,
    unsigned int * d_gpujpeg_huffman_output_byte_count
) {
    // get some segment (size of threadblocks is 32 x N, so threadIdx.y is warp index)
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    const int segment_idx = threadIdx.y + block_idx * blockDim.y;
    if(segment_idx >= segment_count) {
        return;
    }
    // temp variables for all warps
    __shared__ uint4* volatile s_out_ptrs[WARPS_NUM];
    // get info about the segment
    const unsigned int segment_byte_count = (d_segment[segment_idx].data_compressed_size + 15) & ~15; // number of bytes rounded up to multiple of 16
    const unsigned int segment_in_offset = d_segment[segment_idx].data_temp_index; // this should be aligned at least to 16byte boundary
    // first thread of each warp reserves space in output buffer
    if(0 == threadIdx.x) {
        // Either load precomputed output offset (for CC 1.0) or compute it now (for CCs with atomic operations)
        #if __CUDA_ARCH__ == 100
        const unsigned int segment_out_offset = d_segment[segment_idx].data_compressed_index;
        #else
        const unsigned int segment_out_offset = atomicAdd(d_gpujpeg_huffman_output_byte_count, segment_byte_count);
        d_segment[segment_idx].data_compressed_index = segment_out_offset;
        #endif
        s_out_ptrs[threadIdx.y] = (uint4*)(d_dest + segment_out_offset);
    }
    // we need to synchronize all our warps here to ensure s_out_ptrs is guaranteed to be provided on any thread.
    __syncthreads();
    // all threads read output buffer offset for their segment and prepare input and output pointers and number of copy iterations
    const uint4 * d_in = threadIdx.x + (uint4*)(d_src + segment_in_offset);
    uint4 * d_out = threadIdx.x + s_out_ptrs[threadIdx.y];
    unsigned int copy_iterations = segment_byte_count / 512; // 512 is number of bytes copied in each iteration (32 threads * 16 bytes per thread)
    // copy the data!
    while(copy_iterations--) {
        *d_out = *d_in;
        d_out += 32;
        d_in += 32;
    }
    // copy remaining bytes (less than 512 bytes)
    if((threadIdx.x * 16) < (segment_byte_count & 511)) {
        *d_out = *d_in;
    }
}
// Threadblock size for CC 1.x kernel (also the number of 8x8 blocks cached in shared memory at once)
#define THREAD_BLOCK_SIZE 48
/**
 * Write one byte to compressed data (CC 1.x)
 *
 * @param data_compressed Output pointer (advanced past the written byte); no bounds checking is done
 * @param value Byte value to write
 * @return void
 */
#define gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, value) { \
    *data_compressed = (uint8_t)(value); \
    data_compressed++; }
/**
 * Write two bytes (big-endian) to compressed data (CC 1.x)
 *
 * @param data_compressed Output pointer (advanced past the written bytes)
 * @param value Two-byte value to write
 * @return void
 */
#define gpujpeg_huffman_gpu_encoder_emit_2byte(data_compressed, value) { \
    *data_compressed = (uint8_t)(((value) >> 8) & 0xFF); \
    data_compressed++; \
    *data_compressed = (uint8_t)((value) & 0xFF); \
    data_compressed++; }
/**
 * Write marker to compressed data (CC 1.x); markers are a 0xFF prefix byte followed by the marker code
 *
 * @param data_compressed Output pointer (advanced past the written marker)
 * @param marker Marker to write (JPEG_MARKER_...)
 * @return void
 */
#define gpujpeg_huffman_gpu_encoder_marker(data_compressed, marker) { \
    *data_compressed = 0xFF;\
    data_compressed++; \
    *data_compressed = (uint8_t)(marker); \
    data_compressed++; }
/**
* Output bits to the file. Only the right 24 bits of put_buffer are used;
* the valid bits are left-justified in this part. At most 16 bits can be
* passed to EmitBits in one call, and we never retain more than 7 bits
* in put_buffer between calls, so 24 bits are sufficient. Version for CC 1.x
*
* @param coder Huffman coder structure
* @param code Huffman code
* @param size Size in bits of the Huffman code
* @return void
*/
__device__ static int
gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int code, int size, int & put_value, int & put_bits, uint8_t* & data_compressed)
{
    // A zero-sized code means the caller hit an invalid Huffman table entry.
    if ( size == 0 )
        return -1;
    // Keep only the 'size' low bits of the incoming code.
    int buffer = ((int)code) & ((((int)1) << size) - 1);
    // Total number of pending bits after appending the new code.
    int bits_in_buffer = put_bits + size;
    // Left-justify the new bits inside the 24-bit window and merge the
    // previously buffered bits (which are already left-justified there).
    buffer = (buffer << (24 - bits_in_buffer)) | put_value;
    // Flush whole bytes (taken from bits 16..23 of the window), applying
    // JPEG byte stuffing: each emitted 0xFF is followed by a zero byte.
    while ( bits_in_buffer >= 8 ) {
        const unsigned char byte_out = (unsigned char)((buffer >> 16) & 0xFF);
        gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, byte_out);
        if ( byte_out == 0xFF ) {
            gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, 0);
        }
        buffer <<= 8;
        bits_in_buffer -= 8;
    }
    // At most 7 bits remain buffered between calls.
    put_value = buffer;
    put_bits = bits_in_buffer;
    return 0;
}
/**
* Emit left bits (CC 1.x)
*
* @param coder Huffman coder structure
* @return void
*/
__device__ static void
gpujpeg_huffman_gpu_encoder_emit_left_bits(int & put_value, int & put_bits, uint8_t* & data_compressed)
{
    // Pad the last partial byte with 1-bits, as the JPEG standard requires.
    if ( 0 != gpujpeg_huffman_gpu_encoder_emit_bits(0x7F, 7, put_value, put_bits, data_compressed) )
        return;
    // Reset the coder state for the next segment.
    put_value = 0;
    put_bits = 0;
}
/**
* Encode one 8x8 block (for CC 1.x)
*
* @return 0 if succeeds, otherwise nonzero
*/
__device__ static int
gpujpeg_huffman_gpu_encoder_encode_block(int & put_value, int & put_bits, int & dc, int16_t* data, uint8_t* & data_compressed,
                struct gpujpeg_table_huffman_encoder* d_table_dc, struct gpujpeg_table_huffman_encoder* d_table_ac)
{
    // Type used for wide copies of coefficient data (4 coefficients per load)
    typedef uint64_t loading_t;
    const int loading_iteration_count = 64 * 2 / sizeof(loading_t); // 64 coefficients, 2 bytes each
    // Load block to shared memory (each thread owns its own run of 64 coefficients)
    __shared__ int16_t s_data[64 * THREAD_BLOCK_SIZE];
    for ( int i = 0; i < loading_iteration_count; i++ ) {
        ((loading_t*)s_data)[loading_iteration_count * threadIdx.x + i] = ((loading_t*)data)[i];
    }
    // Offset of this thread's block in shared memory
    int data_start = 64 * threadIdx.x;
    // Encode the DC coefficient difference per section F.1.2.1
    int temp = s_data[data_start + 0] - dc;
    dc = s_data[data_start + 0];
    int temp2 = temp;
    if ( temp < 0 ) {
        // Temp is abs value of input
        temp = -temp;
        // For a negative input, want temp2 = bitwise complement of abs(input)
        // This code assumes we are on a two's complement machine
        temp2--;
    }
    // Find the number of bits needed for the magnitude of the coefficient
    int nbits = 0;
    while ( temp ) {
        nbits++;
        temp >>= 1;
    }
    // Write category number
    if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_dc->code[nbits], d_table_dc->size[nbits], put_value, put_bits, data_compressed) != 0 ) {
        return -1;
    }
    // Write category offset (EmitBits rejects calls with size 0)
    if ( nbits ) {
        if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 )
            return -1;
    }
    // Encode the AC coefficients per section F.1.2.2 (r = run length of zeros)
    int r = 0;
    for ( int k = 1; k < 64; k++ )
    {
        // Fetch coefficients in zigzag order
        temp = s_data[data_start + gpujpeg_huffman_gpu_encoder_order_natural[k]];
        if ( temp == 0 ) {
            r++;
        }
        else {
            // If run length > 15, must emit special run-length-16 codes (0xF0)
            while ( r > 15 ) {
                if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0xF0], d_table_ac->size[0xF0], put_value, put_bits, data_compressed) != 0 )
                    return -1;
                r -= 16;
            }
            temp2 = temp;
            if ( temp < 0 ) {
                // temp is abs value of input
                temp = -temp;
                // This code assumes we are on a two's complement machine
                temp2--;
            }
            // Find the number of bits needed for the magnitude of the coefficient
            // there must be at least one 1 bit
            nbits = 1;
            while ( (temp >>= 1) )
                nbits++;
            // Emit Huffman symbol for run length / number of bits
            int i = (r << 4) + nbits;
            if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[i], d_table_ac->size[i], put_value, put_bits, data_compressed) != 0 )
                return -1;
            // Write Category offset
            if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 )
                return -1;
            r = 0;
        }
    }
    // If all the left coefs were zero, emit an end-of-block code
    if ( r > 0 ) {
        if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0], d_table_ac->size[0], put_value, put_bits, data_compressed) != 0 )
            return -1;
    }
    return 0;
}
/**
* Huffman encoder kernel (for CC 1.x)
*
* @return void
*/
__global__ static void
gpujpeg_huffman_encoder_encode_kernel(
    struct gpujpeg_component* d_component,
    struct gpujpeg_segment* d_segment,
    int comp_count,
    int segment_count,
    uint8_t* d_data_compressed,
    unsigned int * d_gpujpeg_huffman_output_byte_count
)
{
    // One thread serially encodes one whole segment
    int segment_index = blockIdx.x * blockDim.x + threadIdx.x;
    if ( segment_index >= segment_count )
        return;
    struct gpujpeg_segment* segment = &d_segment[segment_index];
    // first thread initializes compact output size for next kernel
    if(0 == segment_index) {
        *d_gpujpeg_huffman_output_byte_count = 0;
    }
    // Initialize huffman coder (bit buffer and per-component DC predictors)
    int put_value = 0;
    int put_bits = 0;
    int dc[GPUJPEG_MAX_COMPONENT_COUNT];
    for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ )
        dc[comp] = 0;
    // Prepare data pointers (output goes into the segment's temporary area)
    uint8_t* data_compressed = &d_data_compressed[segment->data_temp_index];
    uint8_t* data_compressed_start = data_compressed;
    // Non-interleaving mode
    if ( comp_count == 1 ) {
        // NOTE: intentionally shadows the outer segment_index with the in-scan index
        int segment_index = segment->scan_segment_index;
        // Encode MCUs in segment
        for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
            // Get component for current scan
            struct gpujpeg_component* component = &d_component[segment->scan_index];
            // Get component data for MCU
            int16_t* block = &component->d_data_quantized[(segment_index * component->segment_mcu_count + mcu_index) * component->mcu_size];
            // Get coder parameters
            int & component_dc = dc[segment->scan_index];
            // Get huffman tables
            struct gpujpeg_table_huffman_encoder* d_table_dc = NULL;
            struct gpujpeg_table_huffman_encoder* d_table_ac = NULL;
            if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) {
                d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC];
                d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC];
            } else {
                d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC];
                d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC];
            }
            // Encode 8x8 block
            if ( gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac) != 0 )
                break;
        }
    }
    // Interleaving mode
    else {
        // NOTE: intentionally shadows the outer segment_index with the in-scan index
        int segment_index = segment->scan_segment_index;
        // Encode MCUs in segment
        for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
            //assert(segment->scan_index == 0);
            for ( int comp = 0; comp < comp_count; comp++ ) {
                struct gpujpeg_component* component = &d_component[comp];
                // Prepare mcu indexes
                int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x;
                int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x;
                // Compute base data index
                int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
                // For all vertical 8x8 blocks
                for ( int y = 0; y < component->sampling_factor.vertical; y++ ) {
                    // Compute base row data index
                    int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
                    // For all horizontal 8x8 blocks
                    for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) {
                        // Compute 8x8 block data index
                        int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE;
                        // Get component data for MCU
                        int16_t* block = &component->d_data_quantized[data_index];
                        // Get coder parameters
                        int & component_dc = dc[comp];
                        // Get huffman tables
                        struct gpujpeg_table_huffman_encoder* d_table_dc = NULL;
                        struct gpujpeg_table_huffman_encoder* d_table_ac = NULL;
                        if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) {
                            d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC];
                            d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC];
                        } else {
                            d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC];
                            d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC];
                        }
                        // Encode 8x8 block
                        gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac);
                    }
                }
            }
        }
    }
    // Emit left bits (pad the last byte with 1-bits)
    if ( put_bits > 0 )
        gpujpeg_huffman_gpu_encoder_emit_left_bits(put_value, put_bits, data_compressed);
    // Output restart marker (RST0..RST7, cycling per segment)
    int restart_marker = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8);
    gpujpeg_huffman_gpu_encoder_marker(data_compressed, restart_marker);
    // Set compressed size (used by the subsequent compaction kernel)
    segment->data_compressed_size = data_compressed - data_compressed_start;
}
/**
 * Adds packed coefficients into the GPU version of Huffman lookup table.
 *
 * Each destination entry packs the Huffman code left-aligned in the MSBs with
 * its bit size in the 5 LSBs. Entry 257 (index 256) is a wrapped copy of
 * entry 0; for AC tables, index 0 is reserved for special purposes and zeroed.
 *
 * @param dest   destination array of 257 packed entries
 * @param src    source CPU Huffman table
 * @param is_ac  true for AC tables (reserves index 0)
 */
void
gpujpeg_huffman_gpu_add_packed_table(uint32_t * const dest, const struct gpujpeg_table_huffman_encoder * const src, const bool is_ac) {
    // make an upshifted copy of the table for GPU encoding
    for ( int i = 0; i <= 256; i++ ) {
        const int size = src->size[i & 0xFF];
        // Unused symbols have size 0; guard them explicitly, because shifting
        // a 32-bit value by 32 (i.e. 32 - 0) is undefined behavior.
        dest[i] = size ? ((src->code[i & 0xFF] << (32 - size)) | size) : 0;
    }
    // reserve first index in GPU version of AC table for special purposes
    if ( is_ac ) {
        dest[0] = 0;
    }
}
/* Documented at declaration */
struct gpujpeg_huffman_gpu_encoder *
gpujpeg_huffman_gpu_encoder_create(const struct gpujpeg_encoder * encoder)
{
    // Allocate and zero-initialize host-side encoder state
    struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder = (struct gpujpeg_huffman_gpu_encoder *) malloc(sizeof(struct gpujpeg_huffman_gpu_encoder));
    if ( huffman_gpu_encoder == NULL ) {
        return NULL;
    }
    memset(huffman_gpu_encoder, 0, sizeof(struct gpujpeg_huffman_gpu_encoder));
    // Allocate device counter of compact output size
    // (all error paths below release already-acquired resources; the original
    // code returned NULL directly and leaked the malloc'd structure)
    hipMalloc((void**)&huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count, sizeof(unsigned int));
    gpujpeg_cuda_check_error("Allocation of huffman output byte count failed", free(huffman_gpu_encoder); return NULL);
    // Initialize decomposition lookup table
    hipFuncSetCacheConfig(gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel, hipFuncCachePreferShared);
    hipLaunchKernelGGL(( gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel), dim3(32), dim3(256), 0, 0, ); // 8192 threads total
    hipDeviceSynchronize();
    gpujpeg_cuda_check_error("Decomposition LUT initialization failed", hipFree(huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count); free(huffman_gpu_encoder); return NULL);
    // compose GPU version of the huffman LUT and copy it into GPU memory (for CC >= 2.0)
    uint32_t gpujpeg_huffman_cpu_lut[(256 + 1) * 4];
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 0, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC], true);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 1, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC], false);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 2, &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC], true);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 3, &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC], false);
    hipMemcpyToSymbol(
        gpujpeg_huffman_gpu_lut,
        gpujpeg_huffman_cpu_lut,
        (256 + 1) * 4 * sizeof(*gpujpeg_huffman_gpu_lut),
        0,
        hipMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (Huffman LUT copy)", hipFree(huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count); free(huffman_gpu_encoder); return NULL);
    // Copy original Huffman coding tables to GPU memory (for CC 1.x)
    hipMemcpyToSymbol(
        gpujpeg_huffman_gpu_encoder_table_huffman,
        &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
        sizeof(gpujpeg_huffman_gpu_encoder_table_huffman),
        0,
        hipMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (Huffman coding table)", hipFree(huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count); free(huffman_gpu_encoder); return NULL);
    // Copy natural order to constant device memory
    hipMemcpyToSymbol(
        gpujpeg_huffman_gpu_encoder_order_natural,
        gpujpeg_order_natural,
        GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
        0,
        hipMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (natural order copy)", hipFree(huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count); free(huffman_gpu_encoder); return NULL);
    // Configure more shared memory for all kernels
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<true>, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<false>, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_serialization_kernel, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_compaction_kernel, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_allocation_kernel, hipFuncCachePreferShared);
    return huffman_gpu_encoder;
}
void
gpujpeg_huffman_gpu_encoder_destroy(struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder)
{
    assert(huffman_gpu_encoder != NULL);
    // Release the device-side output byte counter, if it was ever allocated.
    unsigned int * const d_byte_count = huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count;
    if ( d_byte_count != NULL ) {
        hipFree(d_byte_count);
    }
    // Release the host-side structure itself.
    free(huffman_gpu_encoder);
}
/**
* Get grid size for specified count of threadblocks. (Grid size is limited
* to 65536 in both directions, so if we need more threadblocks, we must use
* both x and y coordinates.)
*/
dim3
gpujpeg_huffman_gpu_encoder_grid_size(int tblock_count)
{
    // Start with a flat 1D grid, then repeatedly halve the x dimension
    // (rounding up) and double y until x fits into 16 bits. The resulting
    // grid covers at least tblock_count blocks; kernels bounds-check extras.
    dim3 grid(tblock_count);
    while ( grid.x > 0xffff ) {
        grid.x = (grid.x + 1) >> 1;
        grid.y *= 2;
    }
    return grid;
}
/* Documented at declaration */
int
gpujpeg_huffman_gpu_encoder_encode(struct gpujpeg_encoder* encoder, struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder, unsigned int * output_byte_count)
{
    // Get coder
    struct gpujpeg_coder* coder = &encoder->coder;
    assert(coder->param.restart_interval > 0);
    // Select encoder kernel which either expects continuos segments of blocks or uses block lists
    int comp_count = 1;
    if ( coder->param.interleaved == 1 )
        comp_count = coder->param_image.comp_count;
    assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT);
    // Select encoder kernel based on compute capability
    if ( encoder->coder.cuda_cc_major < 2 ) {
        // Run kernel (one thread per segment, serial encoding)
        dim3 thread(THREAD_BLOCK_SIZE);
        dim3 grid(gpujpeg_div_and_round_up(coder->segment_count, thread.x));
        hipLaunchKernelGGL(( gpujpeg_huffman_encoder_encode_kernel), dim3(grid), dim3(thread), 0, *(encoder->stream),
            coder->d_component,
            coder->d_segment,
            comp_count,
            coder->segment_count,
            coder->d_temp_huffman,
            huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
        );
        gpujpeg_cuda_check_error("Huffman encoding failed", return -1);
    } else {
        // Run encoder kernel (one warp per segment)
        dim3 thread(32 * WARPS_NUM);
        dim3 grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, (thread.x / 32)));
        if(comp_count == 1) {
            hipLaunchKernelGGL(( gpujpeg_huffman_encoder_encode_kernel_warp<true>), dim3(grid), dim3(thread), 0, *(encoder->stream),
                coder->d_segment,
                coder->segment_count,
                coder->d_data_compressed,
                coder->d_block_list,
                coder->d_data_quantized,
                coder->d_component,
                comp_count,
                huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
            );
            gpujpeg_cuda_check_error("Huffman encoding failed", return -1);
        } else {
            hipLaunchKernelGGL(( gpujpeg_huffman_encoder_encode_kernel_warp<false>), dim3(grid), dim3(thread), 0, *(encoder->stream),
                coder->d_segment,
                coder->segment_count,
                coder->d_data_compressed,
                coder->d_block_list,
                coder->d_data_quantized,
                coder->d_component,
                comp_count,
                huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
            );
            gpujpeg_cuda_check_error("Huffman encoding failed", return -1);
        }
        // Run codeword serialization kernel.
        // BUGFIX: launch with the possibly-2D serialization_grid (it was computed
        // but left unused) — a flat 1D grid would exceed the 65535 blocks-per-
        // dimension limit for very large segment counts.
        const int num_serialization_tblocks = gpujpeg_div_and_round_up(coder->segment_count, SERIALIZATION_THREADS_PER_TBLOCK);
        const dim3 serialization_grid = gpujpeg_huffman_gpu_encoder_grid_size(num_serialization_tblocks);
        hipLaunchKernelGGL(( gpujpeg_huffman_encoder_serialization_kernel), dim3(serialization_grid), dim3(SERIALIZATION_THREADS_PER_TBLOCK), 0, *(encoder->stream),
            coder->d_segment,
            coder->segment_count,
            coder->d_data_compressed,
            coder->d_temp_huffman
        );
        gpujpeg_cuda_check_error("Codeword serialization failed", return -1);
    }
    // No atomic operations in CC 1.0 => run output size computation kernel to allocate the output buffer space
    if ( encoder->coder.cuda_cc_major == 1 && encoder->coder.cuda_cc_minor == 0 ) {
        hipLaunchKernelGGL(( gpujpeg_huffman_encoder_allocation_kernel), dim3(1), dim3(512), 0, *(encoder->stream), coder->d_segment, coder->segment_count, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count);
        gpujpeg_cuda_check_error("Huffman encoder output allocation failed", return -1);
    }
    // Run output compaction kernel (one warp per segment)
    const dim3 compaction_thread(32, WARPS_NUM);
    const dim3 compaction_grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, WARPS_NUM));
    hipLaunchKernelGGL(( gpujpeg_huffman_encoder_compaction_kernel), dim3(compaction_grid), dim3(compaction_thread), 0, *(encoder->stream),
        coder->d_segment,
        coder->segment_count,
        coder->d_temp_huffman,
        coder->d_data_compressed,
        huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
    );
    gpujpeg_cuda_check_error("Huffman output compaction failed", return -1);
    // Read and return number of occupied bytes.
    // NOTE(review): the copy is asynchronous — *output_byte_count is valid only
    // after the caller synchronizes encoder->stream; confirm callers do so.
    hipMemcpyAsync(output_byte_count, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count, sizeof(unsigned int), hipMemcpyDeviceToHost, *(encoder->stream));
    gpujpeg_cuda_check_error("Huffman output size getting failed", return -1);
    // indicate success
    return 0;
}
| 610d23f0bf6f3a830da0eeadc42db712c030fe33.cu | /**
* @file
* Copyright (c) 2011-2019, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg_huffman_gpu_encoder.h"
#include <libgpujpeg/gpujpeg_util.h>
// Number of warps (segments processed concurrently) per threadblock of the warp-per-segment kernel
#define WARPS_NUM 8
/** Natural order in constant memory */
__constant__ int gpujpeg_huffman_gpu_encoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE];
/**
 * Huffman coding tables in device memory - each has 257 items (256 + 1 extra)
 * There are 4 of them - one after another, in following order:
 * - luminance (Y) AC
 * - luminance (Y) DC
 * - chroma (cb/cr) AC
 * - chroma (cb/cr) DC
 */
__device__ uint32_t gpujpeg_huffman_gpu_lut[(256 + 1) * 4];
/**
 * Value decomposition table in device memory (input range from -4096 to 4095 ... both inclusive)
 * Mapping from coefficient value into the code for the value and its bit size.
 */
__device__ unsigned int gpujpeg_huffman_value_decomposition[8 * 1024];
/** Huffman tables in device memory (original CPU layout, used by the CC 1.x kernel) */
__device__ struct gpujpeg_table_huffman_encoder gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_TYPE_COUNT][GPUJPEG_HUFFMAN_TYPE_COUNT];
struct gpujpeg_huffman_gpu_encoder
{
    /** Size of occupied part of output buffer (device pointer) */
    unsigned int * d_gpujpeg_huffman_output_byte_count;
};
/**
* Initializes coefficient decomposition table in global memory. (CC >= 2.0)
* Output table is a mapping from some value into its code and bit size.
*/
__global__ static void
gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel() {
    // each thread decomposes one value of the range [-4096, 4095]
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int value = tid - 4096;
    // decompose it
    unsigned int value_code = value;
    int absolute = value;
    if ( value < 0 ) {
        // value is now absolute value of input
        absolute = -absolute;
        // For a negative input, want temp2 = bitwise complement of abs(input)
        // This code assumes we are on a two's complement machine
        value_code--;
    }
    // Find the number of bits needed for the magnitude of the coefficient
    unsigned int value_nbits = 0;
    while ( absolute ) {
        value_nbits++;
        absolute >>= 1;
    }
    // save result packed into unsigned int (value bits are left aligned in MSBs and size is right aligned in LSBs)
    // BUGFIX: guard the value == 0 case — shifting a 32-bit value by 32 bits
    // (32 - value_nbits with value_nbits == 0) is undefined behavior; the
    // packed result for value 0 is simply 0, as before.
    gpujpeg_huffman_value_decomposition[tid] = value_nbits
        | (value_nbits ? (value_code << (32 - value_nbits)) : 0);
}
#if __CUDA_ARCH__ >= 200
/**
* Adds up to 32 bits at once into ouptut buffer, applying byte stuffing.
* Codeword value must be aligned to left (most significant bits). (CC >= 2.0)
*/
__device__ static void
gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int & remaining_bits, int & byte_count, int & bit_count, uint8_t * const out_ptr, const unsigned int packed_code_word)
{
    // decompose packed codeword into the msb-aligned value and bit-length of the value (5 LSBs)
    const unsigned int code_word = packed_code_word & ~31;
    const unsigned int code_bit_size = packed_code_word & 31;
    // concatenate with remaining bits (bits shifted beyond 32 are lost here,
    // but are recovered from code_word after flushing, below)
    remaining_bits |= code_word >> bit_count;
    bit_count += code_bit_size;
    // flush some bytes if have more than 8 bits
    if (bit_count >= 8) {
        do {
            // emit the most significant byte of the 32bit buffer
            const unsigned int out_byte = remaining_bits >> 24;
            out_ptr[byte_count++] = out_byte;
            if(0xff == out_byte) {
                // keep zero byte after each 0xFF (buffer is expected to be zeroed)
                out_ptr[byte_count++] = 0;
            }
            remaining_bits <<= 8;
            bit_count -= 8;
        } while (bit_count >= 8);
        // keep only remaining bits in the buffer (rebuilt from the codeword,
        // since some of them may have been shifted out of the 32bit buffer above)
        remaining_bits = code_word << (code_bit_size - bit_count);
        remaining_bits &= 0xfffffffe << (31 - bit_count);
    }
}
/**
* Given some huffman table offset, RLE zero count and coefficient value,
* this returns huffman codeword for the value (packed in 27 MSBs)
* together with its bit size (in 5 LSBs). (CC >= 2.0)
*/
__device__ static unsigned int
gpujpeg_huffman_gpu_encode_value(const int preceding_zero_count, const int coefficient,
                const int huffman_lut_offset)
{
    // value bits are in MSBs (left aligned) and bit size of the value is in LSBs (right aligned)
    const unsigned int packed_value = gpujpeg_huffman_value_decomposition[4096 + coefficient];
    // decompose value info into upshifted value and value's bit size
    // (0xf mask suffices: coefficient magnitude is at most 13 bits for the table's range)
    const int value_nbits = packed_value & 0xf;
    const unsigned int value_code = packed_value & ~0xf;
    // find prefix of the codeword and size of the prefix (prefix bits left-aligned, size in 5 LSBs)
    const int huffman_lut_idx = huffman_lut_offset + preceding_zero_count * 16 + value_nbits;
    const unsigned int packed_prefix = gpujpeg_huffman_gpu_lut[huffman_lut_idx];
    const unsigned int prefix_nbits = packed_prefix & 31;
    // compose packed codeword with its size
    // (adding value_nbits to packed_prefix accumulates the total bit count in the 5 LSBs)
    return (packed_prefix + value_nbits) | (value_code >> prefix_nbits);
}
/**
* Flush remaining codewords from buffer in shared memory to global memory output buffer. (CC >= 2.0)
*/
__device__ static void
gpujpeg_huffman_gpu_encoder_flush_codewords(unsigned int * const s_out, unsigned int * &data_compressed, int & remaining_codewords, const int tid) {
    // this works for up to 4 * 32 remaining codewords (one uint4 store per warp lane)
    if(remaining_codewords) {
        // pad remaining codewords with extra zero-sized codewords, not to have to use special case in serialization kernel, which saves 4 codewords at once
        s_out[remaining_codewords + tid] = 0;
        // save all remaining codewords at once (together with some zero sized padding codewords)
        *((uint4*)data_compressed) = ((uint4*)s_out)[tid];
        // update codeword counter (only by the true codeword count, not the padding)
        data_compressed += remaining_codewords;
        remaining_codewords = 0;
    }
}
#ifndef FULL_MASK
// mask selecting all 32 lanes of a warp (participant mask for *_sync warp intrinsics)
#define FULL_MASK 0xffffffffu
#endif
// compat: CUDA older than 9.0 has no __ballot_sync; map it to the legacy mask-less __ballot
#if CUDART_VERSION < 9000
#define __ballot_sync(set, pred) __ballot(pred)
#endif
/**
* Encode one 8x8 block (CC >= 2.0)
*
* @return 0 if succeeds, otherwise nonzero
*/
__device__ static int
gpujpeg_huffman_gpu_encoder_encode_block(const int16_t * block, unsigned int * &data_compressed, unsigned int * const s_out,
                int & remaining_codewords, const int last_dc_idx, int tid, const int huffman_lut_offset)
{
    // each thread loads a pair of values (pair after zigzag reordering)
    const int load_idx = tid * 2;
    int in_even = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx]];
    const int in_odd = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx + 1]];
    // compute preceding zero count for even coefficient
    // (the *16 scaling for LUT indexing happens in gpujpeg_huffman_gpu_encode_value)
    const unsigned int nonzero_mask = (1 << tid) - 1;
    const unsigned int nonzero_bitmap_0 = 1 | __ballot_sync(FULL_MASK, in_even); // DC is always treated as nonzero
    const unsigned int nonzero_bitmap_1 = __ballot_sync(FULL_MASK, in_odd);
    const unsigned int nonzero_bitmap_pairs = nonzero_bitmap_0 | nonzero_bitmap_1;
    const int zero_pair_count = __clz(nonzero_bitmap_pairs & nonzero_mask);
    int zeros_before_even = 2 * (zero_pair_count + tid - 32);
    if((0x80000000 >> zero_pair_count) > (nonzero_bitmap_1 & nonzero_mask)) {
        zeros_before_even += 1;
    }
    // true if any nonzero pixel follows thread's odd pixel
    const bool nonzero_follows = nonzero_bitmap_pairs & ~nonzero_mask;
    // count of consecutive zeros before odd value (either one more than
    // even if even is zero or none if even value itself is nonzero)
    int zeros_before_odd = in_even || !tid ? 0 : zeros_before_even + 1;
    // clear zero counts if no nonzero pixel follows (so that no 16-zero symbols will be emited)
    // otherwise only trim extra bits from the counts of following zeros
    const int zero_count_mask = nonzero_follows ? 0xF : 0;
    zeros_before_even &= zero_count_mask;
    zeros_before_odd &= zero_count_mask;
    // pointer to LUT for encoding thread's even value
    // (only thread #0 uses DC table, others use AC table)
    int even_lut_offset = huffman_lut_offset;
    // first thread handles special DC coefficient
    if(0 == tid) {
        // first thread uses DC part of the table for its even value
        even_lut_offset += 256 + 1;
        // update last DC coefficient (saved at the special place at the end of the shared bufer)
        const int original_in_even = in_even;
        in_even -= ((int*)s_out)[last_dc_idx];
        ((int*)s_out)[last_dc_idx] = original_in_even;
    }
    // last thread handles special block-termination symbol
    if(0 == ((tid ^ 31) | in_odd)) {
        // this causes selection of huffman symbol at index 256 (which contains the termination symbol)
        zeros_before_odd = 16;
    }
    // each thread gets codeword for its two pixels
    unsigned int even_code = gpujpeg_huffman_gpu_encode_value(zeros_before_even, in_even, even_lut_offset);
    unsigned int odd_code = gpujpeg_huffman_gpu_encode_value(zeros_before_odd, in_odd, huffman_lut_offset);
    // concatenate both codewords into one if they are short enough (27 value bits + 5 size bits)
    const unsigned int even_code_size = even_code & 31;
    const unsigned int odd_code_size = odd_code & 31;
    const unsigned int total_size = even_code_size + odd_code_size;
    if(total_size <= 27) {
        even_code = total_size | ((odd_code & ~31) >> even_code_size) | (even_code & ~31);
        odd_code = 0;
    }
    // each thread get number of preceding nonzero codewords and total number of nonzero codewords in this block
    const unsigned int even_codeword_presence = __ballot_sync(FULL_MASK, even_code);
    const unsigned int odd_codeword_presence = __ballot_sync(FULL_MASK, odd_code);
    const int codeword_offset = __popc(nonzero_mask & even_codeword_presence)
                              + __popc(nonzero_mask & odd_codeword_presence);
    // each thread saves its values into temporary shared buffer
    if(even_code) {
        s_out[remaining_codewords + codeword_offset] = even_code;
        if(odd_code) {
            s_out[remaining_codewords + codeword_offset + 1] = odd_code;
        }
    }
    // advance count of codewords in shared memory buffer
    remaining_codewords += __popc(odd_codeword_presence) + __popc(even_codeword_presence);
    // flush some codewords to global memory if there are too many of them in shared buffer
    const int flush_count = 32 * 4; // = half of the buffer
    if(remaining_codewords > flush_count) {
        // move first half of the buffer into output buffer in global memory and update output pointer
        *((uint4*)data_compressed) = ((uint4*)s_out)[tid];
        data_compressed += flush_count;
        // shift remaining codewords to begin of the buffer and update their count
        ((uint4*)s_out)[tid] = ((uint4*)s_out)[flush_count / 4 + tid]; // 4 for 4 uints in uint4
        remaining_codewords -= flush_count;
    }
    // nothing to fail here
    return 0;
}
#endif // #if __CUDA_ARCH__ >= 200
/**
* Huffman encoder kernel (For compute capability >= 2.0)
*
* @return void
*/
template <bool CONTINUOUS_BLOCK_LIST>
#if __CUDA_ARCH__ >= 200
__launch_bounds__(WARPS_NUM * 32, 1024 / (WARPS_NUM * 32))
#endif
__global__ static void
gpujpeg_huffman_encoder_encode_kernel_warp(
    struct gpujpeg_segment* d_segment,
    int segment_count,
    uint8_t* d_data_compressed,
    const uint64_t* const d_block_list,
    int16_t* const d_data_quantized,
    struct gpujpeg_component* const d_component,
    const int comp_count,
    unsigned int * d_gpujpeg_huffman_output_byte_count
) {
#if __CUDA_ARCH__ >= 200
    // one warp encodes one segment; WARPS_NUM warps per threadblock
    int warpidx = threadIdx.x >> 5;
    int tid = threadIdx.x & 31;
    // per-warp codeword buffer: 64 uint4 of codewords + 1 uint4 for last-DC values
    __shared__ uint4 s_out_all[(64 + 1) * WARPS_NUM];
    unsigned int * s_out = (unsigned int*)(s_out_all + warpidx * (64 + 1));
    // Number of remaining codewords in shared buffer
    int remaining_codewords = 0;
    // Select Segment (grid may be 2D — see gpujpeg_huffman_gpu_encoder_grid_size)
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    const int segment_index = block_idx * WARPS_NUM + warpidx;
    // first thread initializes compact output size for next kernel
    if(0 == tid && 0 == warpidx && 0 == block_idx) {
        *d_gpujpeg_huffman_output_byte_count = 0;
    }
    // stop if out of segment bounds
    if ( segment_index >= segment_count )
        return;
    struct gpujpeg_segment* segment = &d_segment[segment_index];
    // Initialize last DC coefficients (stored past the codeword area of the shared buffer)
    if(tid < 3) {
        s_out[256 + tid] = 0;
    }
    // Prepare data pointers
    unsigned int * data_compressed = (unsigned int*)(d_data_compressed + segment->data_temp_index);
    unsigned int * data_compressed_start = data_compressed;
    // Pre-add thread ID to output pointer (it's allways used only with it)
    data_compressed += (tid * 4);
    // Encode all block in segment
    if(CONTINUOUS_BLOCK_LIST) {
        // non-interleaved mode: blocks of the segment are contiguous in memory
        // Get component for current scan
        const struct gpujpeg_component* component = &d_component[segment->scan_index];
        // mcu size of the component
        const int comp_mcu_size = component->mcu_size;
        // Get component data for MCU (first block)
        const int16_t* block = component->d_data_quantized + (segment->scan_segment_index * component->segment_mcu_count) * comp_mcu_size;
        // Get huffman table offset
        const int huffman_table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0 : (256 + 1) * 2; // possibly skips luminance tables
        // Encode MCUs in segment
        for (int block_count = segment->mcu_count; block_count--;) {
            // Encode 8x8 block
            gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, 256, tid, huffman_table_offset);
            // Advance to next block
            block += comp_mcu_size;
        }
    } else {
        // interleaved mode: blocks are scattered, addressed through the block list
        // Pointer to segment's list of 8x8 blocks and their count
        const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin;
        // Encode all blocks
        for(int block_count = segment->block_count; block_count--;) {
            // Get pointer to next block input data and info about its color type
            const uint64_t packed_block_info = *(packed_block_info_ptr++);
            // Get coder parameters (index of this component's last-DC slot in shared memory)
            const int last_dc_idx = 256 + (packed_block_info & 0x7f);
            // Get offset to right part of huffman table
            const int huffman_table_offset = packed_block_info & 0x80 ? (256 + 1) * 2 : 0; // possibly skips luminance tables
            // Source data pointer
            int16_t* block = &d_data_quantized[packed_block_info >> 8];
            // Encode 8x8 block
            gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, last_dc_idx, tid, huffman_table_offset);
        }
    }
    // flush remaining codewords
    gpujpeg_huffman_gpu_encoder_flush_codewords(s_out, data_compressed, remaining_codewords, tid);
    // Set number of codewords.
    if (tid == 0 ) {
        segment->data_compressed_size = data_compressed - data_compressed_start;
    }
#endif // #if __CUDA_ARCH__ >= 200
}
#define SERIALIZATION_THREADS_PER_TBLOCK 192
/**
 * Codeword serialization kernel (CC >= 2.0).
 *
 * One thread serializes one segment: it reads the 32-bit codewords produced
 * by the warp encoder kernel, packs them into a byte stream (including JPEG
 * byte stuffing, done inside emit_bits), and terminates the segment with a
 * restart marker. On entry, segment->data_compressed_size is a codeword
 * count; on exit it is the byte size of the serialized segment.
 *
 * @return void
 */
#if __CUDA_ARCH__ >= 200
__launch_bounds__(SERIALIZATION_THREADS_PER_TBLOCK, 1536 / SERIALIZATION_THREADS_PER_TBLOCK)
#endif
__global__ static void
gpujpeg_huffman_encoder_serialization_kernel(
    struct gpujpeg_segment* d_segment,
    int segment_count,
    const uint8_t* const d_src,
    uint8_t* const d_dest
) {
#if __CUDA_ARCH__ >= 200
    // Temp buffer for all threads of the threadblock
    __shared__ uint4 s_temp_all[2 * SERIALIZATION_THREADS_PER_TBLOCK];

    // Thread's 32 bytes in shared memory for output composition
    uint4 * const s_temp = s_temp_all + threadIdx.x * 2;

    // Select segment (one thread per segment, 2D grid flattened)
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    int segment_index = block_idx * SERIALIZATION_THREADS_PER_TBLOCK + threadIdx.x;
    if ( segment_index >= segment_count )
        return;

    // Thread's segment
    struct gpujpeg_segment* const segment = &d_segment[segment_index];

    // Input and output pointers: codewords are read from d_src and bytes are
    // written to d_dest at the same per-segment offset
    const int data_offset = segment->data_temp_index;
    uint4 * const d_dest_stream_start = (uint4*)(d_dest + data_offset);
    uint4 * d_dest_stream = d_dest_stream_start;
    const uint4 * d_src_codewords = (uint4*)(d_src + data_offset);

    // number of bytes in the temp buffer, remaining bits and their count
    int byte_count = 0, bit_count = 0;
    unsigned int remaining_bits = 0;

    // "data_compressed_size" is now initialized to number of codewords to be serialized
    for(int cword_tuple_count = (segment->data_compressed_size + 3) >> 2; cword_tuple_count--; ) // reading 4 codewords at once
    {
        // read 4 codewords and advance input pointer to next ones
        const uint4 cwords = *(d_src_codewords++);

        // encode first pair of codewords
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.x);
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.y);

        // possibly flush output if have at least 16 bytes
        if(byte_count >= 16) {
            // write 16 bytes into destination buffer
            *(d_dest_stream++) = s_temp[0];
            // move remaining bytes to first half of the buffer
            s_temp[0] = s_temp[1];
            // update number of remaining bytes
            byte_count -= 16;
        }

        // encode other two codewords
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.z);
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.w);

        // possibly flush output if have at least 16 bytes
        if(byte_count >= 16) {
            // write 16 bytes into destination buffer
            *(d_dest_stream++) = s_temp[0];
            // move remaining bytes to first half of the buffer
            s_temp[0] = s_temp[1];
            // update number of remaining bytes
            byte_count -= 16;
        }
    }

    // Emit left bits (0xfe000007 presumably encodes the final padding —
    // see the CC >= 2.0 emit_bits overload; TODO confirm)
    gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, 0xfe000007);

    // Terminate codestream with restart marker (RST0..RST7 cycling)
    ((uint8_t*)s_temp)[byte_count + 0] = 0xFF;
    ((uint8_t*)s_temp)[byte_count + 1] = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8);

    // flush remaining bytes (writes a full 32 B; bytes past the marker are
    // harmless because data_compressed_size below excludes them)
    d_dest_stream[0] = s_temp[0];
    d_dest_stream[1] = s_temp[1];

    // Set compressed size (in bytes: flushed 16 B chunks + tail + 2 B marker)
    segment->data_compressed_size = (d_dest_stream - d_dest_stream_start) * 16 + byte_count + 2;
#endif // #if __CUDA_ARCH__ >= 200
}
/**
 * Huffman coder compact output allocation kernel - serially reserves
 * some space for compressed output of segments in output buffer.
 * (For CC 1.0 - a workaround for missing atomic operations.)
 *
 * Computes an exclusive prefix sum of the 16-byte-aligned segment sizes:
 * each segment's data_compressed_index becomes its output offset, and the
 * total is stored into *d_gpujpeg_huffman_output_byte_count.
 *
 * Only single threadblock with 512 threads is launched.
 */
__global__ static void
gpujpeg_huffman_encoder_allocation_kernel (
    struct gpujpeg_segment* const d_segment,
    const int segment_count,
    unsigned int * d_gpujpeg_huffman_output_byte_count
) {
    // offsets of segments
    __shared__ unsigned int s_segment_offsets[512];

    // cumulative sum of bytes of all segments (accumulated by thread 0 only)
    unsigned int total_byte_count = 0;

    // Iterate over all segments, 512 at a time. The loop bound is rounded up
    // to a multiple of 512 so ALL threads execute the same iteration count
    // and can safely meet at the __syncthreads() barriers below.
    const unsigned int segment_idx_end = (segment_count + 511) & ~511;
    for(unsigned int segment_idx = threadIdx.x; segment_idx < segment_idx_end; segment_idx += 512) {
        // all threads load byte sizes of their segments (rounded up to next multiple of 16 B) into the shared array
        s_segment_offsets[threadIdx.x] = segment_idx < segment_count
            ? (d_segment[segment_idx].data_compressed_size + 15) & ~15
            : 0;

        // first thread runs a sort of serial prefix sum over the segment sizes to get their offsets
        __syncthreads();
        if(0 == threadIdx.x) {
            #pragma unroll 4
            for(int i = 0; i < 512; i++) {
                const unsigned int segment_size = s_segment_offsets[i];
                s_segment_offsets[i] = total_byte_count;  // exclusive prefix sum
                total_byte_count += segment_size;
            }
        }
        __syncthreads();

        // all threads write offsets back into corresponding segment structures
        if(segment_idx < segment_count) {
            d_segment[segment_idx].data_compressed_index = s_segment_offsets[threadIdx.x];
        }
    }

    // first thread finally saves the total sum of bytes needed for compressed data
    if(threadIdx.x == 0) {
        *d_gpujpeg_huffman_output_byte_count = total_byte_count;
    }
}
/**
 * Huffman coder output compaction kernel.
 *
 * One warp copies one segment's serialized bytes from the scattered temp
 * buffer (d_src) into the compact output buffer (d_dest), reserving its
 * output offset with atomicAdd (or using the precomputed offset on CC 1.0).
 *
 * @return void
 */
__global__ static void
gpujpeg_huffman_encoder_compaction_kernel (
    struct gpujpeg_segment* const d_segment,
    const int segment_count,
    const uint8_t* const d_src,
    uint8_t* const d_dest,
    unsigned int * d_gpujpeg_huffman_output_byte_count
) {
    // get some segment (size of threadblocks is 32 x N, so threadIdx.y is warp index)
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    const int segment_idx = threadIdx.y + block_idx * blockDim.y;
    // NOTE(review): warps whose segment_idx is out of range exit here, before
    // the __syncthreads() below. A barrier reached by only part of a block is
    // undefined behavior per the CUDA programming guide, although hardware
    // tolerates exited threads in practice — confirm for targeted archs.
    if(segment_idx >= segment_count) {
        return;
    }

    // temp variables for all warps (per-warp output pointer published here)
    __shared__ uint4* volatile s_out_ptrs[WARPS_NUM];

    // get info about the segment
    const unsigned int segment_byte_count = (d_segment[segment_idx].data_compressed_size + 15) & ~15; // number of bytes rounded up to multiple of 16
    const unsigned int segment_in_offset = d_segment[segment_idx].data_temp_index; // this should be aligned at least to 16byte boundary

    // first thread of each warp reserves space in output buffer
    if(0 == threadIdx.x) {
        // Either load precomputed output offset (for CC 1.0) or compute it now (for CCs with atomic operations)
        #if __CUDA_ARCH__ == 100
        const unsigned int segment_out_offset = d_segment[segment_idx].data_compressed_index;
        #else
        const unsigned int segment_out_offset = atomicAdd(d_gpujpeg_huffman_output_byte_count, segment_byte_count);
        d_segment[segment_idx].data_compressed_index = segment_out_offset;
        #endif
        s_out_ptrs[threadIdx.y] = (uint4*)(d_dest + segment_out_offset);
    }

    // we need to synchronize all our warps here to ensure s_out_ptrs is guaranteed to be provided on any thread.
    __syncthreads();

    // all threads read output buffer offset for their segment and prepare input and output pointers and number of copy iterations
    const uint4 * d_in = threadIdx.x + (uint4*)(d_src + segment_in_offset);
    uint4 * d_out = threadIdx.x + s_out_ptrs[threadIdx.y];
    unsigned int copy_iterations = segment_byte_count / 512; // 512 is number of bytes copied in each iteration (32 threads * 16 bytes per thread)

    // copy the data!
    while(copy_iterations--) {
        *d_out = *d_in;
        d_out += 32;
        d_in += 32;
    }

    // copy remaining bytes (less than 512 bytes, one 16 B vector per lane)
    if((threadIdx.x * 16) < (segment_byte_count & 511)) {
        *d_out = *d_in;
    }
}
// Threadblock size for CC 1.x kernel
#define THREAD_BLOCK_SIZE 48
/**
 * Write one byte to compressed data (CC 1.x); advances the output pointer.
 *
 * @param data_compressed Output byte pointer (advanced by 1)
 * @param value Byte value to write
 * @return void
 */
#define gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, value) { \
    *data_compressed = (uint8_t)(value); \
    data_compressed++; }

/**
 * Write two bytes to compressed data, most significant byte first (CC 1.x).
 *
 * @param data_compressed Output byte pointer (advanced by 2)
 * @param value Two-byte value to write
 * @return void
 */
#define gpujpeg_huffman_gpu_encoder_emit_2byte(data_compressed, value) { \
    *data_compressed = (uint8_t)(((value) >> 8) & 0xFF); \
    data_compressed++; \
    *data_compressed = (uint8_t)((value) & 0xFF); \
    data_compressed++; }

/**
 * Write marker to compressed data (CC 1.x): an 0xFF prefix byte followed by
 * the marker code.
 *
 * @param data_compressed Output byte pointer (advanced by 2)
 * @param marker Marker to write (JPEG_MARKER_...)
 * @return void
 */
#define gpujpeg_huffman_gpu_encoder_marker(data_compressed, marker) { \
    *data_compressed = 0xFF;\
    data_compressed++; \
    *data_compressed = (uint8_t)(marker); \
    data_compressed++; }
/**
 * Output bits to the stream (version for CC 1.x).
 *
 * Only the right 24 bits of the bit buffer are used; valid bits are
 * left-justified within them. At most 16 bits are passed per call and never
 * more than 7 bits are retained between calls, so 24 bits always suffice.
 *
 * @param code             Huffman codeword, right-justified
 * @param size             Codeword length in bits (must be > 0)
 * @param put_value        In/out: pending left-justified bits
 * @param put_bits         In/out: number of pending bits
 * @param data_compressed  In/out: output byte pointer, advanced as bytes are written
 * @return 0 on success, -1 if size == 0 (invalid Huffman table entry)
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int code, int size, int & put_value, int & put_bits, uint8_t* & data_compressed)
{
    // Reject invalid (empty) Huffman table entries up front
    if ( 0 == size )
        return -1;

    // Keep only the low 'size' bits of the codeword
    int buffer = ((int)code) & ((((int)1) << size) - 1);
    int bit_count = put_bits + size;

    // Left-justify the fresh bits inside the 24-bit window and merge them
    // with the bits still pending from previous calls
    buffer = (buffer << (24 - bit_count)) | put_value;

    // Flush every completed byte (bits 23..16 of the window) to the output
    for ( ; bit_count >= 8; bit_count -= 8, buffer <<= 8 ) {
        const unsigned char out_byte = (unsigned char)((buffer >> 16) & 0xFF);
        gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, out_byte);
        // JPEG byte stuffing: an 0xFF byte in the entropy-coded stream must
        // be followed by a zero byte
        if ( out_byte == 0xFF ) {
            gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, 0);
        }
    }

    // Store the (at most 7) remaining bits back into the caller's state
    put_value = buffer;
    put_bits = bit_count;
    return 0;
}
/**
 * Flush bits left over in the coder state (CC 1.x).
 *
 * Pads the stream with up to seven 1-bits so the final partial byte is
 * completed, then clears the coder state.
 *
 * @param put_value        In/out: pending left-justified bits
 * @param put_bits         In/out: number of pending bits
 * @param data_compressed  In/out: output byte pointer
 * @return void
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_emit_left_bits(int & put_value, int & put_bits, uint8_t* & data_compressed)
{
    // Fill with 1-bits; only when that succeeds is the coder state reset
    if ( 0 == gpujpeg_huffman_gpu_encoder_emit_bits(0x7F, 7, put_value, put_bits, data_compressed) ) {
        put_value = 0;
        put_bits = 0;
    }
}
/**
 * Encode one 8x8 block (for CC 1.x).
 *
 * Stages the thread's block into shared memory with wide 64-bit loads, then
 * emits the DC coefficient difference (JPEG section F.1.2.1) followed by the
 * run-length coded AC coefficients (F.1.2.2). Each thread reads and writes
 * only its own 64-coefficient slice of the shared buffer, so no
 * __syncthreads() is required.
 *
 * Assumes blockDim.x <= THREAD_BLOCK_SIZE and that `data` is suitably
 * aligned for 64-bit loads — TODO confirm both at the launch site.
 *
 * @return 0 if succeeds, otherwise nonzero
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_encode_block(int & put_value, int & put_bits, int & dc, int16_t* data, uint8_t* & data_compressed,
                struct gpujpeg_table_huffman_encoder* d_table_dc, struct gpujpeg_table_huffman_encoder* d_table_ac)
{
    // 64 coefficients * 2 bytes = 128 bytes per block, copied in 64-bit units
    typedef uint64_t loading_t;
    const int loading_iteration_count = 64 * 2 / sizeof(loading_t);

    // Load block to shared memory (one 64-coefficient slice per thread)
    __shared__ int16_t s_data[64 * THREAD_BLOCK_SIZE];
    for ( int i = 0; i < loading_iteration_count; i++ ) {
        ((loading_t*)s_data)[loading_iteration_count * threadIdx.x + i] = ((loading_t*)data)[i];
    }
    int data_start = 64 * threadIdx.x;

    // Encode the DC coefficient difference per section F.1.2.1
    // (dc carries the previous block's DC value and is updated in place)
    int temp = s_data[data_start + 0] - dc;
    dc = s_data[data_start + 0];

    int temp2 = temp;
    if ( temp < 0 ) {
        // Temp is abs value of input
        temp = -temp;
        // For a negative input, want temp2 = bitwise complement of abs(input)
        // This code assumes we are on a two's complement machine
        temp2--;
    }

    // Find the number of bits needed for the magnitude of the coefficient
    int nbits = 0;
    while ( temp ) {
        nbits++;
        temp >>= 1;
    }

    // Write category number
    if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_dc->code[nbits], d_table_dc->size[nbits], put_value, put_bits, data_compressed) != 0 ) {
        return -1;
    }

    // Write category offset (EmitBits rejects calls with size 0)
    if ( nbits ) {
        if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 )
            return -1;
    }

    // Encode the AC coefficients per section F.1.2.2 (r = run length of zeros)
    int r = 0;
    for ( int k = 1; k < 64; k++ )
    {
        // Visit coefficients through the order_natural permutation table
        temp = s_data[data_start + gpujpeg_huffman_gpu_encoder_order_natural[k]];
        if ( temp == 0 ) {
            r++;
        }
        else {
            // If run length > 15, must emit special run-length-16 codes (0xF0)
            while ( r > 15 ) {
                if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0xF0], d_table_ac->size[0xF0], put_value, put_bits, data_compressed) != 0 )
                    return -1;
                r -= 16;
            }

            temp2 = temp;
            if ( temp < 0 ) {
                // temp is abs value of input
                temp = -temp;
                // This code assumes we are on a two's complement machine
                temp2--;
            }

            // Find the number of bits needed for the magnitude of the coefficient
            // there must be at least one 1 bit
            nbits = 1;
            while ( (temp >>= 1) )
                nbits++;

            // Emit Huffman symbol for run length / number of bits
            int i = (r << 4) + nbits;
            if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[i], d_table_ac->size[i], put_value, put_bits, data_compressed) != 0 )
                return -1;

            // Write Category offset
            if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 )
                return -1;

            r = 0;
        }
    }

    // If all the left coefs were zero, emit an end-of-block code
    if ( r > 0 ) {
        if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0], d_table_ac->size[0], put_value, put_bits, data_compressed) != 0 )
            return -1;
    }

    return 0;
}
/**
 * Huffman encoder kernel (for CC 1.x).
 *
 * One thread sequentially encodes one whole segment — byte stuffing is done
 * inside emit_bits — and terminates it with a restart marker.
 *
 * @return void
 */
__global__ static void
gpujpeg_huffman_encoder_encode_kernel(
    struct gpujpeg_component* d_component,
    struct gpujpeg_segment* d_segment,
    int comp_count,
    int segment_count,
    uint8_t* d_data_compressed,
    unsigned int * d_gpujpeg_huffman_output_byte_count
)
{
    int segment_index = blockIdx.x * blockDim.x + threadIdx.x;
    if ( segment_index >= segment_count )
        return;

    struct gpujpeg_segment* segment = &d_segment[segment_index];

    // first thread initializes compact output size for next kernel
    if(0 == segment_index) {
        *d_gpujpeg_huffman_output_byte_count = 0;
    }

    // Initialize huffman coder: 24-bit bit buffer + per-component DC predictors
    int put_value = 0;
    int put_bits = 0;
    int dc[GPUJPEG_MAX_COMPONENT_COUNT];
    for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ )
        dc[comp] = 0;

    // Prepare data pointers
    uint8_t* data_compressed = &d_data_compressed[segment->data_temp_index];
    uint8_t* data_compressed_start = data_compressed;

    // Non-interleaving mode
    if ( comp_count == 1 ) {
        // shadows the outer segment_index: this is the index within the scan
        int segment_index = segment->scan_segment_index;
        // Encode MCUs in segment
        for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
            // Get component for current scan
            struct gpujpeg_component* component = &d_component[segment->scan_index];
            // Get component data for MCU
            int16_t* block = &component->d_data_quantized[(segment_index * component->segment_mcu_count + mcu_index) * component->mcu_size];
            // Get coder parameters
            int & component_dc = dc[segment->scan_index];
            // Get huffman tables
            struct gpujpeg_table_huffman_encoder* d_table_dc = NULL;
            struct gpujpeg_table_huffman_encoder* d_table_ac = NULL;
            if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) {
                d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC];
                d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC];
            } else {
                d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC];
                d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC];
            }
            // Encode 8x8 block; abort the segment on error (invalid table entry)
            if ( gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac) != 0 )
                break;
        }
    }
    // Interleaving mode
    else {
        // shadows the outer segment_index: this is the index within the scan
        int segment_index = segment->scan_segment_index;
        // Encode MCUs in segment
        for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
            //assert(segment->scan_index == 0);
            for ( int comp = 0; comp < comp_count; comp++ ) {
                struct gpujpeg_component* component = &d_component[comp];
                // Prepare mcu indexes
                int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x;
                int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x;
                // Compute base data index
                int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
                // For all vertical 8x8 blocks
                for ( int y = 0; y < component->sampling_factor.vertical; y++ ) {
                    // Compute base row data index
                    int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
                    // For all horizontal 8x8 blocks
                    for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) {
                        // Compute 8x8 block data index
                        int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE;
                        // Get component data for MCU
                        int16_t* block = &component->d_data_quantized[data_index];
                        // Get coder parameters
                        int & component_dc = dc[comp];
                        // Get huffman tables
                        struct gpujpeg_table_huffman_encoder* d_table_dc = NULL;
                        struct gpujpeg_table_huffman_encoder* d_table_ac = NULL;
                        if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) {
                            d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC];
                            d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC];
                        } else {
                            d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC];
                            d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC];
                        }
                        // Encode 8x8 block (return value ignored here; an error
                        // leaves this segment's stream truncated)
                        gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac);
                    }
                }
            }
        }
    }

    // Emit left bits
    if ( put_bits > 0 )
        gpujpeg_huffman_gpu_encoder_emit_left_bits(put_value, put_bits, data_compressed);

    // Output restart marker (RST0..RST7, cycling by index within the scan)
    int restart_marker = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8);
    gpujpeg_huffman_gpu_encoder_marker(data_compressed, restart_marker);

    // Set compressed size (in bytes)
    segment->data_compressed_size = data_compressed - data_compressed_start;
}
/**
 * Adds packed coefficients into the GPU version of Huffman lookup table.
 *
 * Each of the 257 destination entries packs the codeword left-justified in
 * the high bits together with its bit length in the low bits. Index 256
 * duplicates entry 0 (via the `i & 0xFF` mask), giving the GPU table an
 * extra sentinel slot.
 *
 * @param dest   Destination array of 257 packed entries
 * @param src    Source CPU-side Huffman table
 * @param is_ac  True for AC tables (entry 0 is reserved and zeroed)
 */
void
gpujpeg_huffman_gpu_add_packed_table(uint32_t * const dest, const struct gpujpeg_table_huffman_encoder * const src, const bool is_ac) {
    // make a upshifted copy of the table for GPU encoding
    for ( int i = 0; i <= 256; i++ ) {
        const int size = src->size[i & 0xFF];
        // BUGFIX: guard size == 0 (unused table entries) — shifting by
        // 32 - size == 32 is undefined behavior; shift an unsigned operand
        // so the codeword never lands in a signed value's sign bit either.
        // Unused entries are packed as 0 (size 0, empty codeword).
        dest[i] = size ? (((uint32_t) src->code[i & 0xFF] << (32 - size)) | (uint32_t) size) : 0;
    }

    // reserve first index in GPU version of AC table for special purposes
    if ( is_ac ) {
        dest[0] = 0;
    }
}
/* Documented at declaration */
struct gpujpeg_huffman_gpu_encoder *
gpujpeg_huffman_gpu_encoder_create(const struct gpujpeg_encoder * encoder)
{
    struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder = (struct gpujpeg_huffman_gpu_encoder *) malloc(sizeof(struct gpujpeg_huffman_gpu_encoder));
    if ( huffman_gpu_encoder == NULL ) {
        return NULL;
    }
    memset(huffman_gpu_encoder, 0, sizeof(struct gpujpeg_huffman_gpu_encoder));

    // Allocate device-side counter for the total compressed output size
    // NOTE(review): each `return NULL` error path below leaks
    // huffman_gpu_encoder (and, after this point, the device counter) —
    // consider freeing them on failure.
    cudaMalloc((void**)&huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count, sizeof(unsigned int));
    gpujpeg_cuda_check_error("Allocation of huffman output byte count failed", return NULL);

    // Initialize decomposition lookup table
    cudaFuncSetCacheConfig(gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel, cudaFuncCachePreferShared);
    gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel<<<32, 256>>>();  // 8192 threads total
    cudaDeviceSynchronize();
    gpujpeg_cuda_check_error("Decomposition LUT initialization failed", return NULL);

    // compose GPU version of the huffman LUT and copy it into GPU memory (for CC >= 2.0);
    // layout: four packed 257-entry tables — lum AC, lum DC, chroma AC, chroma DC
    uint32_t gpujpeg_huffman_cpu_lut[(256 + 1) * 4];
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 0, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC], true);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 1, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC], false);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 2, &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC], true);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 3, &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC], false);
    cudaMemcpyToSymbol(
        gpujpeg_huffman_gpu_lut,
        gpujpeg_huffman_cpu_lut,
        (256 + 1) * 4 * sizeof(*gpujpeg_huffman_gpu_lut),
        0,
        cudaMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (Huffman LUT copy)", return NULL);

    // Copy original Huffman coding tables to GPU memory (for CC 1.x)
    cudaMemcpyToSymbol(
        gpujpeg_huffman_gpu_encoder_table_huffman,
        &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
        sizeof(gpujpeg_huffman_gpu_encoder_table_huffman),
        0,
        cudaMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (Huffman coding table)", return NULL);

    // Copy natural order (coefficient index permutation used by the CC 1.x
    // AC scan — presumably the zig-zag order) to constant device memory
    cudaMemcpyToSymbol(
        gpujpeg_huffman_gpu_encoder_order_natural,
        gpujpeg_order_natural,
        GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
        0,
        cudaMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (natural order copy)", return NULL);

    // Configure more shared memory for all kernels
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<true>, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<false>, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_serialization_kernel, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_compaction_kernel, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_allocation_kernel, cudaFuncCachePreferShared);

    return huffman_gpu_encoder;
}
void
gpujpeg_huffman_gpu_encoder_destroy(struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder)
{
    // A valid encoder instance is required
    assert(huffman_gpu_encoder != NULL);

    // Release the device-side output byte counter, if it was allocated
    if (NULL != huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count) {
        cudaFree(huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count);
    }

    // Release the host-side structure itself
    free(huffman_gpu_encoder);
}
/**
 * Get grid size for specified count of threadblocks. (Grid size is limited
 * to 65536 in both directions, so if we need more threadblocks, we must use
 * both x and y coordinates.)
 */
dim3
gpujpeg_huffman_gpu_encoder_grid_size(int tblock_count)
{
    // Start with a flat 1D grid and keep folding the x dimension in half
    // (rounding up) into the y dimension until x fits under the limit.
    dim3 size(tblock_count);
    for ( ; size.x > 0xffff; size.y <<= 1 ) {
        size.x = (size.x + 1) >> 1;
    }
    // The result covers at least tblock_count threadblocks; kernels launched
    // with it bounds-check their segment index, so overshoot is harmless.
    return size;
}
/* Documented at declaration */
int
gpujpeg_huffman_gpu_encoder_encode(struct gpujpeg_encoder* encoder, struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder, unsigned int * output_byte_count)
{
    // Get coder
    struct gpujpeg_coder* coder = &encoder->coder;
    assert(coder->param.restart_interval > 0);

    // Select encoder kernel which either expects continuous segments of blocks or uses block lists
    int comp_count = 1;
    if ( coder->param.interleaved == 1 )
        comp_count = coder->param_image.comp_count;
    assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT);

    // Select encoder kernel based on compute capability
    if ( encoder->coder.cuda_cc_major < 2 ) {
        // Run kernel (one thread per segment, serialized bytes go to d_temp_huffman)
        dim3 thread(THREAD_BLOCK_SIZE);
        dim3 grid(gpujpeg_div_and_round_up(coder->segment_count, thread.x));
        gpujpeg_huffman_encoder_encode_kernel<<<grid, thread, 0, *(encoder->stream)>>>(
            coder->d_component,
            coder->d_segment,
            comp_count,
            coder->segment_count,
            coder->d_temp_huffman,
            huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
        );
        gpujpeg_cuda_check_error("Huffman encoding failed", return -1);
    } else {
        // Run encoder kernel (one warp per segment, codewords go to d_data_compressed)
        dim3 thread(32 * WARPS_NUM);
        dim3 grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, (thread.x / 32)));
        if(comp_count == 1) {
            gpujpeg_huffman_encoder_encode_kernel_warp<true><<<grid, thread, 0, *(encoder->stream)>>>(
                coder->d_segment,
                coder->segment_count,
                coder->d_data_compressed,
                coder->d_block_list,
                coder->d_data_quantized,
                coder->d_component,
                comp_count,
                huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
            );
            gpujpeg_cuda_check_error("Huffman encoding failed", return -1);
        } else {
            gpujpeg_huffman_encoder_encode_kernel_warp<false><<<grid, thread, 0, *(encoder->stream)>>>(
                coder->d_segment,
                coder->segment_count,
                coder->d_data_compressed,
                coder->d_block_list,
                coder->d_data_quantized,
                coder->d_component,
                comp_count,
                huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
            );
            gpujpeg_cuda_check_error("Huffman encoding failed", return -1);
        }

        // Run codeword serialization kernel (one thread per segment).
        // BUGFIX: launch with the 2D grid from gpujpeg_huffman_gpu_encoder_grid_size()
        // instead of the raw 1D threadblock count — serialization_grid was computed
        // but unused, and a plain 1D grid exceeds the 65535 gridDim.x limit for
        // very large segment counts. The kernel already flattens blockIdx.y
        // into its segment index, so the 2D grid is what it expects.
        const int num_serialization_tblocks = gpujpeg_div_and_round_up(coder->segment_count, SERIALIZATION_THREADS_PER_TBLOCK);
        const dim3 serialization_grid = gpujpeg_huffman_gpu_encoder_grid_size(num_serialization_tblocks);
        gpujpeg_huffman_encoder_serialization_kernel<<<serialization_grid, SERIALIZATION_THREADS_PER_TBLOCK, 0, *(encoder->stream)>>>(
            coder->d_segment,
            coder->segment_count,
            coder->d_data_compressed,
            coder->d_temp_huffman
        );
        gpujpeg_cuda_check_error("Codeword serialization failed", return -1);
    }

    // No atomic operations in CC 1.0 => run output size computation kernel to allocate the output buffer space
    if ( encoder->coder.cuda_cc_major == 1 && encoder->coder.cuda_cc_minor == 0 ) {
        gpujpeg_huffman_encoder_allocation_kernel<<<1, 512, 0, *(encoder->stream)>>>(coder->d_segment, coder->segment_count, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count);
        gpujpeg_cuda_check_error("Huffman encoder output allocation failed", return -1);
    }

    // Run output compaction kernel (one warp per segment), copying serialized
    // segments from d_temp_huffman into the compact d_data_compressed buffer
    const dim3 compaction_thread(32, WARPS_NUM);
    const dim3 compaction_grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, WARPS_NUM));
    gpujpeg_huffman_encoder_compaction_kernel<<<compaction_grid, compaction_thread, 0, *(encoder->stream)>>>(
        coder->d_segment,
        coder->segment_count,
        coder->d_temp_huffman,
        coder->d_data_compressed,
        huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
    );
    gpujpeg_cuda_check_error("Huffman output compaction failed", return -1);

    // Read and return number of occupied bytes (async copy on the encoder's stream;
    // the caller must synchronize the stream before using *output_byte_count)
    cudaMemcpyAsync(output_byte_count, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count, sizeof(unsigned int), cudaMemcpyDeviceToHost, *(encoder->stream));
    gpujpeg_cuda_check_error("Huffman output size getting failed", return -1);

    // indicate success
    return 0;
}
|
ba20caf8643463ebe0fe27d37ee68db186fc434b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "im2col.h"
#include "hip/hip_runtime.h"
}
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
// One thread per im2col output column position; a grid-stride loop covers
// all n = channels * height_col * width_col positions regardless of grid size.
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
        const int height, const int width, const int ksize,
        const int pad,
        const int stride,
        const int height_col, const int width_col,
        float *data_col) {
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    for(; index < n; index += blockDim.x*gridDim.x){
        // Decompose the flat index into (channel_in, h_out, w_out)
        int w_out = index % width_col;
        int h_index = index / width_col;
        int h_out = h_index % height_col;
        int channel_in = h_index / height_col;
        // Each input channel expands into ksize*ksize output rows
        int channel_out = channel_in * ksize * ksize;
        // Top-left corner of the receptive field in the input image
        int h_in = h_out * stride - pad;
        int w_in = w_out * stride - pad;
        float* data_col_ptr = data_col;
        data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
        const float* data_im_ptr = data_im;
        data_im_ptr += (channel_in * height + h_in) * width + w_in;
        // Copy the ksize x ksize patch, writing zeros for taps that fall
        // outside the image (the padding region)
        for (int i = 0; i < ksize; ++i) {
            for (int j = 0; j < ksize; ++j) {
                int h = h_in + i;
                int w = w_in + j;
                *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
                    data_im_ptr[i * width + j] : 0;
                //*data_col_ptr = data_im_ptr[ii * width + jj];
                data_col_ptr += height_col * width_col;
            }
        }
    }
}
// Host wrapper: expands image `im` (channels x height x width, device memory)
// into the im2col matrix `data_col` for a square kernel of size `ksize`.
void im2col_gpu(float *im,
        int channels, int height, int width,
        int ksize, int stride, int pad, float *data_col){
    // We are going to launch channels * height_col * width_col kernels, each
    // kernel responsible for copying a single-channel grid.
    int height_col = (height + 2 * pad - ksize) / stride + 1;
    int width_col = (width + 2 * pad - ksize) / stride + 1;
    int num_kernels = channels * height_col * width_col;
    // Ceil-divide by BLOCK (project-wide threads-per-block constant) so every
    // output position is covered; launched on the default stream
    hipLaunchKernelGGL(( im2col_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK),
            dim3(BLOCK), 0, 0,
            num_kernels, im, height, width, ksize, pad,
            stride, height_col,
            width_col, data_col);
}
// --------------------------------
// https://github.com/AlexeyAB/darknet/blob/master/src/im2col_kernels.cu
// CUDA: use 512 threads per block
const int CAFFE_CUDA_NUM_THREADS = 512;

// CUDA: number of blocks needed to cover N threads (ceiling division)
inline int CAFFE_GET_BLOCKS(const int N) {
    return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}
// CUDA: grid-stride looping — `i` visits every index in [0, n) across the whole grid
#define CUDA_KERNEL_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
         i < (n); \
         i += blockDim.x * gridDim.x)
// https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// Extended im2col kernel: rectangular kernels, separate h/w padding, stride
// and dilation. One grid-stride iteration handles one output column position.
__global__ void im2col_gpu_kernel_ext(const int n, const float* data_im,
        const int height, const int width, const int kernel_h, const int kernel_w,
        const int pad_h, const int pad_w,
        const int stride_h, const int stride_w,
        const int dilation_h, const int dilation_w,
        const int height_col, const int width_col,
        float* data_col) {
    CUDA_KERNEL_LOOP(index, n) {
        // Decompose the flat index into (c_im, h_col, w_col)
        const int h_index = index / width_col;
        const int h_col = h_index % height_col;
        const int w_col = index % width_col;
        const int c_im = h_index / height_col;
        // Each input channel expands into kernel_h*kernel_w output rows
        const int c_col = c_im * kernel_h * kernel_w;
        // Top-left corner of the (dilated) receptive field in the input image
        const int h_offset = h_col * stride_h - pad_h;
        const int w_offset = w_col * stride_w - pad_w;
        float* data_col_ptr = data_col;
        data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
        const float* data_im_ptr = data_im;
        data_im_ptr += (c_im * height + h_offset) * width + w_offset;
        // Copy the kernel_h x kernel_w patch with dilation, writing zeros for
        // taps that fall outside the image (the padding region)
        for (int i = 0; i < kernel_h; ++i) {
            for (int j = 0; j < kernel_w; ++j) {
                int h_im = h_offset + i * dilation_h;
                int w_im = w_offset + j * dilation_w;
                *data_col_ptr =
                    (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
                    data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
                data_col_ptr += height_col * width_col;
            }
        }
    }
}
// Host wrapper for the extended im2col kernel (rectangular kernel, separate
// h/w pad, stride and dilation). All pointers are device memory.
void im2col_gpu_ext(const float* data_im, const int channels,
        const int height, const int width, const int kernel_h, const int kernel_w,
        const int pad_h, const int pad_w,
        const int stride_h, const int stride_w,
        const int dilation_h, const int dilation_w,
        float* data_col)
{
    // We are going to launch channels * height_col * width_col kernels, each
    // kernel responsible for copying a single-channel grid.
    // Output size accounts for dilation: effective kernel extent is
    // dilation * (kernel - 1) + 1.
    int height_col = (height + 2 * pad_h -
        (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
    int width_col = (width + 2 * pad_w -
        (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
    int num_kernels = channels * height_col * width_col;
    // NOLINT_NEXT_LINE(whitespace/operators)
    // NOTE(review): hipify left this launch in chevron form while the other
    // launch in this file was converted to hipLaunchKernelGGL — confirm the
    // build's hipcc accepts the chevron syntax.
    im2col_gpu_kernel_ext << <CAFFE_GET_BLOCKS(num_kernels),
        CAFFE_CUDA_NUM_THREADS >> >(
        num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
        pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
        width_col, data_col);
    check_error(hipPeekAtLastError());
} | ba20caf8643463ebe0fe27d37ee68db186fc434b.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "im2col.h"
#include "cuda.h"
}
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
// Caffe-derived im2col for square kernels with uniform pad/stride (no
// dilation): expands `data_im` ([channels x height x width]) into the
// [channels*ksize*ksize x height_col*width_col] matrix `data_col`.
// n = channels * height_col * width_col; the grid-stride loop lets any
// launch configuration cover all n output columns.
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
        const int height, const int width, const int ksize,
        const int pad,
        const int stride,
        const int height_col, const int width_col,
        float *data_col) {
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    for(; index < n; index += blockDim.x*gridDim.x){
        // Decompose the flat index into (input channel, output row, output col).
        int w_out = index % width_col;
        int h_index = index / width_col;
        int h_out = h_index % height_col;
        int channel_in = h_index / height_col;
        // First of the ksize*ksize output rows written for this channel.
        int channel_out = channel_in * ksize * ksize;
        // Top-left corner of the receptive field in the (padded) input.
        int h_in = h_out * stride - pad;
        int w_in = w_out * stride - pad;
        float* data_col_ptr = data_col;
        data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
        const float* data_im_ptr = data_im;
        data_im_ptr += (channel_in * height + h_in) * width + w_in;
        for (int i = 0; i < ksize; ++i) {
            for (int j = 0; j < ksize; ++j) {
                int h = h_in + i;
                int w = w_in + j;
                // Out-of-bounds taps read as zero (implicit zero padding).
                *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
                    data_im_ptr[i * width + j] : 0;
                //*data_col_ptr = data_im_ptr[ii * width + jj];
                // Consecutive kernel taps are height_col*width_col apart.
                data_col_ptr += height_col * width_col;
            }
        }
    }
}
// Host-side launcher for im2col_gpu_kernel: converts the device image `im`
// ([channels x height x width]) into the column matrix `data_col` suitable
// for GEMM-based convolution.  One logical work item per output entry.
void im2col_gpu(float *im,
        int channels, int height, int width,
        int ksize, int stride, int pad, float *data_col){
    // Output spatial extent after padding and striding.
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int num_kernels = channels * height_col * width_col;
    // Ceiling-divide so the grid covers every work item.
    const int grid = (num_kernels + BLOCK - 1) / BLOCK;
    im2col_gpu_kernel<<<grid, BLOCK>>>(num_kernels, im, height, width,
            ksize, pad, stride, height_col, width_col, data_col);
}
// --------------------------------
// https://github.com/AlexeyAB/darknet/blob/master/src/im2col_kernels.cu
// CUDA: use 512 threads per block.
const int CAFFE_CUDA_NUM_THREADS = 512;
// CUDA: number of thread blocks needed to cover N work items with
// CAFFE_CUDA_NUM_THREADS threads each (ceiling division; assumes N >= 0).
inline int CAFFE_GET_BLOCKS(const int N) {
  const int threads = CAFFE_CUDA_NUM_THREADS;
  return (N + threads - 1) / threads;
}
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// Caffe-style im2col with full 2D padding/stride/dilation support: expands
// the [channels x height x width] image `data_im` into the
// [channels*kernel_h*kernel_w x height_col*width_col] matrix `data_col`
// so convolution can be computed as a single GEMM.
// n = channels * height_col * width_col; each iteration of the grid-stride
// loop (CUDA_KERNEL_LOOP) fills one full kernel_h*kernel_w column slice.
__global__ void im2col_gpu_kernel_ext(const int n, const float* data_im,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int height_col, const int width_col,
    float* data_col) {
    CUDA_KERNEL_LOOP(index, n) {
        // Decompose the flat index into (input channel, output row, output col).
        const int h_index = index / width_col;
        const int h_col = h_index % height_col;
        const int w_col = index % width_col;
        const int c_im = h_index / height_col;
        // First of the kernel_h*kernel_w output rows written for this channel.
        const int c_col = c_im * kernel_h * kernel_w;
        // Top-left corner of the receptive field in the (padded) input.
        const int h_offset = h_col * stride_h - pad_h;
        const int w_offset = w_col * stride_w - pad_w;
        float* data_col_ptr = data_col;
        data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
        const float* data_im_ptr = data_im;
        data_im_ptr += (c_im * height + h_offset) * width + w_offset;
        for (int i = 0; i < kernel_h; ++i) {
            for (int j = 0; j < kernel_w; ++j) {
                int h_im = h_offset + i * dilation_h;
                int w_im = w_offset + j * dilation_w;
                // Taps falling outside the image read as zero (implicit padding).
                *data_col_ptr =
                    (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
                    data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
                // Consecutive kernel taps live height_col*width_col elements
                // apart in the column buffer.
                data_col_ptr += height_col * width_col;
            }
        }
    }
}
void im2col_gpu_ext(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
float* data_col)
{
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel_ext << <CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS >> >(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
check_error(cudaPeekAtLastError());
} |
73e5483e9c0954a47a686fe4e346827c8499bd42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ------------
* This code is provided solely for the personal and private use of
* students taking the CSC367 course at the University of Toronto.
* Copying for purposes other than this use is expressly prohibited.
* All forms of distribution of this code, whether as given or with
* any changes, are expressly prohibited.
*
* Authors: Bogdan Simion, Maryam Dehnavi, Felipe de Azevedo Piovezan
*
* All of the files in this directory and all subdirectories are:
* Copyright (c) 2020 Bogdan Simion and Maryam Dehnavi
* -------------
*/
#include "kernels.h"
#include <stdio.h>
#include <string>
#include <unistd.h>
#include <sys/sysinfo.h>
#include <time.h>
#include <stdint.h>
#define max_threads 1024
// Applies `filter` (dimension x dimension) to `input` on the GPU, writing
// the result to `output`, then rescales `output` to [0, 255] using a GPU
// min/max reduction.  All pointers are device pointers; `reduction`,
// `kernel3`, `normalize3` and `apply2d_gpu` come from kernels.h.
void run_kernel3(const int8_t *filter, int32_t dimension, const int32_t *input,
        int32_t *output, int32_t width, int32_t height)
{
    // Calculate blocks and threads
    int pixel_count = width * height;
    int32_t num_threads = min(max_threads, pixel_count);
    // int32_t num_blocks = (pixel_count + num_threads - 1) / num_threads;
    // kernel3/normalize3 are row-parallel, so their grid only covers `height`.
    int32_t num_blocks = (height + (num_threads - 1)) / num_threads;
    // The reduction is pixel-parallel and needs a larger grid.
    int32_t blocks_reduction = (pixel_count + num_threads - 1) / num_threads;
    // printf("num_threads: %ld, num_blocks: %ld \n", num_threads, num_blocks);
    hipLaunchKernelGGL(( kernel3), dim3(num_blocks), dim3(num_threads), 0, 0, filter, dimension, input, output, width, height);
    // Working buffers for the min/max reduction, seeded with the filter output.
    // NOTE(review): hipMalloc/hipMemcpy return codes are not checked here.
    int32_t *global_min;
    int32_t *global_max;
    hipMalloc(&global_min, pixel_count*sizeof(int32_t));
    hipMalloc(&global_max, pixel_count*sizeof(int32_t));
    hipMemcpy(global_min, output, pixel_count * sizeof(int32_t), hipMemcpyDeviceToDevice);
    hipMemcpy(global_max, output, pixel_count * sizeof(int32_t), hipMemcpyDeviceToDevice);
    // Dynamic shared memory sized for the reduction kernel: presumably two
    // int32 slots per thread (min+max), doubled for sub-warp block sizes --
    // confirm against the reduction kernel's layout in kernels.h.
    int shMemSize = (num_threads <= 32) ? 4 * num_threads * sizeof(int32_t) : 2* num_threads * sizeof(int32_t);
    hipLaunchKernelGGL(( reduction), dim3(blocks_reduction), dim3(num_threads), shMemSize, 0, global_min, global_max, pixel_count);
    // Keep reducing block-level partials until one min/max pair remains
    // in element 0 of each buffer.
    while (blocks_reduction > 1) {
        int n = blocks_reduction;
        blocks_reduction = (blocks_reduction + max_threads - 1) / max_threads;
        shMemSize = (num_threads <= 32) ? 4 * num_threads * sizeof(int32_t) : 2* num_threads * sizeof(int32_t);
        hipLaunchKernelGGL(( reduction), dim3(blocks_reduction), dim3(num_threads), shMemSize, 0, global_min, global_max, n);
    }
    // Normalize the filtered image in place using the global min/max.
    hipLaunchKernelGGL(( normalize3), dim3(num_blocks), dim3(num_threads), 0, 0, output, width, height, global_min, global_max);
    hipFree(global_min);
    hipFree(global_max);
}
// Row-parallel filter kernel: each thread owns one image row and applies
// the (dimension x dimension) filter to every pixel in that row via
// apply2d_gpu.  Threads whose index exceeds the row count do nothing.
__global__ void kernel3(const int8_t *filter, int32_t dimension,
        const int32_t *input, int32_t *output, int32_t width, int32_t height)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= height) {
        return;
    }
    for (int col = 0; col < width; col++) {
        output[row * width + col] =
            apply2d_gpu(filter, dimension, input, width, height, row, col);
    }
}
// Row-parallel normalization: rescales each pixel of `image` from the global
// range [smallest[0], biggest[0]] to [0, 255] using integer arithmetic.
// A flat image (min == max) is left untouched to avoid division by zero.
// `smallest`/`biggest` hold the reduction results in element 0.
__global__ void normalize3(int32_t *image, int32_t width, int32_t height,
        int32_t *smallest, int32_t *biggest)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < height && smallest[0] != biggest[0]){
        for (int i = 0; i < width; i++){
            image[idx * width + i] = ((image[idx * width + i] - smallest[0]) * 255) / (biggest[0] - smallest[0]);
        }
    }
}
| 73e5483e9c0954a47a686fe4e346827c8499bd42.cu | /* ------------
* This code is provided solely for the personal and private use of
* students taking the CSC367 course at the University of Toronto.
* Copying for purposes other than this use is expressly prohibited.
* All forms of distribution of this code, whether as given or with
* any changes, are expressly prohibited.
*
* Authors: Bogdan Simion, Maryam Dehnavi, Felipe de Azevedo Piovezan
*
* All of the files in this directory and all subdirectories are:
* Copyright (c) 2020 Bogdan Simion and Maryam Dehnavi
* -------------
*/
#include "kernels.h"
#include <stdio.h>
#include <string>
#include <unistd.h>
#include <sys/sysinfo.h>
#include <time.h>
#include <stdint.h>
#define max_threads 1024
// Applies `filter` (dimension x dimension) to `input` on the GPU, writing
// the result to `output`, then rescales `output` to [0, 255] using a GPU
// min/max reduction.  All pointers are device pointers; `reduction`,
// `kernel3`, `normalize3` and `apply2d_gpu` come from kernels.h.
void run_kernel3(const int8_t *filter, int32_t dimension, const int32_t *input,
        int32_t *output, int32_t width, int32_t height)
{
    // Calculate blocks and threads
    int pixel_count = width * height;
    int32_t num_threads = min(max_threads, pixel_count);
    // int32_t num_blocks = (pixel_count + num_threads - 1) / num_threads;
    // kernel3/normalize3 are row-parallel, so their grid only covers `height`.
    int32_t num_blocks = (height + (num_threads - 1)) / num_threads;
    // The reduction is pixel-parallel and needs a larger grid.
    int32_t blocks_reduction = (pixel_count + num_threads - 1) / num_threads;
    // printf("num_threads: %ld, num_blocks: %ld \n", num_threads, num_blocks);
    kernel3<<<num_blocks, num_threads>>>(filter, dimension, input, output, width, height);
    // Working buffers for the min/max reduction, seeded with the filter output.
    // NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked here.
    int32_t *global_min;
    int32_t *global_max;
    cudaMalloc(&global_min, pixel_count*sizeof(int32_t));
    cudaMalloc(&global_max, pixel_count*sizeof(int32_t));
    cudaMemcpy(global_min, output, pixel_count * sizeof(int32_t), cudaMemcpyDeviceToDevice);
    cudaMemcpy(global_max, output, pixel_count * sizeof(int32_t), cudaMemcpyDeviceToDevice);
    // Dynamic shared memory sized for the reduction kernel: presumably two
    // int32 slots per thread (min+max), doubled for sub-warp block sizes --
    // confirm against the reduction kernel's layout in kernels.h.
    int shMemSize = (num_threads <= 32) ? 4 * num_threads * sizeof(int32_t) : 2* num_threads * sizeof(int32_t);
    reduction<<<blocks_reduction, num_threads, shMemSize>>>(global_min, global_max, pixel_count);
    // Keep reducing block-level partials until one min/max pair remains
    // in element 0 of each buffer.
    while (blocks_reduction > 1) {
        int n = blocks_reduction;
        blocks_reduction = (blocks_reduction + max_threads - 1) / max_threads;
        shMemSize = (num_threads <= 32) ? 4 * num_threads * sizeof(int32_t) : 2* num_threads * sizeof(int32_t);
        reduction<<<blocks_reduction, num_threads, shMemSize>>>(global_min, global_max, n);
    }
    // Normalize the filtered image in place using the global min/max.
    normalize3<<<num_blocks, num_threads>>>(output, width, height, global_min, global_max);
    cudaFree(global_min);
    cudaFree(global_max);
}
// Row-parallel filter kernel: thread `idx` owns image row `idx` and applies
// the (dimension x dimension) filter to each pixel of that row via
// apply2d_gpu (defined elsewhere).  Threads with idx >= height do nothing.
__global__ void kernel3(const int8_t *filter, int32_t dimension,
        const int32_t *input, int32_t *output, int32_t width, int32_t height)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < height){
        for (int i = 0; i < width; i++){
            int row = idx;
            int column = i;
            output[idx * width + i] = apply2d_gpu(filter,dimension,input,width,height,row,column);
        }
    }
}
// Row-parallel normalization: linearly rescales every pixel from the global
// range [smallest[0], biggest[0]] to [0, 255] using integer arithmetic.
// A flat image (min == max) is left unmodified to avoid division by zero.
// `smallest`/`biggest` hold the reduction results in element 0.
__global__ void normalize3(int32_t *image, int32_t width, int32_t height,
        int32_t *smallest, int32_t *biggest)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int32_t lo = smallest[0];
    const int32_t hi = biggest[0];
    if (row >= height || lo == hi) {
        return;
    }
    for (int col = 0; col < width; col++) {
        const int32_t at = row * width + col;
        image[at] = ((image[at] - lo) * 255) / (hi - lo);
    }
}
|
2f7c17c5f38f582be340864d7ace5e378a3ca1dd.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.hpp"
namespace filter
{
    // Explicit instantiation of the separable row-filter launcher for
    // float4 input / float4 output; the primary template is in row_filter.hpp.
    template void linearRow<float4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| 2f7c17c5f38f582be340864d7ace5e378a3ca1dd.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.hpp"
namespace filter
{
    // Explicit instantiation of the separable row-filter launcher for
    // float4 input / float4 output; the primary template is in row_filter.hpp.
    template void linearRow<float4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
c9ea5dd3dd5639f849af95a4590f6286cdaac761.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include <iomanip>
#include <vector>
#include <cstdlib>
#define MAX(x, y) ((x>y) ? x : y)
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
// Reports a failed runtime-API call on stderr; invoked via the cudaErrCheck
// macro, which supplies the call-site file/line.  Non-fatal: execution
// continues after the message is printed.
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
    if (stat == hipSuccess) {
        return;
    }
    fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
// Reports a failed BLAS call on stderr; invoked via the cublasErrCheck macro,
// which supplies the call-site file/line.  The raw status code is printed
// (no strerror-style helper is used here).  Non-fatal.
void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) {
    if (stat != HIPBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
    }
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
// Reports a failed RNG-library call on stderr; invoked via the curandErrCheck
// macro, which supplies the call-site file/line.  Non-fatal.
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
    if (stat != HIPRAND_STATUS_SUCCESS) {
        fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
    }
}
// Converts a timed (m x n x k) GEMM into achieved TFLOPS.
// A GEMM performs 2*m*n*k floating-point operations (one multiply and one
// add per inner-product term); `msec` is the elapsed time in milliseconds.
double cal_tflops(int m, int n, int k, double msec)
{
    const double ops = 2. * m * n * k;
    const double seconds = 1E-3 * msec;
    return (1E-12 * ops) / seconds;
}
// Fills out[0..n) with `value`, one element per thread; threads beyond n
// fall through the bounds check.  Launched with a grid sized to cover n.
__global__ void assignFloatValue (float *out, int n, float value) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) {
        out[idx] = value;
    }
}
// Fills out[0..n) with `value` converted from float to half on assignment,
// one element per thread; threads beyond n fall through the bounds check.
__global__ void assignHalfValue (half *out, int n, float value) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) {
        out[idx] = value;
    }
}
// Verifies a GEMM result whose operands were filled with `value`: every
// element of the m x n product must equal k * value * value.  Mismatches
// are reported on stdout; the check is non-fatal.
void correctnessCheck(int m, int n, int k, float *host, float value){
    const float expected = k * value * value;
    const int total = m * n;
    for (int i = 0; i < total; i++) {
        if (host[i] != expected) {
            std::cout << "ERROR value = " << host[i] << std::endl;
        }
    }
}
// Prints one CSV-style result row (m, n, k, time in msec, TFLOPS) with fixed
// column widths and updates the running best observed throughput
// (s_max_tflops) together with the shape that achieved it (s_max_m_n,
// s_max_k).  Uses cal_tflops for the msec -> TFLOPS conversion.
void printTime(float cublasTime, int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k ){
    float tflops = cal_tflops(m, n, k, cublasTime);
    if (tflops > s_max_tflops){
        s_max_tflops = tflops;
        s_max_m_n = m;
        s_max_k = k;
    }
    std::cout << std::setw(7) << m << ",";
    std::cout << std::setw(7) << n << ",";
    std::cout << std::setw(7) << k << ",";
    std::cout << std::setw(15) << std::setprecision(4) << cublasTime << ",";
    std::cout << std::setw(15) << std::setprecision(4) << tflops << "," << std::endl;
}
// Benchmarks tensor-core GEMM with FP16 inputs and FP16 accumulation for an
// (m x n x k) problem: fills FP16 operands with `value`, runs the GEMM
// `numRepeats` times, and reports the averaged time/TFLOPS via printTime
// (which also updates the running peak in s_max_*).
// Fixes vs. the original: GEMM status is now checked (consistent with
// calFP16Accu32Tensor), alpha/beta are `half` to match the HIP_R_16F compute
// type (GemmEx reads the scalars in the compute type), and the blas handle
// is destroyed instead of leaking once per call.
void calFP16Tensor(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
    half *a_fp16;
    half *b_fp16;
    half *c_cublas;
    float *c_host_cublas;
    const float value = 1.0f;
    hipblasHandle_t cublasHandle;
    hipEvent_t startcublas;
    hipEvent_t stopcublas;
    cudaErrCheck(hipEventCreate(&startcublas));
    cudaErrCheck(hipEventCreate(&stopcublas));
    cublasErrCheck(hipblasCreate(&cublasHandle));
    // Use tensor cores
    cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
    cudaErrCheck(hipMalloc((void**)&a_fp16, m * k * sizeof(half)));
    cudaErrCheck(hipMalloc((void**)&b_fp16, k * n * sizeof(half)));
    cudaErrCheck(hipMalloc((void**)&c_cublas, m * n * sizeof(half)));
    c_host_cublas = (float*)malloc(m * n * sizeof(float));
    // The RNG library doesn't support fp16, so fill with a device kernel.
    hipLaunchKernelGGL(( assignHalfValue) , dim3((m * k + 255) / 256), dim3(256) , 0, 0, a_fp16, m*k, value);
    hipLaunchKernelGGL(( assignHalfValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, b_fp16, k*n, value);
    hipLaunchKernelGGL(( assignHalfValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, c_cublas, m*n, 0.0f);
    // BUGFIX: with compute type HIP_R_16F the scaling factors must be half,
    // not float (matches the hipblasHgemm variant in calFP16CUDA).
    half alpha = 1.0f;
    half beta = 0.0f;
    // Time numRepeats GEMMs with events and report the average.
    cudaErrCheck(hipEventRecord(startcublas));
    for (int iteration = 0; iteration < numRepeats; ++iteration) {
        // BUGFIX: status was previously ignored; check it like the other
        // benchmark variants do.
        cublasErrCheck(hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T,
                m, n, k,
                &alpha,
                a_fp16, HIP_R_16F, m,
                b_fp16, HIP_R_16F, n,
                &beta,
                c_cublas, HIP_R_16F, m,
                HIP_R_16F, CUBLAS_GEMM_DFALT_TENSOR_OP));
    }
    cudaErrCheck(hipEventRecord(stopcublas));
    cudaErrCheck(hipEventSynchronize(stopcublas));
    // TODO: Correctness check (needs an fp16 -> fp32 conversion of c_cublas).
    //cudaErrCheck(hipMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), hipMemcpyDeviceToHost));
    //correctnessCheck(m, n, k, c_host_cublas, value);
    // Check time
    float cublasTime;
    cudaErrCheck(hipEventElapsedTime(&cublasTime, startcublas, stopcublas));
    cublasTime /= numRepeats;
    printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
    cudaErrCheck(hipEventDestroy(startcublas));
    cudaErrCheck(hipEventDestroy(stopcublas));
    // BUGFIX: the blas handle was leaked on every call.
    cublasErrCheck(hipblasDestroy(cublasHandle));
    cudaErrCheck(hipFree(a_fp16));
    cudaErrCheck(hipFree(b_fp16));
    cudaErrCheck(hipFree(c_cublas));
    free(c_host_cublas);
}
// Benchmarks tensor-core GEMM with FP16 inputs and FP32 accumulation for an
// (m x n x k) problem: fills FP16 operands with `value`, runs the GEMM
// `numRepeats` times, verifies C == k*value*value element-wise, and reports
// the averaged time/TFLOPS via printTime (updating the running peak).
// NOTE(review): cublasHandle is created but never destroyed -- leaks once
// per call.
void calFP16Accu32Tensor(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
    half *a_fp16;
    half *b_fp16;
    float *c_cublas;
    float *c_host_cublas;
    const float value = 1.0f;
    hipblasHandle_t cublasHandle;
    hipEvent_t startcublas;
    hipEvent_t stopcublas;
    cudaErrCheck(hipEventCreate(&startcublas));
    cudaErrCheck(hipEventCreate(&stopcublas));
    cublasErrCheck(hipblasCreate(&cublasHandle));
    // Use tensor cores
    cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
    cudaErrCheck(hipMalloc((void**)&a_fp16, m * k * sizeof(half)));
    cudaErrCheck(hipMalloc((void**)&b_fp16, k * n * sizeof(half)));
    cudaErrCheck(hipMalloc((void**)&c_cublas, m * n * sizeof(float)));
    c_host_cublas = (float*)malloc(m * n * sizeof(float));
    // hiprand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
    hipLaunchKernelGGL(( assignHalfValue) , dim3((m * k + 255) / 256), dim3(256) , 0, 0, a_fp16, m*k, value);
    hipLaunchKernelGGL(( assignHalfValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, b_fp16, k*n, value);
    hipLaunchKernelGGL(( assignFloatValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, c_cublas, m*n, 0.0f);
    // FP32 compute type, so float scalars are correct here.
    float alpha = 1.0f;
    float beta = 0.0f;
    // Warp up not really needed
    // Time numRepeats GEMMs with events and report the average.
    cudaErrCheck(hipEventRecord(startcublas));
    for (int iteration = 0; iteration < numRepeats; ++iteration) {
        cublasErrCheck(hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T,
                m, n, k,
                &alpha,
                a_fp16, HIP_R_16F, m,
                b_fp16, HIP_R_16F, n,
                &beta,
                c_cublas, HIP_R_32F, m,
                HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
    }
    cudaErrCheck(hipEventRecord(stopcublas));
    cudaErrCheck(hipEventSynchronize(stopcublas));
    // Correctness check
    cudaErrCheck(hipMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), hipMemcpyDeviceToHost));
    correctnessCheck(m, n, k, c_host_cublas, value);
    // Check time
    float cublasTime;
    cudaErrCheck(hipEventElapsedTime(&cublasTime, startcublas, stopcublas));
    cublasTime /= numRepeats;
    printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
    cudaErrCheck(hipEventDestroy(startcublas));
    cudaErrCheck(hipEventDestroy(stopcublas));
    cudaErrCheck(hipFree(a_fp16));
    cudaErrCheck(hipFree(b_fp16));
    cudaErrCheck(hipFree(c_cublas));
    free(c_host_cublas);
}
// Benchmarks plain FP32 SGEMM on the regular FP32 pipeline (tensor ops
// disabled) for an (m x n x k) problem: fills operands with `value`, runs
// `numRepeats` GEMMs, verifies C == k*value*value, and reports the averaged
// time/TFLOPS via printTime (updating the running peak).
// NOTE(review): the hipblasSgemm status is ignored, and cublasHandle is
// never destroyed (leaks once per call).
void calFP32CUDA(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
    float *a_fp32;
    float *b_fp32;
    float *c_cublas;
    float *c_host_cublas;
    const float value = 1.0f;
    hipblasHandle_t cublasHandle;
    hipEvent_t startcublas;
    hipEvent_t stopcublas;
    cudaErrCheck(hipEventCreate(&startcublas));
    cudaErrCheck(hipEventCreate(&stopcublas));
    cublasErrCheck(hipblasCreate(&cublasHandle));
    // No tensor cores
    cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH));
    cudaErrCheck(hipMalloc((void**)&a_fp32, m * k * sizeof(float)));
    cudaErrCheck(hipMalloc((void**)&b_fp32, k * n * sizeof(float)));
    cudaErrCheck(hipMalloc((void**)&c_cublas, m * n * sizeof(float)));
    c_host_cublas = (float*)malloc(m * n * sizeof(float));
    // Fill operands with a device kernel.
    hipLaunchKernelGGL(( assignFloatValue) , dim3((m * k + 255) / 256), dim3(256) , 0, 0, a_fp32, m*k, value);
    hipLaunchKernelGGL(( assignFloatValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, b_fp32, k*n, value);
    hipLaunchKernelGGL(( assignFloatValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, c_cublas, m*n, 0.0f);
    float alpha = 1.0f;
    float beta = 0.0f;
    // Time numRepeats GEMMs with events and report the average.
    cudaErrCheck(hipEventRecord(startcublas));
    for (int iteration = 0; iteration < numRepeats; ++iteration) {
        hipblasSgemm(cublasHandle,
                HIPBLAS_OP_N,
                HIPBLAS_OP_T,
                m,
                n,
                k,
                &alpha,
                a_fp32, m,
                b_fp32, n,
                &beta,
                c_cublas, m);
    }
    cudaErrCheck(hipEventRecord(stopcublas));
    cudaErrCheck(hipEventSynchronize(stopcublas));
    // Correctness check
    cudaErrCheck(hipMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), hipMemcpyDeviceToHost));
    correctnessCheck(m, n, k, c_host_cublas, value);
    // Check time
    float cublasTime = 0.0f;
    cudaErrCheck(hipEventElapsedTime(&cublasTime, startcublas, stopcublas));
    cublasTime /= numRepeats;
    printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
    cudaErrCheck(hipEventDestroy(startcublas));
    cudaErrCheck(hipEventDestroy(stopcublas));
    cudaErrCheck(hipFree(a_fp32));
    cudaErrCheck(hipFree(b_fp32));
    cudaErrCheck(hipFree(c_cublas));
    free(c_host_cublas);
}
// Benchmarks FP16 HGEMM on the regular FP16 pipeline (tensor ops disabled)
// for an (m x n x k) problem: fills FP16 operands with `value`, runs
// `numRepeats` GEMMs, and reports the averaged time/TFLOPS via printTime
// (updating the running peak).  The correctness check is disabled because
// c_cublas is fp16 while the host buffer is fp32.
// NOTE(review): the hipblasHgemm status is ignored, and cublasHandle is
// never destroyed (leaks once per call).
void calFP16CUDA(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
    half *a_fp16;
    half *b_fp16;
    half *c_cublas;
    float *c_host_cublas;
    const float value = 1.0f;
    hipblasHandle_t cublasHandle;
    hipEvent_t startcublas;
    hipEvent_t stopcublas;
    cudaErrCheck(hipEventCreate(&startcublas));
    cudaErrCheck(hipEventCreate(&stopcublas));
    cublasErrCheck(hipblasCreate(&cublasHandle));
    // No tensor cores
    cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH));
    cudaErrCheck(hipMalloc((void**)&a_fp16, m * k * sizeof(half)));
    cudaErrCheck(hipMalloc((void**)&b_fp16, k * n * sizeof(half)));
    cudaErrCheck(hipMalloc((void**)&c_cublas, m * n * sizeof(half)));
    c_host_cublas = (float*)malloc(m * n * sizeof(float));
    // hiprand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
    hipLaunchKernelGGL(( assignHalfValue) , dim3((m * k + 255) / 256), dim3(256) , 0, 0, a_fp16, m*k, value);
    hipLaunchKernelGGL(( assignHalfValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, b_fp16, k*n, value);
    hipLaunchKernelGGL(( assignHalfValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, c_cublas, m*n, 0.0f);
    // Hgemm takes half scalars (implicit float -> half conversion here).
    half alpha = 1.0f;
    half beta = 0.0f;
    // Time numRepeats GEMMs with events and report the average.
    cudaErrCheck(hipEventRecord(startcublas));
    for (int iteration = 0; iteration < numRepeats; ++iteration) {
        hipblasHgemm(cublasHandle,
                HIPBLAS_OP_N,
                HIPBLAS_OP_T,
                m,
                n,
                k,
                &alpha,
                a_fp16, m,
                b_fp16, n,
                &beta,
                c_cublas, m);
    }
    cudaErrCheck(hipEventRecord(stopcublas));
    cudaErrCheck(hipEventSynchronize(stopcublas));
    // TODO: Correctness check
    //cudaErrCheck(hipMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), hipMemcpyDeviceToHost));
    //correctnessCheck(m, n, k, c_host_cublas, value);
    // Check time
    float cublasTime;
    cudaErrCheck(hipEventElapsedTime(&cublasTime, startcublas, stopcublas));
    cublasTime /= numRepeats;
    printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
    cudaErrCheck(hipEventDestroy(startcublas));
    cudaErrCheck(hipEventDestroy(stopcublas));
    cudaErrCheck(hipFree(a_fp16));
    cudaErrCheck(hipFree(b_fp16));
    cudaErrCheck(hipFree(c_cublas));
    free(c_host_cublas);
}
int main(int argc, char* argv[]) {
int m,n,k;
std::string precision="NULL";
bool perf = true;
if (argc < 3) {
return EXIT_FAILURE;
}
// precision = INT8_TENSOR
// precision = FP16_TENSOR
// precision = FP16_32_TENSOR
// precision = FP32_CUDA
// precision = FP16_CUDA
if (argc == 3) {
precision = argv[1];
std::string tmp = argv[2];
if (tmp == "performance") perf= true;
else if (tmp == "pressure") perf = false;
else {
std::cout << "Invalid parameters!"<<std::endl;
return EXIT_FAILURE;
}
}
float s_max_tflops = 0;
int s_max_m_n = 0;
int s_max_k = 0;
int numRepeats;
/* // deprecated this INT8 test as it will achieve the best perf. Please refer to cublasLt
if (precision == "INT8_TENSOR" || precision == "NULL") {
std::cout << "[TensorCore INT8(INT32 accumulation) Time and TOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TOPS";
std::cout << std::endl;
// for tensorcore test TODO: to verify the int8 with int8 accumulation
for(m=1024, n = 1024; m <= 25600; m+=1024, n+=1024) {
for(k=1024; k <= 20480; k+=1024) {
int8_t *a_;
int8_t *b_;
int *c_cublas;
int *c_host_cublas;
//const int value = 1;
hipblasHandle_t cublasHandle;
hipEvent_t startcublas;
hipEvent_t stopcublas;
cudaErrCheck(hipEventCreate(&startcublas));
cudaErrCheck(hipEventCreate(&stopcublas));
cublasErrCheck(hipblasCreate(&cublasHandle));
// Use tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
cudaErrCheck(hipMalloc((void**)&a_, m * k * sizeof(int8_t)));
cudaErrCheck(hipMalloc((void**)&b_, k * m * sizeof(int8_t)));
cudaErrCheck(hipMalloc((void**)&c_cublas, m * n * sizeof(int)));
c_host_cublas = (int*)malloc(m * n * sizeof(int));
//TODO hiprand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
//assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value);
//assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value);
//assignHalfValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f);
int alpha = 1;
int beta = 0;
int numRepeats = 1;
// Warp up not really needed here as many params will be tested
// Now using cuBLAS
cudaErrCheck(hipEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasErrCheck(hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T,
m, n, k,
&alpha,
a_, HIP_R_8I, m,
b_, HIP_R_8I, n,
&beta,
c_cublas, HIP_R_32I, m,
HIP_R_32I, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
cudaErrCheck(hipEventRecord(stopcublas));
cudaErrCheck(hipEventSynchronize(stopcublas));
// TODO: Correctness check
//cudaErrCheck(hipMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), hipMemcpyDeviceToHost));
//correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime;
cudaErrCheck(hipEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(hipEventDestroy(startcublas));
cudaErrCheck(hipEventDestroy(stopcublas));
cudaErrCheck(hipFree(a_));
cudaErrCheck(hipFree(b_));
cudaErrCheck(hipFree(c_cublas));
free(c_host_cublas);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(hipDeviceReset());
}
*/
//======= for tensorcore test
// for perf test
if (precision == "FP16_TENSOR" && perf == true) {
std::cout << "[TensorCore FP16(FP16 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
s_max_m_n = 0;
s_max_k = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 25600; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
calFP16Tensor( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(hipDeviceReset());
}
// for pressure test
if (precision == "FP16_TENSOR" && perf == false) {
std::cout << "[TensorCore FP16(FP16 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
s_max_m_n = 0;
s_max_k = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP16Tensor( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(hipDeviceReset());
}
// for perf test
if (precision == "FP16_32_TENSOR" && perf == true) {
std::cout << "[TensorCore FP16(FP32 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 25600; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
calFP16Accu32Tensor( m, n, k, s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(hipDeviceReset());
}
// for pressure test
if (precision == "FP16_32_TENSOR" && perf == false) {
std::cout << "[TensorCore FP16(FP32 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP16Accu32Tensor( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(hipDeviceReset());
}
//======= for cudacore test
if (precision == "FP32_CUDA" && perf == true) {
std::cout << "[CUDA core FP32 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 25600; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
calFP32CUDA( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(hipDeviceReset());
}
// for pressure test
if (precision == "FP32_CUDA" && perf == false) {
std::cout << "[CUDA core FP32 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP32CUDA( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(hipDeviceReset());
}
// for perf test
if (precision == "FP16_CUDA" && perf == true) {
std::cout << "[CUDA core FP16 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 25600; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
calFP16CUDA( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(hipDeviceReset());
}
// for pressure test
if (precision == "FP16_CUDA" && perf == false) {
std::cout << "[CUDA core FP16 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP16CUDA( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(hipDeviceReset());
}
return 0;
}
| c9ea5dd3dd5639f849af95a4590f6286cdaac761.cu | #include <iostream>
#include <curand.h>
#include <cublas_v2.h>
#include <iomanip>
#include <vector>
#include <cstdlib>
#define MAX(x, y) ((x>y) ? x : y)
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) {
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
double cal_tflops(int m, int n, int k, double msec)
{
double flops = 2. * m * n * k;
double tflops = (1E-12*flops) / (1E-3*msec);
return tflops;
}
__global__ void assignFloatValue (float *out, int n, float value) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = value;
}
}
__global__ void assignHalfValue (half *out, int n, float value) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = value;
}
}
void correctnessCheck(int m, int n, int k, float *host, float value){
for (int i = 0; i < m * n; i++) {
float val = host[i];
if ( val != k * value * value) {
std::cout << "ERROR value = " << val<< std::endl;
}
}
}
void printTime(float cublasTime, int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k ){
float tflops = cal_tflops(m, n, k, cublasTime);
if (tflops > s_max_tflops){
s_max_tflops = tflops;
s_max_m_n = m;
s_max_k = k;
}
std::cout << std::setw(7) << m << ",";
std::cout << std::setw(7) << n << ",";
std::cout << std::setw(7) << k << ",";
std::cout << std::setw(15) << std::setprecision(4) << cublasTime << ",";
std::cout << std::setw(15) << std::setprecision(4) << tflops << "," << std::endl;
}
void calFP16Tensor(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
half *a_fp16;
half *b_fp16;
half *c_cublas;
float *c_host_cublas;
const float value = 1.0f;
cublasHandle_t cublasHandle;
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaErrCheck(cudaEventCreate(&startcublas));
cudaErrCheck(cudaEventCreate(&stopcublas));
cublasErrCheck(cublasCreate(&cublasHandle));
// Use tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
cudaErrCheck(cudaMalloc((void**)&a_fp16, m * k * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&b_fp16, k * n * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(half)));
c_host_cublas = (float*)malloc(m * n * sizeof(float));
// curand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value);
assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value);
assignHalfValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f);
float alpha = 1.0f;
float beta = 0.0f;
// Now using cuBLAS
cudaErrCheck(cudaEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T,
m, n, k,
&alpha,
a_fp16, CUDA_R_16F, m,
b_fp16, CUDA_R_16F, n,
&beta,
c_cublas, CUDA_R_16F, m,
CUDA_R_16F, CUBLAS_GEMM_DFALT_TENSOR_OP);
}
cudaErrCheck(cudaEventRecord(stopcublas));
cudaErrCheck(cudaEventSynchronize(stopcublas));
// TODO: Correctness check
//cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost));
//correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime;
cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(cudaEventDestroy(startcublas));
cudaErrCheck(cudaEventDestroy(stopcublas));
cudaErrCheck(cudaFree(a_fp16));
cudaErrCheck(cudaFree(b_fp16));
cudaErrCheck(cudaFree(c_cublas));
free(c_host_cublas);
}
void calFP16Accu32Tensor(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
half *a_fp16;
half *b_fp16;
float *c_cublas;
float *c_host_cublas;
const float value = 1.0f;
cublasHandle_t cublasHandle;
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaErrCheck(cudaEventCreate(&startcublas));
cudaErrCheck(cudaEventCreate(&stopcublas));
cublasErrCheck(cublasCreate(&cublasHandle));
// Use tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
cudaErrCheck(cudaMalloc((void**)&a_fp16, m * k * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&b_fp16, k * n * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(float)));
c_host_cublas = (float*)malloc(m * n * sizeof(float));
// curand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value);
assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value);
assignFloatValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f);
float alpha = 1.0f;
float beta = 0.0f;
// Warp up not really needed
// Now using cuBLAS
cudaErrCheck(cudaEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T,
m, n, k,
&alpha,
a_fp16, CUDA_R_16F, m,
b_fp16, CUDA_R_16F, n,
&beta,
c_cublas, CUDA_R_32F, m,
CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
cudaErrCheck(cudaEventRecord(stopcublas));
cudaErrCheck(cudaEventSynchronize(stopcublas));
// Correctness check
cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost));
correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime;
cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(cudaEventDestroy(startcublas));
cudaErrCheck(cudaEventDestroy(stopcublas));
cudaErrCheck(cudaFree(a_fp16));
cudaErrCheck(cudaFree(b_fp16));
cudaErrCheck(cudaFree(c_cublas));
free(c_host_cublas);
}
void calFP32CUDA(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
float *a_fp32;
float *b_fp32;
float *c_cublas;
float *c_host_cublas;
const float value = 1.0f;
cublasHandle_t cublasHandle;
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaErrCheck(cudaEventCreate(&startcublas));
cudaErrCheck(cudaEventCreate(&stopcublas));
cublasErrCheck(cublasCreate(&cublasHandle));
// No tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH));
cudaErrCheck(cudaMalloc((void**)&a_fp32, m * k * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&b_fp32, k * n * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(float)));
c_host_cublas = (float*)malloc(m * n * sizeof(float));
// curand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
assignFloatValue <<< (m * k + 255) / 256, 256 >>> (a_fp32, m*k, value);
assignFloatValue <<< (k * n + 255) / 256, 256 >>> (b_fp32, k*n, value);
assignFloatValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f);
float alpha = 1.0f;
float beta = 0.0f;
cudaErrCheck(cudaEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasSgemm(cublasHandle,
CUBLAS_OP_N,
CUBLAS_OP_T,
m,
n,
k,
&alpha,
a_fp32, m,
b_fp32, n,
&beta,
c_cublas, m);
}
cudaErrCheck(cudaEventRecord(stopcublas));
cudaErrCheck(cudaEventSynchronize(stopcublas));
// Correctness check
cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost));
correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime = 0.0f;
cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(cudaEventDestroy(startcublas));
cudaErrCheck(cudaEventDestroy(stopcublas));
cudaErrCheck(cudaFree(a_fp32));
cudaErrCheck(cudaFree(b_fp32));
cudaErrCheck(cudaFree(c_cublas));
free(c_host_cublas);
}
void calFP16CUDA(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
half *a_fp16;
half *b_fp16;
half *c_cublas;
float *c_host_cublas;
const float value = 1.0f;
cublasHandle_t cublasHandle;
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaErrCheck(cudaEventCreate(&startcublas));
cudaErrCheck(cudaEventCreate(&stopcublas));
cublasErrCheck(cublasCreate(&cublasHandle));
// No tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH));
cudaErrCheck(cudaMalloc((void**)&a_fp16, m * k * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&b_fp16, k * n * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(half)));
c_host_cublas = (float*)malloc(m * n * sizeof(float));
// curand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value);
assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value);
assignHalfValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f);
half alpha = 1.0f;
half beta = 0.0f;
// Now using cuBLAS
cudaErrCheck(cudaEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasHgemm(cublasHandle,
CUBLAS_OP_N,
CUBLAS_OP_T,
m,
n,
k,
&alpha,
a_fp16, m,
b_fp16, n,
&beta,
c_cublas, m);
}
cudaErrCheck(cudaEventRecord(stopcublas));
cudaErrCheck(cudaEventSynchronize(stopcublas));
// TODO: Correctness check
//cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost));
//correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime;
cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(cudaEventDestroy(startcublas));
cudaErrCheck(cudaEventDestroy(stopcublas));
cudaErrCheck(cudaFree(a_fp16));
cudaErrCheck(cudaFree(b_fp16));
cudaErrCheck(cudaFree(c_cublas));
free(c_host_cublas);
}
int main(int argc, char* argv[]) {
int m,n,k;
std::string precision="NULL";
bool perf = true;
if (argc < 3) {
return EXIT_FAILURE;
}
// precision = INT8_TENSOR
// precision = FP16_TENSOR
// precision = FP16_32_TENSOR
// precision = FP32_CUDA
// precision = FP16_CUDA
if (argc == 3) {
precision = argv[1];
std::string tmp = argv[2];
if (tmp == "performance") perf= true;
else if (tmp == "pressure") perf = false;
else {
std::cout << "Invalid parameters!"<<std::endl;
return EXIT_FAILURE;
}
}
float s_max_tflops = 0;
int s_max_m_n = 0;
int s_max_k = 0;
int numRepeats;
/* // deprecated this INT8 test as it will achieve the best perf. Please refer to cublasLt
if (precision == "INT8_TENSOR" || precision == "NULL") {
std::cout << "[TensorCore INT8(INT32 accumulation) Time and TOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TOPS";
std::cout << std::endl;
// for tensorcore test TODO: to verify the int8 with int8 accumulation
for(m=1024, n = 1024; m <= 25600; m+=1024, n+=1024) {
for(k=1024; k <= 20480; k+=1024) {
int8_t *a_;
int8_t *b_;
int *c_cublas;
int *c_host_cublas;
//const int value = 1;
cublasHandle_t cublasHandle;
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaErrCheck(cudaEventCreate(&startcublas));
cudaErrCheck(cudaEventCreate(&stopcublas));
cublasErrCheck(cublasCreate(&cublasHandle));
// Use tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
cudaErrCheck(cudaMalloc((void**)&a_, m * k * sizeof(int8_t)));
cudaErrCheck(cudaMalloc((void**)&b_, k * m * sizeof(int8_t)));
cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(int)));
c_host_cublas = (int*)malloc(m * n * sizeof(int));
//TODO curand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
//assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value);
//assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value);
//assignHalfValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f);
int alpha = 1;
int beta = 0;
int numRepeats = 1;
// Warp up not really needed here as many params will be tested
// Now using cuBLAS
cudaErrCheck(cudaEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T,
m, n, k,
&alpha,
a_, CUDA_R_8I, m,
b_, CUDA_R_8I, n,
&beta,
c_cublas, CUDA_R_32I, m,
CUDA_R_32I, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
cudaErrCheck(cudaEventRecord(stopcublas));
cudaErrCheck(cudaEventSynchronize(stopcublas));
// TODO: Correctness check
//cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost));
//correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime;
cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(cudaEventDestroy(startcublas));
cudaErrCheck(cudaEventDestroy(stopcublas));
cudaErrCheck(cudaFree(a_));
cudaErrCheck(cudaFree(b_));
cudaErrCheck(cudaFree(c_cublas));
free(c_host_cublas);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(cudaDeviceReset());
}
*/
//======= for tensorcore test
// for perf test
if (precision == "FP16_TENSOR" && perf == true) {
std::cout << "[TensorCore FP16(FP16 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
s_max_m_n = 0;
s_max_k = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 25600; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
calFP16Tensor( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(cudaDeviceReset());
}
// for pressure test
if (precision == "FP16_TENSOR" && perf == false) {
std::cout << "[TensorCore FP16(FP16 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
s_max_m_n = 0;
s_max_k = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP16Tensor( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(cudaDeviceReset());
}
// for perf test
if (precision == "FP16_32_TENSOR" && perf == true) {
std::cout << "[TensorCore FP16(FP32 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 25600; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
calFP16Accu32Tensor( m, n, k, s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(cudaDeviceReset());
}
// for pressure test
if (precision == "FP16_32_TENSOR" && perf == false) {
std::cout << "[TensorCore FP16(FP32 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP16Accu32Tensor( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(cudaDeviceReset());
}
//======= for cudacore test
if (precision == "FP32_CUDA" && perf == true) {
std::cout << "[CUDA core FP32 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 25600; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
calFP32CUDA( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(cudaDeviceReset());
}
// for pressure test
if (precision == "FP32_CUDA" && perf == false) {
std::cout << "[CUDA core FP32 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP32CUDA( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(cudaDeviceReset());
}
// for perf test
if (precision == "FP16_CUDA" && perf == true) {
std::cout << "[CUDA core FP16 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 25600; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
calFP16CUDA( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(cudaDeviceReset());
}
// for pressure test
if (precision == "FP16_CUDA" && perf == false) {
std::cout << "[CUDA core FP16 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP16CUDA( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(cudaDeviceReset());
}
return 0;
}
|
5209660071219158bf7b97967044d19a2ae13ab7.hip | // !!! This is a file automatically generated by hipify!!!
#include <chrono>
#include "constants.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include "utils.h"
namespace mean_shift::cuda {
__global__ void mean_shift(float *data, float *data_next) {
size_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < N) {
size_t row = tid * D;
float new_position[D] = {0.};
float tot_weight = 0.;
for (size_t i = 0; i < N; ++i) {
size_t row_n = i * D;
float sq_dist = 0.;
for (size_t j = 0; j < D; ++j) {
sq_dist += (data[row + j] - data[row_n + j]) * (data[row + j] - data[row_n + j]);
}
if (sq_dist <= RADIUS) {
float weight = expf(-sq_dist / DBL_SIGMA_SQ);
for (size_t j = 0; j < D; ++j) {
new_position[j] += weight * data[row_n + j];
}
tot_weight += weight;
}
}
for (size_t j = 0; j < D; ++j) {
data_next[row + j] = new_position[j] / tot_weight;
}
}
return;
}
}
int main() {
constexpr auto N = mean_shift::cuda::N;
constexpr auto D = mean_shift::cuda::D;
constexpr auto M = mean_shift::cuda::M;
constexpr auto THREADS = mean_shift::cuda::THREADS;
constexpr auto BLOCKS = mean_shift::cuda::BLOCKS;
const auto PATH_TO_DATA = mean_shift::cuda::PATH_TO_DATA;
const auto PATH_TO_CENTROIDS = mean_shift::cuda::PATH_TO_CENTROIDS;
constexpr auto DIST_TO_REAL = mean_shift::cuda::DIST_TO_REAL;
mean_shift::cuda::utils::print_info(PATH_TO_DATA, N, D, BLOCKS, THREADS);
// Load data
std::array<float, N * D> data = mean_shift::cuda::utils::load_csv<N, D>(PATH_TO_DATA, ',');
std::array<float, N * D> data_next {};
float *dev_data;
float *dev_data_next;
// Allocate GPU memory
size_t data_bytes = N * D * sizeof(float);
hipMalloc(&dev_data, data_bytes);
hipMalloc(&dev_data_next, data_bytes);
// Copy to GPU memory
hipMemcpy(dev_data, data.data(), data_bytes, hipMemcpyHostToDevice);
hipMemcpy(dev_data_next, data_next.data(), data_bytes, hipMemcpyHostToDevice);
// Run mean shift clustering and time the execution
const auto before = std::chrono::system_clock::now();
for (size_t i = 0; i < mean_shift::cuda::NUM_ITER; ++i) {
hipLaunchKernelGGL(( mean_shift::cuda::mean_shift), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_data, dev_data_next);
hipDeviceSynchronize();
mean_shift::cuda::utils::swap(dev_data, dev_data_next);
}
hipMemcpy(data.data(), dev_data, data_bytes, hipMemcpyDeviceToHost);
const auto centroids = mean_shift::cuda::utils::reduce_to_centroids<N, D>(data, mean_shift::cuda::MIN_DISTANCE);
const auto after = std::chrono::system_clock::now();
const std::chrono::duration<double, std::milli> duration = after - before;
std::cout << "\nNaive took " << duration.count() << " ms\n" << std::endl;
hipFree(dev_data);
hipFree(dev_data_next);
mean_shift::cuda::utils::print_data<D>(centroids);
// Check if correct number
assert(centroids.size() == M);
// Check if these centroids are sufficiently close to real ones
const std::array<float, M * D> real = mean_shift::cuda::utils::load_csv<M, D>(PATH_TO_CENTROIDS, ',');
const bool are_close = mean_shift::cuda::utils::are_close_to_real<M, D>(centroids, real, DIST_TO_REAL);
assert(are_close);
std::cout << "SUCCESS!\n";
return 0;
} | 5209660071219158bf7b97967044d19a2ae13ab7.cu | #include <chrono>
#include "constants.h"
#include <cuda.h>
#include <iostream>
#include "utils.h"
namespace mean_shift::cuda {
__global__ void mean_shift(float *data, float *data_next) {
size_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < N) {
size_t row = tid * D;
float new_position[D] = {0.};
float tot_weight = 0.;
for (size_t i = 0; i < N; ++i) {
size_t row_n = i * D;
float sq_dist = 0.;
for (size_t j = 0; j < D; ++j) {
sq_dist += (data[row + j] - data[row_n + j]) * (data[row + j] - data[row_n + j]);
}
if (sq_dist <= RADIUS) {
float weight = expf(-sq_dist / DBL_SIGMA_SQ);
for (size_t j = 0; j < D; ++j) {
new_position[j] += weight * data[row_n + j];
}
tot_weight += weight;
}
}
for (size_t j = 0; j < D; ++j) {
data_next[row + j] = new_position[j] / tot_weight;
}
}
return;
}
}
int main() {
constexpr auto N = mean_shift::cuda::N;
constexpr auto D = mean_shift::cuda::D;
constexpr auto M = mean_shift::cuda::M;
constexpr auto THREADS = mean_shift::cuda::THREADS;
constexpr auto BLOCKS = mean_shift::cuda::BLOCKS;
const auto PATH_TO_DATA = mean_shift::cuda::PATH_TO_DATA;
const auto PATH_TO_CENTROIDS = mean_shift::cuda::PATH_TO_CENTROIDS;
constexpr auto DIST_TO_REAL = mean_shift::cuda::DIST_TO_REAL;
mean_shift::cuda::utils::print_info(PATH_TO_DATA, N, D, BLOCKS, THREADS);
// Load data
std::array<float, N * D> data = mean_shift::cuda::utils::load_csv<N, D>(PATH_TO_DATA, ',');
std::array<float, N * D> data_next {};
float *dev_data;
float *dev_data_next;
// Allocate GPU memory
size_t data_bytes = N * D * sizeof(float);
cudaMalloc(&dev_data, data_bytes);
cudaMalloc(&dev_data_next, data_bytes);
// Copy to GPU memory
cudaMemcpy(dev_data, data.data(), data_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(dev_data_next, data_next.data(), data_bytes, cudaMemcpyHostToDevice);
// Run mean shift clustering and time the execution
const auto before = std::chrono::system_clock::now();
for (size_t i = 0; i < mean_shift::cuda::NUM_ITER; ++i) {
mean_shift::cuda::mean_shift<<<BLOCKS, THREADS>>>(dev_data, dev_data_next);
cudaDeviceSynchronize();
mean_shift::cuda::utils::swap(dev_data, dev_data_next);
}
cudaMemcpy(data.data(), dev_data, data_bytes, cudaMemcpyDeviceToHost);
const auto centroids = mean_shift::cuda::utils::reduce_to_centroids<N, D>(data, mean_shift::cuda::MIN_DISTANCE);
const auto after = std::chrono::system_clock::now();
const std::chrono::duration<double, std::milli> duration = after - before;
std::cout << "\nNaive took " << duration.count() << " ms\n" << std::endl;
cudaFree(dev_data);
cudaFree(dev_data_next);
mean_shift::cuda::utils::print_data<D>(centroids);
// Check if correct number
assert(centroids.size() == M);
// Check if these centroids are sufficiently close to real ones
const std::array<float, M * D> real = mean_shift::cuda::utils::load_csv<M, D>(PATH_TO_CENTROIDS, ',');
const bool are_close = mean_shift::cuda::utils::are_close_to_real<M, D>(centroids, real, DIST_TO_REAL);
assert(are_close);
std::cout << "SUCCESS!\n";
return 0;
} |
39aaef7914404f8fe4adc39785a73f9e8617f70d.hip | // !!! This is a file automatically generated by hipify!!!
#include <benchmark/benchmark.h>
#include <bm_config.hpp>
#include <matazure/tensor>
using namespace matazure;
#ifdef USE_ROCM
static void BM_linear_lambda_tensor_persist_gold(benchmark::State &st) {
tensor<float, 1> tsf1(st.range(0));
while (st.KeepRunning()) {
tensor<float, 1> ts_re(tsf1.shape());
for (int_t i = 0; i < ts_re.size(); ++i) {
ts_re[i] = 2.0f * tsf1[i];
}
}
auto bytes_size = static_cast<size_t>(tsf1.size()) * sizeof(decltype(tsf1[0]));
st.SetBytesProcessed(st.iterations() * bytes_size);
}
static void BM_linear_lambda_tensor_persist(benchmark::State &st) {
tensor<float, 1> tsf1(st.range(0));
while (st.KeepRunning()) {
auto tsf1_re = make_lambda(tsf1.shape(), [tsf1](int_t i) {
return 2.0f * tsf1[i];
}).persist();
}
auto bytes_size = static_cast<size_t>(tsf1.size()) * sizeof(decltype(tsf1[0]));
st.SetBytesProcessed(st.iterations() * bytes_size);
}
BENCHMARK(BM_linear_lambda_tensor_persist_gold)->Range(1 << 10, 1 << (bm_config::max_host_memory_exponent() - 2))->UseRealTime();
BENCHMARK(BM_linear_lambda_tensor_persist)->Range(1 << 10, 1 << (bm_config::max_host_memory_exponent() - 2))->UseRealTime();
#endif
| 39aaef7914404f8fe4adc39785a73f9e8617f70d.cu | #include <benchmark/benchmark.h>
#include <bm_config.hpp>
#include <matazure/tensor>
using namespace matazure;
#ifdef USE_CUDA
static void BM_linear_lambda_tensor_persist_gold(benchmark::State &st) {
tensor<float, 1> tsf1(st.range(0));
while (st.KeepRunning()) {
tensor<float, 1> ts_re(tsf1.shape());
for (int_t i = 0; i < ts_re.size(); ++i) {
ts_re[i] = 2.0f * tsf1[i];
}
}
auto bytes_size = static_cast<size_t>(tsf1.size()) * sizeof(decltype(tsf1[0]));
st.SetBytesProcessed(st.iterations() * bytes_size);
}
static void BM_linear_lambda_tensor_persist(benchmark::State &st) {
tensor<float, 1> tsf1(st.range(0));
while (st.KeepRunning()) {
auto tsf1_re = make_lambda(tsf1.shape(), [tsf1](int_t i) {
return 2.0f * tsf1[i];
}).persist();
}
auto bytes_size = static_cast<size_t>(tsf1.size()) * sizeof(decltype(tsf1[0]));
st.SetBytesProcessed(st.iterations() * bytes_size);
}
BENCHMARK(BM_linear_lambda_tensor_persist_gold)->Range(1 << 10, 1 << (bm_config::max_host_memory_exponent() - 2))->UseRealTime();
BENCHMARK(BM_linear_lambda_tensor_persist)->Range(1 << 10, 1 << (bm_config::max_host_memory_exponent() - 2))->UseRealTime();
#endif
|
3660fbe7e1413c016fde3acd577dedc539fbef6c.hip | // !!! This is a file automatically generated by hipify!!!
#include <device_launch_parameters.h>
#include <hip/hip_runtime_api.h>
#include <cstdio>
// Device input vectors
int *d_a;
//Device output vector
int *d_b;
__global__ void naivePrefixSum(int *A, int *B, int size, int iteration) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
if (index >= (1 << (iteration - 1)))
A[index] = B[(int) (index - (1 << (iteration - 1)))] + B[index];
else
A[index] = B[index];
}
}
__global__ void upSweep(int *A, int size, int iteration) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
if (!((index + 1) % (1 << (iteration + 1))))
A[index] = A[index - (1<<iteration)] + A[index];
}
}
__global__ void setLastToCero(int *A, int size) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index == size - 1) {
A[index] = 0;
}
}
// Down-sweep phase of the Blelloch scan: each participating node swaps
// with its left sibling and pushes the partial sum down the implicit tree.
__global__ void downSweep(int *A, int size, int iteration) {
 const int tid = blockIdx.x * blockDim.x + threadIdx.x;
 if (tid >= size) return;
 const int span = 1 << (iteration + 1);
 if ((tid + 1) % span == 0) {
  const int left = tid - (span >> 1);
  const int keep = A[left];
  A[left] = A[tid];   // left child inherits the running prefix
  A[tid] += keep;     // right child adds the stashed left subtotal
 }
}
// Allocates the two device work buffers (d_a, d_b) used by the scan
// drivers; call once before runPrefixCuda()/runNaiveCuda().
// Fix: the original ignored the hipMalloc return values, so an allocation
// failure silently left null device pointers for later kernels to fault
// on. Failures are now reported to stderr at the point of allocation.
void initCuda(int size) {
 size_t bytes = (size_t)size * sizeof(int);
 hipError_t err = hipMalloc((void **) &d_a, bytes);
 if (err != hipSuccess)
  fprintf(stderr, "hipMalloc d_a failed: %s\n", hipGetErrorString(err));
 err = hipMalloc((void **) &d_b, bytes);
 if (err != hipSuccess)
  fprintf(stderr, "hipMalloc d_b failed: %s\n", hipGetErrorString(err));
}
// Releases the device buffers allocated by initCuda(), in reverse
// allocation order.
void destroyCuda() {
 hipFree(d_b);
 hipFree(d_a);
}
// Host driver for the work-efficient (Blelloch) exclusive scan. Scans A
// (length `size`, assumed to be a power of two) in place on the host
// array: up-sweep, clear the root, then down-sweep. One thread per block,
// so correctness does not depend on a particular block shape.
void runPrefixCuda(int *A, int size) {
 const size_t bytes = (size_t)size * sizeof(int);
 // Stage the input on the device.
 hipMemcpy(d_a, A, bytes, hipMemcpyHostToDevice);
 // log2() is exact for power-of-two sizes, so the step count matches the
 // depth of the reduction tree.
 const int lastStep = (int)(log2(size) - 1);
 for (int step = 0; step <= lastStep; ++step) {
  hipLaunchKernelGGL(( upSweep), dim3(size), dim3(1) , 0, 0, d_a, size, step);
 }
 hipLaunchKernelGGL(( setLastToCero), dim3(size),dim3(1), 0, 0, d_a, size);
 for (int step = lastStep; step >= 0; --step) {
  hipLaunchKernelGGL(( downSweep), dim3(size), dim3(1) , 0, 0, d_a, size, step);
 }
 // Bring the scanned values back into the caller's buffer.
 hipMemcpy(A, d_a, bytes, hipMemcpyDeviceToHost);
}
// Host driver for the naive (Hillis–Steele) inclusive scan. Scans A
// (length `size`, assumed to be a power of two) in place on the host array.
//
// Fix: the original ping-pong loop swapped the *global* pointers d_a/d_b,
// permanently exchanging them as a side effect of every call with an odd
// number of steps. The ping-pong now runs on local aliases so the globals
// keep the roles initCuda() gave them. The loop bound is hoisted out of
// the loop, and the dead commented-out launch-config snippet is removed.
void runNaiveCuda(int *A, int size) {
 // Size, in bytes, of each vector
 size_t bytes = size*sizeof(int);
 // Both buffers start as copies of the input: the first step reads the
 // source values while writing the first round of partial sums.
 hipMemcpy(d_a, A, bytes, hipMemcpyHostToDevice);
 hipMemcpy(d_b, A, bytes, hipMemcpyHostToDevice);
 int *dst = d_a;  // buffer written by the next kernel step
 int *src = d_b;  // buffer read by the next kernel step
 const int steps = (int)log2(size);
 for (int i = 1; i <= steps; ++i) {
  hipLaunchKernelGGL(( naivePrefixSum), dim3(size), dim3(1) , 0, 0, dst, src, size, i);
  int *tmp = dst;
  dst = src;
  src = tmp;
 }
 // After the final swap, `src` points at the buffer the last step wrote.
 hipMemcpy( A, src, bytes, hipMemcpyDeviceToHost );
} | 3660fbe7e1413c016fde3acd577dedc539fbef6c.cu | #include <device_launch_parameters.h>
#include <cuda_runtime_api.h>
#include <cstdio>
// Device input vectors
int *d_a;
//Device output vector
int *d_b;
// One step of the naive (Hillis–Steele style) inclusive scan.
// Reads partial sums from B and writes the updated sums to A.
// `iteration` starts at 1, so the hop distance is 2^(iteration-1).
__global__ void naivePrefixSum(int *A, int *B, int size, int iteration) {
 const int tid = blockIdx.x * blockDim.x + threadIdx.x;
 if (tid >= size) return;
 const int hop = 1 << (iteration - 1);
 // Elements past the hop distance add their left partner's partial sum;
 // earlier elements are carried over unchanged.
 A[tid] = (tid >= hop) ? B[tid - hop] + B[tid] : B[tid];
}
// Up-sweep (reduce) phase of the Blelloch scan: at step `iteration`,
// every 2^(iteration+1)-th element accumulates its left sibling,
// which sits 2^iteration positions to the left.
__global__ void upSweep(int *A, int size, int iteration) {
 const int tid = blockIdx.x * blockDim.x + threadIdx.x;
 if (tid >= size) return;
 const int span = 1 << (iteration + 1);
 if ((tid + 1) % span == 0)
  A[tid] += A[tid - (span >> 1)];
}
// Clears the last element of A (the root of the up-sweep tree), the
// required seed for the down-sweep phase of the exclusive Blelloch scan.
__global__ void setLastToCero(int *A, int size) {
 const int tid = blockIdx.x * blockDim.x + threadIdx.x;
 if (tid == size - 1)
  A[tid] = 0;
}
// Down-sweep phase of the Blelloch scan: each participating node swaps
// with its left sibling and pushes the partial sum down the implicit tree.
__global__ void downSweep(int *A, int size, int iteration) {
 const int tid = blockIdx.x * blockDim.x + threadIdx.x;
 if (tid >= size) return;
 const int span = 1 << (iteration + 1);
 if ((tid + 1) % span == 0) {
  const int left = tid - (span >> 1);
  const int keep = A[left];
  A[left] = A[tid];   // left child inherits the running prefix
  A[tid] += keep;     // right child adds the stashed left subtotal
 }
}
// Allocates the two device work buffers (d_a, d_b) used by the scan
// drivers; call once before runPrefixCuda()/runNaiveCuda().
// Fix: the original ignored the cudaMalloc return values, so an
// allocation failure silently left null device pointers for later kernels
// to fault on. Failures are now reported to stderr at the allocation site.
void initCuda(int size) {
 size_t bytes = (size_t)size * sizeof(int);
 cudaError_t err = cudaMalloc((void **) &d_a, bytes);
 if (err != cudaSuccess)
  fprintf(stderr, "cudaMalloc d_a failed: %s\n", cudaGetErrorString(err));
 err = cudaMalloc((void **) &d_b, bytes);
 if (err != cudaSuccess)
  fprintf(stderr, "cudaMalloc d_b failed: %s\n", cudaGetErrorString(err));
}
// Releases the device buffers allocated by initCuda(), in reverse
// allocation order.
void destroyCuda() {
 cudaFree(d_b);
 cudaFree(d_a);
}
// Host driver for the work-efficient (Blelloch) exclusive scan. Scans A
// (length `size`, assumed to be a power of two) in place on the host
// array: up-sweep, clear the root, then down-sweep. One thread per block,
// so correctness does not depend on a particular block shape.
void runPrefixCuda(int *A, int size) {
 const size_t bytes = (size_t)size * sizeof(int);
 // Stage the input on the device.
 cudaMemcpy(d_a, A, bytes, cudaMemcpyHostToDevice);
 // log2() is exact for power-of-two sizes, so the step count matches the
 // depth of the reduction tree.
 const int lastStep = (int)(log2(size) - 1);
 for (int step = 0; step <= lastStep; ++step) {
  upSweep<<< size, 1 >>>(d_a, size, step);
 }
 setLastToCero<<<size, 1>>>(d_a, size);
 for (int step = lastStep; step >= 0; --step) {
  downSweep<<< size, 1 >>>(d_a, size, step);
 }
 // Bring the scanned values back into the caller's buffer.
 cudaMemcpy(A, d_a, bytes, cudaMemcpyDeviceToHost);
}
// Host driver for the naive (Hillis–Steele) inclusive scan. Scans A
// (length `size`, assumed to be a power of two) in place on the host array.
//
// Fix: the original ping-pong loop swapped the *global* pointers d_a/d_b,
// permanently exchanging them as a side effect of every call with an odd
// number of steps. The ping-pong now runs on local aliases so the globals
// keep the roles initCuda() gave them. The loop bound is hoisted out of
// the loop, and the dead commented-out launch-config snippet is removed.
void runNaiveCuda(int *A, int size) {
 // Size, in bytes, of each vector
 size_t bytes = size*sizeof(int);
 // Both buffers start as copies of the input: the first step reads the
 // source values while writing the first round of partial sums.
 cudaMemcpy(d_a, A, bytes, cudaMemcpyHostToDevice);
 cudaMemcpy(d_b, A, bytes, cudaMemcpyHostToDevice);
 int *dst = d_a;  // buffer written by the next kernel step
 int *src = d_b;  // buffer read by the next kernel step
 const int steps = (int)log2(size);
 for (int i = 1; i <= steps; ++i) {
  naivePrefixSum<<< size, 1 >>>(dst, src, size, i);
  int *tmp = dst;
  dst = src;
  src = tmp;
 }
 // After the final swap, `src` points at the buffer the last step wrote.
 cudaMemcpy( A, src, bytes, cudaMemcpyDeviceToHost );
}
09ebe507dffb19e32f26c2cf5bd0ac0fe8d60835.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sys/time.h>
#include <fstream>
#include <functional>
#include <unordered_set>
#include "HugeCTR/include/data_generator.hpp"
#include "HugeCTR/include/data_readers/data_reader.hpp"
#include "HugeCTR/include/embeddings/distributed_slot_sparse_embedding_hash.hpp"
#include "gtest/gtest.h"
#include "roctracer/roctx.h"
#include "utest/embedding/embedding_test_utils.hpp"
#include "utest/embedding/sparse_embedding_hash_cpu.hpp"
#include "utest/test_utils.h"
using namespace HugeCTR;
using namespace embedding_test;
namespace {
//---------------------------------------------------------------------------------------
// global params for all testing
// Shapes, hyper-parameters, and fixture file names shared by every test below.
const int train_batch_num = 10; // can not more than 32
const int test_batch_num = 1;
const int train_batchsize = 1024;
const int test_batchsize = 2560;
const int slot_num = 26;
const int max_nnz_per_slot = 1;
const int max_feature_num = max_nnz_per_slot * slot_num; // max_feature_num in a sample
const long long vocabulary_size = 100000;
const int embedding_vec_size = 64;
const int combiner = 0; // 0-sum, 1-mean
const long long label_dim = 1;
const long long dense_dim = 0;
// Key type used throughout the file for hash-table keys.
typedef long long T;
const float scaler = 1.0f; // used in mixed precision training
// In order to not allocate the total size of hash table on each GPU, the users need to set the
// size of max_vocabulary_size_per_gpu, which should be more than vocabulary_size/gpu_count,
// eg: 1.25x of that.
const int num_chunk_threads = 1; // must be 1 for CPU and GPU results comparation
const int num_files = 1;
const Check_t CHK = Check_t::Sum; // Check_t::Sum
const char *train_file_list_name = "train_file_list.txt";
const char *test_file_list_name = "test_file_list.txt";
const char *prefix = "./data_reader_test_data/temp_dataset_";
const char *hash_table_file_name = "distributed_hash_table.bin";
const char *opt_file_name = "distributed_opt.bin";
//-----------------------------------------------------------------------------------------
// End-to-end train/eval test for DistributedSlotSparseEmbeddingHash<T, TypeEmbeddingComp>.
// Generates synthetic Norm-format datasets, then for each training batch runs
// forward/backward/update_params on both the GPU embedding and a CPU reference
// (SparseEmbeddingHashCpu) and asserts the results agree within `tolerance`.
// Finally round-trips the table and optimizer state through dump/load and runs
// one evaluation batch against a freshly constructed CPU reference.
template <typename TypeEmbeddingComp>
void train_and_test(const std::vector<int> &device_list, const Optimizer_t &optimizer,
                    const Update_t &update_type) {
  OptHyperParams<TypeEmbeddingComp> hyper_params;
  hyper_params.adam.beta1 = 0.9f;
  hyper_params.adam.beta2 = 0.999f;
  float tolerance;
  // Half-precision accumulates more rounding error, so it gets a looser
  // epsilon and comparison tolerance than the fp32 path.
  if (std::is_same<TypeEmbeddingComp, __half>::value) {
    hyper_params.adam.epsilon = 1e-4f;
    tolerance = 5e-3f;
  } else {
    hyper_params.adam.epsilon = 1e-7f;
    tolerance = 1e-4f;
  }
  hyper_params.momentum.factor = 0.9f;
  hyper_params.nesterov.mu = 0.9f;
  const float lr = optimizer == Optimizer_t::Adam ? 0.001f : 0.01f;
  const OptParams<TypeEmbeddingComp> opt_params = {optimizer, lr, hyper_params, update_type,
                                                   scaler};
  // Single-process defaults; numprocs/pid are never reassigned here, so the
  // rank-0 branches below always execute.
  int numprocs = 1, pid = 0;
  std::vector<std::vector<int>> vvgpu;
  test::mpi_init();
  for (int i = 0; i < numprocs; i++) {
    vvgpu.push_back(device_list);
  }
  const auto &resource_manager = ResourceManager::create(vvgpu, 0);
  if (pid == 0) {
    // re-generate the dataset files
    {
      std::ifstream fs(train_file_list_name);
      if (fs.good()) {
        std::remove(train_file_list_name);
      }
    }
    {
      std::ifstream fs(test_file_list_name);
      if (fs.good()) {
        std::remove(test_file_list_name);
      }
    }
    // data generation
    HugeCTR::data_generation_for_test<T, CHK>(
        train_file_list_name, prefix, num_files, train_batch_num * train_batchsize, slot_num,
        vocabulary_size, label_dim, dense_dim, max_nnz_per_slot);
    HugeCTR::data_generation_for_test<T, CHK>(
        test_file_list_name, prefix, num_files, test_batch_num * test_batchsize, slot_num,
        vocabulary_size, label_dim, dense_dim, max_nnz_per_slot);
  }
#ifdef ENABLE_MPI
  MPI_Barrier(MPI_COMM_WORLD);
#endif
  // setup a data reader
  const DataReaderSparseParam param = {DataReaderSparse_t::Distributed, max_nnz_per_slot * slot_num,
                                       max_nnz_per_slot, slot_num};
  std::vector<DataReaderSparseParam> params;
  params.push_back(param);
  std::unique_ptr<DataReader<T>> train_data_reader(new DataReader<T>(
      train_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0));
  train_data_reader->create_drwg_norm(train_file_list_name, CHK);
  std::unique_ptr<DataReader<T>> test_data_reader(new DataReader<T>(
      test_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0));
  test_data_reader->create_drwg_norm(test_file_list_name, CHK);
  // init hash table file
  if (pid == 0) {
    std::ofstream fs(hash_table_file_name);
    if (!fs.is_open()) {
      ERROR_MESSAGE_("Error: file not open for writing");
    }
    test::UniformDataSimulator fdata_sim;
    std::unique_ptr<float[]> buf(new float[embedding_vec_size]);
    // Write one (key, embedding-vector) record per vocabulary entry with
    // sequential keys and uniform random values in [-0.1, 0.1].
    for (long long i = 0; i < vocabulary_size; i++) {
      T key = (T)i;
      // T key = ldata_sim.get_num();
      // CAUSION: can not set random keys here, because we need to ensure that:
      // 1) we can find keys in the data file from this hash table
      // 2) there are no repeated keys
      fs.write((char *)&key, sizeof(T));
      fdata_sim.fill(buf.get(), embedding_vec_size, -0.1f, 0.1f);
      fs.write(reinterpret_cast<const char *>(buf.get()), embedding_vec_size * sizeof(float));
    }
    fs.close();
  }
#ifdef ENABLE_MPI
  MPI_Barrier(MPI_COMM_WORLD);
#endif
  const SparseEmbeddingHashParams<TypeEmbeddingComp> embedding_params = {
      train_batchsize, test_batchsize, vocabulary_size, {}, embedding_vec_size,
      max_feature_num, slot_num, combiner, opt_params};
  std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding(
      new DistributedSlotSparseEmbeddingHash<T, TypeEmbeddingComp>(
          train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(),
          train_data_reader->get_nnz_array(), test_data_reader->get_row_offsets_tensors(),
          test_data_reader->get_value_tensors(), test_data_reader->get_nnz_array(),
          embedding_params, resource_manager));
  {
    // upload hash table to device
    std::ifstream fs(hash_table_file_name);
    embedding->load_parameters(fs);
    fs.close();
  }
  // for SparseEmbeddingCpu
  std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> embedding_cpu(
      new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>(
          train_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num,
          label_dim, dense_dim, CHK, train_batch_num * train_batchsize, combiner, opt_params,
          train_file_list_name, hash_table_file_name, SparseEmbedding_t::Distributed));
  TypeEmbeddingComp *embedding_feature_from_cpu = embedding_cpu->get_forward_results();
  TypeEmbeddingComp *wgrad_from_cpu = embedding_cpu->get_backward_results();
  T *hash_table_key_from_cpu = embedding_cpu->get_hash_table_key_ptr();
  float *hash_table_value_from_cpu = embedding_cpu->get_hash_table_value_ptr();
  // for results check
  std::shared_ptr<GeneralBuffer2<HostAllocator>> buf = GeneralBuffer2<HostAllocator>::create();
  Tensor2<TypeEmbeddingComp> embedding_feature_from_gpu;
  buf->reserve({train_batchsize * slot_num * embedding_vec_size}, &embedding_feature_from_gpu);
  Tensor2<TypeEmbeddingComp> wgrad_from_gpu;
  buf->reserve({train_batchsize * slot_num * embedding_vec_size}, &wgrad_from_gpu);
  Tensor2<T> hash_table_key_from_gpu;
  buf->reserve({vocabulary_size}, &hash_table_key_from_gpu);
  Tensor2<float> hash_table_value_from_gpu;
  buf->reserve({vocabulary_size * embedding_vec_size}, &hash_table_value_from_gpu);
  Tensor2<TypeEmbeddingComp> embedding_feature_from_gpu_eval;
  buf->reserve({test_batchsize * slot_num * embedding_vec_size}, &embedding_feature_from_gpu_eval);
  buf->allocate();
  // Treat each row of the value table as one fixed-size record when
  // comparing hash tables below.
  typedef struct TypeHashValue_ { float data[embedding_vec_size]; } TypeHashValue;
  for (int i = 0; i < train_batch_num; i++) {
    printf("Rank%d: Round %d start training:\n", pid, i);
    // call read a batch
    printf("Rank%d: data_reader->read_a_batch_to_device()\n", pid);
    train_data_reader->read_a_batch_to_device();
    // GPU forward
    printf("Rank%d: embedding->forward()\n", pid);
    embedding->forward(true);
    // check the result of forward
    printf("Rank%d: embedding->get_forward_results()\n", pid);
    embedding->get_forward_results(true, embedding_feature_from_gpu); // memcpy from GPU to CPU
    if (pid == 0) {
      // CPU forward
      printf("Rank0: embedding_cpu->forward()\n");
      embedding_cpu->forward();
      printf("Rank0: check forward results\n");
      ASSERT_TRUE(compare_embedding_feature(train_batchsize * slot_num * embedding_vec_size,
                                            embedding_feature_from_gpu.get_ptr(),
                                            embedding_feature_from_cpu, tolerance));
    }
#ifdef ENABLE_MPI
    MPI_Barrier(MPI_COMM_WORLD);
#endif
    // GPU backward
    printf("Rank%d: embedding->backward()\n", pid);
    embedding->backward();
    // check the result of backward
    printf("Rank%d: embedding->get_backward_results()\n", pid);
    embedding->get_backward_results(wgrad_from_gpu, 0);
    if (pid == 0) {
      // CPU backward
      printf("Rank0: embedding_cpu->backward()\n");
      embedding_cpu->backward();
      printf("Rank0: check backward results: GPU and CPU\n");
      ASSERT_TRUE(compare_wgrad(train_batchsize * slot_num * embedding_vec_size,
                                wgrad_from_gpu.get_ptr(), wgrad_from_cpu, tolerance));
    }
#ifdef ENABLE_MPI
    MPI_Barrier(MPI_COMM_WORLD);
#endif
    // GPU update_params
    printf("Rank%d: embedding->update_params()\n", pid);
    embedding->update_params();
    // check the results of update params
    printf("Rank%d: embedding->get_update_params_results()\n", pid);
    embedding->get_update_params_results(hash_table_key_from_gpu,
                                         hash_table_value_from_gpu); // memcpy from GPU to CPU
    if (pid == 0) {
      // CPU update_params
      printf("Rank0: embedding_cpu->update_params()\n");
      embedding_cpu->update_params();
      printf("Rank0: check update_params results\n");
      ASSERT_TRUE(compare_hash_table(
          vocabulary_size, hash_table_key_from_gpu.get_ptr(),
          reinterpret_cast<TypeHashValue *>(hash_table_value_from_gpu.get_ptr()),
          hash_table_key_from_cpu, reinterpret_cast<TypeHashValue *>(hash_table_value_from_cpu),
          tolerance));
    }
#ifdef ENABLE_MPI
    MPI_Barrier(MPI_COMM_WORLD);
#endif
    printf("Rank%d: Round %d end:\n\n", pid, i);
  }
  ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // create new obj for eval()
  {
    std::ofstream fs(hash_table_file_name);
    embedding->dump_parameters(fs);
    fs.close();
  }
  // Round-trip the optimizer state through a file to exercise
  // dump_opt_states()/load_opt_states().
  {
    printf("Rank%d: embedding->dump_opt_states()\n", pid);
    std::ofstream fs(opt_file_name);
    embedding->dump_opt_states(fs);
    fs.close();
  }
  {
    printf("Rank%d: embedding->load_opt_states()\n", pid);
    std::ifstream fs(opt_file_name);
    embedding->load_opt_states(fs);
    fs.close();
  }
  // for SparseEmbeddingCpu eval
  std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> test_embedding_cpu(
      new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>(
          test_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, label_dim,
          dense_dim, CHK, test_batch_num * test_batchsize, combiner, opt_params,
          test_file_list_name, hash_table_file_name, SparseEmbedding_t::Distributed));
  TypeEmbeddingComp *embedding_feature_from_cpu_eval = test_embedding_cpu->get_forward_results();
  {
    // eval
    printf("\nRank%d: start eval:\n", pid);
    // call read a batch
    printf("Rank%d: data_reader_eval->read_a_batch_to_device()\n", pid);
    test_data_reader->read_a_batch_to_device();
    // GPU forward
    printf("Rank%d: embedding_eval->forward()\n", pid);
    embedding->forward(false);
    // check the result of forward
    printf("Rank%d: embedding_eval->get_forward_results()\n", pid);
    embedding->get_forward_results(false,
                                   embedding_feature_from_gpu_eval); // memcpy from GPU to CPU
    if (pid == 0) {
      // CPU forward
      printf("Rank0: embedding_cpu_eval->forward()\n");
      test_embedding_cpu->forward();
      printf("Rank0: check forward results\n");
      ASSERT_TRUE(compare_embedding_feature(test_batchsize * slot_num * embedding_vec_size,
                                            embedding_feature_from_gpu_eval.get_ptr(),
                                            embedding_feature_from_cpu_eval, tolerance));
    }
  }
  test::mpi_finalize();
}
// Exercises the BufferBag-based dump_parameters/load_parameters round trip:
// loads an initial table from file, dumps it twice into a host BufferBag,
// resets the embedding, reloads from the bag, and dumps once more, printing
// dump_size and vocabulary sizes at each step for manual inspection (no
// ASSERTs — this test passes if it runs to completion without crashing).
template <typename TypeEmbeddingComp>
void load_and_dump(const std::vector<int> &device_list, const Optimizer_t &optimizer,
                   const Update_t &update_type) {
  OptHyperParams<TypeEmbeddingComp> hyper_params;
  hyper_params.adam.beta1 = 0.9f;
  hyper_params.adam.beta2 = 0.999f;
  if (std::is_same<TypeEmbeddingComp, __half>::value) {
    hyper_params.adam.epsilon = 1e-4f;
  } else {
    hyper_params.adam.epsilon = 1e-7f;
  }
  hyper_params.momentum.factor = 0.9f;
  hyper_params.nesterov.mu = 0.9f;
  const float lr = optimizer == Optimizer_t::Adam ? 0.001f : 0.01f;
  const OptParams<TypeEmbeddingComp> opt_params = {optimizer, lr, hyper_params, update_type,
                                                   scaler};
  std::vector<std::vector<int>> vvgpu;
  vvgpu.push_back(device_list);
  const auto &resource_manager = ResourceManager::create(vvgpu, 0);
  // re-generate the dataset files
  {
    std::ifstream fs(train_file_list_name);
    if (fs.good()) {
      std::remove(train_file_list_name);
    }
  }
  // data generation
  HugeCTR::data_generation_for_test<T, CHK>(
      train_file_list_name, prefix, num_files, train_batch_num * train_batchsize, slot_num,
      vocabulary_size, label_dim, dense_dim, max_nnz_per_slot);
  // setup a data reader
  const DataReaderSparseParam param = {DataReaderSparse_t::Distributed, max_nnz_per_slot * slot_num,
                                       max_nnz_per_slot, slot_num};
  std::vector<DataReaderSparseParam> params;
  params.push_back(param);
  std::unique_ptr<DataReader<T>> train_data_reader(new DataReader<T>(
      train_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0));
  train_data_reader->create_drwg_norm(train_file_list_name, CHK);
  // init hash table file
  std::ofstream fs(hash_table_file_name);
  if (!fs.is_open()) {
    ERROR_MESSAGE_("Error: file not open for writing");
  }
  test::UniformDataSimulator fdata_sim;
  std::unique_ptr<float[]> buf(new float[embedding_vec_size]);
  // Sequential keys with uniform random embedding vectors in [-0.1, 0.1].
  for (long long i = 0; i < vocabulary_size; i++) {
    T key = (T)i;
    // T key = ldata_sim.get_num();
    // CAUSION: can not set random keys here, because we need to ensure that:
    // 1) we can find keys in the data file from this hash table
    // 2) there are no repeated keys
    fs.write((char *)&key, sizeof(T));
    fdata_sim.fill(buf.get(), embedding_vec_size, -0.1f, 0.1f);
    fs.write(reinterpret_cast<const char *>(buf.get()), embedding_vec_size * sizeof(float));
  }
  fs.close();
  const SparseEmbeddingHashParams<TypeEmbeddingComp> embedding_params = {
      train_batchsize, test_batchsize, vocabulary_size, {}, embedding_vec_size,
      max_feature_num, slot_num, combiner, opt_params};
  // The same train reader tensors are passed for both the train and
  // evaluate slots — evaluation is not exercised by this test.
  std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding(
      new DistributedSlotSparseEmbeddingHash<T, TypeEmbeddingComp>(
          train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(),
          train_data_reader->get_nnz_array(), train_data_reader->get_row_offsets_tensors(),
          train_data_reader->get_value_tensors(), train_data_reader->get_nnz_array(),
          embedding_params, resource_manager));
  {
    // upload hash table to device
    std::ifstream fs(hash_table_file_name);
    embedding->load_parameters(fs);
    fs.close();
  }
  printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(),
         embedding->get_vocabulary_size());
  // Host-side staging buffers (pinned via CudaHostAllocator) for the
  // key/embedding tables moved by dump_parameters/load_parameters.
  std::shared_ptr<GeneralBuffer2<CudaHostAllocator>> blobs_buff =
      GeneralBuffer2<CudaHostAllocator>::create();
  Tensor2<T> keys;
  blobs_buff->reserve({embedding->get_max_vocabulary_size()}, &keys);
  Tensor2<float> embeddings;
  blobs_buff->reserve({embedding->get_max_vocabulary_size(), embedding_vec_size}, &embeddings);
  blobs_buff->allocate();
  BufferBag buf_bag;
  buf_bag.keys = keys.shrink();
  buf_bag.embedding = embeddings;
  size_t dump_size;
  embedding->dump_parameters(buf_bag, &dump_size);
  printf("dump_size=%zu, max_vocabulary_size=%zu, vocabulary_size=%zu\n", dump_size,
         embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size());
  embedding->dump_parameters(buf_bag, &dump_size);
  printf("dump_size=%zu, max_vocabulary_size=%zu, vocabulary_size=%zu\n", dump_size,
         embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size());
  embedding->reset();
  printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(),
         embedding->get_vocabulary_size());
  embedding->load_parameters(buf_bag, dump_size);
  printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(),
         embedding->get_vocabulary_size());
  embedding->dump_parameters(buf_bag, &dump_size);
  printf("dump_size=%zu, max_vocabulary_size=%zu, vocabulary_size=%zu\n", dump_size,
         embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size());
}
} // namespace
// --- SGD: fp32/fp16, local vs. global update, 1 and 8 GPUs ---
TEST(distributed_sparse_embedding_hash_test, fp32_sgd_1gpu) {
  train_and_test<float>({0}, Optimizer_t::SGD, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp32_sgd_8gpu) {
  train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp32_sgd_global_update_1gpu) {
  train_and_test<float>({0}, Optimizer_t::SGD, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp32_sgd_global_update_8gpu) {
  train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp16_sgd_1gpu) {
  train_and_test<__half>({0}, Optimizer_t::SGD, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp16_sgd_8gpu) {
  train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp16_sgd_global_update_1gpu) {
  train_and_test<__half>({0}, Optimizer_t::SGD, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp16_sgd_global_update_8gpu) {
  train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Global);
}
// --- Adam: fp32/fp16, local, global, and lazy-global update ---
TEST(distributed_sparse_embedding_hash_test, fp32_adam_1gpu) {
  train_and_test<float>({0}, Optimizer_t::Adam, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp32_adam_8gpu) {
  train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp32_adam_global_update_1gpu) {
  train_and_test<float>({0}, Optimizer_t::Adam, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp32_adam_global_update_8gpu) {
  train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp32_adam_lazyglobal_update_1gpu) {
  train_and_test<float>({0}, Optimizer_t::Adam, Update_t::LazyGlobal);
}
TEST(distributed_sparse_embedding_hash_test, fp32_adam_lazyglobal_update_8gpu) {
  train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::LazyGlobal);
}
TEST(distributed_sparse_embedding_hash_test, fp16_adam_1gpu) {
  train_and_test<__half>({0}, Optimizer_t::Adam, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp16_adam_8gpu) {
  train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp16_adam_global_update_1gpu) {
  train_and_test<__half>({0}, Optimizer_t::Adam, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp16_adam_global_update_8gpu) {
  train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp16_adam_lazyglobal_update_1gpu) {
  train_and_test<__half>({0}, Optimizer_t::Adam, Update_t::LazyGlobal);
}
TEST(distributed_sparse_embedding_hash_test, fp16_adam_lazyglobal_update_8gpu) {
  train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::LazyGlobal);
}
// --- BufferBag dump/load round trip (single GPU) ---
TEST(distributed_sparse_embedding_hash_test, load_and_dump) {
  load_and_dump<float>({0}, Optimizer_t::SGD, Update_t::Global);
}
| 09ebe507dffb19e32f26c2cf5bd0ac0fe8d60835.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sys/time.h>
#include <fstream>
#include <functional>
#include <unordered_set>
#include "HugeCTR/include/data_generator.hpp"
#include "HugeCTR/include/data_readers/data_reader.hpp"
#include "HugeCTR/include/embeddings/distributed_slot_sparse_embedding_hash.hpp"
#include "gtest/gtest.h"
#include "nvToolsExt.h"
#include "utest/embedding/embedding_test_utils.hpp"
#include "utest/embedding/sparse_embedding_hash_cpu.hpp"
#include "utest/test_utils.h"
using namespace HugeCTR;
using namespace embedding_test;
namespace {
//---------------------------------------------------------------------------------------
// global params for all testing
// Shapes, hyper-parameters, and fixture file names shared by every test below.
const int train_batch_num = 10;  // can not more than 32
const int test_batch_num = 1;
const int train_batchsize = 1024;
const int test_batchsize = 2560;
const int slot_num = 26;
const int max_nnz_per_slot = 1;
const int max_feature_num = max_nnz_per_slot * slot_num;  // max_feature_num in a sample
const long long vocabulary_size = 100000;
const int embedding_vec_size = 64;
const int combiner = 0;  // 0-sum, 1-mean
const long long label_dim = 1;
const long long dense_dim = 0;
// Key type used throughout the file for hash-table keys.
typedef long long T;
const float scaler = 1.0f;  // used in mixed precision training
// In order to not allocate the total size of hash table on each GPU, the users need to set the
// size of max_vocabulary_size_per_gpu, which should be more than vocabulary_size/gpu_count,
// eg: 1.25x of that.
const int num_chunk_threads = 1;  // must be 1 for CPU and GPU results comparation
const int num_files = 1;
const Check_t CHK = Check_t::Sum;  // Check_t::Sum
const char *train_file_list_name = "train_file_list.txt";
const char *test_file_list_name = "test_file_list.txt";
const char *prefix = "./data_reader_test_data/temp_dataset_";
const char *hash_table_file_name = "distributed_hash_table.bin";
const char *opt_file_name = "distributed_opt.bin";
//-----------------------------------------------------------------------------------------
template <typename TypeEmbeddingComp>
void train_and_test(const std::vector<int> &device_list, const Optimizer_t &optimizer,
const Update_t &update_type) {
OptHyperParams<TypeEmbeddingComp> hyper_params;
hyper_params.adam.beta1 = 0.9f;
hyper_params.adam.beta2 = 0.999f;
float tolerance;
if (std::is_same<TypeEmbeddingComp, __half>::value) {
hyper_params.adam.epsilon = 1e-4f;
tolerance = 5e-3f;
} else {
hyper_params.adam.epsilon = 1e-7f;
tolerance = 1e-4f;
}
hyper_params.momentum.factor = 0.9f;
hyper_params.nesterov.mu = 0.9f;
const float lr = optimizer == Optimizer_t::Adam ? 0.001f : 0.01f;
const OptParams<TypeEmbeddingComp> opt_params = {optimizer, lr, hyper_params, update_type,
scaler};
int numprocs = 1, pid = 0;
std::vector<std::vector<int>> vvgpu;
test::mpi_init();
for (int i = 0; i < numprocs; i++) {
vvgpu.push_back(device_list);
}
const auto &resource_manager = ResourceManager::create(vvgpu, 0);
if (pid == 0) {
// re-generate the dataset files
{
std::ifstream fs(train_file_list_name);
if (fs.good()) {
std::remove(train_file_list_name);
}
}
{
std::ifstream fs(test_file_list_name);
if (fs.good()) {
std::remove(test_file_list_name);
}
}
// data generation
HugeCTR::data_generation_for_test<T, CHK>(
train_file_list_name, prefix, num_files, train_batch_num * train_batchsize, slot_num,
vocabulary_size, label_dim, dense_dim, max_nnz_per_slot);
HugeCTR::data_generation_for_test<T, CHK>(
test_file_list_name, prefix, num_files, test_batch_num * test_batchsize, slot_num,
vocabulary_size, label_dim, dense_dim, max_nnz_per_slot);
}
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
// setup a data reader
const DataReaderSparseParam param = {DataReaderSparse_t::Distributed, max_nnz_per_slot * slot_num,
max_nnz_per_slot, slot_num};
std::vector<DataReaderSparseParam> params;
params.push_back(param);
std::unique_ptr<DataReader<T>> train_data_reader(new DataReader<T>(
train_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0));
train_data_reader->create_drwg_norm(train_file_list_name, CHK);
std::unique_ptr<DataReader<T>> test_data_reader(new DataReader<T>(
test_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0));
test_data_reader->create_drwg_norm(test_file_list_name, CHK);
// init hash table file
if (pid == 0) {
std::ofstream fs(hash_table_file_name);
if (!fs.is_open()) {
ERROR_MESSAGE_("Error: file not open for writing");
}
test::UniformDataSimulator fdata_sim;
std::unique_ptr<float[]> buf(new float[embedding_vec_size]);
for (long long i = 0; i < vocabulary_size; i++) {
T key = (T)i;
// T key = ldata_sim.get_num();
// CAUSION: can not set random keys here, because we need to ensure that:
// 1) we can find keys in the data file from this hash table
// 2) there are no repeated keys
fs.write((char *)&key, sizeof(T));
fdata_sim.fill(buf.get(), embedding_vec_size, -0.1f, 0.1f);
fs.write(reinterpret_cast<const char *>(buf.get()), embedding_vec_size * sizeof(float));
}
fs.close();
}
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
const SparseEmbeddingHashParams<TypeEmbeddingComp> embedding_params = {
train_batchsize, test_batchsize, vocabulary_size, {}, embedding_vec_size,
max_feature_num, slot_num, combiner, opt_params};
std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding(
new DistributedSlotSparseEmbeddingHash<T, TypeEmbeddingComp>(
train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(),
train_data_reader->get_nnz_array(), test_data_reader->get_row_offsets_tensors(),
test_data_reader->get_value_tensors(), test_data_reader->get_nnz_array(),
embedding_params, resource_manager));
{
// upload hash table to device
std::ifstream fs(hash_table_file_name);
embedding->load_parameters(fs);
fs.close();
}
// for SparseEmbeddingCpu
std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> embedding_cpu(
new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>(
train_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num,
label_dim, dense_dim, CHK, train_batch_num * train_batchsize, combiner, opt_params,
train_file_list_name, hash_table_file_name, SparseEmbedding_t::Distributed));
TypeEmbeddingComp *embedding_feature_from_cpu = embedding_cpu->get_forward_results();
TypeEmbeddingComp *wgrad_from_cpu = embedding_cpu->get_backward_results();
T *hash_table_key_from_cpu = embedding_cpu->get_hash_table_key_ptr();
float *hash_table_value_from_cpu = embedding_cpu->get_hash_table_value_ptr();
// for results check
std::shared_ptr<GeneralBuffer2<HostAllocator>> buf = GeneralBuffer2<HostAllocator>::create();
Tensor2<TypeEmbeddingComp> embedding_feature_from_gpu;
buf->reserve({train_batchsize * slot_num * embedding_vec_size}, &embedding_feature_from_gpu);
Tensor2<TypeEmbeddingComp> wgrad_from_gpu;
buf->reserve({train_batchsize * slot_num * embedding_vec_size}, &wgrad_from_gpu);
Tensor2<T> hash_table_key_from_gpu;
buf->reserve({vocabulary_size}, &hash_table_key_from_gpu);
Tensor2<float> hash_table_value_from_gpu;
buf->reserve({vocabulary_size * embedding_vec_size}, &hash_table_value_from_gpu);
Tensor2<TypeEmbeddingComp> embedding_feature_from_gpu_eval;
buf->reserve({test_batchsize * slot_num * embedding_vec_size}, &embedding_feature_from_gpu_eval);
buf->allocate();
typedef struct TypeHashValue_ { float data[embedding_vec_size]; } TypeHashValue;
for (int i = 0; i < train_batch_num; i++) {
printf("Rank%d: Round %d start training:\n", pid, i);
// call read a batch
printf("Rank%d: data_reader->read_a_batch_to_device()\n", pid);
train_data_reader->read_a_batch_to_device();
// GPU forward
printf("Rank%d: embedding->forward()\n", pid);
embedding->forward(true);
// check the result of forward
printf("Rank%d: embedding->get_forward_results()\n", pid);
embedding->get_forward_results(true, embedding_feature_from_gpu); // memcpy from GPU to CPU
if (pid == 0) {
// CPU forward
printf("Rank0: embedding_cpu->forward()\n");
embedding_cpu->forward();
printf("Rank0: check forward results\n");
ASSERT_TRUE(compare_embedding_feature(train_batchsize * slot_num * embedding_vec_size,
embedding_feature_from_gpu.get_ptr(),
embedding_feature_from_cpu, tolerance));
}
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
// GPU backward
printf("Rank%d: embedding->backward()\n", pid);
embedding->backward();
// check the result of backward
printf("Rank%d: embedding->get_backward_results()\n", pid);
embedding->get_backward_results(wgrad_from_gpu, 0);
if (pid == 0) {
// CPU backward
printf("Rank0: embedding_cpu->backward()\n");
embedding_cpu->backward();
printf("Rank0: check backward results: GPU and CPU\n");
ASSERT_TRUE(compare_wgrad(train_batchsize * slot_num * embedding_vec_size,
wgrad_from_gpu.get_ptr(), wgrad_from_cpu, tolerance));
}
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
// GPU update_params
printf("Rank%d: embedding->update_params()\n", pid);
embedding->update_params();
// check the results of update params
printf("Rank%d: embedding->get_update_params_results()\n", pid);
embedding->get_update_params_results(hash_table_key_from_gpu,
hash_table_value_from_gpu); // memcpy from GPU to CPU
if (pid == 0) {
// CPU update_params
printf("Rank0: embedding_cpu->update_params()\n");
embedding_cpu->update_params();
printf("Rank0: check update_params results\n");
ASSERT_TRUE(compare_hash_table(
vocabulary_size, hash_table_key_from_gpu.get_ptr(),
reinterpret_cast<TypeHashValue *>(hash_table_value_from_gpu.get_ptr()),
hash_table_key_from_cpu, reinterpret_cast<TypeHashValue *>(hash_table_value_from_cpu),
tolerance));
}
#ifdef ENABLE_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
printf("Rank%d: Round %d end:\n\n", pid, i);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// create new obj for eval()
{
std::ofstream fs(hash_table_file_name);
embedding->dump_parameters(fs);
fs.close();
}
{
printf("Rank%d: embedding->dump_opt_states()\n", pid);
std::ofstream fs(opt_file_name);
embedding->dump_opt_states(fs);
fs.close();
}
{
printf("Rank%d: embedding->load_opt_states()\n", pid);
std::ifstream fs(opt_file_name);
embedding->load_opt_states(fs);
fs.close();
}
// for SparseEmbeddingCpu eval
std::unique_ptr<SparseEmbeddingHashCpu<T, TypeEmbeddingComp>> test_embedding_cpu(
new SparseEmbeddingHashCpu<T, TypeEmbeddingComp>(
test_batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, label_dim,
dense_dim, CHK, test_batch_num * test_batchsize, combiner, opt_params,
test_file_list_name, hash_table_file_name, SparseEmbedding_t::Distributed));
TypeEmbeddingComp *embedding_feature_from_cpu_eval = test_embedding_cpu->get_forward_results();
{
// eval
printf("\nRank%d: start eval:\n", pid);
// call read a batch
printf("Rank%d: data_reader_eval->read_a_batch_to_device()\n", pid);
test_data_reader->read_a_batch_to_device();
// GPU forward
printf("Rank%d: embedding_eval->forward()\n", pid);
embedding->forward(false);
// check the result of forward
printf("Rank%d: embedding_eval->get_forward_results()\n", pid);
embedding->get_forward_results(false,
embedding_feature_from_gpu_eval); // memcpy from GPU to CPU
if (pid == 0) {
// CPU forward
printf("Rank0: embedding_cpu_eval->forward()\n");
test_embedding_cpu->forward();
printf("Rank0: check forward results\n");
ASSERT_TRUE(compare_embedding_feature(test_batchsize * slot_num * embedding_vec_size,
embedding_feature_from_gpu_eval.get_ptr(),
embedding_feature_from_cpu_eval, tolerance));
}
}
test::mpi_finalize();
}
// Round-trips a DistributedSlotSparseEmbeddingHash through its BufferBag
// dump/load path: generates a dataset, seeds a hash-table file with
// sequential keys, loads it into the embedding, then dumps, resets, reloads
// and dumps again, printing vocabulary sizes after each step for manual
// inspection (no assertions beyond ERROR_MESSAGE_ on file-open failure).
// TypeEmbeddingComp: fp32 or fp16 compute type for the embedding.
template <typename TypeEmbeddingComp>
void load_and_dump(const std::vector<int> &device_list, const Optimizer_t &optimizer,
const Update_t &update_type) {
// Optimizer hyper-parameters; fp16 uses a larger adam epsilon for stability.
OptHyperParams<TypeEmbeddingComp> hyper_params;
hyper_params.adam.beta1 = 0.9f;
hyper_params.adam.beta2 = 0.999f;
if (std::is_same<TypeEmbeddingComp, __half>::value) {
hyper_params.adam.epsilon = 1e-4f;
} else {
hyper_params.adam.epsilon = 1e-7f;
}
hyper_params.momentum.factor = 0.9f;
hyper_params.nesterov.mu = 0.9f;
const float lr = optimizer == Optimizer_t::Adam ? 0.001f : 0.01f;
const OptParams<TypeEmbeddingComp> opt_params = {optimizer, lr, hyper_params, update_type,
scaler};
// Single-node GPU topology built from the requested device list.
std::vector<std::vector<int>> vvgpu;
vvgpu.push_back(device_list);
const auto &resource_manager = ResourceManager::create(vvgpu, 0);
// re-generate the dataset files: remove a stale file list if one exists
{
std::ifstream fs(train_file_list_name);
if (fs.good()) {
std::remove(train_file_list_name);
}
}
// data generation
HugeCTR::data_generation_for_test<T, CHK>(
train_file_list_name, prefix, num_files, train_batch_num * train_batchsize, slot_num,
vocabulary_size, label_dim, dense_dim, max_nnz_per_slot);
// setup a data reader
const DataReaderSparseParam param = {DataReaderSparse_t::Distributed, max_nnz_per_slot * slot_num,
max_nnz_per_slot, slot_num};
std::vector<DataReaderSparseParam> params;
params.push_back(param);
std::unique_ptr<DataReader<T>> train_data_reader(new DataReader<T>(
train_batchsize, label_dim, dense_dim, params, resource_manager, true, num_chunk_threads, false, 0));
train_data_reader->create_drwg_norm(train_file_list_name, CHK);
// init hash table file: raw binary records of (key, embedding_vec_size floats)
std::ofstream fs(hash_table_file_name);
if (!fs.is_open()) {
ERROR_MESSAGE_("Error: file not open for writing");
}
test::UniformDataSimulator fdata_sim;
std::unique_ptr<float[]> buf(new float[embedding_vec_size]);
for (long long i = 0; i < vocabulary_size; i++) {
T key = (T)i;
// T key = ldata_sim.get_num();
// CAUSION: can not set random keys here, because we need to ensure that:
// 1) we can find keys in the data file from this hash table
// 2) there are no repeated keys
fs.write((char *)&key, sizeof(T));
fdata_sim.fill(buf.get(), embedding_vec_size, -0.1f, 0.1f);
fs.write(reinterpret_cast<const char *>(buf.get()), embedding_vec_size * sizeof(float));
}
fs.close();
const SparseEmbeddingHashParams<TypeEmbeddingComp> embedding_params = {
train_batchsize, test_batchsize, vocabulary_size, {}, embedding_vec_size,
max_feature_num, slot_num, combiner, opt_params};
// The train reader's tensors are reused for the eval slots; this function
// only exercises dump/load, so no separate test reader is needed.
std::unique_ptr<Embedding<T, TypeEmbeddingComp>> embedding(
new DistributedSlotSparseEmbeddingHash<T, TypeEmbeddingComp>(
train_data_reader->get_row_offsets_tensors(), train_data_reader->get_value_tensors(),
train_data_reader->get_nnz_array(), train_data_reader->get_row_offsets_tensors(),
train_data_reader->get_value_tensors(), train_data_reader->get_nnz_array(),
embedding_params, resource_manager));
{
// upload hash table to device
std::ifstream fs(hash_table_file_name);
embedding->load_parameters(fs);
fs.close();
}
printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(),
embedding->get_vocabulary_size());
// Host-side staging buffers (keys + embedding table) for dump/load.
std::shared_ptr<GeneralBuffer2<CudaHostAllocator>> blobs_buff =
GeneralBuffer2<CudaHostAllocator>::create();
Tensor2<T> keys;
blobs_buff->reserve({embedding->get_max_vocabulary_size()}, &keys);
Tensor2<float> embeddings;
blobs_buff->reserve({embedding->get_max_vocabulary_size(), embedding_vec_size}, &embeddings);
blobs_buff->allocate();
BufferBag buf_bag;
buf_bag.keys = keys.shrink();
buf_bag.embedding = embeddings;
// Dump twice (second dump checks the first left the table intact), reset,
// reload from the dumped bag, then dump once more to verify the round trip.
size_t dump_size;
embedding->dump_parameters(buf_bag, &dump_size);
printf("dump_size=%zu, max_vocabulary_size=%zu, vocabulary_size=%zu\n", dump_size,
embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size());
embedding->dump_parameters(buf_bag, &dump_size);
printf("dump_size=%zu, max_vocabulary_size=%zu, vocabulary_size=%zu\n", dump_size,
embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size());
embedding->reset();
printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(),
embedding->get_vocabulary_size());
embedding->load_parameters(buf_bag, dump_size);
printf("max_vocabulary_size=%zu, vocabulary_size=%zu\n", embedding->get_max_vocabulary_size(),
embedding->get_vocabulary_size());
embedding->dump_parameters(buf_bag, &dump_size);
printf("dump_size=%zu, max_vocabulary_size=%zu, vocabulary_size=%zu\n", dump_size,
embedding->get_max_vocabulary_size(), embedding->get_vocabulary_size());
}
} // namespace
// SGD optimizer coverage: fp32/fp16 x local/global update x 1/8 GPUs.
// NOTE(review): the 8-GPU cases require devices {0..7} to be present.
TEST(distributed_sparse_embedding_hash_test, fp32_sgd_1gpu) {
train_and_test<float>({0}, Optimizer_t::SGD, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp32_sgd_8gpu) {
train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp32_sgd_global_update_1gpu) {
train_and_test<float>({0}, Optimizer_t::SGD, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp32_sgd_global_update_8gpu) {
train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp16_sgd_1gpu) {
train_and_test<__half>({0}, Optimizer_t::SGD, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp16_sgd_8gpu) {
train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp16_sgd_global_update_1gpu) {
train_and_test<__half>({0}, Optimizer_t::SGD, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp16_sgd_global_update_8gpu) {
train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::SGD, Update_t::Global);
}
// Adam optimizer coverage: fp32/fp16 x local/global/lazy-global update x 1/8 GPUs.
TEST(distributed_sparse_embedding_hash_test, fp32_adam_1gpu) {
train_and_test<float>({0}, Optimizer_t::Adam, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp32_adam_8gpu) {
train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp32_adam_global_update_1gpu) {
train_and_test<float>({0}, Optimizer_t::Adam, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp32_adam_global_update_8gpu) {
train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp32_adam_lazyglobal_update_1gpu) {
train_and_test<float>({0}, Optimizer_t::Adam, Update_t::LazyGlobal);
}
TEST(distributed_sparse_embedding_hash_test, fp32_adam_lazyglobal_update_8gpu) {
train_and_test<float>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::LazyGlobal);
}
TEST(distributed_sparse_embedding_hash_test, fp16_adam_1gpu) {
train_and_test<__half>({0}, Optimizer_t::Adam, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp16_adam_8gpu) {
train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Local);
}
TEST(distributed_sparse_embedding_hash_test, fp16_adam_global_update_1gpu) {
train_and_test<__half>({0}, Optimizer_t::Adam, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp16_adam_global_update_8gpu) {
train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::Global);
}
TEST(distributed_sparse_embedding_hash_test, fp16_adam_lazyglobal_update_1gpu) {
train_and_test<__half>({0}, Optimizer_t::Adam, Update_t::LazyGlobal);
}
TEST(distributed_sparse_embedding_hash_test, fp16_adam_lazyglobal_update_8gpu) {
train_and_test<__half>({0, 1, 2, 3, 4, 5, 6, 7}, Optimizer_t::Adam, Update_t::LazyGlobal);
}
// Parameter dump/load round-trip smoke test (single GPU, SGD/global).
TEST(distributed_sparse_embedding_hash_test, load_and_dump) {
load_and_dump<float>({0}, Optimizer_t::SGD, Update_t::Global);
}
|
a8e061b47d6b53e0f32bdf39c9bdfa1ed5f3baff.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Aborts the process with a diagnostic if the most recent HIP runtime call
// failed; `message` identifies the call site in the printed diagnostic.
// Reads (and clears) the sticky error via hipGetLastError().
void check_error (const char* message) {
hipError_t status = hipGetLastError ();
if (status == hipSuccess) {
return;
}
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (status));
exit(-1);
}
// SW4-style 4th-order elastic-wave stencil: one interior grid point (i,j,k)
// per thread. Reads displacement components u_in_{0,1,2}, material fields
// mu_in/la_in and 1D grid-stretching arrays strx/stry/strz, and accumulates
// cof * r{1,2,3} into the acceleration components uacc_in_{0,1,2} in place.
// NOTE(review): the flat pointers are reinterpreted below as [304][304]-pitched
// 3D slabs, so correctness requires the arrays to have been allocated with
// N == 304 even though N is a runtime parameter — TODO confirm at call sites.
__global__ void sw4 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determining the block's indices (one thread per grid point, 3D launch)
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
// Reinterpret the flat device pointers as [304][304]-pitched 3D arrays
// (hard-coded pitch; see the header note about N).
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
// Interior points only (2-wide halo on every face). NOTE(review): bitwise
// '&' on these bool comparisons is correct (no short-circuit needed), but
// '&&' was likely intended.
if (i>=2 & j>=2 & k>=2 & i<=N-3 & j<=N-3 & k<=N-3) {
// Generator-emitted scratch temporaries; the inner mu*/r* declarations
// shadow the outer ones declared above.
double muz4;
double muz1;
double muy4;
double muy1;
double mux4;
double mux1;
double muz3;
double muz2;
double muy3;
double muy2;
double mux3;
double mux2;
double _t_10_;
double r1;
double _t_15_;
double _t_5_;
double _t_7_;
double _t_3_;
double _t_9_;
double _t_4_;
double _t_2_;
double _t_6_;
double _t_1_;
double _t_8_;
double _t_21_;
double _t_35_;
double _t_30_;
double _t_32_;
double _t_28_;
double _t_34_;
double r2;
double _t_54_;
double _t_52_;
double _t_56_;
double _t_58_;
double _t_29_;
double _t_27_;
double _t_26_;
double _t_31_;
double _t_33_;
double _t_41_;
double r3;
double _t_46_;
double _t_53_;
double _t_51_;
double _t_55_;
double _t_57_;
double _t_59_;
double _t_100_;
double _t_74_;
double _t_61_;
double _t_87_;
double _t_75_;
double _t_101_;
double _t_62_;
double _t_88_;
double _t_106_;
double _t_78_;
double _t_80_;
double _t_109_;
double _t_83_;
double _t_86_;
double _t_104_;
double _t_112_;
double _t_60_;
double _t_67_;
double _t_91_;
double _t_70_;
double _t_93_;
double _t_96_;
double _t_99_;
double _t_65_;
double _t_73_;
double _t_127_;
double _t_153_;
double _t_140_;
double _t_114_;
double _t_115_;
double _t_141_;
double _t_128_;
double _t_154_;
double _t_120_;
double _t_144_;
double _t_123_;
double _t_146_;
double _t_149_;
double _t_152_;
double _t_118_;
double _t_126_;
double _t_113_;
double _t_133_;
double _t_157_;
double _t_136_;
double _t_159_;
double _t_162_;
double _t_165_;
double _t_131_;
double _t_139_;
double _t_167_;
double _t_180_;
double _t_206_;
double _t_193_;
double _t_168_;
double _t_181_;
double _t_194_;
double _t_207_;
double _t_173_;
double _t_184_;
double _t_176_;
double _t_186_;
double _t_189_;
double _t_192_;
double _t_171_;
double _t_179_;
double _t_166_;
double _t_199_;
double _t_210_;
double _t_202_;
double _t_212_;
double _t_215_;
double _t_218_;
double _t_197_;
double _t_205_;
double uacc_0kc0jc0ic0;
double uacc_1kc0jc0ic0;
double uacc_2kc0jc0ic0;
// Averaged material coefficients mu*str for the 4th-order variable-
// coefficient second-derivative stencil along z (muz*), y (muy*), x (mux*).
muz4 = -3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
muz4 += mu[k+1][j][i] * strz[k+1];
muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
muz1 = -3.0 / 4.0 * mu[k][j][i] * strz[k];
muz1 += mu[k-1][j][i] * strz[k-1];
muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
muy4 = -3.0 / 4.0 * mu[k][j][i] * stry[j];
muy4 += mu[k][j+1][i] * stry[j+1];
muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
muy1 = -3.0 / 4.0 * mu[k][j][i] * stry[j];
muy1 += mu[k][j-1][i] * stry[j-1];
muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
mux4 = -3.0 / 4.0 * mu[k][j][i] * strx[i];
mux4 += mu[k][j][i+1] * strx[i+1];
mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
mux1 = -3.0 / 4.0 * mu[k][j][i] * strx[i];
mux1 += mu[k][j][i-1] * strx[i-1];
mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
muz3 = mu[k-1][j][i] * strz[k-1];
muz3 += mu[k+2][j][i] * strz[k+2];
muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
muz3 += 3.0 * mu[k][j][i] * strz[k];
muz2 = mu[k-2][j][i] * strz[k-2];
muz2 += mu[k+1][j][i] * strz[k+1];
muz2 += 3.0 * mu[k][j][i] * strz[k];
muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
muy3 = mu[k][j-1][i] * stry[j-1];
muy3 += mu[k][j+2][i] * stry[j+2];
muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
muy3 += 3.0 * mu[k][j][i] * stry[j];
muy2 = mu[k][j-2][i] * stry[j-2];
muy2 += mu[k][j+1][i] * stry[j+1];
muy2 += 3.0 * mu[k][j][i] * stry[j];
muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
mux3 = mu[k][j][i-1] * strx[i-1];
mux3 += mu[k][j][i+2] * strx[i+2];
mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
mux3 += 3.0 * mu[k][j][i] * strx[i];
mux2 = mu[k][j][i-2] * strx[i-2];
mux2 += mu[k][j][i+1] * strx[i+1];
mux2 += 3.0 * mu[k][j][i] * strx[i];
mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
// r1: second-derivative terms of component u_0 along y, z, then x
// (x-direction uses (2*mu + la) coefficients).
_t_10_ = muy1 * u_0[k][j-2][i];
_t_10_ += muy2 * u_0[k][j-1][i];
_t_10_ += muy3 * u_0[k][j+1][i];
_t_10_ += muy4 * u_0[k][j+2][i];
_t_10_ -= muy4 * u_0[k][j][i];
_t_10_ -= muy1 * u_0[k][j][i];
_t_10_ -= muy3 * u_0[k][j][i];
_t_10_ -= muy2 * u_0[k][j][i];
r1 = 1.0 / 6.0 * stry[j] * _t_10_;
_t_15_ = -muz4 * u_0[k][j][i];
_t_15_ -= muz1 * u_0[k][j][i];
_t_15_ -= muz3 * u_0[k][j][i];
_t_15_ -= muz2 * u_0[k][j][i];
_t_15_ += muz1 * u_0[k-2][j][i];
_t_15_ += muz2 * u_0[k-1][j][i];
_t_15_ += muz3 * u_0[k+1][j][i];
_t_15_ += muz4 * u_0[k+2][j][i];
r1 += 1.0 / 6.0 * strz[k] * _t_15_;
_t_5_ = -u_0[k][j][i];
_t_5_ += u_0[k][j][i-1];
_t_7_ = -u_0[k][j][i];
_t_7_ += u_0[k][j][i+1];
_t_3_ = -u_0[k][j][i];
_t_9_ = -u_0[k][j][i];
_t_3_ += u_0[k][j][i-2];
_t_9_ += u_0[k][j][i+2];
_t_4_ = 2.0 * mux2;
_t_4_ += la[k][j][i-2] * strx[i-2];
_t_2_ = -3.0 / 4.0 * la[k][j][i-2] * strx[i-2];
_t_2_ += 2.0 * mux1;
_t_2_ += la[k][j][i-1] * strx[i-1];
_t_4_ += 3.0 * la[k][j][i-1] * strx[i-1];
_t_6_ = la[k][j][i-1] * strx[i-1];
_t_6_ += 2.0 * mux3;
_t_2_ -= 3.0 / 4.0 * la[k][j][i] * strx[i];
_t_4_ += 3.0 * la[k][j][i] * strx[i];
_t_6_ += 3.0 * la[k][j][i] * strx[i];
_t_1_ = _t_2_ * _t_3_;
_t_8_ = -3.0 / 4.0 * la[k][j][i] * strx[i];
_t_8_ += 2.0 * mux4;
_t_4_ += la[k][j][i+1] * strx[i+1];
_t_1_ += _t_4_ * _t_5_;
_t_6_ += 3.0 * la[k][j][i+1] * strx[i+1];
_t_8_ += la[k][j][i+1] * strx[i+1];
_t_6_ += la[k][j][i+2] * strx[i+2];
_t_1_ += _t_6_ * _t_7_;
_t_8_ -= 3.0 / 4.0 * la[k][j][i+2] * strx[i+2];
_t_1_ += _t_8_ * _t_9_;
r1 += 1.0 / 6.0 * strx[i] * _t_1_;
// r2: second-derivative terms of component u_1 along x, z, then y
// (y-direction uses (2*mu + la) coefficients).
_t_21_ = mux1 * u_1[k][j][i-2];
_t_21_ += mux2 * u_1[k][j][i-1];
_t_21_ += mux3 * u_1[k][j][i+1];
_t_21_ += mux4 * u_1[k][j][i+2];
_t_35_ = muz1 * u_1[k-2][j][i];
_t_35_ += muz2 * u_1[k-1][j][i];
_t_35_ += muz3 * u_1[k+1][j][i];
_t_35_ += muz4 * u_1[k+2][j][i];
_t_30_ = u_1[k][j-1][i];
_t_32_ = u_1[k][j+1][i];
_t_28_ = u_1[k][j-2][i];
_t_28_ -= u_1[k][j][i];
_t_30_ -= u_1[k][j][i];
_t_32_ -= u_1[k][j][i];
_t_34_ = -u_1[k][j][i];
_t_35_ -= muz4 * u_1[k][j][i];
_t_35_ -= muz1 * u_1[k][j][i];
_t_21_ -= mux4 * u_1[k][j][i];
_t_21_ -= mux1 * u_1[k][j][i];
_t_35_ -= muz3 * u_1[k][j][i];
_t_35_ -= muz2 * u_1[k][j][i];
_t_21_ -= mux3 * u_1[k][j][i];
_t_21_ -= mux2 * u_1[k][j][i];
r2 = 1.0 / 6.0 * strz[k] * _t_35_;
r2 += 1.0 / 6.0 * strx[i] * _t_21_;
_t_34_ += u_1[k][j+2][i];
_t_54_ = 3.0 * la[k][j][i] * strz[k];
_t_54_ += 2.0 * muz2;
_t_52_ = -3.0 / 4.0 * la[k][j][i] * strz[k];
_t_52_ += 2.0 * muz1;
_t_56_ = 3.0 * la[k][j][i] * strz[k];
_t_56_ += 2.0 * muz3;
_t_58_ = -3.0 / 4.0 * la[k][j][i] * strz[k];
_t_58_ += 2.0 * muz4;
_t_52_ -= 3.0 / 4.0 * la[k-2][j][i] * strz[k-2];
_t_54_ += la[k-2][j][i] * strz[k-2];
_t_56_ += la[k+2][j][i] * strz[k+2];
_t_58_ -= 3.0 / 4.0 * la[k+2][j][i] * strz[k+2];
_t_52_ += la[k-1][j][i] * strz[k-1];
_t_54_ += 3.0 * la[k-1][j][i] * strz[k-1];
_t_56_ += la[k-1][j][i] * strz[k-1];
_t_54_ += la[k+1][j][i] * strz[k+1];
_t_56_ += 3.0 * la[k+1][j][i] * strz[k+1];
_t_58_ += la[k+1][j][i] * strz[k+1];
_t_29_ = 3.0 * la[k][j][i] * stry[j];
_t_29_ += 2.0 * muy2;
_t_27_ = -3.0 / 4.0 * la[k][j][i] * stry[j];
_t_27_ += 2.0 * muy1;
_t_27_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
_t_29_ += la[k][j-2][i] * stry[j-2];
_t_27_ += la[k][j-1][i] * stry[j-1];
_t_29_ += 3.0 * la[k][j-1][i] * stry[j-1];
_t_26_ = _t_27_ * _t_28_;
_t_31_ = la[k][j-1][i] * stry[j-1];
_t_31_ += 3.0 * la[k][j][i] * stry[j];
_t_31_ += 2.0 * muy3;
_t_33_ = -3.0 / 4.0 * la[k][j][i] * stry[j];
_t_33_ += 2.0 * muy4;
_t_31_ += la[k][j+2][i] * stry[j+2];
_t_33_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
_t_29_ += la[k][j+1][i] * stry[j+1];
_t_26_ += _t_29_ * _t_30_;
_t_31_ += 3.0 * la[k][j+1][i] * stry[j+1];
_t_26_ += _t_31_ * _t_32_;
_t_33_ += la[k][j+1][i] * stry[j+1];
_t_26_ += _t_33_ * _t_34_;
r2 += 1.0 / 6.0 * stry[j] * _t_26_;
// r3: second-derivative terms of component u_2 along x, y, then z
// (z-direction uses (2*mu + la) coefficients).
_t_41_ = mux1 * u_2[k][j][i-2];
_t_41_ -= mux4 * u_2[k][j][i];
_t_41_ -= mux1 * u_2[k][j][i];
_t_41_ -= mux3 * u_2[k][j][i];
_t_41_ -= mux2 * u_2[k][j][i];
_t_41_ += mux2 * u_2[k][j][i-1];
_t_41_ += mux3 * u_2[k][j][i+1];
_t_41_ += mux4 * u_2[k][j][i+2];
r3 = 1.0 / 6.0 * strx[i] * _t_41_;
_t_46_ = -muy4 * u_2[k][j][i];
_t_46_ -= muy1 * u_2[k][j][i];
_t_46_ -= muy3 * u_2[k][j][i];
_t_46_ -= muy2 * u_2[k][j][i];
_t_46_ += muy1 * u_2[k][j-2][i];
_t_46_ += muy2 * u_2[k][j-1][i];
_t_46_ += muy3 * u_2[k][j+1][i];
_t_46_ += muy4 * u_2[k][j+2][i];
r3 += 1.0 / 6.0 * stry[j] * _t_46_;
_t_53_ = -u_2[k][j][i];
_t_53_ += u_2[k-2][j][i];
_t_51_ = _t_52_ * _t_53_;
_t_55_ = -u_2[k][j][i];
_t_55_ += u_2[k-1][j][i];
_t_51_ += _t_54_ * _t_55_;
_t_57_ = -u_2[k][j][i];
_t_59_ = -u_2[k][j][i];
_t_57_ += u_2[k+1][j][i];
_t_51_ += _t_56_ * _t_57_;
_t_59_ += u_2[k+2][j][i];
_t_51_ += _t_58_ * _t_59_;
r3 += 1.0 / 6.0 * strz[k] * _t_51_;
// Mixed (cross) derivative contributions to r3: d/dy(mu d/dz u_1),
// d/dz(la d/dy u_1), d/dx(mu d/dz u_0), d/dz(la d/dx u_0).
_t_100_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_74_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_61_ = 1.0 / 144.0 * strx[i] * strz[k];
_t_87_ = 1.0 / 144.0 * strx[i] * strz[k];
_t_75_ = mu[k][j-2][i] * u_1[k-2][j-2][i];
_t_101_ = la[k-2][j][i] * u_1[k-2][j-2][i];
_t_75_ -= mu[k][j+2][i] * u_1[k-2][j+2][i];
_t_101_ -= la[k-2][j][i] * u_1[k-2][j+2][i];
_t_75_ -= mu[k][j-2][i] * u_1[k+2][j-2][i];
_t_101_ -= la[k+2][j][i] * u_1[k+2][j-2][i];
_t_75_ += mu[k][j+2][i] * u_1[k+2][j+2][i];
_t_101_ += la[k+2][j][i] * u_1[k+2][j+2][i];
_t_62_ = mu[k][j][i-2] * u_0[k-2][j][i-2];
_t_88_ = la[k-2][j][i] * u_0[k-2][j][i-2];
_t_62_ -= mu[k][j][i+2] * u_0[k-2][j][i+2];
_t_88_ -= la[k-2][j][i] * u_0[k-2][j][i+2];
_t_62_ -= mu[k][j][i-2] * u_0[k+2][j][i-2];
_t_88_ -= la[k+2][j][i] * u_0[k+2][j][i-2];
_t_62_ += mu[k][j][i+2] * u_0[k+2][j][i+2];
_t_88_ += la[k+2][j][i] * u_0[k+2][j][i+2];
_t_106_ = u_1[k-1][j-2][i];
_t_78_ = -u_1[k-1][j-2][i];
_t_106_ += 8.0 * -u_1[k-1][j-1][i];
_t_80_ = 8.0 * -u_1[k-1][j-1][i];
_t_78_ += u_1[k+1][j-2][i];
_t_75_ += mu[k][j-2][i] * 8.0 * _t_78_;
_t_109_ = u_1[k+1][j-2][i];
_t_80_ += 8.0 * u_1[k+1][j-1][i];
_t_109_ += 8.0 * -u_1[k+1][j-1][i];
_t_83_ = 8.0 * -u_1[k-1][j+1][i];
_t_106_ += 8.0 * u_1[k-1][j+1][i];
_t_83_ += 8.0 * u_1[k+1][j+1][i];
_t_109_ += 8.0 * u_1[k+1][j+1][i];
_t_86_ = -u_1[k-1][j+2][i];
_t_106_ -= u_1[k-1][j+2][i];
_t_101_ -= 8.0 * la[k-1][j][i] * _t_106_;
_t_86_ += u_1[k+1][j+2][i];
_t_75_ -= mu[k][j+2][i] * 8.0 * _t_86_;
_t_109_ -= u_1[k+1][j+2][i];
_t_101_ += 8.0 * la[k+1][j][i] * _t_109_;
_t_80_ += u_1[k-2][j-1][i];
_t_104_ = -u_1[k-2][j-1][i];
_t_83_ += u_1[k-2][j+1][i];
_t_104_ += u_1[k-2][j+1][i];
_t_101_ += la[k-2][j][i] * 8.0 * _t_104_;
_t_80_ -= u_1[k+2][j-1][i];
_t_75_ -= 8.0 * mu[k][j-1][i] * _t_80_;
_t_112_ = -u_1[k+2][j-1][i];
_t_83_ -= u_1[k+2][j+1][i];
_t_75_ += 8.0 * mu[k][j+1][i] * _t_83_;
_t_112_ += u_1[k+2][j+1][i];
_t_101_ -= la[k+2][j][i] * 8.0 * _t_112_;
_t_60_ = _t_74_ * _t_75_;
_t_60_ += _t_100_ * _t_101_;
_t_67_ = u_0[k-2][j][i-1];
_t_91_ = -u_0[k-2][j][i-1];
_t_91_ += u_0[k-2][j][i+1];
_t_88_ += la[k-2][j][i] * 8.0 * _t_91_;
_t_70_ = u_0[k-2][j][i+1];
_t_67_ += 8.0 * -u_0[k-1][j][i-1];
_t_93_ = 8.0 * -u_0[k-1][j][i-1];
_t_70_ += 8.0 * -u_0[k-1][j][i+1];
_t_93_ += 8.0 * u_0[k-1][j][i+1];
_t_67_ += 8.0 * u_0[k+1][j][i-1];
_t_96_ = 8.0 * -u_0[k+1][j][i-1];
_t_70_ += 8.0 * u_0[k+1][j][i+1];
_t_96_ += 8.0 * u_0[k+1][j][i+1];
_t_67_ -= u_0[k+2][j][i-1];
_t_62_ -= 8.0 * mu[k][j][i-1] * _t_67_;
_t_99_ = -u_0[k+2][j][i-1];
_t_70_ -= u_0[k+2][j][i+1];
_t_62_ += 8.0 * mu[k][j][i+1] * _t_70_;
_t_99_ += u_0[k+2][j][i+1];
_t_88_ -= la[k+2][j][i] * 8.0 * _t_99_;
_t_93_ += u_0[k-1][j][i-2];
_t_65_ = -u_0[k-1][j][i-2];
_t_65_ += u_0[k+1][j][i-2];
_t_62_ += mu[k][j][i-2] * 8.0 * _t_65_;
_t_96_ += u_0[k+1][j][i-2];
_t_93_ -= u_0[k-1][j][i+2];
_t_88_ -= 8.0 * la[k-1][j][i] * _t_93_;
_t_73_ = -u_0[k-1][j][i+2];
_t_73_ += u_0[k+1][j][i+2];
_t_62_ -= mu[k][j][i+2] * 8.0 * _t_73_;
_t_60_ += _t_61_ * _t_62_;
_t_96_ -= u_0[k+1][j][i+2];
_t_88_ += 8.0 * la[k+1][j][i] * _t_96_;
_t_60_ += _t_87_ * _t_88_;
r3 += _t_60_;
// Mixed derivative contributions to r1: d/dx(la d/dy u_1),
// d/dy(mu d/dx u_1), d/dx(la d/dz u_2), d/dz(mu d/dx u_2).
_t_127_ = 1.0 / 144.0 * strx[i] * strz[k];
_t_153_ = 1.0 / 144.0 * strx[i] * strz[k];
_t_140_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_114_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_115_ = la[k][j][i-2] * u_1[k][j-2][i-2];
_t_141_ = mu[k][j-2][i] * u_1[k][j-2][i-2];
_t_115_ -= la[k][j][i+2] * u_1[k][j-2][i+2];
_t_141_ -= mu[k][j-2][i] * u_1[k][j-2][i+2];
_t_115_ -= la[k][j][i-2] * u_1[k][j+2][i-2];
_t_141_ -= mu[k][j+2][i] * u_1[k][j+2][i-2];
_t_115_ += la[k][j][i+2] * u_1[k][j+2][i+2];
_t_141_ += mu[k][j+2][i] * u_1[k][j+2][i+2];
_t_128_ = la[k][j][i-2] * u_2[k-2][j][i-2];
_t_154_ = mu[k-2][j][i] * u_2[k-2][j][i-2];
_t_128_ -= la[k][j][i+2] * u_2[k-2][j][i+2];
_t_154_ -= mu[k-2][j][i] * u_2[k-2][j][i+2];
_t_128_ -= la[k][j][i-2] * u_2[k+2][j][i-2];
_t_154_ -= mu[k+2][j][i] * u_2[k+2][j][i-2];
_t_128_ += la[k][j][i+2] * u_2[k+2][j][i+2];
_t_154_ += mu[k+2][j][i] * u_2[k+2][j][i+2];
_t_120_ = u_1[k][j-2][i-1];
_t_144_ = -u_1[k][j-2][i-1];
_t_144_ += u_1[k][j-2][i+1];
_t_141_ += mu[k][j-2][i] * 8.0 * _t_144_;
_t_123_ = u_1[k][j-2][i+1];
_t_120_ += 8.0 * -u_1[k][j-1][i-1];
_t_146_ = 8.0 * -u_1[k][j-1][i-1];
_t_123_ += 8.0 * -u_1[k][j-1][i+1];
_t_146_ += 8.0 * u_1[k][j-1][i+1];
_t_120_ += 8.0 * u_1[k][j+1][i-1];
_t_149_ = 8.0 * -u_1[k][j+1][i-1];
_t_123_ += 8.0 * u_1[k][j+1][i+1];
_t_149_ += 8.0 * u_1[k][j+1][i+1];
_t_120_ -= u_1[k][j+2][i-1];
_t_115_ -= 8.0 * la[k][j][i-1] * _t_120_;
_t_152_ = -u_1[k][j+2][i-1];
_t_123_ -= u_1[k][j+2][i+1];
_t_115_ += 8.0 * la[k][j][i+1] * _t_123_;
_t_152_ += u_1[k][j+2][i+1];
_t_141_ -= mu[k][j+2][i] * 8.0 * _t_152_;
_t_118_ = -u_1[k][j-1][i-2];
_t_146_ += u_1[k][j-1][i-2];
_t_118_ += u_1[k][j+1][i-2];
_t_115_ += la[k][j][i-2] * 8.0 * _t_118_;
_t_149_ += u_1[k][j+1][i-2];
_t_126_ = -u_1[k][j-1][i+2];
_t_146_ -= u_1[k][j-1][i+2];
_t_141_ -= 8.0 * mu[k][j-1][i] * _t_146_;
_t_126_ += u_1[k][j+1][i+2];
_t_115_ -= la[k][j][i+2] * 8.0 * _t_126_;
_t_149_ -= u_1[k][j+1][i+2];
_t_141_ += 8.0 * mu[k][j+1][i] * _t_149_;
_t_113_ = _t_114_ * _t_115_;
_t_113_ += _t_140_ * _t_141_;
_t_133_ = u_2[k-2][j][i-1];
_t_157_ = -u_2[k-2][j][i-1];
_t_157_ += u_2[k-2][j][i+1];
_t_154_ += mu[k-2][j][i] * 8.0 * _t_157_;
_t_136_ = u_2[k-2][j][i+1];
_t_133_ += 8.0 * -u_2[k-1][j][i-1];
_t_159_ = 8.0 * -u_2[k-1][j][i-1];
_t_136_ += 8.0 * -u_2[k-1][j][i+1];
_t_159_ += 8.0 * u_2[k-1][j][i+1];
_t_133_ += 8.0 * u_2[k+1][j][i-1];
_t_162_ = 8.0 * -u_2[k+1][j][i-1];
_t_136_ += 8.0 * u_2[k+1][j][i+1];
_t_162_ += 8.0 * u_2[k+1][j][i+1];
_t_133_ -= u_2[k+2][j][i-1];
_t_128_ -= 8.0 * la[k][j][i-1] * _t_133_;
_t_165_ = -u_2[k+2][j][i-1];
_t_136_ -= u_2[k+2][j][i+1];
_t_128_ += 8.0 * la[k][j][i+1] * _t_136_;
_t_165_ += u_2[k+2][j][i+1];
_t_154_ -= mu[k+2][j][i] * 8.0 * _t_165_;
_t_131_ = -u_2[k-1][j][i-2];
_t_159_ += u_2[k-1][j][i-2];
_t_131_ += u_2[k+1][j][i-2];
_t_128_ += la[k][j][i-2] * 8.0 * _t_131_;
_t_162_ += u_2[k+1][j][i-2];
_t_139_ = -u_2[k-1][j][i+2];
_t_159_ -= u_2[k-1][j][i+2];
_t_154_ -= 8.0 * mu[k-1][j][i] * _t_159_;
_t_139_ += u_2[k+1][j][i+2];
_t_128_ -= la[k][j][i+2] * 8.0 * _t_139_;
_t_113_ += _t_127_ * _t_128_;
_t_162_ -= u_2[k+1][j][i+2];
_t_154_ += 8.0 * mu[k+1][j][i] * _t_162_;
_t_113_ += _t_153_ * _t_154_;
r1 += _t_113_;
// Mixed derivative contributions to r2: d/dx(mu d/dy u_0),
// d/dy(la d/dx u_0), d/dy(la d/dz u_2), d/dz(mu d/dy u_2).
_t_167_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_180_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_206_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_193_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_168_ = mu[k][j][i-2] * u_0[k][j-2][i-2];
_t_181_ = la[k][j-2][i] * u_0[k][j-2][i-2];
_t_168_ -= mu[k][j][i+2] * u_0[k][j-2][i+2];
_t_181_ -= la[k][j-2][i] * u_0[k][j-2][i+2];
_t_168_ -= mu[k][j][i-2] * u_0[k][j+2][i-2];
_t_181_ -= la[k][j+2][i] * u_0[k][j+2][i-2];
_t_168_ += mu[k][j][i+2] * u_0[k][j+2][i+2];
_t_181_ += la[k][j+2][i] * u_0[k][j+2][i+2];
_t_194_ = la[k][j-2][i] * u_2[k-2][j-2][i];
_t_207_ = mu[k-2][j][i] * u_2[k-2][j-2][i];
_t_194_ -= la[k][j+2][i] * u_2[k-2][j+2][i];
_t_207_ -= mu[k-2][j][i] * u_2[k-2][j+2][i];
_t_194_ -= la[k][j-2][i] * u_2[k+2][j-2][i];
_t_207_ -= mu[k+2][j][i] * u_2[k+2][j-2][i];
_t_194_ += la[k][j+2][i] * u_2[k+2][j+2][i];
_t_207_ += mu[k+2][j][i] * u_2[k+2][j+2][i];
_t_173_ = u_0[k][j-2][i-1];
_t_184_ = -u_0[k][j-2][i-1];
_t_184_ += u_0[k][j-2][i+1];
_t_181_ += la[k][j-2][i] * 8.0 * _t_184_;
_t_176_ = u_0[k][j-2][i+1];
_t_173_ += 8.0 * -u_0[k][j-1][i-1];
_t_186_ = 8.0 * -u_0[k][j-1][i-1];
_t_176_ += 8.0 * -u_0[k][j-1][i+1];
_t_186_ += 8.0 * u_0[k][j-1][i+1];
_t_173_ += 8.0 * u_0[k][j+1][i-1];
_t_189_ = 8.0 * -u_0[k][j+1][i-1];
_t_176_ += 8.0 * u_0[k][j+1][i+1];
_t_189_ += 8.0 * u_0[k][j+1][i+1];
_t_173_ -= u_0[k][j+2][i-1];
_t_168_ -= 8.0 * mu[k][j][i-1] * _t_173_;
_t_192_ = -u_0[k][j+2][i-1];
_t_176_ -= u_0[k][j+2][i+1];
_t_168_ += 8.0 * mu[k][j][i+1] * _t_176_;
_t_192_ += u_0[k][j+2][i+1];
_t_181_ -= la[k][j+2][i] * 8.0 * _t_192_;
_t_171_ = -u_0[k][j-1][i-2];
_t_186_ += u_0[k][j-1][i-2];
_t_171_ += u_0[k][j+1][i-2];
_t_168_ += mu[k][j][i-2] * 8.0 * _t_171_;
_t_189_ += u_0[k][j+1][i-2];
_t_179_ = -u_0[k][j-1][i+2];
_t_186_ -= u_0[k][j-1][i+2];
_t_181_ -= 8.0 * la[k][j-1][i] * _t_186_;
_t_179_ += u_0[k][j+1][i+2];
_t_168_ -= mu[k][j][i+2] * 8.0 * _t_179_;
_t_189_ -= u_0[k][j+1][i+2];
_t_181_ += 8.0 * la[k][j+1][i] * _t_189_;
_t_166_ = _t_167_ * _t_168_;
_t_166_ += _t_180_ * _t_181_;
_t_199_ = u_2[k-2][j-1][i];
_t_210_ = -u_2[k-2][j-1][i];
_t_210_ += u_2[k-2][j+1][i];
_t_207_ += mu[k-2][j][i] * 8.0 * _t_210_;
_t_202_ = u_2[k-2][j+1][i];
_t_199_ += 8.0 * -u_2[k-1][j-1][i];
_t_212_ = 8.0 * -u_2[k-1][j-1][i];
_t_202_ += 8.0 * -u_2[k-1][j+1][i];
_t_212_ += 8.0 * u_2[k-1][j+1][i];
_t_199_ += 8.0 * u_2[k+1][j-1][i];
_t_215_ = 8.0 * -u_2[k+1][j-1][i];
_t_202_ += 8.0 * u_2[k+1][j+1][i];
_t_215_ += 8.0 * u_2[k+1][j+1][i];
_t_199_ -= u_2[k+2][j-1][i];
_t_194_ -= 8.0 * la[k][j-1][i] * _t_199_;
_t_218_ = -u_2[k+2][j-1][i];
_t_202_ -= u_2[k+2][j+1][i];
_t_194_ += 8.0 * la[k][j+1][i] * _t_202_;
_t_218_ += u_2[k+2][j+1][i];
_t_207_ -= mu[k+2][j][i] * 8.0 * _t_218_;
_t_197_ = -u_2[k-1][j-2][i];
_t_212_ += u_2[k-1][j-2][i];
_t_197_ += u_2[k+1][j-2][i];
_t_194_ += la[k][j-2][i] * 8.0 * _t_197_;
_t_215_ += u_2[k+1][j-2][i];
_t_205_ = -u_2[k-1][j+2][i];
_t_212_ -= u_2[k-1][j+2][i];
_t_207_ -= 8.0 * mu[k-1][j][i] * _t_212_;
_t_205_ += u_2[k+1][j+2][i];
_t_194_ -= la[k][j+2][i] * 8.0 * _t_205_;
_t_166_ += _t_193_ * _t_194_;
_t_215_ -= u_2[k+1][j+2][i];
_t_207_ += 8.0 * mu[k+1][j][i] * _t_215_;
_t_166_ += _t_206_ * _t_207_;
r2 += _t_166_;
// Accumulate the scaled right-hand sides into uacc (a1 == 1, so the
// previous acceleration is kept and cof * r is added).
uacc_0kc0jc0ic0 = a1 * uacc_0[k][j][i];
uacc_0kc0jc0ic0 += cof * r1;
uacc_0[k][j][i] = uacc_0kc0jc0ic0;
uacc_1kc0jc0ic0 = a1 * uacc_1[k][j][i];
uacc_1kc0jc0ic0 += cof * r2;
uacc_1[k][j][i] = uacc_1kc0jc0ic0;
uacc_2kc0jc0ic0 = a1 * uacc_2[k][j][i];
uacc_2kc0jc0ic0 += cof * r3;
uacc_2[k][j][i] = uacc_2kc0jc0ic0;
}
}
/* Runs one application of the sw4 stencil on the GPU (HIP build).
 *
 * Allocates device copies of every field, uploads them, launches the sw4
 * kernel once over an N x N x N domain, and downloads the three updated
 * acceleration components.
 *
 *   h_uacc_0..2 : in/out acceleration fields, N*N*N doubles each
 *   h_u_0..2    : input displacement fields, N*N*N doubles each
 *   h_mu, h_la  : material parameter fields, N*N*N doubles each
 *   h_strx/y/z  : 1-D grid-stretching arrays, N doubles each
 *
 * NOTE(review): the sw4 kernel reinterprets its pointers as fixed
 * [304][304][304] arrays, so this wrapper presumably requires N == 304 --
 * confirm with callers before using other sizes.
 */
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
  // Byte counts for one 3-D field and one 1-D stretching array.
  const size_t field_bytes = sizeof(double) * N * N * N;
  const size_t str_bytes = sizeof(double) * N;

  // Allocate and upload each field, checking every runtime call. The
  // original only checked the allocations; a failed upload would otherwise
  // go unnoticed until the results came back wrong.
  double *uacc_0;
  hipMalloc (&uacc_0, field_bytes);
  check_error ("Failed to allocate device memory for uacc_0\n");
  hipMemcpy (uacc_0, h_uacc_0, field_bytes, hipMemcpyHostToDevice);
  check_error ("Failed to copy uacc_0 to device\n");
  double *uacc_1;
  hipMalloc (&uacc_1, field_bytes);
  check_error ("Failed to allocate device memory for uacc_1\n");
  hipMemcpy (uacc_1, h_uacc_1, field_bytes, hipMemcpyHostToDevice);
  check_error ("Failed to copy uacc_1 to device\n");
  double *uacc_2;
  hipMalloc (&uacc_2, field_bytes);
  check_error ("Failed to allocate device memory for uacc_2\n");
  hipMemcpy (uacc_2, h_uacc_2, field_bytes, hipMemcpyHostToDevice);
  check_error ("Failed to copy uacc_2 to device\n");
  double *u_0;
  hipMalloc (&u_0, field_bytes);
  check_error ("Failed to allocate device memory for u_0\n");
  hipMemcpy (u_0, h_u_0, field_bytes, hipMemcpyHostToDevice);
  check_error ("Failed to copy u_0 to device\n");
  double *u_1;
  hipMalloc (&u_1, field_bytes);
  check_error ("Failed to allocate device memory for u_1\n");
  hipMemcpy (u_1, h_u_1, field_bytes, hipMemcpyHostToDevice);
  check_error ("Failed to copy u_1 to device\n");
  double *u_2;
  hipMalloc (&u_2, field_bytes);
  check_error ("Failed to allocate device memory for u_2\n");
  hipMemcpy (u_2, h_u_2, field_bytes, hipMemcpyHostToDevice);
  check_error ("Failed to copy u_2 to device\n");
  double *mu;
  hipMalloc (&mu, field_bytes);
  check_error ("Failed to allocate device memory for mu\n");
  hipMemcpy (mu, h_mu, field_bytes, hipMemcpyHostToDevice);
  check_error ("Failed to copy mu to device\n");
  double *la;
  hipMalloc (&la, field_bytes);
  check_error ("Failed to allocate device memory for la\n");
  hipMemcpy (la, h_la, field_bytes, hipMemcpyHostToDevice);
  check_error ("Failed to copy la to device\n");
  double *strx;
  hipMalloc (&strx, str_bytes);
  check_error ("Failed to allocate device memory for strx\n");
  hipMemcpy (strx, h_strx, str_bytes, hipMemcpyHostToDevice);
  check_error ("Failed to copy strx to device\n");
  double *stry;
  hipMalloc (&stry, str_bytes);
  check_error ("Failed to allocate device memory for stry\n");
  hipMemcpy (stry, h_stry, str_bytes, hipMemcpyHostToDevice);
  check_error ("Failed to copy stry to device\n");
  double *strz;
  hipMalloc (&strz, str_bytes);
  check_error ("Failed to allocate device memory for strz\n");
  hipMemcpy (strz, h_strz, str_bytes, hipMemcpyHostToDevice);
  check_error ("Failed to copy strz to device\n");

  // 16x2x2 threads per block (x is the fastest-varying dimension), with
  // enough blocks to cover the whole domain (ceil is the file's
  // ceiling-division macro).
  dim3 blockconfig (16, 2, 2);
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), ceil(N, blockconfig.z));
  hipLaunchKernelGGL(( sw4) , dim3(gridconfig), dim3(blockconfig), 0, 0, uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  check_error ("sw4 kernel launch failed\n");

  // Download the results; these blocking copies are on the same (null)
  // stream as the kernel, so kernel execution errors also surface here.
  hipMemcpy (h_uacc_0, uacc_0, field_bytes, hipMemcpyDeviceToHost);
  check_error ("Failed to copy uacc_0 back to host\n");
  hipMemcpy (h_uacc_1, uacc_1, field_bytes, hipMemcpyDeviceToHost);
  check_error ("Failed to copy uacc_1 back to host\n");
  hipMemcpy (h_uacc_2, uacc_2, field_bytes, hipMemcpyDeviceToHost);
  check_error ("Failed to copy uacc_2 back to host\n");

  hipFree (uacc_0);
  hipFree (uacc_1);
  hipFree (uacc_2);
  hipFree (u_0);
  hipFree (u_1);
  hipFree (u_2);
  hipFree (mu);
  hipFree (la);
  hipFree (strx);
  hipFree (stry);
  hipFree (strz);
}
#include <stdio.h>
#include "cuda.h"
// Integer helpers used for sizes and launch-configuration arithmetic.
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
// Ceiling division for grid sizing; NOTE(review): this macro shadows the
// standard math.h ceil() for all later code in this translation unit.
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Aborts the process if the most recent CUDA runtime call or kernel launch
// failed; `message` identifies the call site in the diagnostic. Note that
// cudaGetLastError() also clears the sticky error state it reads.
void check_error (const char* message) {
  const cudaError_t status = cudaGetLastError ();
  if (status == cudaSuccess)
    return;  // nothing went wrong since the last check
  printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
  exit(-1);
}
// Fully unrolled 4th-order SW4 elastic-wave stencil (machine-generated code).
// Each thread updates one interior grid point (k, j, i): it accumulates the
// variable-coefficient second-derivative and cross-derivative terms of the
// displacement fields u_0/u_1/u_2 -- weighted by the material fields mu/la
// and the 1-D grid-stretching arrays strx/stry/strz -- into r1/r2/r3, then
// applies uacc = a1 * uacc + cof * r to the three acceleration components.
// Launch layout: 3-D grid/blocks with threadIdx.x -> i (fastest dimension),
// threadIdx.y -> j, threadIdx.z -> k.
// NOTE(review): the pointer casts below fix the array extents at
// 304x304x304, so the kernel appears to require N == 304 even though N is a
// parameter -- confirm with callers.
__global__ void sw4 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determine this thread's global (i, j, k) indices from block/thread ids.
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
// Assumptions
// a1 scales the previous acceleration value; h is the (hard-coded) grid
// spacing and cof = 1/h^2 scales the accumulated stencil sums.
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
// Reinterpret the flat device buffers as fixed-extent 304^3 3-D arrays.
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
// NOTE(review): these outer declarations are shadowed by identically-named
// re-declarations inside the guarded block below and are never used.
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
// Interior-point guard: a 2-wide halo is left untouched on every face.
// NOTE(review): bitwise & on comparison results -- behaves like && here
// because each comparison yields 0/1, but && was probably intended.
if (i>=2 & j>=2 & k>=2 & i<=N-3 & j<=N-3 & k<=N-3) {
// Scratch temporaries emitted by the code generator (these shadow the
// outer copies declared above).
double muz4;
double muz1;
double muy4;
double muy1;
double mux4;
double mux1;
double muz3;
double muz2;
double muy3;
double muy2;
double mux3;
double mux2;
double _t_10_;
double r1;
double _t_15_;
double _t_5_;
double _t_7_;
double _t_3_;
double _t_9_;
double _t_4_;
double _t_2_;
double _t_6_;
double _t_1_;
double _t_8_;
double _t_21_;
double _t_35_;
double _t_30_;
double _t_32_;
double _t_28_;
double _t_34_;
double r2;
double _t_54_;
double _t_52_;
double _t_56_;
double _t_58_;
double _t_29_;
double _t_27_;
double _t_26_;
double _t_31_;
double _t_33_;
double _t_41_;
double r3;
double _t_46_;
double _t_53_;
double _t_51_;
double _t_55_;
double _t_57_;
double _t_59_;
double _t_100_;
double _t_74_;
double _t_61_;
double _t_87_;
double _t_75_;
double _t_101_;
double _t_62_;
double _t_88_;
double _t_106_;
double _t_78_;
double _t_80_;
double _t_109_;
double _t_83_;
double _t_86_;
double _t_104_;
double _t_112_;
double _t_60_;
double _t_67_;
double _t_91_;
double _t_70_;
double _t_93_;
double _t_96_;
double _t_99_;
double _t_65_;
double _t_73_;
double _t_127_;
double _t_153_;
double _t_140_;
double _t_114_;
double _t_115_;
double _t_141_;
double _t_128_;
double _t_154_;
double _t_120_;
double _t_144_;
double _t_123_;
double _t_146_;
double _t_149_;
double _t_152_;
double _t_118_;
double _t_126_;
double _t_113_;
double _t_133_;
double _t_157_;
double _t_136_;
double _t_159_;
double _t_162_;
double _t_165_;
double _t_131_;
double _t_139_;
double _t_167_;
double _t_180_;
double _t_206_;
double _t_193_;
double _t_168_;
double _t_181_;
double _t_194_;
double _t_207_;
double _t_173_;
double _t_184_;
double _t_176_;
double _t_186_;
double _t_189_;
double _t_192_;
double _t_171_;
double _t_179_;
double _t_166_;
double _t_199_;
double _t_210_;
double _t_202_;
double _t_212_;
double _t_215_;
double _t_218_;
double _t_197_;
double _t_205_;
double uacc_0kc0jc0ic0;
double uacc_1kc0jc0ic0;
double uacc_2kc0jc0ic0;
// One-sided material coefficients: combined mu*stretching weights used by
// the second-derivative stencils in z (muz1..muz4), y (muy1..muy4) and
// x (mux1..mux4).
muz4 = -3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
muz4 += mu[k+1][j][i] * strz[k+1];
muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
muz1 = -3.0 / 4.0 * mu[k][j][i] * strz[k];
muz1 += mu[k-1][j][i] * strz[k-1];
muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
muy4 = -3.0 / 4.0 * mu[k][j][i] * stry[j];
muy4 += mu[k][j+1][i] * stry[j+1];
muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
muy1 = -3.0 / 4.0 * mu[k][j][i] * stry[j];
muy1 += mu[k][j-1][i] * stry[j-1];
muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
mux4 = -3.0 / 4.0 * mu[k][j][i] * strx[i];
mux4 += mu[k][j][i+1] * strx[i+1];
mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
mux1 = -3.0 / 4.0 * mu[k][j][i] * strx[i];
mux1 += mu[k][j][i-1] * strx[i-1];
mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
muz3 = mu[k-1][j][i] * strz[k-1];
muz3 += mu[k+2][j][i] * strz[k+2];
muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
muz3 += 3.0 * mu[k][j][i] * strz[k];
muz2 = mu[k-2][j][i] * strz[k-2];
muz2 += mu[k+1][j][i] * strz[k+1];
muz2 += 3.0 * mu[k][j][i] * strz[k];
muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
muy3 = mu[k][j-1][i] * stry[j-1];
muy3 += mu[k][j+2][i] * stry[j+2];
muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
muy3 += 3.0 * mu[k][j][i] * stry[j];
muy2 = mu[k][j-2][i] * stry[j-2];
muy2 += mu[k][j+1][i] * stry[j+1];
muy2 += 3.0 * mu[k][j][i] * stry[j];
muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
mux3 = mu[k][j][i-1] * strx[i-1];
mux3 += mu[k][j][i+2] * strx[i+2];
mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
mux3 += 3.0 * mu[k][j][i] * strx[i];
mux2 = mu[k][j][i-2] * strx[i-2];
mux2 += mu[k][j][i+1] * strx[i+1];
mux2 += 3.0 * mu[k][j][i] * strx[i];
mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
// r1: second-derivative terms of u_0 in y (_t_10_), z (_t_15_) and x
// (_t_1_, whose weights combine 2*mu with la).
_t_10_ = muy1 * u_0[k][j-2][i];
_t_10_ += muy2 * u_0[k][j-1][i];
_t_10_ += muy3 * u_0[k][j+1][i];
_t_10_ += muy4 * u_0[k][j+2][i];
_t_10_ -= muy4 * u_0[k][j][i];
_t_10_ -= muy1 * u_0[k][j][i];
_t_10_ -= muy3 * u_0[k][j][i];
_t_10_ -= muy2 * u_0[k][j][i];
r1 = 1.0 / 6.0 * stry[j] * _t_10_;
_t_15_ = -muz4 * u_0[k][j][i];
_t_15_ -= muz1 * u_0[k][j][i];
_t_15_ -= muz3 * u_0[k][j][i];
_t_15_ -= muz2 * u_0[k][j][i];
_t_15_ += muz1 * u_0[k-2][j][i];
_t_15_ += muz2 * u_0[k-1][j][i];
_t_15_ += muz3 * u_0[k+1][j][i];
_t_15_ += muz4 * u_0[k+2][j][i];
r1 += 1.0 / 6.0 * strz[k] * _t_15_;
_t_5_ = -u_0[k][j][i];
_t_5_ += u_0[k][j][i-1];
_t_7_ = -u_0[k][j][i];
_t_7_ += u_0[k][j][i+1];
_t_3_ = -u_0[k][j][i];
_t_9_ = -u_0[k][j][i];
_t_3_ += u_0[k][j][i-2];
_t_9_ += u_0[k][j][i+2];
_t_4_ = 2.0 * mux2;
_t_4_ += la[k][j][i-2] * strx[i-2];
_t_2_ = -3.0 / 4.0 * la[k][j][i-2] * strx[i-2];
_t_2_ += 2.0 * mux1;
_t_2_ += la[k][j][i-1] * strx[i-1];
_t_4_ += 3.0 * la[k][j][i-1] * strx[i-1];
_t_6_ = la[k][j][i-1] * strx[i-1];
_t_6_ += 2.0 * mux3;
_t_2_ -= 3.0 / 4.0 * la[k][j][i] * strx[i];
_t_4_ += 3.0 * la[k][j][i] * strx[i];
_t_6_ += 3.0 * la[k][j][i] * strx[i];
_t_1_ = _t_2_ * _t_3_;
_t_8_ = -3.0 / 4.0 * la[k][j][i] * strx[i];
_t_8_ += 2.0 * mux4;
_t_4_ += la[k][j][i+1] * strx[i+1];
_t_1_ += _t_4_ * _t_5_;
_t_6_ += 3.0 * la[k][j][i+1] * strx[i+1];
_t_8_ += la[k][j][i+1] * strx[i+1];
_t_6_ += la[k][j][i+2] * strx[i+2];
_t_1_ += _t_6_ * _t_7_;
_t_8_ -= 3.0 / 4.0 * la[k][j][i+2] * strx[i+2];
_t_1_ += _t_8_ * _t_9_;
r1 += 1.0 / 6.0 * strx[i] * _t_1_;
// r2: second-derivative terms of u_1 in x (_t_21_), z (_t_35_) and y
// (_t_26_, whose weights combine 2*mu with la).
_t_21_ = mux1 * u_1[k][j][i-2];
_t_21_ += mux2 * u_1[k][j][i-1];
_t_21_ += mux3 * u_1[k][j][i+1];
_t_21_ += mux4 * u_1[k][j][i+2];
_t_35_ = muz1 * u_1[k-2][j][i];
_t_35_ += muz2 * u_1[k-1][j][i];
_t_35_ += muz3 * u_1[k+1][j][i];
_t_35_ += muz4 * u_1[k+2][j][i];
_t_30_ = u_1[k][j-1][i];
_t_32_ = u_1[k][j+1][i];
_t_28_ = u_1[k][j-2][i];
_t_28_ -= u_1[k][j][i];
_t_30_ -= u_1[k][j][i];
_t_32_ -= u_1[k][j][i];
_t_34_ = -u_1[k][j][i];
_t_35_ -= muz4 * u_1[k][j][i];
_t_35_ -= muz1 * u_1[k][j][i];
_t_21_ -= mux4 * u_1[k][j][i];
_t_21_ -= mux1 * u_1[k][j][i];
_t_35_ -= muz3 * u_1[k][j][i];
_t_35_ -= muz2 * u_1[k][j][i];
_t_21_ -= mux3 * u_1[k][j][i];
_t_21_ -= mux2 * u_1[k][j][i];
r2 = 1.0 / 6.0 * strz[k] * _t_35_;
r2 += 1.0 / 6.0 * strx[i] * _t_21_;
_t_34_ += u_1[k][j+2][i];
_t_54_ = 3.0 * la[k][j][i] * strz[k];
_t_54_ += 2.0 * muz2;
_t_52_ = -3.0 / 4.0 * la[k][j][i] * strz[k];
_t_52_ += 2.0 * muz1;
_t_56_ = 3.0 * la[k][j][i] * strz[k];
_t_56_ += 2.0 * muz3;
_t_58_ = -3.0 / 4.0 * la[k][j][i] * strz[k];
_t_58_ += 2.0 * muz4;
_t_52_ -= 3.0 / 4.0 * la[k-2][j][i] * strz[k-2];
_t_54_ += la[k-2][j][i] * strz[k-2];
_t_56_ += la[k+2][j][i] * strz[k+2];
_t_58_ -= 3.0 / 4.0 * la[k+2][j][i] * strz[k+2];
_t_52_ += la[k-1][j][i] * strz[k-1];
_t_54_ += 3.0 * la[k-1][j][i] * strz[k-1];
_t_56_ += la[k-1][j][i] * strz[k-1];
_t_54_ += la[k+1][j][i] * strz[k+1];
_t_56_ += 3.0 * la[k+1][j][i] * strz[k+1];
_t_58_ += la[k+1][j][i] * strz[k+1];
_t_29_ = 3.0 * la[k][j][i] * stry[j];
_t_29_ += 2.0 * muy2;
_t_27_ = -3.0 / 4.0 * la[k][j][i] * stry[j];
_t_27_ += 2.0 * muy1;
_t_27_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
_t_29_ += la[k][j-2][i] * stry[j-2];
_t_27_ += la[k][j-1][i] * stry[j-1];
_t_29_ += 3.0 * la[k][j-1][i] * stry[j-1];
_t_26_ = _t_27_ * _t_28_;
_t_31_ = la[k][j-1][i] * stry[j-1];
_t_31_ += 3.0 * la[k][j][i] * stry[j];
_t_31_ += 2.0 * muy3;
_t_33_ = -3.0 / 4.0 * la[k][j][i] * stry[j];
_t_33_ += 2.0 * muy4;
_t_31_ += la[k][j+2][i] * stry[j+2];
_t_33_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
_t_29_ += la[k][j+1][i] * stry[j+1];
_t_26_ += _t_29_ * _t_30_;
_t_31_ += 3.0 * la[k][j+1][i] * stry[j+1];
_t_26_ += _t_31_ * _t_32_;
_t_33_ += la[k][j+1][i] * stry[j+1];
_t_26_ += _t_33_ * _t_34_;
r2 += 1.0 / 6.0 * stry[j] * _t_26_;
// r3: second-derivative terms of u_2 in x (_t_41_), y (_t_46_) and z
// (_t_51_, whose weights combine 2*mu with la; _t_52_.._t_58_ were
// prepared above).
_t_41_ = mux1 * u_2[k][j][i-2];
_t_41_ -= mux4 * u_2[k][j][i];
_t_41_ -= mux1 * u_2[k][j][i];
_t_41_ -= mux3 * u_2[k][j][i];
_t_41_ -= mux2 * u_2[k][j][i];
_t_41_ += mux2 * u_2[k][j][i-1];
_t_41_ += mux3 * u_2[k][j][i+1];
_t_41_ += mux4 * u_2[k][j][i+2];
r3 = 1.0 / 6.0 * strx[i] * _t_41_;
_t_46_ = -muy4 * u_2[k][j][i];
_t_46_ -= muy1 * u_2[k][j][i];
_t_46_ -= muy3 * u_2[k][j][i];
_t_46_ -= muy2 * u_2[k][j][i];
_t_46_ += muy1 * u_2[k][j-2][i];
_t_46_ += muy2 * u_2[k][j-1][i];
_t_46_ += muy3 * u_2[k][j+1][i];
_t_46_ += muy4 * u_2[k][j+2][i];
r3 += 1.0 / 6.0 * stry[j] * _t_46_;
_t_53_ = -u_2[k][j][i];
_t_53_ += u_2[k-2][j][i];
_t_51_ = _t_52_ * _t_53_;
_t_55_ = -u_2[k][j][i];
_t_55_ += u_2[k-1][j][i];
_t_51_ += _t_54_ * _t_55_;
_t_57_ = -u_2[k][j][i];
_t_59_ = -u_2[k][j][i];
_t_57_ += u_2[k+1][j][i];
_t_51_ += _t_56_ * _t_57_;
_t_59_ += u_2[k+2][j][i];
_t_51_ += _t_58_ * _t_59_;
r3 += 1.0 / 6.0 * strz[k] * _t_51_;
// Cross-derivative contributions to r3: y-z couplings of u_1 and x-z
// couplings of u_0 on the unrolled 4-point stencils (1/144 prefactor).
_t_100_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_74_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_61_ = 1.0 / 144.0 * strx[i] * strz[k];
_t_87_ = 1.0 / 144.0 * strx[i] * strz[k];
_t_75_ = mu[k][j-2][i] * u_1[k-2][j-2][i];
_t_101_ = la[k-2][j][i] * u_1[k-2][j-2][i];
_t_75_ -= mu[k][j+2][i] * u_1[k-2][j+2][i];
_t_101_ -= la[k-2][j][i] * u_1[k-2][j+2][i];
_t_75_ -= mu[k][j-2][i] * u_1[k+2][j-2][i];
_t_101_ -= la[k+2][j][i] * u_1[k+2][j-2][i];
_t_75_ += mu[k][j+2][i] * u_1[k+2][j+2][i];
_t_101_ += la[k+2][j][i] * u_1[k+2][j+2][i];
_t_62_ = mu[k][j][i-2] * u_0[k-2][j][i-2];
_t_88_ = la[k-2][j][i] * u_0[k-2][j][i-2];
_t_62_ -= mu[k][j][i+2] * u_0[k-2][j][i+2];
_t_88_ -= la[k-2][j][i] * u_0[k-2][j][i+2];
_t_62_ -= mu[k][j][i-2] * u_0[k+2][j][i-2];
_t_88_ -= la[k+2][j][i] * u_0[k+2][j][i-2];
_t_62_ += mu[k][j][i+2] * u_0[k+2][j][i+2];
_t_88_ += la[k+2][j][i] * u_0[k+2][j][i+2];
_t_106_ = u_1[k-1][j-2][i];
_t_78_ = -u_1[k-1][j-2][i];
_t_106_ += 8.0 * -u_1[k-1][j-1][i];
_t_80_ = 8.0 * -u_1[k-1][j-1][i];
_t_78_ += u_1[k+1][j-2][i];
_t_75_ += mu[k][j-2][i] * 8.0 * _t_78_;
_t_109_ = u_1[k+1][j-2][i];
_t_80_ += 8.0 * u_1[k+1][j-1][i];
_t_109_ += 8.0 * -u_1[k+1][j-1][i];
_t_83_ = 8.0 * -u_1[k-1][j+1][i];
_t_106_ += 8.0 * u_1[k-1][j+1][i];
_t_83_ += 8.0 * u_1[k+1][j+1][i];
_t_109_ += 8.0 * u_1[k+1][j+1][i];
_t_86_ = -u_1[k-1][j+2][i];
_t_106_ -= u_1[k-1][j+2][i];
_t_101_ -= 8.0 * la[k-1][j][i] * _t_106_;
_t_86_ += u_1[k+1][j+2][i];
_t_75_ -= mu[k][j+2][i] * 8.0 * _t_86_;
_t_109_ -= u_1[k+1][j+2][i];
_t_101_ += 8.0 * la[k+1][j][i] * _t_109_;
_t_80_ += u_1[k-2][j-1][i];
_t_104_ = -u_1[k-2][j-1][i];
_t_83_ += u_1[k-2][j+1][i];
_t_104_ += u_1[k-2][j+1][i];
_t_101_ += la[k-2][j][i] * 8.0 * _t_104_;
_t_80_ -= u_1[k+2][j-1][i];
_t_75_ -= 8.0 * mu[k][j-1][i] * _t_80_;
_t_112_ = -u_1[k+2][j-1][i];
_t_83_ -= u_1[k+2][j+1][i];
_t_75_ += 8.0 * mu[k][j+1][i] * _t_83_;
_t_112_ += u_1[k+2][j+1][i];
_t_101_ -= la[k+2][j][i] * 8.0 * _t_112_;
_t_60_ = _t_74_ * _t_75_;
_t_60_ += _t_100_ * _t_101_;
_t_67_ = u_0[k-2][j][i-1];
_t_91_ = -u_0[k-2][j][i-1];
_t_91_ += u_0[k-2][j][i+1];
_t_88_ += la[k-2][j][i] * 8.0 * _t_91_;
_t_70_ = u_0[k-2][j][i+1];
_t_67_ += 8.0 * -u_0[k-1][j][i-1];
_t_93_ = 8.0 * -u_0[k-1][j][i-1];
_t_70_ += 8.0 * -u_0[k-1][j][i+1];
_t_93_ += 8.0 * u_0[k-1][j][i+1];
_t_67_ += 8.0 * u_0[k+1][j][i-1];
_t_96_ = 8.0 * -u_0[k+1][j][i-1];
_t_70_ += 8.0 * u_0[k+1][j][i+1];
_t_96_ += 8.0 * u_0[k+1][j][i+1];
_t_67_ -= u_0[k+2][j][i-1];
_t_62_ -= 8.0 * mu[k][j][i-1] * _t_67_;
_t_99_ = -u_0[k+2][j][i-1];
_t_70_ -= u_0[k+2][j][i+1];
_t_62_ += 8.0 * mu[k][j][i+1] * _t_70_;
_t_99_ += u_0[k+2][j][i+1];
_t_88_ -= la[k+2][j][i] * 8.0 * _t_99_;
_t_93_ += u_0[k-1][j][i-2];
_t_65_ = -u_0[k-1][j][i-2];
_t_65_ += u_0[k+1][j][i-2];
_t_62_ += mu[k][j][i-2] * 8.0 * _t_65_;
_t_96_ += u_0[k+1][j][i-2];
_t_93_ -= u_0[k-1][j][i+2];
_t_88_ -= 8.0 * la[k-1][j][i] * _t_93_;
_t_73_ = -u_0[k-1][j][i+2];
_t_73_ += u_0[k+1][j][i+2];
_t_62_ -= mu[k][j][i+2] * 8.0 * _t_73_;
_t_60_ += _t_61_ * _t_62_;
_t_96_ -= u_0[k+1][j][i+2];
_t_88_ += 8.0 * la[k+1][j][i] * _t_96_;
_t_60_ += _t_87_ * _t_88_;
r3 += _t_60_;
// Cross-derivative contributions to r1: x-y couplings of u_1 and x-z
// couplings of u_2 (1/144 prefactor).
_t_127_ = 1.0 / 144.0 * strx[i] * strz[k];
_t_153_ = 1.0 / 144.0 * strx[i] * strz[k];
_t_140_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_114_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_115_ = la[k][j][i-2] * u_1[k][j-2][i-2];
_t_141_ = mu[k][j-2][i] * u_1[k][j-2][i-2];
_t_115_ -= la[k][j][i+2] * u_1[k][j-2][i+2];
_t_141_ -= mu[k][j-2][i] * u_1[k][j-2][i+2];
_t_115_ -= la[k][j][i-2] * u_1[k][j+2][i-2];
_t_141_ -= mu[k][j+2][i] * u_1[k][j+2][i-2];
_t_115_ += la[k][j][i+2] * u_1[k][j+2][i+2];
_t_141_ += mu[k][j+2][i] * u_1[k][j+2][i+2];
_t_128_ = la[k][j][i-2] * u_2[k-2][j][i-2];
_t_154_ = mu[k-2][j][i] * u_2[k-2][j][i-2];
_t_128_ -= la[k][j][i+2] * u_2[k-2][j][i+2];
_t_154_ -= mu[k-2][j][i] * u_2[k-2][j][i+2];
_t_128_ -= la[k][j][i-2] * u_2[k+2][j][i-2];
_t_154_ -= mu[k+2][j][i] * u_2[k+2][j][i-2];
_t_128_ += la[k][j][i+2] * u_2[k+2][j][i+2];
_t_154_ += mu[k+2][j][i] * u_2[k+2][j][i+2];
_t_120_ = u_1[k][j-2][i-1];
_t_144_ = -u_1[k][j-2][i-1];
_t_144_ += u_1[k][j-2][i+1];
_t_141_ += mu[k][j-2][i] * 8.0 * _t_144_;
_t_123_ = u_1[k][j-2][i+1];
_t_120_ += 8.0 * -u_1[k][j-1][i-1];
_t_146_ = 8.0 * -u_1[k][j-1][i-1];
_t_123_ += 8.0 * -u_1[k][j-1][i+1];
_t_146_ += 8.0 * u_1[k][j-1][i+1];
_t_120_ += 8.0 * u_1[k][j+1][i-1];
_t_149_ = 8.0 * -u_1[k][j+1][i-1];
_t_123_ += 8.0 * u_1[k][j+1][i+1];
_t_149_ += 8.0 * u_1[k][j+1][i+1];
_t_120_ -= u_1[k][j+2][i-1];
_t_115_ -= 8.0 * la[k][j][i-1] * _t_120_;
_t_152_ = -u_1[k][j+2][i-1];
_t_123_ -= u_1[k][j+2][i+1];
_t_115_ += 8.0 * la[k][j][i+1] * _t_123_;
_t_152_ += u_1[k][j+2][i+1];
_t_141_ -= mu[k][j+2][i] * 8.0 * _t_152_;
_t_118_ = -u_1[k][j-1][i-2];
_t_146_ += u_1[k][j-1][i-2];
_t_118_ += u_1[k][j+1][i-2];
_t_115_ += la[k][j][i-2] * 8.0 * _t_118_;
_t_149_ += u_1[k][j+1][i-2];
_t_126_ = -u_1[k][j-1][i+2];
_t_146_ -= u_1[k][j-1][i+2];
_t_141_ -= 8.0 * mu[k][j-1][i] * _t_146_;
_t_126_ += u_1[k][j+1][i+2];
_t_115_ -= la[k][j][i+2] * 8.0 * _t_126_;
_t_149_ -= u_1[k][j+1][i+2];
_t_141_ += 8.0 * mu[k][j+1][i] * _t_149_;
_t_113_ = _t_114_ * _t_115_;
_t_113_ += _t_140_ * _t_141_;
_t_133_ = u_2[k-2][j][i-1];
_t_157_ = -u_2[k-2][j][i-1];
_t_157_ += u_2[k-2][j][i+1];
_t_154_ += mu[k-2][j][i] * 8.0 * _t_157_;
_t_136_ = u_2[k-2][j][i+1];
_t_133_ += 8.0 * -u_2[k-1][j][i-1];
_t_159_ = 8.0 * -u_2[k-1][j][i-1];
_t_136_ += 8.0 * -u_2[k-1][j][i+1];
_t_159_ += 8.0 * u_2[k-1][j][i+1];
_t_133_ += 8.0 * u_2[k+1][j][i-1];
_t_162_ = 8.0 * -u_2[k+1][j][i-1];
_t_136_ += 8.0 * u_2[k+1][j][i+1];
_t_162_ += 8.0 * u_2[k+1][j][i+1];
_t_133_ -= u_2[k+2][j][i-1];
_t_128_ -= 8.0 * la[k][j][i-1] * _t_133_;
_t_165_ = -u_2[k+2][j][i-1];
_t_136_ -= u_2[k+2][j][i+1];
_t_128_ += 8.0 * la[k][j][i+1] * _t_136_;
_t_165_ += u_2[k+2][j][i+1];
_t_154_ -= mu[k+2][j][i] * 8.0 * _t_165_;
_t_131_ = -u_2[k-1][j][i-2];
_t_159_ += u_2[k-1][j][i-2];
_t_131_ += u_2[k+1][j][i-2];
_t_128_ += la[k][j][i-2] * 8.0 * _t_131_;
_t_162_ += u_2[k+1][j][i-2];
_t_139_ = -u_2[k-1][j][i+2];
_t_159_ -= u_2[k-1][j][i+2];
_t_154_ -= 8.0 * mu[k-1][j][i] * _t_159_;
_t_139_ += u_2[k+1][j][i+2];
_t_128_ -= la[k][j][i+2] * 8.0 * _t_139_;
_t_113_ += _t_127_ * _t_128_;
_t_162_ -= u_2[k+1][j][i+2];
_t_154_ += 8.0 * mu[k+1][j][i] * _t_162_;
_t_113_ += _t_153_ * _t_154_;
r1 += _t_113_;
// Cross-derivative contributions to r2: x-y couplings of u_0 and y-z
// couplings of u_2 (1/144 prefactor).
_t_167_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_180_ = 1.0 / 144.0 * strx[i] * stry[j];
_t_206_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_193_ = 1.0 / 144.0 * stry[j] * strz[k];
_t_168_ = mu[k][j][i-2] * u_0[k][j-2][i-2];
_t_181_ = la[k][j-2][i] * u_0[k][j-2][i-2];
_t_168_ -= mu[k][j][i+2] * u_0[k][j-2][i+2];
_t_181_ -= la[k][j-2][i] * u_0[k][j-2][i+2];
_t_168_ -= mu[k][j][i-2] * u_0[k][j+2][i-2];
_t_181_ -= la[k][j+2][i] * u_0[k][j+2][i-2];
_t_168_ += mu[k][j][i+2] * u_0[k][j+2][i+2];
_t_181_ += la[k][j+2][i] * u_0[k][j+2][i+2];
_t_194_ = la[k][j-2][i] * u_2[k-2][j-2][i];
_t_207_ = mu[k-2][j][i] * u_2[k-2][j-2][i];
_t_194_ -= la[k][j+2][i] * u_2[k-2][j+2][i];
_t_207_ -= mu[k-2][j][i] * u_2[k-2][j+2][i];
_t_194_ -= la[k][j-2][i] * u_2[k+2][j-2][i];
_t_207_ -= mu[k+2][j][i] * u_2[k+2][j-2][i];
_t_194_ += la[k][j+2][i] * u_2[k+2][j+2][i];
_t_207_ += mu[k+2][j][i] * u_2[k+2][j+2][i];
_t_173_ = u_0[k][j-2][i-1];
_t_184_ = -u_0[k][j-2][i-1];
_t_184_ += u_0[k][j-2][i+1];
_t_181_ += la[k][j-2][i] * 8.0 * _t_184_;
_t_176_ = u_0[k][j-2][i+1];
_t_173_ += 8.0 * -u_0[k][j-1][i-1];
_t_186_ = 8.0 * -u_0[k][j-1][i-1];
_t_176_ += 8.0 * -u_0[k][j-1][i+1];
_t_186_ += 8.0 * u_0[k][j-1][i+1];
_t_173_ += 8.0 * u_0[k][j+1][i-1];
_t_189_ = 8.0 * -u_0[k][j+1][i-1];
_t_176_ += 8.0 * u_0[k][j+1][i+1];
_t_189_ += 8.0 * u_0[k][j+1][i+1];
_t_173_ -= u_0[k][j+2][i-1];
_t_168_ -= 8.0 * mu[k][j][i-1] * _t_173_;
_t_192_ = -u_0[k][j+2][i-1];
_t_176_ -= u_0[k][j+2][i+1];
_t_168_ += 8.0 * mu[k][j][i+1] * _t_176_;
_t_192_ += u_0[k][j+2][i+1];
_t_181_ -= la[k][j+2][i] * 8.0 * _t_192_;
_t_171_ = -u_0[k][j-1][i-2];
_t_186_ += u_0[k][j-1][i-2];
_t_171_ += u_0[k][j+1][i-2];
_t_168_ += mu[k][j][i-2] * 8.0 * _t_171_;
_t_189_ += u_0[k][j+1][i-2];
_t_179_ = -u_0[k][j-1][i+2];
_t_186_ -= u_0[k][j-1][i+2];
_t_181_ -= 8.0 * la[k][j-1][i] * _t_186_;
_t_179_ += u_0[k][j+1][i+2];
_t_168_ -= mu[k][j][i+2] * 8.0 * _t_179_;
_t_189_ -= u_0[k][j+1][i+2];
_t_181_ += 8.0 * la[k][j+1][i] * _t_189_;
_t_166_ = _t_167_ * _t_168_;
_t_166_ += _t_180_ * _t_181_;
_t_199_ = u_2[k-2][j-1][i];
_t_210_ = -u_2[k-2][j-1][i];
_t_210_ += u_2[k-2][j+1][i];
_t_207_ += mu[k-2][j][i] * 8.0 * _t_210_;
_t_202_ = u_2[k-2][j+1][i];
_t_199_ += 8.0 * -u_2[k-1][j-1][i];
_t_212_ = 8.0 * -u_2[k-1][j-1][i];
_t_202_ += 8.0 * -u_2[k-1][j+1][i];
_t_212_ += 8.0 * u_2[k-1][j+1][i];
_t_199_ += 8.0 * u_2[k+1][j-1][i];
_t_215_ = 8.0 * -u_2[k+1][j-1][i];
_t_202_ += 8.0 * u_2[k+1][j+1][i];
_t_215_ += 8.0 * u_2[k+1][j+1][i];
_t_199_ -= u_2[k+2][j-1][i];
_t_194_ -= 8.0 * la[k][j-1][i] * _t_199_;
_t_218_ = -u_2[k+2][j-1][i];
_t_202_ -= u_2[k+2][j+1][i];
_t_194_ += 8.0 * la[k][j+1][i] * _t_202_;
_t_218_ += u_2[k+2][j+1][i];
_t_207_ -= mu[k+2][j][i] * 8.0 * _t_218_;
_t_197_ = -u_2[k-1][j-2][i];
_t_212_ += u_2[k-1][j-2][i];
_t_197_ += u_2[k+1][j-2][i];
_t_194_ += la[k][j-2][i] * 8.0 * _t_197_;
_t_215_ += u_2[k+1][j-2][i];
_t_205_ = -u_2[k-1][j+2][i];
_t_212_ -= u_2[k-1][j+2][i];
_t_207_ -= 8.0 * mu[k-1][j][i] * _t_212_;
_t_205_ += u_2[k+1][j+2][i];
_t_194_ -= la[k][j+2][i] * 8.0 * _t_205_;
_t_166_ += _t_193_ * _t_194_;
_t_215_ -= u_2[k+1][j+2][i];
_t_207_ += 8.0 * mu[k+1][j][i] * _t_215_;
_t_166_ += _t_206_ * _t_207_;
r2 += _t_166_;
// Final update: uacc = a1 * uacc + cof * r for each component.
uacc_0kc0jc0ic0 = a1 * uacc_0[k][j][i];
uacc_0kc0jc0ic0 += cof * r1;
uacc_0[k][j][i] = uacc_0kc0jc0ic0;
uacc_1kc0jc0ic0 = a1 * uacc_1[k][j][i];
uacc_1kc0jc0ic0 += cof * r2;
uacc_1[k][j][i] = uacc_1kc0jc0ic0;
uacc_2kc0jc0ic0 = a1 * uacc_2[k][j][i];
uacc_2kc0jc0ic0 += cof * r3;
uacc_2[k][j][i] = uacc_2kc0jc0ic0;
}
}
/* Runs one application of the sw4 stencil on the GPU (CUDA build).
 *
 * Allocates device copies of every field, uploads them, launches the sw4
 * kernel once over an N x N x N domain, and downloads the three updated
 * acceleration components.
 *
 *   h_uacc_0..2 : in/out acceleration fields, N*N*N doubles each
 *   h_u_0..2    : input displacement fields, N*N*N doubles each
 *   h_mu, h_la  : material parameter fields, N*N*N doubles each
 *   h_strx/y/z  : 1-D grid-stretching arrays, N doubles each
 *
 * NOTE(review): the sw4 kernel reinterprets its pointers as fixed
 * [304][304][304] arrays, so this wrapper presumably requires N == 304 --
 * confirm with callers before using other sizes.
 */
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
  // Byte counts for one 3-D field and one 1-D stretching array.
  const size_t field_bytes = sizeof(double) * N * N * N;
  const size_t str_bytes = sizeof(double) * N;

  // Allocate and upload each field, checking every runtime call. The
  // original only checked the allocations; a failed upload would otherwise
  // go unnoticed until the results came back wrong.
  double *uacc_0;
  cudaMalloc (&uacc_0, field_bytes);
  check_error ("Failed to allocate device memory for uacc_0\n");
  cudaMemcpy (uacc_0, h_uacc_0, field_bytes, cudaMemcpyHostToDevice);
  check_error ("Failed to copy uacc_0 to device\n");
  double *uacc_1;
  cudaMalloc (&uacc_1, field_bytes);
  check_error ("Failed to allocate device memory for uacc_1\n");
  cudaMemcpy (uacc_1, h_uacc_1, field_bytes, cudaMemcpyHostToDevice);
  check_error ("Failed to copy uacc_1 to device\n");
  double *uacc_2;
  cudaMalloc (&uacc_2, field_bytes);
  check_error ("Failed to allocate device memory for uacc_2\n");
  cudaMemcpy (uacc_2, h_uacc_2, field_bytes, cudaMemcpyHostToDevice);
  check_error ("Failed to copy uacc_2 to device\n");
  double *u_0;
  cudaMalloc (&u_0, field_bytes);
  check_error ("Failed to allocate device memory for u_0\n");
  cudaMemcpy (u_0, h_u_0, field_bytes, cudaMemcpyHostToDevice);
  check_error ("Failed to copy u_0 to device\n");
  double *u_1;
  cudaMalloc (&u_1, field_bytes);
  check_error ("Failed to allocate device memory for u_1\n");
  cudaMemcpy (u_1, h_u_1, field_bytes, cudaMemcpyHostToDevice);
  check_error ("Failed to copy u_1 to device\n");
  double *u_2;
  cudaMalloc (&u_2, field_bytes);
  check_error ("Failed to allocate device memory for u_2\n");
  cudaMemcpy (u_2, h_u_2, field_bytes, cudaMemcpyHostToDevice);
  check_error ("Failed to copy u_2 to device\n");
  double *mu;
  cudaMalloc (&mu, field_bytes);
  check_error ("Failed to allocate device memory for mu\n");
  cudaMemcpy (mu, h_mu, field_bytes, cudaMemcpyHostToDevice);
  check_error ("Failed to copy mu to device\n");
  double *la;
  cudaMalloc (&la, field_bytes);
  check_error ("Failed to allocate device memory for la\n");
  cudaMemcpy (la, h_la, field_bytes, cudaMemcpyHostToDevice);
  check_error ("Failed to copy la to device\n");
  double *strx;
  cudaMalloc (&strx, str_bytes);
  check_error ("Failed to allocate device memory for strx\n");
  cudaMemcpy (strx, h_strx, str_bytes, cudaMemcpyHostToDevice);
  check_error ("Failed to copy strx to device\n");
  double *stry;
  cudaMalloc (&stry, str_bytes);
  check_error ("Failed to allocate device memory for stry\n");
  cudaMemcpy (stry, h_stry, str_bytes, cudaMemcpyHostToDevice);
  check_error ("Failed to copy stry to device\n");
  double *strz;
  cudaMalloc (&strz, str_bytes);
  check_error ("Failed to allocate device memory for strz\n");
  cudaMemcpy (strz, h_strz, str_bytes, cudaMemcpyHostToDevice);
  check_error ("Failed to copy strz to device\n");

  // 16x2x2 threads per block (x is the fastest-varying dimension), with
  // enough blocks to cover the whole domain (ceil is the file's
  // ceiling-division macro).
  dim3 blockconfig (16, 2, 2);
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), ceil(N, blockconfig.z));
  sw4 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  check_error ("sw4 kernel launch failed\n");

  // Download the results; these blocking copies are on the same (null)
  // stream as the kernel, so kernel execution errors also surface here.
  cudaMemcpy (h_uacc_0, uacc_0, field_bytes, cudaMemcpyDeviceToHost);
  check_error ("Failed to copy uacc_0 back to host\n");
  cudaMemcpy (h_uacc_1, uacc_1, field_bytes, cudaMemcpyDeviceToHost);
  check_error ("Failed to copy uacc_1 back to host\n");
  cudaMemcpy (h_uacc_2, uacc_2, field_bytes, cudaMemcpyDeviceToHost);
  check_error ("Failed to copy uacc_2 back to host\n");

  cudaFree (uacc_0);
  cudaFree (uacc_1);
  cudaFree (uacc_2);
  cudaFree (u_0);
  cudaFree (u_1);
  cudaFree (u_2);
  cudaFree (mu);
  cudaFree (la);
  cudaFree (strx);
  cudaFree (stry);
  cudaFree (strz);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.