serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
21,601 |
#include "filter.cuh"
//////////////////////////////////////////////////////////////
// constructors
/////////////////////////////////////////////////////////////
// Constructor: allocates all device buffers, uploads the model's world
// points, sizes the CUB scan workspace, seeds the per-particle RNGs, and
// draws the initial particle cloud from the Gaussian prior (mu, sigma).
Filter::Filter( const float mu[], const float sigma[], const float worldPoints[] )
{
    // allocate memory on GPU
    cudaMalloc( &d_states1, N_PARTICLES*N_STATES*sizeof( float ) );
    cudaMalloc( &d_states2, N_PARTICLES*N_STATES*sizeof( float ) );
    // weights/cumsum carry one extra leading element (a fixed 0) so the
    // inclusive scan over d_weights yields an exclusive cumsum of the weights
    cudaMalloc( &d_weights, (N_PARTICLES+1)*sizeof( float ) );
    cudaMalloc( &d_worldPoints, 3*N_WPOINTS*sizeof( float ) );
    cudaMalloc( &d_weightSum, sizeof( float ) );
    cudaMalloc( &d_resampleIdx, N_PARTICLES*sizeof( int ) );
    cudaMalloc( &d_meanStates, N_STATES*sizeof( float ) );
    cudaMalloc( &d_rngStates, N_PARTICLES*sizeof( curandState ) );
    cudaMalloc( &d_cumsum, (N_PARTICLES+1)*sizeof( float ) );
    checkCUDAError("constructor malloc");
    // copy world points to GPU
    cudaMemcpy( d_worldPoints, worldPoints, 3*N_WPOINTS*sizeof( float ), cudaMemcpyHostToDevice);
    checkCUDAError("constructor memcpy");
    // zero out first element of weights (stays 0 forever; see scan layout above)
    cudaMemset( d_weights, 0, sizeof( float ) );
    checkCUDAError("constructor memset");
    // Size query: with a NULL temp pointer, InclusiveSum only reports the
    // number of workspace bytes it needs.
    d_temp_storage = NULL;
    temp_storage_bytes = 0;
    cub::DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, d_weights, d_cumsum, N_PARTICLES );
    // Allocate temporary storage for inclusive prefix sum
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    checkCUDAError("constructor cub temp storage"); // fix: this malloc was unchecked
    // set up the states of random generators on the GPU (fixed seed 1234)
    setupRNG_kernel<<<N_PARTICLES/PARTICLE_BLK_SZ, PARTICLE_BLK_SZ>>>( d_rngStates, 1234 );
    checkCUDAError("constructor setupRNG_kernel"); // fix: this launch was unchecked
    // initialize particles
    init( mu, sigma, worldPoints );
}
//////////////////////////////////////////////////////////////
// destructor
/////////////////////////////////////////////////////////////
// Destructor: releases every device buffer acquired in the constructor.
Filter::~Filter()
{
    cudaFree(d_states1);
    cudaFree(d_states2);
    cudaFree(d_weights);
    cudaFree(d_worldPoints);
    cudaFree(d_weightSum);
    cudaFree(d_cumsum);
    cudaFree(d_meanStates);
    cudaFree(d_temp_storage);
    cudaFree(d_resampleIdx); // fix: was allocated in the ctor but never freed
    cudaFree(d_rngStates);   // fix: was allocated in the ctor but never freed
}
//////////////////////////////////////////////////////////////
// initalize particles
/////////////////////////////////////////////////////////////
// Draws the initial particle cloud on the host from independent Gaussians,
// one (mu[i], sigma[i]) pair per state dimension, and uploads it to
// d_states1, which becomes the active 'states' buffer.
// worldPoints is currently unused here (uploaded by the constructor).
void Filter::init( const float mu[], const float sigma[], const float worldPoints[] )
{
    std::random_device rd{};
    std::mt19937 gen{rd()};
    float *hostStates = new float[N_PARTICLES*N_STATES];
    // State-major layout (N_STATES rows of N_PARTICLES values) so adjacent
    // device threads touch adjacent addresses (coalesced access).
    for (int s = 0; s < N_STATES; ++s)
    {
        std::normal_distribution<float> draw( mu[s], sigma[s] );
        float *row = &hostStates[ s*N_PARTICLES ];
        for (int p = 0; p < N_PARTICLES; ++p)
        {
            row[p] = draw( gen );
        }
    }
    // upload the prior and make d_states1 the live buffer
    cudaMemcpy( d_states1, hostStates, N_PARTICLES*N_STATES*sizeof( float ), cudaMemcpyHostToDevice );
    checkCUDAError("memcpy");
    states = d_states1;
    delete[] hostStates;
}
//////////////////////////////////////////////////////////////
// update motion and observation
/////////////////////////////////////////////////////////////
// Runs one predict/observe cycle: propagates every particle through the
// motion model over [startTime, endTime] (on motionStream), then scores each
// particle against the image texture, writing per-particle weights into
// d_weights[1..N_PARTICLES] and their total into d_weightSum.
// NOTE(review): observation_kernel launches on the legacy default stream,
// which serializes with motionStream unless that stream was created with
// cudaStreamNonBlocking — confirm how motionStream is created.
void Filter::update( float startTime, float endTime, const float *d_camera, cudaTextureObject_t texObj, cudaStream_t motionStream/*=0*/ )
{
    float interval = endTime - startTime;
    // one thread per particle
    dim3 blocksPerGrid( N_PARTICLES/PARTICLE_BLK_SZ, 1, 1 );
    dim3 threadsPerBlock( PARTICLE_BLK_SZ, 1, 1);
    motion_kernel<<< blocksPerGrid, threadsPerBlock, 0, motionStream >>>( d_rngStates, interval, states );
    // one block per particle; each thread samples one point on one segment
    dim3 blocksPerGrid2( N_PARTICLES, 1, 1 );
    dim3 threadsPerBlock2( N_LINE_SAMPLES*(N_WPOINTS/2), 1, 1);
    // reset the weight accumulator the observation kernel atomicAdds into
    cudaMemset( d_weightSum, 0, sizeof(float) );
    observation_kernel<<< blocksPerGrid2, threadsPerBlock2 >>>( texObj, d_camera, d_worldPoints, states, &d_weights[1], d_weightSum );
#if PARTICLE_DBG
    cudaDeviceSynchronize(); // fix: cudaThreadSynchronize() is deprecated
#endif
    checkCUDAError("update_kernel");
}
//////////////////////////////////////////////////////////////
// resample
/////////////////////////////////////////////////////////////
// Resamples the particle set: normalizes the weights and builds their cumsum,
// then draws a fresh generation of particles into the inactive state buffer
// and flips the active/inactive roles of d_states1/d_states2 (ping-pong).
void Filter::resample()
{
    // normalize weights and compute the cumulative distribution
    normalize();
    dim3 grid( N_PARTICLES/PARTICLE_BLK_SZ, 1, 1 );
    dim3 block( PARTICLE_BLK_SZ, 1, 1);
    // resample from the current buffer into the other one
    float *target = ( states==d_states1 ) ? d_states2 : d_states1;
    resample_kernel<<< grid, block >>>( d_rngStates, d_cumsum, states, target );
    states = target;
    checkCUDAError("resample_kernel");
}
// Computes the mean state over all particles: the kernel accumulates
// per-dimension sums into d_meanStates, the result is copied back, and the
// host finishes the average by dividing by N_PARTICLES into h_meanStates.
void Filter::mean()
{
    // clear the device-side accumulators
    cudaMemset( d_meanStates, 0, N_STATES*sizeof(float) );
    checkCUDAError("memset mean");
    dim3 grid( N_PARTICLES/PARTICLE_BLK_SZ, 1, 1 );
    dim3 block( PARTICLE_BLK_SZ, 1, 1);
    mean_kernel<<< grid, block >>>( states, d_meanStates );
    checkCUDAError("mean_kernel");
    // blocking copy of the per-dimension sums back to the host
    cudaMemcpy( h_meanStates, d_meanStates, N_STATES*sizeof( float ), cudaMemcpyDeviceToHost );
    checkCUDAError("memcpy mean");
    // finish the average on the host
    for (int dim = 0; dim < N_STATES; ++dim)
    {
        h_meanStates[dim] /= (float)N_PARTICLES;
    }
}
//////////////////////////////////////////////////////////////
// normalize and compute cumulative sum
/////////////////////////////////////////////////////////////
// Divides every weight by the accumulated total, then builds the cumulative
// distribution (d_cumsum) consumed by resample_kernel.
void Filter::normalize()
{
    dim3 blocksPerGrid( N_PARTICLES/PARTICLE_BLK_SZ, 1, 1 );
    dim3 threadsPerBlock( PARTICLE_BLK_SZ, 1, 1);
    // make sure observation_kernel has finished accumulating d_weightSum
    cudaDeviceSynchronize(); // fix: cudaThreadSynchronize() is deprecated
    // NOTE(review): no guard against *d_weightSum == 0 (all samples off-image);
    // normalize_kernel would then produce inf/nan weights — confirm upstream.
    normalize_kernel<<< blocksPerGrid, threadsPerBlock >>>( &d_weights[1], d_weightSum );
    checkCUDAError("normalize_kernel");
    // Inclusive scan over [0, w_1, ..., w_{N-1}] (d_weights keeps a fixed 0 in
    // slot 0) — this yields the exclusive cumulative sum of the weights.
    cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_weights, d_cumsum, N_PARTICLES );
    checkCUDAError("cumsum");
}
//////////////////////////////////////////////////////////////
// compute normal from 2 image points
/////////////////////////////////////////////////////////////
// Computes the unit normal of the 2D segment a->b given in imPts as
// [ax, ay, bx, by], scaled to IM_LAMBDA pixels, into n = [nx, ny].
// The normal is the segment direction rotated 90 degrees.
__device__ void computeNormal( const float *imPts, float *n )
{
    float nx = imPts[1] - imPts[3]; // ay - by
    float ny = imPts[2] - imPts[0]; // bx - ax
    // normalize to unit length
    float invLen = rhypotf( nx, ny );
    nx *= invLen;
    ny *= invLen;
    // stretch to IM_LAMBDA pixels
    n[0] = nx * IM_LAMBDA;
    n[1] = ny * IM_LAMBDA;
}
/******************************* KERNELS ******************************************************/
//////////////////////////////////////////////////////////////
// mean kernel
/////////////////////////////////////////////////////////////
// Accumulates the sum of every state dimension over all particles into
// meanStates (which must be zeroed beforehand — see Filter::mean, which also
// divides by N_PARTICLES afterwards to finish the average).
// Launch: one thread per particle, no bounds guard, so the grid must cover
// exactly N_PARTICLES threads. statesPtr is state-major (N_STATES x N_PARTICLES).
// NOTE(review): CUB documents that reusing the same TempStorage across
// successive BlockReduce calls requires a __syncthreads() between them; the
// loop below reuses tmp_storage with no barrier — verify against the CUB docs
// for the toolkit version in use.
__global__ void mean_kernel( const float *statesPtr, float *meanStates )
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
// block reduce to sum states
typedef cub::BlockReduce<float, PARTICLE_BLK_SZ> BlockReduce;
__shared__ typename BlockReduce::TempStorage tmp_storage;
#pragma unroll
for (int i = 0; i < N_STATES; ++i)
{
float state = statesPtr[index + i*N_PARTICLES];
float blockSum = BlockReduce(tmp_storage).Sum( state );
// lane 0 of each block folds its partial sum into the global accumulator
if( threadIdx.x==0 )
atomicAdd( &meanStates[ i ], blockSum );
}
}
//////////////////////////////////////////////////////////////
// resample kernel
/////////////////////////////////////////////////////////////
// Multinomial resampling, one thread per new particle: draw U ~ uniform(0,1],
// binary-search the cumulative weight array for the particle whose interval
// contains U, and copy that particle's states into this thread's slot of
// toStates. 'weights' is actually the cumulative sum (d_cumsum built by
// Filter::normalize), not the raw weights.
__global__ void resample_kernel( curandState *rngStates, const float *weights, const float *fromStates, float *toStates )
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
// work on a register copy of the RNG state; written back at the end
curandState random = rngStates[index];
float U = curand_uniform( &random );
int particle = findInterval( U, weights );
// copy states to new array (state-major layout, stride N_PARTICLES)
#pragma unroll
for (int i = 0; i < N_STATES; ++i)
{
toStates[ index + i*N_PARTICLES ] = fromStates[ particle + i*N_PARTICLES ];
}
#if PARTICLE_DBG
printf("Particle %d, sample: %f, resampled to particle %d\n", index, U, particle);
#endif
rngStates[index] = random;
}
//////////////////////////////////////////////////////////////
// normalize kernel
/////////////////////////////////////////////////////////////
// Divides each particle weight by the total weight so they sum to 1.
// Launch: one thread per weight, no bounds guard — the grid must cover
// exactly N_PARTICLES threads.
__global__ void normalize_kernel( float *weightPtr, const float *weightSum )
{
    const int tid = blockDim.x*blockIdx.x + threadIdx.x;
    const float total = *weightSum;
    weightPtr[tid] /= total;
#if PARTICLE_DBG
    printf("Normalized %d: %f\n", tid, weightPtr[tid]);
#endif
}
//////////////////////////////////////////////////////////////
// motion kernel
/////////////////////////////////////////////////////////////
// Propagates one particle per thread through the pendulum motion model over
// 'time_left' seconds, then adds Gaussian process noise to every state.
// Launch: one thread per particle, no bounds guard. statesPtr is state-major
// (N_STATES x N_PARTICLES) for coalesced loads/stores.
// The STATE_* macros presumably alias entries of the local states[] array
// (they are read and written between the global-memory copy-in/copy-out
// below) — TODO confirm against filter.cuh.
__global__ void motion_kernel( curandState *rngStates, float time_left, float *statesPtr )
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
// copy data to thread mem
float states[N_STATES];
for (int i = 0; i < N_STATES; ++i)
{
states[i] = statesPtr[ index + i*N_PARTICLES ];
}
#if PARTICLE_DBG
printf("thread %d initial states: [ %f, %f, %f, %f, %f, %f ]\n", index, STATE_X, STATE_Y, STATE_Z, STATE_ALPHA, STATE_BETA, STATE_GAMMA );
#endif
// convert to orientation to the center of mass
STATE_ALPHA += PARAMETER::THETA_ALPHA;
STATE_BETA += PARAMETER::THETA_BETA;
// states: x,y,z,x_dot,y_dot,z_dot,alpha,beta,gamma,alpha_dot,beta_dot,gamma_dot
// first handle states with constant acceleration (simple linear motion)
STATE_X += time_left*STATE_X_D;// x
STATE_Y += time_left*STATE_Y_D;// y
STATE_Z += time_left*STATE_Z_D;// z
// states[3] to states[5] and states[11] are unchanged
STATE_GAMMA += time_left*STATE_GAMMA_D;
// Next use the forward Euler method to integrate the coupled alpha/beta
// dynamics in fixed sub-steps of STEP_SZ. The alpha/beta/alpha_d locals hold
// the values from the *previous* sub-step so that each derivative below is
// evaluated at a consistent point.
float alpha = STATE_ALPHA;
float beta = STATE_BETA;
float alpha_d = STATE_ALPHA_D;
while( time_left>STEP_SZ )
{
STATE_ALPHA += STEP_SZ*alpha_d;// alpha
STATE_BETA += STEP_SZ*STATE_BETA_D;// beta
STATE_ALPHA_D += STEP_SZ*dynamicsAlpha( alpha, beta, alpha_d, STATE_BETA_D );// alpha_dot
STATE_BETA_D += STEP_SZ*dynamicsBeta( alpha, beta, alpha_d );// beta_dot
time_left -= STEP_SZ;
alpha = STATE_ALPHA;
beta = STATE_BETA;
alpha_d = STATE_ALPHA_D;
}
// finish up the remainder (the final partial step, time_left < STEP_SZ)
STATE_ALPHA += time_left*alpha_d;// - PARAMETER::THETA_ALPHA;// alpha
STATE_BETA += time_left*STATE_BETA_D;// - PARAMETER::THETA_BETA;// beta
STATE_ALPHA_D += time_left*dynamicsAlpha( alpha, beta, alpha_d, STATE_BETA_D );// alpha_dot
STATE_BETA_D += time_left*dynamicsBeta( alpha, beta, alpha_d );// beta_dot
// convert back to the pivot frame
STATE_ALPHA -= PARAMETER::THETA_ALPHA;
STATE_BETA -= PARAMETER::THETA_BETA;
// apply per-state Gaussian process noise (RNG state cached in a register)
curandState random = rngStates[index];
STATE_X += curand_normal(&random)*X_SIGMA;
STATE_Y += curand_normal(&random)*Y_SIGMA;
STATE_Z += curand_normal(&random)*Z_SIGMA;
STATE_X_D += curand_normal(&random)*X_D_SIGMA;
STATE_Y_D += curand_normal(&random)*Y_D_SIGMA;
STATE_Z_D += curand_normal(&random)*Z_D_SIGMA;
STATE_ALPHA += curand_normal(&random)*ALPHA_SIGMA;
STATE_BETA += curand_normal(&random)*BETA_SIGMA;
STATE_GAMMA += curand_normal(&random)*GAMMA_SIGMA;
STATE_ALPHA_D += curand_normal(&random)*ALPHA_D_SIGMA;
STATE_BETA_D += curand_normal(&random)*BETA_D_SIGMA;
STATE_GAMMA_D += curand_normal(&random)*GAMMA_D_SIGMA;
rngStates[index] = random;
// copy back to global memory
for (int i = 0; i < N_STATES; ++i)
{
statesPtr[ index + i*N_PARTICLES ] = states[i];
}
#if PARTICLE_DBG
printf("thread %d predicted states: [ %f, %f, %f, %f, %f, %f ]\n", index, STATE_X, STATE_Y, STATE_Z, STATE_ALPHA, STATE_BETA, STATE_GAMMA );
#endif
}
//////////////////////////////////////////////////////////////
// Observation kernel
/////////////////////////////////////////////////////////////
// Scores one particle per block against the image. Launch: gridDim.x ==
// N_PARTICLES, blockDim.x == N_LINE_SAMPLES*(N_WPOINTS/2). The block projects
// the model's world points into the image using the particle's pose; each
// thread then samples one point along one projected line segment, measures
// the intensity difference across the segment's normal, and a CUB block
// reduction sums the squared differences into this particle's weight
// (weightPtr[blockIdx.x]) and the global total (*weightSum).
__global__ void observation_kernel(cudaTextureObject_t texObj, const float *cameraPtr, const float *worldPtsPtr, float *statesPtr, float *weightPtr, float *weightSum )
{
__shared__ float camera[12];
__shared__ float states[N_STATES];
__shared__ float worldPts[3*N_WPOINTS];
__shared__ float imPts[2*N_WPOINTS];
__shared__ float normals[N_WPOINTS];
// block reduce to sum weights
typedef cub::BlockReduce<float, N_LINE_SAMPLES*(N_WPOINTS/2)> BlockReduce;
__shared__ typename BlockReduce::TempStorage tmp_storage;
// Cooperative load into shared memory, assuming 3*N_WPOINTS is the largest
// of the loads so all the smaller ones can nest inside this guard.
if( threadIdx.x<3*N_WPOINTS )
{
worldPts[threadIdx.x] = worldPtsPtr[threadIdx.x];
// next is states (when N_STATES==12 one guard covers camera and states)
#if N_STATES==12
if( threadIdx.x < 12 )
{
camera[threadIdx.x] = cameraPtr[threadIdx.x];
states[threadIdx.x] = statesPtr[ blockIdx.x + threadIdx.x*N_PARTICLES ];
}
#else
if( threadIdx.x < 12 )
camera[threadIdx.x] = cameraPtr[threadIdx.x];
if( threadIdx.x < N_STATES )
states[threadIdx.x] = statesPtr[ blockIdx.x + threadIdx.x*N_PARTICLES ];
#endif
// project world points to image points, one point per thread
if( threadIdx.x < N_WPOINTS )
{
// NOTE(review): every thread here builds the same 12-float transform in
// registers — redundant work that could be shared, but harmless.
float T[12];
statesToTransform( states, T );
float tmp[3];
// transform and project points
transformPt( T, &worldPts[3*threadIdx.x], tmp );
project( tmp, camera, &imPts[2*threadIdx.x] );
// and compute one normal per segment (segment = consecutive point pair)
if( threadIdx.x < (N_WPOINTS/2) )
computeNormal( &imPts[4*threadIdx.x], &normals[2*threadIdx.x] );
}
}
// NOTE(review): yes, this barrier is necessary — the sampling code below
// reads imPts/normals written by other threads, and implicit warp-synchronous
// execution is not guaranteed on Volta and newer.
__syncthreads();// is this neccesary if N_WPOINTS<32?
#if PARTICLE_DBG
if(threadIdx.x==0)
printf("Particle %d states: [ %f, %f, %f, %f, %f, %f ]\n", blockIdx.x, STATE_X, STATE_Y, STATE_Z, STATE_ALPHA, STATE_BETA, STATE_GAMMA );
#endif
// idx = which segment this thread samples; remainderIdx = which sample on it
int idx = threadIdx.x/N_LINE_SAMPLES;
int remainderIdx = threadIdx.x%N_LINE_SAMPLES;
// fraction of line segment (samples are strictly interior: 1/(S+1)..S/(S+1))
float frac = (float)(remainderIdx+1)/( N_LINE_SAMPLES + 1 );
// some aliases for the segment endpoints a and b
const float &ax = imPts[4*idx];
const float &ay = imPts[4*idx+1];
const float &bx = imPts[4*idx+2];
const float &by = imPts[4*idx+3];
const float *n = &normals[2*idx];
// Don't sample if the IM_LAMBDA-padded segment crosses the image boundary;
// such threads keep weight 0 but must still reach the block reduction below
// (hence goto rather than return — BlockReduce needs all threads).
float weight = 0;
float diff = 0;
// negative pixel pos
if( ax<IM_LAMBDA || ay<IM_LAMBDA || bx<IM_LAMBDA || by<IM_LAMBDA )
goto end;
// too large pixel pos
if( (IM_LAMBDA + ax)>=IM_W || (IM_LAMBDA + ay)>=IM_H || (IM_LAMBDA + bx)>=IM_W || (IM_LAMBDA + by)>=IM_H )
goto end;
#if PARTICLE_DBG
printf("particle %d, thread %d: a=[ %f, %f ], b=[ %f, %f ] \n", blockIdx.x, threadIdx.x, ax, ay, bx, by );
#endif
float pt[2];
pt[0] = ( 1-frac )*ax + frac*bx;
pt[1] = ( 1-frac )*ay + frac*by;
#if PARTICLE_DBG
printf("particle %d, thread %d: pt= [ %f, %f ]\n", blockIdx.x, threadIdx.x, pt[0], pt[1] );
#endif
// measure image at point +- normal (edge response across the segment)
diff = tex2D<uchar>( texObj, pt[0] + n[0], pt[1] + n[1] ) - tex2D<uchar>( texObj, pt[0] - n[0], pt[1] - n[1] );
// NOTE(review): the idx factor makes segment 0 contribute zero weight —
// confirm that "closest to pivot first" weighting intentionally discards it.
weight = idx*diff*diff;// weight by position in array, closest to pivot first
#if PARTICLE_DBG
printf("Particle %d, thread %d weight: %f \n", blockIdx.x, threadIdx.x, weight);
#endif
end:
float blockSum = BlockReduce(tmp_storage).Sum( weight );
#if PARTICLE_DBG
if(threadIdx.x==0)
printf("Particle %d, blockSum: %f, weightSum: %f\n", blockIdx.x, blockSum, *weightSum );
#endif
// lane 0 publishes this particle's weight and folds it into the global sum
if(threadIdx.x==0)
{
weightPtr[blockIdx.x] = blockSum;
atomicAdd( weightSum, blockSum );
}
}
/******************************* MOTION DEVICE FUNCTIONS **************************************/
//////////////////////////////////////////////////////////////
// motion equations
/////////////////////////////////////////////////////////////
// Angular acceleration of alpha for the pendulum dynamics, given the current
// angles (alpha, beta) and angular velocities (alpha_dot, beta_dot).
__device__ inline float dynamicsAlpha( float alpha, float beta, float alpha_dot, float beta_dot )
{
    float sinB, cosB;
    sincosf( beta, &sinB, &cosB );
    const float coupling = 2*alpha_dot*beta_dot*sinB;
    const float gravity = (9.81f/PARAMETER::LENGTH)*sinf(alpha);
    return ( coupling - gravity )/cosB;
}
// Angular acceleration of beta for the pendulum dynamics, given the current
// angles (alpha, beta) and alpha's angular velocity.
__device__ inline float dynamicsBeta( float alpha, float beta, float alpha_dot )
{
    float sinB, cosB;
    sincosf( beta, &sinB, &cosB );
    const float magnitude = (9.81f/PARAMETER::LENGTH)*cosf(alpha) + alpha_dot*alpha_dot*cosB;
    return -magnitude*sinB;
}
/******************************* OBSERVATION DEVICE FUNCTIONS *********************************/
//////////////////////////////////////////////////////////////
// project world points to image points
/////////////////////////////////////////////////////////////
// Projects a single 3D world point into the image for a given particle pose:
// builds the truncated 3x4 transform from the states, moves the point into
// that frame, then applies the camera projection. Result written to imPt[2].
__device__ void projectWorldPt( const float *camera, const float *worldPt, const float states[], float imPt[] )
{
    float T[12];
    statesToTransform( states, T );
    float transformedPt[3];
    transformPt( T, worldPt, transformedPt );
    project( transformedPt, camera, imPt );
}
//////////////////////////////////////////////////////////////
// transform a single world point
/////////////////////////////////////////////////////////////
// Applies the truncated 3x4 transform T (row-major [R|t]) to worldPt,
// writing the transformed 3D point into pt3D. The implicit homogeneous
// coordinate of worldPt is 1, so each row's 4th entry is simply added.
__device__ void transformPt( const float T[12], const float *worldPt, float pt3D[3] )
{
    #pragma unroll
    for (int row = 0; row < 3; ++row)
    {
        const float *r = &T[4*row];
        pt3D[row] = r[0]*worldPt[0] + r[1]*worldPt[1] + r[2]*worldPt[2] + r[3];
    }
}
//////////////////////////////////////////////////////////////
// Make a (truncated) transformation matrix given the states
/////////////////////////////////////////////////////////////
// Builds the truncated 3x4 homogeneous transform T = [R|t] from a particle's
// states: R from the three Euler angles (alpha, beta, gamma via the STATE_*
// macros), t from (x, y, z). The 4th row (0 0 0 1) is omitted to save memory.
// NOTE(review): the R entries match a composition of elementary rotations
// about three axes — confirm the exact Euler convention against the host math.
__device__ void statesToTransform( const float states[], float T[] )
{
// Rotation matrix: s/c pairs of the three Euler angles
float s1, c1, s2, c2, s3, c3;
sincosf( STATE_ALPHA, &s1, &c1 );
sincosf( STATE_BETA, &s2, &c2 );
sincosf( STATE_GAMMA, &s3, &c3 );
// printf("s1: %f, c1: %f\n", s1, c1);
/* row 1 */
// T_11
T[0] = c2*c3;
// T_12
T[1] = -c2*s3;
// T_13
T[2] = s2;
// T_14 (translation x)
T[3] = STATE_X;
/* row 2 */
// T_21
T[4] = c1*s3 + c3*s1*s2;
// T_22
T[5] = c1*c3 - s1*s2*s3;
// T_23
T[6] = -c2*s1;
// T_24 (translation y)
T[7] = STATE_Y;
/* row 3 */
// T_31
T[8] = s1*s3 - c1*c3*s2;
// T_32
T[9] = c3*s1 + c1*s2*s3;
// T_33
T[10] = c1*c2;
// T_34 (translation z)
T[11] = STATE_Z;
/* row 4 is only 0 0 0 1, save the memory */
/*
#if PARTICLE_DBG
printf("T= [ %f, %f, %f, %f;\n %f, %f, %f, %f;\n %f, %f, %f, %f ];\n", T[0], T[1], T[2], T[3], T[4], T[5], T[6], T[7], T[8], T[9], T[10], T[11] );
#endif
*/
}
//////////////////////////////////////////////////////////////
// Transform camera to new reference frame given a state
/////////////////////////////////////////////////////////////
// Composes the 3x4 camera matrix P with the state transform T: newCamera =
// P*T, i.e. expresses the camera in the particle's reference frame. Only the
// left 3x3 of P multiplies T's columns; P's 4th column is added straight to
// the translation because T's implicit 4th row is (0 0 0 1).
// (Currently only referenced from dead code in projectWorldPt.)
__device__ void transformCamera( const float *camera, const float states[], float newCamera[] )
{
// first make transformation matrix from states
float T[12];
statesToTransform( states, T );
// update camera matrix by P*T, one output row per iteration
for (int row = 0; row < 3; ++row)
{
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
float sum4 = 0;
for (int col = 0; col < 3; ++col)
{
sum1 += camera[row*4 + col]*T[4*col];
sum2 += camera[row*4 + col]*T[4*col +1];
sum3 += camera[row*4 + col]*T[4*col +2];
sum4 += camera[row*4 + col]*T[4*col +3];
}
newCamera[row*4] = sum1;
newCamera[row*4 +1] = sum2;
newCamera[row*4 +2] = sum3;
// homogeneous column: P's own 4th column carries through unrotated
newCamera[row*4 +3] = sum4 + camera[row*4 +3];
}
/*
#if PARTICLE_DBG
printf("camera= [ %f, %f, %f, %f;\n %f, %f, %f, %f;\n %f, %f, %f, %f ];\n", newCamera[0], newCamera[1], newCamera[2], newCamera[3], newCamera[4], newCamera[5], newCamera[6], newCamera[7], newCamera[8], newCamera[9], newCamera[10], newCamera[11] );
#endif
*/
}
//////////////////////////////////////////////////////////////
// Project 3D world point using a given camera to a 2D point
/////////////////////////////////////////////////////////////
// Projects a 3D point through the 3x4 camera matrix: [x, y, w]^T =
// camera * [X, Y, Z, 1]^T, then dehomogenizes to imPt = [x/w, y/w].
// Assumes w != 0 (point not on the camera's principal plane).
__device__ void project( const float worldPt[], const float *camera, float imPt[] )
{
    float x = worldPt[0]*camera[0] + worldPt[1]*camera[1] + worldPt[2]*camera[2] + camera[3];
    float y = worldPt[0]*camera[4] + worldPt[1]*camera[5] + worldPt[2]*camera[6] + camera[7];
    float w = worldPt[0]*camera[8] + worldPt[1]*camera[9] + worldPt[2]*camera[10] + camera[11];
    imPt[0] = x / w;
    imPt[1] = y / w;
}
//////////////////////////////////////////////////////////////
// PRNG
/////////////////////////////////////////////////////////////
// Initializes one curand generator per thread: identical seed, a distinct
// subsequence per thread (so streams are independent), zero offset.
// Launch: one thread per entry of rngStates, no bounds guard.
__global__ void setupRNG_kernel( curandState *rngStates, unsigned int seed )
{
    const int tid = blockDim.x*blockIdx.x + threadIdx.x;
    curand_init( seed, tid, 0, &rngStates[tid] );
}
// Returns the largest power of two <= N (0 for N == 0), computable at compile
// time. The SH_1..SH_5 shifts smear the highest set bit into every lower bit
// position; SH_6 then subtracts the lower half, leaving only the top bit.
// Implemented with macros so the whole thing is a single return expression
// (C++11 constexpr compatible); the macros are #undef'd to keep them local.
__device__ constexpr unsigned int floor_pow_2( unsigned int N )
{
#define SH_1( x ) ( x | (x >> 1) )
#define SH_2( x ) ( x | (x >> 2) )
#define SH_3( x ) ( x | (x >> 4) )
#define SH_4( x ) ( x | (x >> 8) )
#define SH_5( x ) ( x | (x >> 16) )
#define SH_6( x ) ( x - (x >> 1) )
#define FLOOR_POW_2( x ) ( SH_6( SH_5( SH_4( SH_3( SH_2( SH_1( x ) ) ) ) ) ) )
return FLOOR_POW_2( N );
#undef SH_1
#undef SH_2
#undef SH_3
#undef SH_4
#undef SH_5
#undef SH_6
#undef FLOOR_POW_2
}
// Branchless binary search over the cumulative weight array: returns the
// index of the last element of arr that is <= val, i.e. the particle whose
// cumulative interval contains val. The first probe at the largest power of
// two below N_PARTICLES folds any non-power-of-two tail into the range.
// NOTE(review): when N_PARTICLES is an exact power of two the first probe
// reads arr[N_PARTICLES], one past the last element written by the inclusive
// scan in Filter::normalize — verify that slot is initialized.
// NOTE(review): the loop stops at stride 2 (i > 1), so the final stride-1
// refinement is never made — confirm the intended search resolution.
__device__ int findInterval( float val, const float *arr )
{
int idx = 0;
int n = floor_pow_2( N_PARTICLES );
idx += ( arr[ n ] <= val )*( N_PARTICLES - n );
// branchless binary search: each step conditionally advances by the stride
for (int i = n/2; i>1; i/=2)
idx += (arr[ idx+i ]<= val)*i;
// returns index of the last element in arr smaller than val
return idx;
}
|
21,602 | #include "includes.h"
#define KERNEL_RADIUS 31
#define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1)
__constant__ float c_Kernel[ KERNEL_LENGTH ];
// Separable row (X-direction) convolution with a 63-tap kernel held in
// c_Kernel constant memory, over a 3D volume (imageW x imageH x imageD).
// Each block processes ROWS_RESULT_STEPS tiles of one row plus
// ROWS_HALO_STEPS halo tiles on each side; blockIdx.z selects the slice.
// outofbounds selects the border policy: 0 = zero padding, 1 = constant
// (outofboundsvalue), anything else = replicate the first/last pixel of the
// line. Requires KERNEL_RADIUS <= ROWS_HALO_STEPS*ROWS_BLOCKDIM_X (halo must
// cover the kernel reach) — the ROWS_* constants live in includes.h.
__global__ void convolutionX_63_Kernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int outofbounds, float outofboundsvalue )
{
__shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
const int baseZ = blockIdx.z;
// offsets (relative to the shifted d_Src below) of the line's first and
// last pixel, used by the replicate border policy
const int firstPixelInLine = ROWS_BLOCKDIM_X * ROWS_HALO_STEPS - threadIdx.x;
const int lastPixelInLine = imageW - baseX - 1;
// set the input and output arrays to the right offset (actually the output is not at the right offset, but this is corrected later)
d_Src += baseZ * imageH * imageW + baseY * imageW + baseX;
d_Dst += baseZ * imageH * imageW + baseY * imageW + baseX;
// Load main data
// Start copying after the ROWS_HALO_STEPS, only the original data that will be convolved
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
{
if ( outofbounds == 0 )
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
else if ( outofbounds == 1 )
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : outofboundsvalue;
else
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[ lastPixelInLine ];
}
// Load left halo
// If the data fetched is outside of the image (note: baseX can be <0 for the first block) , use a zero-out of bounds strategy
#pragma unroll
for (int i = 0; i < ROWS_HALO_STEPS; i++)
{
if ( outofbounds == 0 )
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
else if ( outofbounds == 1 )
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : outofboundsvalue;
else
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[ firstPixelInLine ];
}
//Load right halo
#pragma unroll
for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++)
{
if ( outofbounds == 0 )
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
else if ( outofbounds == 1 )
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : outofboundsvalue;
else
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[ lastPixelInLine ];
}
//Compute and store results
__syncthreads();
// This pixel is not part of the image and does not need to be convolved.
// Safe to return here: the only barrier is already behind us, and threads of
// out-of-range rows still contributed their shared-memory loads above.
if ( baseY >= imageH )
return;
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
{
if (imageW - baseX > i * ROWS_BLOCKDIM_X)
{
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j];
}
d_Dst[i * ROWS_BLOCKDIM_X] = sum;
}
}
}
21,603 | /*
* Alexandre Maros - 2016
*
* Cuda Matrix Multiplication with Shared Memory.
*
* nvcc cuda_matrix_shared.cu -o cs.o
*
* Implemented by Alexandre Maros for learning purposes.
* A version of this code using Global Memory is in here:
* https://github.com/alepmaros/cuda_matrix_multiplication
*
* Distributed under the MIT Lincese.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
// 32x32 Threads in a block.
#define NTHREADS_X 32
#define NTHREADS_Y 32
#define THREADS_PER_BLOCK NTHREADS_X * NTHREADS_Y
/* A macro used for error checking in CUDA function calls
* Credit to: http://stackoverflow.com/a/14038590 for the gpuErrchk macro.
*/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error with its source location and, unless abort is false,
// terminates the process with the error code. Used through the gpuErrchk macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
/* Tiled matrix multiplication C = A * B using shared memory.
 * Launch: 2D grid of (NTHREADS_X x NTHREADS_Y) blocks covering C; each block
 * walks nBlocks tiles along the shared dimension (a_ncolumns). Each thread
 * loads one element of the A tile and one of the B tile per step, then
 * accumulates the partial dot product for its C element. Handles matrices
 * whose dimensions are not multiples of the tile size via the bounds checks
 * on both the loads and the accumulation length. */
__global__ void matrix_mul(int *a, int *b, int *c, int a_ncolumns, int c_nlines,
int c_ncolumns, int nBlocks)
{
int i, z, sum = 0;
/* How many multiplications there will be for each value in Matrix C
* This corresponds to the number of columns in Matrix A (or number of)
* lines in Matrix B
*/
int nMultiplications = a_ncolumns;
/* Each iteration of the block will multiply NTHREADS_Y values. This value
* Can be less then NTHREADS_Y if the number of a_ncolumns is not divisible
* by NTHREADS_Y. This value is used to control that.
*/
int multiplicationsInBlock = NTHREADS_Y;
int column = blockIdx.x * blockDim.x + threadIdx.x;
int line = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ int s_a[NTHREADS_Y][NTHREADS_X];
__shared__ int s_b[NTHREADS_Y][NTHREADS_X];
/* temporary line and temporary column
* Each thread is responsible for loading one value in the matrix A and
* Matrix B. These variables are used to hold which line and column of the
* original Matrices they are suppose to load. I also need to check if those
* values that they will load actually correspond to a valid position in the
* original Matrix.
*/
int a_tLine, a_tColumn, b_tLine, b_tColumn;
// z walks the tiles along the shared dimension
for (z = 0; z < nBlocks; z++)
{
// Load Matrix A tile (guarded: out-of-range entries are left unloaded,
// but the multiplicationsInBlock cap below keeps them unread)
a_tLine = (blockIdx.y * NTHREADS_Y + threadIdx.y);
a_tColumn = (z * NTHREADS_X + threadIdx.x);
if (a_tLine < c_nlines && a_tColumn < a_ncolumns)
{
s_a[threadIdx.y][threadIdx.x] = a[ (a_ncolumns * a_tLine) + a_tColumn];
}
// Load Matrix B tile
b_tLine = (z * NTHREADS_Y + threadIdx.y);
b_tColumn = (blockIdx.x * NTHREADS_X + threadIdx.x);
if (b_tLine < a_ncolumns && b_tColumn < c_ncolumns)
{
s_b[threadIdx.y][threadIdx.x] = b[ (c_ncolumns * b_tLine) + b_tColumn ];
}
// tiles fully loaded before anyone reads them
__syncthreads();
/* Checkin to see if that thread actually belongs to a valid position in
* the Matrix C
*/
if (column < c_ncolumns && line < c_nlines)
{
// last (partial) tile: only multiply the remaining valid entries
if (nMultiplications < NTHREADS_Y)
{
multiplicationsInBlock = nMultiplications;
}
for (i = 0; i < multiplicationsInBlock; i++)
{
sum += s_a[threadIdx.y][i] * s_b[i][threadIdx.x];
}
nMultiplications -= NTHREADS_Y;
}
// all reads done before the next iteration overwrites the tiles
__syncthreads();
}
/* Checkin to see if that thread actually belongs to a valid position in
* the Matrix C
*/
if (column < c_ncolumns && line < c_nlines)
{
c[line * c_ncolumns + column] = sum;
}
}
/* Reads A (a_nlines x a_ncolumns) and B (b_nlines x b_ncolumns) from stdin,
 * multiplies them on the GPU with the shared-memory kernel, and prints C.
 * Fixes vs. original: scanf results for the dimensions are checked (they
 * were silently ignored), host mallocs are checked, integer ceil-division
 * replaces std::ceil (which was used without including <cmath>), and the
 * CUDA events are destroyed before exit. */
int main()
{
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int a_nlines, a_ncolumns;
    int b_nlines, b_ncolumns;
    int c_nlines, c_ncolumns;
    size_t a_size, b_size, c_size;
    int i, j;
    cudaEvent_t start, stop;
    gpuErrchk( cudaEventCreate(&start) );
    gpuErrchk( cudaEventCreate(&stop) );
    /* fix: bail out instead of proceeding with uninitialized dimensions */
    if (scanf("%d", &a_nlines) != 1 || scanf("%d", &a_ncolumns) != 1 ||
        scanf("%d", &b_nlines) != 1 || scanf("%d", &b_ncolumns) != 1)
    {
        fprintf(stderr, "Failed to read matrix dimensions\n");
        return EXIT_FAILURE;
    }
    c_nlines = a_nlines;
    c_ncolumns = b_ncolumns;
#ifdef __DEBUG
    printf("a_nlines: %d\na_ncolumns: %d\nb_nlines: %d\nb_ncolumns: %d\nc_nlines: %d\nc_ncolumns: %d\n", a_nlines, a_ncolumns, b_nlines, b_ncolumns, c_nlines, c_ncolumns);
#endif
    if ( a_ncolumns != b_nlines )
    {
        printf("Number of columns in Matrix A should be equals to number of lines in Matrix B\n");
        return EXIT_FAILURE;
    }
    a_size = a_nlines * a_ncolumns * sizeof(int);
    b_size = b_nlines * b_ncolumns * sizeof(int);
    c_size = c_nlines * c_ncolumns * sizeof(int);
    gpuErrchk( cudaMalloc((void **) &d_a, a_size) );
    gpuErrchk( cudaMalloc((void **) &d_b, b_size) );
    gpuErrchk( cudaMalloc((void **) &d_c, c_size) );
    a = (int *)malloc(a_size);
    b = (int *)malloc(b_size);
    c = (int *)malloc(c_size);
    /* fix: check host allocations before writing through them */
    if (a == NULL || b == NULL || c == NULL)
    {
        fprintf(stderr, "Host allocation failed\n");
        return EXIT_FAILURE;
    }
    memset(c, 0, c_size);
    for (i = 0; i < a_nlines; i++)
    {
        for (j = 0; j < a_ncolumns; j++)
        {
            scanf("%d", &a[i * a_ncolumns + j]);
        }
    }
    for (i = 0; i < b_nlines; i++)
    {
        for (j = 0; j < b_ncolumns; j++)
        {
            scanf("%d", &b[i * b_ncolumns + j]);
        }
    }
    gpuErrchk( cudaMemcpy(d_a, a, a_size, cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(d_b, b, b_size, cudaMemcpyHostToDevice) );
    /* grid covers C; integer ceil-division (fix: std::ceil needed <cmath>) */
    dim3 tbloco = dim3(
        (c_ncolumns + NTHREADS_X - 1) / NTHREADS_X,
        (c_nlines + NTHREADS_Y - 1) / NTHREADS_Y,
        1
    );
    dim3 tthreads = dim3(
        NTHREADS_X,
        NTHREADS_Y,
        1
    );
#ifdef __DEBUG
    printf("tbloco.x: %d tbloco.y: %d tbloco.z: %d\n", tbloco.x, tbloco.y, tbloco.z);
    printf("tthreads.x: %d tthreads.y: %d\n", tthreads.x, tthreads.y);
#endif
    cudaEventRecord(start);
    /* last argument: number of tiles along the shared dimension */
    matrix_mul<<<tbloco,tthreads>>>(d_a, d_b, d_c, a_ncolumns, c_nlines,
        c_ncolumns, (a_ncolumns + NTHREADS_X - 1) / NTHREADS_X);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaEventRecord(stop) );
    gpuErrchk( cudaMemcpy(c, d_c, c_size, cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaEventSynchronize(stop) );
#ifndef __NO_OUTPUT
    // print Matrix
    for (i = 0; i < c_nlines; i++)
    {
        for (j = 0; j < c_ncolumns; j++)
        {
            printf("%d ", c[i * c_ncolumns + j]);
        }
        printf("\n");
    }
    printf("\n");
#endif
#ifdef __TIME
    float milliseconds = 0;
    gpuErrchk( cudaEventElapsedTime(&milliseconds, start, stop) );
    printf("%.5f\n", milliseconds);
#endif
    /* fix: events were never destroyed */
    gpuErrchk( cudaEventDestroy(start) );
    gpuErrchk( cudaEventDestroy(stop) );
    free(a); free(b); free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
21,604 | /* Kernel for vector squaring */
/* Copies (with float->int truncation) the first num elements of in[] into
 * ret[]. Launch: single block, one thread per element.
 * Fix: the original indexed ret/in unconditionally, so launching with more
 * threads than elements wrote out of bounds; the num parameter (previously
 * unused) now guards the access.
 * NOTE(review): despite the "vector squaring" header comment and the
 * red/green/blue parameters, nothing is squared and the channel arrays are
 * never touched — confirm the intended behavior with the author. */
__global__ void threechannel(float in[], int red[], int green[], int blue[], int ret[], int num)
{
    int i = threadIdx.x;
    if (i < num)
        ret[i] = in[i];
}
|
21,605 | #include <stdio.h>
#define N 32
// Reduces one TxT tile of the score matrix (Y students x X questions, row-major)
// into both per-row sums (out_stud, one per student) and per-column sums
// (out_que, one per question), accumulated across blocks with atomicAdd.
// Launch (see solveGPU): grid (X/T, Y/T), block (T/4, T) — blockDim.x is T/4
// because each thread loads four consecutive ints as one int4.
// NOTE(review): tile is declared T*(T+1) as if padded against bank conflicts,
// but the int4 stores below pack rows contiguously at stride T, so the extra
// T ints are unused slack — confirm whether padding was intended.
template<int T>
__global__ void reduce(const int *in, float *out_stud, float *out_que) {
__shared__ int tile[T * (T + 1)];
// x4 indexes int4-sized (16-byte) chunks; y is the global row
int x4 = blockIdx.x*(T/4) + threadIdx.x;
int y = blockIdx.y* T + threadIdx.y;
int width = gridDim.x*(T/4);
// vectorized load of 4 ints; requires 16-byte alignment of in
int4 val = reinterpret_cast<const int4*>(in)[y*width + x4];
reinterpret_cast<int4*>(tile)[(threadIdx.y*(T/4)) + threadIdx.x] = val;
__syncthreads();
// the threadIdx.x == 0 column of T threads performs both reductions:
// thread (0, ty) sums tile row ty (a student) and tile column ty (a question)
if (threadIdx.x == 0) {
int sum = 0;
for (int i = 0; i < T; i++) {
sum += tile[threadIdx.y*T + i];
}
atomicAdd(out_stud + y, sum);
sum = 0;
int x = blockIdx.x*T + threadIdx.y;
for (int i = 0; i < T; i++) {
sum += tile[i*T + threadIdx.y];
}
atomicAdd(out_que + x, sum);
}
}
// Scales every element of arr by 1/count (turns the accumulated sums into
// averages). Launch: one thread per element, no bounds guard — the grid must
// cover the array exactly.
__global__
void divide(float *arr, float count) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    arr[idx] = arr[idx] / count;
}
// Computes per-student and per-question average scores on the GPU:
// zeroes the accumulators, tile-reduces the Y x X score matrix into sums,
// then divides by the number of questions / students respectively.
// Y and X must be divisible by N (= 32), per the caller's contract.
void solveGPU(
    const int *results, // questions * students
    float *avg_stud, // score per student: total / questions -> len Y
    float *avg_que, // score per question: total / students -> len X
    const int Y, // students: always divisible by 32
    const int X // questions: always divisible by 32
) {
    // clear the accumulators reduce<> atomically adds into
    cudaMemset(avg_stud, 0, Y*sizeof(avg_stud[0]));
    cudaMemset(avg_que, 0, X*sizeof(avg_que[0]));
    // one block per NxN tile; N/4 x N threads (int4 loads along rows)
    dim3 grid(X/N, Y/N);
    dim3 block(N/4, N);
    reduce<N><<<grid, block>>>(results, avg_stud, avg_que);
    // turn sums into averages
    divide<<<Y/N, N>>>(avg_stud, X);
    divide<<<X/N, N>>>(avg_que, Y);
    if (cudaPeekAtLastError() != cudaSuccess) {
        printf("Error: %s\n", cudaGetErrorString(cudaGetLastError()));
    }
}
|
21,606 | #include <stdio.h>
#include <stdlib.h>
const int threadsPerBlock = 4;
// Square matrix multiplication with dimention that has power of 8
/* Computes row i of C = A*B for an n x n matrix: block j handles C[i][j],
 * thread k contributes A[i][k]*B[k][j], and a shared-memory tree reduction
 * sums the products. blockDim.x must be a power of two (it is n ==
 * threadsPerBlock here).
 * Fix: the original reduction did cache[k] += cache[k + iter] for EVERY
 * thread, reading past the end of the shared array (k + iter up to
 * 3*blockDim/2 - 1) and racing with threads still reading the upper half;
 * the `k < iter` guard restores the standard reduction. The final store is
 * now done by thread 0 only. */
__global__ void kernel(float *a, float *b, float *c, int i, int n){
    __shared__ float cache[threadsPerBlock];
    int k = threadIdx.x; // index along the shared dimension
    int j = blockIdx.x;  // output column
    int a_idx = i*n+k;   // A is row-major: element (i, k)
    int b_idx = j+k*n;   // B is row-major: element (k, j)
    if(a_idx < n*n){
        cache[k] = a[a_idx] * b[b_idx];
    }
    __syncthreads();
    // power-of-two tree reduction over cache[0..blockDim.x)
    int iter = blockDim.x/2;
    while( iter != 0 ){
        if( k < iter )
            cache[k] += cache[k + iter];
        __syncthreads();
        iter /= 2;
    }
    if( k == 0 )
        c[j+n*i] = cache[0];
}
// Prints an n x n row-major matrix, tab-separated with 3 decimal places,
// one row per line.
void print_mat(float *a, int n){
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col)
            printf("%.3f\t", a[col + n * row]);
        printf("\n");
    }
}
// Prints n floats on one tab-separated line, 3 decimal places each.
void print_vec(float *a, int n){
    for (int idx = 0; idx < n; ++idx)
        printf("%.3f\t", a[idx]);
    printf("\n");
}
// Driver: multiplies two n x n matrices (n = threadsPerBlock) on the GPU,
// launching one kernel per output row, then prints the product.
// NOTE(review): no CUDA error checking anywhere — a failed malloc or launch
// is silent; consider wrapping each call.
int main(int argc, char** argv){
int n = threadsPerBlock;
int matDim = n*n; // total element count of each square matrix
// dim3 grid(n, n);
float *a_host, *b_host, *c_host;
a_host = (float*) malloc(matDim*sizeof(float));
b_host = (float*) malloc(matDim*sizeof(float));
c_host = (float*) malloc(matDim*sizeof(float));
float *a_dev, *b_dev, *c_dev;
cudaMalloc((float**) &a_dev, matDim*sizeof(float));
cudaMalloc((float**) &b_dev, matDim*sizeof(float));
cudaMalloc((float**) &c_dev, matDim*sizeof(float));
// Fill A with 1..matDim and B with 0..matDim-1, row-major.
for(int i = 0; i<matDim; i++){
a_host[i] = i+1;
b_host[i] = i;
}
cudaMemcpy(a_dev, a_host, matDim*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(b_dev, b_host, matDim*sizeof(float), cudaMemcpyHostToDevice);
// i row
// j col
// One launch per output row i: n blocks (one per column) x n threads (one per k).
for(int i = 0; i < n; i++){
kernel<<<n, n>>>(a_dev, b_dev, c_dev, i, n);
}
// Blocking device-to-host copy also synchronizes with the launches above.
cudaMemcpy(c_host, c_dev, matDim*sizeof(float), cudaMemcpyDeviceToHost);
printf("----\n");
print_mat(c_host, n);
//Free the mem allocation
free(a_host);
free(b_host);
free(c_host);
cudaFree(a_dev);
cudaFree(b_dev);
cudaFree(c_dev);
}
|
21,607 | #include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/stat.h>
#define BLOCK_DIM 1024
#define SEED 26
// Element-wise vector addition: c[i] = a[i] + b[i] for the first `count`
// doubles; threads past the end exit without touching memory.
__global__ void data(double *a, double *b, double *c, int count) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= count)
        return;
    c[idx] = a[idx] + b[idx];
}
// Benchmark driver: adds two random vectors of `argv[1]` doubles on the GPU,
// verifies against a CPU reference, and prints two timings —
//   inclusive: H2D copies + kernel + D2H copy, exclusive: kernel only.
// NOTE(review): events are never destroyed and event-creation/launch errors
// are unchecked; acceptable for a throwaway benchmark.
int main (int argc, char *argv[]) {
if (argc != 2) {
printf("Usage: %s number\n", argv[0]);
exit(1);
}
// Initialize host variables
int i = 0;
double *hA, *hB, *hC, *refC; // inputs, GPU result, CPU reference
int num = atoi(argv[1]);
long size = num*sizeof(double);
hA = (double *)malloc(size);
hB = (double *)malloc(size);
hC = (double *)malloc(size);
refC = (double *)malloc(size);
// Timing variables;
cudaEvent_t incl_start, incl_end;
cudaEvent_t excl_start, excl_end;
float time_incl, time_excl;
cudaEventCreate(&incl_start);
cudaEventCreate(&incl_end);
cudaEventCreate(&excl_start);
cudaEventCreate(&excl_end);
// Populate hA, hB, refC with integers in [-10, 9] and the expected sums
srand(SEED);
for (i=0; i<num; i++) {
hA[i] = -10 + rand() % 20;
hB[i] = -10 + rand() % 20;
refC[i] = hA[i] + hB[i];
}
// Device memory allocation
double *dA, *dB, *dC;
cudaMalloc((void **)&dA, size);
cudaMalloc((void **)&dB, size);
cudaMalloc((void **)&dC, size);
// Inclusive timing starts before the host-to-device transfers.
cudaEventRecord(incl_start, 0);
cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
// Launch kernel
// Ceil-divide so a partial final block covers the tail.
int GRID_DIM = (num % BLOCK_DIM == 0)
? num / BLOCK_DIM
: (int)(num / BLOCK_DIM) + 1;
dim3 dimGrid(GRID_DIM, 1, 1);
dim3 dimBlock(BLOCK_DIM, 1, 1);
// Exclusive timing brackets only the kernel itself.
cudaEventRecord(excl_start, 0);
data<<<dimGrid, dimBlock>>>(dA, dB, dC, num);
cudaEventRecord(excl_end, 0);
cudaEventSynchronize(excl_end);
cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);
cudaEventRecord(incl_end, 0);
cudaEventSynchronize(incl_end);
// Verify results (inputs are whole numbers, so a tight tolerance is safe)
for (i=0; i<num; i++) {
if (fabs(refC[i] - hC[i]) > 1e-12) {
printf("FAIL\n");
exit(1);
}
}
// Print metrics (cudaEventElapsedTime reports milliseconds)
cudaEventElapsedTime(&time_incl, incl_start, incl_end);
cudaEventElapsedTime(&time_excl, excl_start, excl_end);
//time_incl *= 1000;
//time_excl *= 1000;
//printf("Inclusive: %f\n", incl_diff);
//printf("Exclusive: %f\n", excl_diff);
//printf("Size = %d\n Inclusive Time = %f\n Exclusive Time = %f\n", num, time_incl, time_excl);
printf("%d\t%f\t%f\n", num, time_incl, time_excl);
// Cleanup
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
free(hA);
free(hB);
free(hC);
free(refC);
return 0;
}
|
21,608 | #include "includes.h"
// Rewrites each entry of corrData in place: values > -1 become 2 - 2*value,
// everything else becomes FLT_MAX (a "no match" sentinel).
// NOTE(review): 2 - 2*corr equals the squared L2 distance between unit
// vectors whose dot product is corr — confirm against the producer of
// corrData.  Launch uses 16x16 tiles; rows are bounded by numPts1, columns
// are assumed to exactly cover gridDim.y * 16 entries per row.
__global__ void ComputeL2Distance(float *corrData, int numPts1) {
    const int p1 = blockIdx.x * 16 + threadIdx.x;
    const int p2 = blockIdx.y * 16 + threadIdx.y;
    if (p1 >= numPts1)
        return;
    const int idx = p1 * gridDim.y * 16 + p2;
    const float corr = corrData[idx];
    corrData[idx] = (corr > -1) ? (2 - 2 * corr) : FLT_MAX;
} |
21,609 | #include <stdio.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <vector>
#include <math.h>
#include <string>
#include <stdlib.h>
// qsub -I -q coc-ice -l nodes=1:ppn=1:nvidiagpu,walltime=2:00:00,pmem=2gb
// qsub -I -q coc-ice -l nodes=1:ppn=1:nvidiagpu:teslap100,walltime=2:00:00,pmem=2gb
// ssh -x nolivares3@coc-ice.pace.gatech.edu
using namespace std;
// global code runs on the device
// need to have one thread correspond to multiple gridpoints
// One explicit 2-D heat-diffusion step on an x*y grid (one thread per cell):
//   tNew[i] = tOld[i] + k * (sum of the 4 neighbours - 4 * tOld[i])
// Out-of-grid neighbours are replaced by the cell's own value (each corner/
// edge branch below substitutes tOld[i] for the missing neighbour).  Cells
// whose original temperature differs from the ambient start temp `st` are
// fixed heaters and are reset to tOrig at the end of the step.
__global__ void gpuIt(float *tNew,float *tOld,float *tOrig,int x,int y,int z,float k,float st) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
// Guard: the grid may launch more threads than there are cells.
if(i < x*y*z){
if(i == 0){ // top left corner
tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i] + tOld[i] + tOld[i+x] - 4*tOld[i]);
//tNew[i] = 1;
}
else if(i == x-1){ // top right corner
tNew[i] = tOld[i] + k*(tOld[i] + tOld[i-1] + tOld[i] + tOld[i+x] - 4*tOld[i]);
//tNew[i] = 3;
}
else if(i == x*y - 1){ // bottom right corner
tNew[i] = tOld[i] + k*(tOld[i] + tOld[i-1] + tOld[i-x] + tOld[i] - 4*tOld[i]);
//tNew[i] = 5;
}
else if(i == x*y - x){ // bottom left corner
tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i] + tOld[i-x] + tOld[i] - 4*tOld[i]);
//tNew[i] = 7;
}
else if(i%x == 0){ // left side
tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i] + tOld[i-x] + tOld[i+x] - 4*tOld[i]);
//tNew[i] = 8;
}
else if(i%x == x-1){ // right side
tNew[i] = tOld[i] + k*(tOld[i] + tOld[i-1] + tOld[i-x] + tOld[i+x] - 4*tOld[i]);
//tNew[i] = 4;
}
else if(i - x < 0){ // top row (non-corner)
tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i-1] + tOld[i] + tOld[i+x] - 4*tOld[i]);
//tNew[i] = 2;
}
else if(i + x > x*y){ // bottom row (non-corner)
tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i-1] + tOld[i-x] + tOld[i] - 4*tOld[i]);
//tNew[i] = 6;
}
else{ // interior cell: all four neighbours exist
tNew[i] = tOld[i] + k*(tOld[i+1] + tOld[i-1] + tOld[i-x] + tOld[i+x] - 4*tOld[i]);
//tNew[i] = 9;
}
//tNew[i] = i; // for debugging
// replace heaters: fixed-temperature cells keep their original value
if(tOrig[i] != st){
tNew[i] = tOrig[i];
}
//tNew[i] = i%x;
}
}
// thisll work for 3d, less if/elses this way
// One explicit 3-D heat-diffusion step on an x*y*z grid (one thread per cell):
//   tNew[i] = tOld[i] + k * (sum of the 6 neighbours - 6 * tOld[i])
// Missing neighbours at corners/edges/faces are replaced by the cell's own
// value, exactly as in the 2-D kernel.  The branch ladder classifies each
// cell: 8 corners first, then 12 edges, then 6 faces, then the interior.
// Order matters — later branches assume earlier cases were already excluded.
// Heater cells (tOrig differs from the start temp `st`) are reset at the end.
__global__ void gpuIt3(float *tNew,float *tOld,float *tOrig,int x,int y,int z,float k,float st) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < x*y*z){
if(i == 0){ // front upper left corner
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i]+tOld[i+x]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 0;
}
else if(i == x-1){ // front upper right corner
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i]+tOld[i+x]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = .1;
}
else if(i == x*y-1){ // front lower right corner
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i-x]+tOld[i]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = .2;
}
else if(i == x*y-x){ // front lower left corner
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i-x]+tOld[i]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = .3;
}
else if(i == x*y*(z-1) ){ // back upper left corner
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i]+tOld[i+x]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = .4;
}
else if(i == x*y*(z-1) + x-1){ // back upper right corner
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i]+tOld[i+x]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = .5;
}
else if(i == x*y*z-1){ // back lower right corner
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i-x]+tOld[i]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = .6;
}
else if(i == x*y*z - x){ // back lower left corner
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i-x]+tOld[i]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = .7;
}
else if(i - x < 0){ // front top edge
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i]+tOld[i+x]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = .8;
}
else if(i%x == x-1 && i<x*y){ // front right edge
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i-x]+tOld[i+x]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = .9;
}
else if(i+x > x*y && i < (x*y)){ // front bottom edge
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i-x]+tOld[i]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 1;
}
else if(i%x == 0 && i<x*y){ // front left edge
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i-x]+tOld[i+x]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 2;
}
else if(i > (x*y*z - x*y) && i < (x*y*z - (x-1)*y)){ // back top edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i]+tOld[i+x]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 3;
}
else if(i%x == x-1 && i > (x*y*(z-1))){ // back right edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i-x]+tOld[i+x]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = 4;
}
else if(i+x > x*y*z){ // back bottom edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i-x]+tOld[i]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 5;
}
else if(i%x == 0 && i > x*y*(z-1)){ // back left edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i-x]+tOld[i+x]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 6;
}
// the corner sides going front to back
else if(i%(x*y) == 0){ // upper left edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i]+tOld[i+x]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 7;
}
else if(i%(x*y) == x-1){ // upper right edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i]+tOld[i+x]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = 8;
}
else if(i%(x*y) == x*y-1){ // lower right edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i-x]+tOld[i]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = 9;
}
else if(i%(x*y) == x*y-x){ // lower left edge
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i-x]+tOld[i]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 9.1;
}
// else ifs here are vague because other options already completed
else if(i < x*y){ // front face
tNew[i] = tOld[i]+k*(tOld[i]+tOld[i+(x*y)]+tOld[i-x]+tOld[i+x]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 1.1;
}
else if(i > x*y*(z-1)){ // back face
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i]+tOld[i-x]+tOld[i+x]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 1.2;
}
else if(i%(x*y) < x){ // top face
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i]+tOld[i+x]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 1.3;
}
else if(i%(x*y) > x*(y-1)){ // bottom face
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i-x]+tOld[i]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 1.4;
}
else if(i%(x) == x-1){ // right face
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i-x]+tOld[i+x]+tOld[i-1]+tOld[i]-6*tOld[i]);
//tNew[i] = 1.5;
}
else if(i%(x) == 0){ // left face
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i-x]+tOld[i+x]+tOld[i]+tOld[i+1]-6*tOld[i]);
//tNew[i] = 1.6;
}
else{ // all in the middle
// front back top bottom left right
tNew[i] = tOld[i]+k*(tOld[i-(x*y)]+tOld[i+(x*y)]+tOld[i-x]+tOld[i+x]+tOld[i-1]+tOld[i+1]-6*tOld[i]);
}
//tNew[i] = i%(x*y);
// replace heaters: fixed-temperature cells keep their original value
if(tOrig[i] != st){
tNew[i] = tOrig[i];
}
}
}
// blockIdx.x for blocks, threadIdx.x for threads
// name<<<blocks,threads per block>>>
//
// host is cpu
// Heat-diffusion driver: parses a config file (argv[1]) describing a 2-D or
// 3-D grid, its diffusion constant k, time-step count, ambient temperature,
// and a list of fixed-temperature "heater" rectangles/boxes; then runs the
// simulation on the GPU (ping-ponging d_new/d_old between launches) and
// writes the final grid to heatDoutput.csv.
// NOTE(review): argv[1] is dereferenced by ifstream before the NULL check
// below, and tOrig/tOld/tNew are stack VLAs sized by the grid — large grids
// may overflow the stack.  Both predate this review.
int main(int argc, char** argv) {
//cout<<argv[1]<<endl;
int dim;
ifstream inFile(argv[1]);
string line;
string dims;
//string heaters;
getline(inFile,line);
if(argv[1]==NULL){
cout<<"Error, no config file specified."<<endl;
}
// move to next nums: skip comment ('#') and near-empty lines
while(line[0] == '#' || line.length() == 1){getline(inFile,line);}
// gets 2d/3d as an int of 2 or 3
if(line[0] != '#' && line.length() != 1){
dim = line[0] - 48; // ascii digits start at 48
//cout<<"dim "<<dim<<endl;
}
getline(inFile,line);
// k value (diffusion constant)
while(line[0] == '#' || line.length() == 1){getline(inFile,line);}
int delimPos = line.find('\n');
string kstring = line.substr(0,delimPos);
float K = strtof(&kstring[0],NULL);
//cout<<"K "<<K<<endl;
// timesteps
getline(inFile,line);
while(line[0] == '#' || line.length() == 1){getline(inFile,line);}
delimPos = line.find('\n');
string ts = line.substr(0,delimPos);
float TS = strtof(&ts[0],NULL);
//cout<<"TS "<<TS<<endl;
// dims: comma-separated "x,y" (2-D) or "x,y,z" (3-D)
getline(inFile,line);
while(line[0] == '#' || line.length() == 1){getline(inFile,line);}
delimPos = line.find(',');
string xd = line.substr(0,delimPos);
float XD = strtof(&xd[0],NULL);
//cout<<"XD "<<XD<<endl;
float YD, ZD;
if(dim == 3){
int delimPos2 = line.find(',',delimPos+1);
string yd = line.substr(delimPos+1,delimPos2);
YD = strtof(&yd[0],NULL);
//cout<<"YD "<<YD<<endl;
string zd = line.substr(delimPos2+1,line.length());
ZD = strtof(&zd[0],NULL);
//cout<<"ZD "<<ZD<<endl;
//cout<<"d1 "<<delimPos<<" d2 "<<delimPos2<<endl;
}else if (dim == 2){ // 2d only needs y; z collapses to 1 layer
string yd = line.substr(delimPos+1,line.length());
YD = strtof(&yd[0],NULL);
//cout<<"YD "<<YD<<endl;
ZD = 1;
}else{
if(argv[1]!=NULL){
cout<<"Config file read error"<<endl;
}
}
// starting (ambient) temp
getline(inFile,line);
while(line[0] == '#' || line.length() == 1){getline(inFile,line);}
delimPos = line.find('\n');
string startstring = line.substr(0,delimPos);
float StTmp = strtof(&startstring[0],NULL);
//cout<<"StTmp "<<StTmp<<endl;
// heaters: each remaining line is "x,y,w,h,temp" (2-D) or
// "x,y,z,w,h,d,temp" (3-D); hDims packs the geometry, hTemps the values
int hDims[300];
float hTemps[75];
char*dup; // mutable copy so strtok can be used on the line
int count = 0;
while(getline(inFile,line)){
if(line[0] != '#' && line.length() > 1){
if(dim == 2){
dup = new char[line.size() + 1];
copy(line.begin(),line.end(),dup);
dup[line.size()] = '\0';
hDims[0+count*4] = atoi(strtok(dup,","));
//cout<<"this boi"<<hDims[0+count*4]<<endl;
for(int i = 1; i < 4; i++){
hDims[i+count*4] = atoi(strtok(NULL,","));
}
hTemps[count] = atof(strtok(NULL,","));
delete dup;
count++;
}else{ // for 3d
dup = new char[line.size() + 1];
copy(line.begin(),line.end(),dup);
dup[line.size()] = '\0';
hDims[0+count*6] = atoi(strtok(dup,","));
//cout<<"this boi"<<hDims[0+count*4]<<endl;
for(int i = 1; i < 6; i++){
hDims[i+count*6] = atoi(strtok(NULL,","));
}
hTemps[count] = atof(strtok(NULL,","));
delete dup;
count++;
}
}
}
//for(int i = 0; i < count*4; i++){
// cout<<hDims[i]<<endl;
//}
//cout<<"htemps 0 and 1 "<<hTemps[0]<<" "<<hTemps[1]<<endl;
// use count to figure out how many heaters there are
inFile.close();
// k can be from 0 to 1/(number of neighbors)
float k = K;
int xDim = XD;
int yDim = YD;
int zDim = ZD;
int timeSteps = TS;
int gSize = xDim*yDim*zDim; // -1 for zero indexing
// Host grids: original (heater layout), previous step, next step.
float tOrig[gSize];
float tOld[gSize];
float tNew[gSize];
//cout<<"StTmp "<<StTmp<<endl;
// sets starting temp for nodes
for(int i = 0; i < gSize; i++){
tOrig[i] = StTmp + 0.0;
}
// place heaters: stamp each heater rectangle/box into tOrig
int hLoc; // heater origin as one flattened coordinate
int hWidth,hHeight,hDepth;
if(zDim == 1){
for(int i = 0; i < count; i++){
hLoc = hDims[0 + 4*i] + hDims[1 + 4*i]*xDim;
hWidth = hDims[2 + 4*i];
hHeight = hDims[3 + 4*i];
for(int j = 0; j < hHeight; j++){
for(int k = 0; k < hWidth; k++){
//cout<<"k "<<k<<" j "<<j<<" hTemps[c] "<<hTemps[i]<<endl;
tOrig[hLoc+k + xDim*j] = hTemps[i];
//cout<<"tOrig[]"<<tOrig[hLoc+k + xDim*j]<<endl;
}
}
}
}if(zDim > 1){ // 3d
for(int i = 0; i < count; i++){
hLoc = hDims[0 + 6*i] + hDims[1 + 6*i]*xDim + hDims[2 + 6*i]*xDim*yDim;
hWidth = hDims[3 + 6*i];
hHeight = hDims[4 + 6*i];
hDepth = hDims[5 + 6*i];
for(int h = 0; h < hDepth; h++){
for(int j = 0; j < hHeight; j++){
for(int k = 0; k < hWidth; k++){
//cout<<"k "<<k<<" j "<<j<<" hTemps[c] "<<hTemps[i]<<endl;
tOrig[hLoc+k + xDim*j + xDim*yDim*h] = hTemps[i];
//cout<<"tOrig[]"<<tOrig[hLoc+k + xDim*j]<<endl;
}
}
}
}
}
memcpy(tNew,tOrig, sizeof(tOld));
memcpy(tOld,tNew, sizeof(tOld));
// gets block count with maxed threads (1024 per block)
// each thread should handle one gridpoint at a time
float gS = gSize;
int BLOCKS = ceil(gS/1024);
//cout<<"BLOCKS "<<BLOCKS<<endl;
float *d_new, *d_old, *d_orig;// *d_temp;
cudaMalloc((void**)&d_new, gSize*sizeof(float)); // may need to be sizeof(gSize)
cudaMalloc((void**)&d_old, gSize*sizeof(float)); // but I don't think so
cudaMalloc((void**)&d_orig, gSize*sizeof(float));
cudaMemcpy(d_new, tNew, gSize*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_old, tOld, gSize*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_orig, tOrig, gSize*sizeof(float), cudaMemcpyHostToDevice);
// have to alternate calls, d_old = d_new doesn't work anymore for some reason
// (ping-pong: each step reads the previous step's output buffer)
// cout<<"timeSteps/2 "<<timeSteps/2<<endl;
if(zDim == 1){
for(int i = 0; i < timeSteps; i++){
gpuIt<<<BLOCKS,1024>>>(d_new,d_old,d_orig,xDim,yDim,zDim,k,StTmp);
cudaDeviceSynchronize(); // blocks all CPU until GPU done, TA says correct
//cout<<"2d Call "<<i<<endl;
i++;
if(i < timeSteps){
gpuIt<<<BLOCKS,1024>>>(d_old,d_new,d_orig,xDim,yDim,zDim,k,StTmp);
cudaDeviceSynchronize();
//cout<<"2d Call "<<i<<endl;
}
}
}
if(zDim > 1){
for(int i = 0; i < timeSteps; i++){
gpuIt3<<<BLOCKS,1024>>>(d_new,d_old,d_orig,xDim,yDim,zDim,k,StTmp);
cudaDeviceSynchronize(); // blocks all CPU until GPU done, TA says correct
//cout<<"3d Call "<<i<<endl;
i++;
if(i < timeSteps){
gpuIt3<<<BLOCKS,1024>>>(d_old,d_new,d_orig,xDim,yDim,zDim,k,StTmp);
cudaDeviceSynchronize();
//cout<<"3d Call "<<i<<endl;
}
}
}
// since d_new and d_old alternate, get the most recent one
if(timeSteps%2 == 1){
cudaMemcpy(tNew, d_new, gSize*sizeof(float), cudaMemcpyDeviceToHost);
}else{
cudaMemcpy(tNew, d_old, gSize*sizeof(float), cudaMemcpyDeviceToHost);
}
// outputs to terminal for easy viewing
//for(int h = 0; h < zDim; h++){
// for(int i = 0; i < yDim; i++){
// for(int j = 0; j < xDim; j++){
// cout<<tNew[j + i*xDim + h*xDim*yDim]<<" ";
// }
// cout<<endl;
// }
// cout<<endl;
//}
// Write the final grid as CSV: one row per y line, blank line between z slabs.
ofstream outFile;
outFile.open("heatDoutput.csv");
for(int h = 0; h < zDim; h++){
for(int i = 0; i < yDim; i++){
for(int j = 0; j < xDim; j++){
outFile<<tNew[j + i*xDim + h*xDim*yDim]<<", ";
}
outFile<<endl;
}
outFile<<endl;
}
outFile.close();
cudaFree(d_new);
cudaFree(d_old);
cudaFree(d_orig);
}
|
21,610 | #include "includes.h"
// Fills omega[i] = sqrt(1 + kSqr[i] + 3*lambda*sigma2 + 15*g*sigma4)
// for the first N entries; extra threads exit without touching memory.
__global__ void kernelGetOmega(const int N, double *omega, double *kSqr, const double sigma2, const double sigma4, const double lambda, const double g)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    const double arg = 1 + kSqr[idx] + 3 * lambda * sigma2 + 15 * g * sigma4;
    omega[idx] = sqrt(arg);
} |
21,611 |
/*******************************
1 - Install nvidia-cuda-toolkit
2 - Compile this program using:
nvcc add.cu -o add_cuda.out
*******************************/
/*
Program that runs block size dynamically. As the number of threads increase,
the number of blocks is determined as a function of threads and input size.
This provides a constant optimal performance even though the number of threads
change
*/
#include <iostream>
#include <math.h>
#include <ctime>
#include <cstdio>
//CUDA kernel to add elements of the matrix
// __global__ converts a function into a CUDA kernel
// Grid-stride element-wise addition: y[i] = x[i] + y[i] for all i < n.
// Works correctly for any grid size, including a single block.
__global__
void add(int n, float *x, float *y)
{
    // First element this thread owns, and the distance to its next one.
    int start = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int i = start; i < n; i += step)
        y[i] = x[i] + y[i];
}
// Benchmark: for block sizes 32..1024 (step 32), adds two 2^24-element
// vectors in unified (managed) memory and prints the elapsed wall time in ms.
// NOTE(review): std::clock measures CPU time, and page migration of managed
// memory is included in the measured interval.
int main(void)
{
for(int t = 32; t <= 1024; t+=32)
{
int N = 1<<24; // 2^24 elements
// Memory allocation in CUDA is done with cudaMallocManaged( , )
// (unified memory, accessible from both host and device)
float *x; float *y;
cudaMallocManaged( &x, N*sizeof(float) );
cudaMallocManaged( &y, N*sizeof(float) );
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
std::clock_t start = clock();
// Launch the 'add' kernel, which invokes it in the GPU
int blockSize = t;
int numBlocks = (N + blockSize - 1) / blockSize; // ceil-divide to cover N
std::cout << "BlockSize = " << t << ",NumBlocks = " << numBlocks << "\n";
add<<<numBlocks,blockSize>>>(N, x, y);
// Wait for the GPU to synchronize before accessign through host(CPU)
cudaDeviceSynchronize();
std::clock_t stop = clock();
int duration = 1000 * (stop - start) / (double)CLOCKS_PER_SEC; // ms
//std::cout << "Running time using " << t << " threads = " << duration << "\n";
std::cout << duration << "\n";
// Check for errors (all values should be 3.0f)
/*float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
*/
// Deallocating memory using cudaFree()
cudaFree(x);
cudaFree(y);
}
return 0;
}
|
21,612 | /*
Copyright 2018 - The OPRECOMP Project Consortium, Alma Mater Studiorum
Università di Bologna. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#ifndef FLOAT
#define FLOAT double
#endif
#define KERNEL_RADIUS 8
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
#define DATA_W 4096
#define DATA_H 4096
const int DATA_SIZE = DATA_W * DATA_H * sizeof(FLOAT);
const int KERNEL_SIZE = KERNEL_W * sizeof(FLOAT);
__device__ __constant__ FLOAT d_Kernel[KERNEL_W];
// 1-D horizontal convolution of a dataW x dataH image with the constant-
// memory kernel d_Kernel (radius kernelR).  One thread per output pixel;
// taps falling outside the row are skipped (zero boundary).
__global__
void convolutionRowGPU(
FLOAT *d_Result,
FLOAT *d_Data,
int dataW,
int dataH,
int kernelR
){
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= dataW * dataH)
        return;
    const int row = gid / dataW;
    const int col = gid - row * dataW;
    FLOAT acc = FLOAT(0.0f);
    for (int k = -kernelR; k <= kernelR; k++) {
        const int src = col + k;
        if (src >= 0 && src < dataW)
            acc += d_Data[row * dataW + src] * d_Kernel[kernelR - k];
    }
    d_Result[row * dataW + col] = acc;
}
// 1-D vertical convolution of a dataW x dataH image with the constant-
// memory kernel d_Kernel (radius kernelR).  One thread per output pixel;
// taps falling outside the column are skipped (zero boundary).
__global__
void convolutionColumnGPU(
FLOAT *d_Result,
FLOAT *d_Data,
int dataW,
int dataH,
int kernelR
){
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= dataW * dataH)
        return;
    const int row = gid / dataW;
    const int col = gid - row * dataW;
    FLOAT acc = FLOAT(0.0f);
    for (int k = -kernelR; k <= kernelR; k++) {
        const int src = row + k;
        if (src >= 0 && src < dataH)
            acc += d_Data[src * dataW + col] * d_Kernel[kernelR - k];
    }
    d_Result[row * dataW + col] = acc;
}
// Driver: builds a normalized Gaussian kernel, fills a DATA_W x DATA_H image
// with uniform random values (fixed seed), runs a separable row-then-column
// convolution on the GPU, and prints the result as comma-separated values.
// Fixes vs. original: exp() instead of expf() so no precision is lost when
// FLOAT is double; launch errors are surfaced; all four buffers are freed.
int main(int argc, char **argv){
    int i;
    FLOAT
        *h_Kernel,
        *h_DataA;
    FLOAT
        *d_DataA,
        *d_DataB;
    h_Kernel = (FLOAT *)malloc(KERNEL_SIZE);
    h_DataA = (FLOAT *)malloc(DATA_SIZE);
    cudaMalloc( (void **)&d_DataA, DATA_SIZE);
    cudaMalloc( (void **)&d_DataB, DATA_SIZE);
    // Gaussian taps over [-1, 1], normalized to sum to 1.
    FLOAT kernelSum = 0;
    for(i = 0; i < KERNEL_W; i++){
        FLOAT dist = (FLOAT)(i - KERNEL_RADIUS) / (FLOAT)KERNEL_RADIUS;
        h_Kernel[i] = exp(- dist * dist / 2);  // exp, not expf: full precision when FLOAT is double
        kernelSum += h_Kernel[i];
    }
    for(i = 0; i < KERNEL_W; i++)
        h_Kernel[i] /= kernelSum;
    // Deterministic random input.
    srand(5497);
    for(i = 0; i < DATA_W * DATA_H; i++)
        h_DataA[i] = (FLOAT)rand() / (FLOAT)RAND_MAX;
    cudaMemcpyToSymbol(d_Kernel, h_Kernel, KERNEL_SIZE);
    cudaMemcpy(d_DataA, h_DataA, DATA_SIZE, cudaMemcpyHostToDevice);
    int blockSize=256;
    int numBlocks = ((DATA_W * DATA_H)+blockSize-1)/blockSize;  // ceil-divide
    //for(i = 0; i < DATA_W * DATA_H; i++)
    //	printf("%.15f,", h_DataA[i]);
    // Separable convolution: rows first (A -> B), then columns (B -> A).
    convolutionRowGPU<<<numBlocks, blockSize>>>(
        d_DataB,
        d_DataA,
        DATA_W,
        DATA_H,
        KERNEL_RADIUS
    );
    convolutionColumnGPU<<<numBlocks, blockSize>>>(
        d_DataA,
        d_DataB,
        DATA_W,
        DATA_H,
        KERNEL_RADIUS
    );
    // Surface any launch/config error before trusting the output.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
    // Blocking copy also synchronizes with the kernels above.
    cudaMemcpy(h_DataA, d_DataA, DATA_SIZE, cudaMemcpyDeviceToHost);
    for(i = 0; i < DATA_W * DATA_H; i++)
        printf("%.15f,", h_DataA[i]);
    // Fix: the original leaked all four buffers.
    cudaFree(d_DataA);
    cudaFree(d_DataB);
    free(h_Kernel);
    free(h_DataA);
    return 0;
}
|
21,613 | /*
* madd_gpu.cu -- Device code for matrix additon benchmark
*
* Michael McThrow
*/
/* Row-major flat index of element (i, j) in a matrix with `cols` columns. */
#define get_element_index(i, j, cols) ((i) * (cols) + (j))
/*
 * Element-wise matrix addition: c = a + b for a rows x cols row-major matrix.
 * One thread per element, laid out on a 2-D grid.
 *
 * Fix: the original had no bounds guard, so any grid that over-covers the
 * matrix (the usual ceil-divide launch) read and wrote out of bounds.
 */
__global__ void madd_kernel(unsigned int *a, unsigned int *b, unsigned int *c,
	unsigned int rows, unsigned int cols)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (row < (int)rows && col < (int)cols) {
		int index = get_element_index(row, col, cols);
		c[index] = a[index] + b[index];
	}
}
|
21,614 | // See CUDA BY EXAMPLE for a basic gpu info display thing :)
|
21,615 | #include "includes.h"
// Fills a width x height grid of orthographic rays pointing in +z.
// Each ray occupies two float4 slots: [origin.xyz, tmin] and [dir.xyz, tmax].
// When rayMask is non-zero, its bit pattern is stored in the tmin slot
// (reinterpreted as float) instead of 0.
__global__ void createRaysOrthoKernel(float4* rays, int width, int height, float x0, float y0, float z, float dx, float dy, unsigned rayMask )
{
    const int rayx = blockIdx.x * blockDim.x + threadIdx.x;
    const int rayy = blockIdx.y * blockDim.y + threadIdx.y;
    if (rayx >= width)
        return;
    if (rayy >= height)
        return;
    const float tMinOrMask = rayMask ? __int_as_float(rayMask) : 0.0f;
    const int idx = rayy * width + rayx;
    rays[2 * idx]     = make_float4(x0 + rayx * dx, y0 + rayy * dy, z, tMinOrMask); // origin, tmin
    rays[2 * idx + 1] = make_float4(0, 0, 1, 1e34f);                                // dir, tmax
} |
21,616 | #include "includes.h"
// LookupTable backward pass: accumulates scaled gradOutput rows into
// gradWeight rows.  Inputs are assumed sorted so that equal indices are
// adjacent; one warp-group (blockDim.x lanes selected by threadIdx.y, 4
// groups per block) owns the first occurrence of each distinct input value
// and serially processes all of its duplicates, which keeps updates to a
// given weight row race-free.  `count`, when non-NULL, holds the number of
// occurrences of each input so the scale can be averaged.
// NOTE(review): WARP_SIZE comes from includes.h; indices appear 1-based
// (the -1 in the row computations) — confirm against the caller.
__global__ void cunn_LookupTable_accGradParametersKernel( float *input, float *indices, float *gradOutput, float *gradWeight, float *count, float defaultScale, long numel, long stride) {
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceeding input has the same as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
if (idx < numel && (idx == 0 || input[idx] != input[idx - 1])) {
do {
// Each lane covers SZ features, strided by WARP_SIZE across the row.
const int startFeature = threadIdx.x + blockIdx.y * blockDim.x;
const int weightRow = ((int) input[idx] - 1) * stride;
const int gradOutputRow = ((int) indices[idx] - 1) * stride;
const float scale = count ? defaultScale / count[idx] : defaultScale;
const int SZ = 4;
float gradient[SZ];
float weight[SZ];
// Gather the gradient and current weight values into registers.
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int featureDim = startFeature + ii * WARP_SIZE;
if (featureDim < stride) {
gradient[ii] = gradOutput[gradOutputRow + featureDim];
weight[ii] = gradWeight[weightRow + featureDim];
}
}
// Accumulate in registers, then write back.
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int featureDim = startFeature + ii * WARP_SIZE;
if (featureDim < stride) {
gradWeight[weightRow + featureDim] = weight[ii];
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
} |
21,617 | /**************************************************************************
* Unix-like crypt(3) Algorithm for Password Encryption
*
* File : crypt3.c
* Purpose : Provides crypt(3) functionality to ANSI C compilers
* without a need for the crypt library.
* Author : Michael Dipperstein
* Date : November 3, 1998
*
***************************************************************************
* The source in this file is heavily borrowed from the crypt3.c file
* found on several ftp sites on the Internet. The original source
* claimed to be BSD, but was not distributed with any BSD license or
* copyright claims. I am releasing the source that I have provided into
* public domain without any restrictions, warranties, or copyright
* claims of my own.
*
* The code below has been cleaned and compiles correctly under, gcc,
* lcc, and Borland's bcc C compilers. A bug involving the left and
* right halves of the encrypted data block in the widely published
* crypt3.c source has been fixed by this version. All implicit register
* declarations have been removed, because they generated suboptimal code.
* All constant data has been explicitly declared as const and all
* declarations have been given a minimal scope, because I'm paranoid.
*
* Caution: crypt() returns a pointer to static data. I left it this way
* to maintain backward compatibility. The downside is that
* successive calls will cause previous results to be lost.
* This can easily be changed with only minor modifications to
* the function crypt().
**************************************************************************/
/* DES permutation tables and key-schedule state for the crypt(3) port.
 * Entries are 1-based bit positions, as in the original DES specification. */
/* Initial permutation */
__device__ static const char IP[] =
{
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6,
64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7,
};
/* Final permutation, FP = IP^(-1) */
__device__ static const char FP[] = {
40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25,
};
/**************************************************************************
* Permuted-choice 1 from the key bits to yield C and D.
* Note that bits 8,16... are left out:
* They are intended for a parity check.
**************************************************************************/
__device__ static const char PC1_C[] =
{
57, 49, 41, 33, 25, 17, 9,
1, 58, 50, 42, 34, 26, 18,
10, 2, 59, 51, 43, 35, 27,
19, 11, 3, 60, 52, 44, 36,
};
__device__ static const char PC1_D[] =
{
63, 55, 47, 39, 31, 23, 15,
7, 62, 54, 46, 38, 30, 22,
14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4,
};
/* Sequence of shifts used for the key schedule. */
__device__ static const char shifts[] =
{1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1};
/**************************************************************************
* Permuted-choice 2, to pick out the bits from the CD array that generate
* the key schedule.
**************************************************************************/
__device__ static const char PC2_C[] =
{
14, 17, 11, 24, 1, 5,
3, 28, 15, 6, 21, 10,
23, 19, 12, 4, 26, 8,
16, 7, 27, 20, 13, 2,
};
__device__ static const char PC2_D[] =
{
41, 52, 31, 37, 47, 55,
30, 40, 51, 45, 33, 48,
44, 49, 39, 56, 34, 53,
46, 42, 50, 36, 29, 32,
};
/* The C and D arrays used to calculate the key schedule. */
/* NOTE(review): C, D, KS and E are mutable device-global state written by
 * setkey(); concurrent setkey() calls from multiple device threads race. */
__device__ static char C[28];
__device__ static char D[28];
/* The key schedule. Generated from the key. */
__device__ static char KS[16][48];
/* The E bit-selection table. */
__device__ static char E[48];
__device__ static const char e2[] =
{
32, 1, 2, 3, 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1,
};
/**************************************************************************
* Function: setkey
*
* Description: Set up the key schedule from the encryption key.
*
* Inputs: char *key
* pointer to 64 character array. Each character represents a
* bit in the key.
*
* Returns: none
**************************************************************************/
/**************************************************************************
* Function: setkey
*
* Description: Set up the key schedule (the device-global KS array) from
* the encryption key, and reset E to its initial bit selections.
* Writes the device globals C, D, KS and E — not safe to call
* concurrently from multiple threads.
*
* Inputs: char *key
* pointer to 64 character array. Each character represents a
* bit in the key.
*
* Returns: none
**************************************************************************/
__device__ void setkey(char *key)
{
int i, j, k, temp;
/**********************************************************************
* First, generate C and D by permuting the key. The low order bit of
* each 8-bit char is not used, so C and D are only 28 bits apiece.
**********************************************************************/
for(i = 0; i < 28; i++)
{
C[i] = key[PC1_C[i] - 1];
D[i] = key[PC1_D[i] - 1];
}
/**********************************************************************
* To generate Ki, rotate C and D according to schedule and pick up a
* permutation using PC2.
**********************************************************************/
for(i = 0; i < 16; i++)
{
/* rotate: left-rotate both halves by shifts[i] positions */
for(k = 0; k < shifts[i]; k++)
{
temp = C[0];
for(j = 0; j < 28 - 1; j++)
C[j] = C[j+1];
C[27] = temp;
temp = D[0];
for(j = 0; j < 28 - 1; j++)
D[j] = D[j+1];
D[27] = temp;
}
/* get Ki. Note C and D are concatenated; PC2_D entries are offsets
 * into the D half, hence the -28 rebasing below */
for(j = 0; j < 24; j++)
{
KS[i][j] = C[PC2_C[j] - 1];
KS[i][j + 24] = D[PC2_D[j] - 28 -1];
}
}
/* load E with the initial E bit selections */
for(i=0; i < 48; i++)
E[i] = e2[i];
}
/**************************************************************************
* The 8 selection functions. For some reason, they give a 0-origin
* index, unlike everything else.
**************************************************************************/
__device__ static const char S[8][64] =
{
{
14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0,
15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13
},
{
15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10,
3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5,
0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15,
13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9
},
{
10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8,
13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1,
13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7,
1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12
},
{
7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15,
13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9,
10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4,
3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14
},
{
2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9,
14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6,
4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14,
11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3
},
{
12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11,
10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8,
9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6,
4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13
},
{
4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1,
13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6,
1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2,
6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12
},
{
13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7,
1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2,
7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8,
2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11
}
};
/**************************************************************************
* P is a permutation on the selected combination of the current L and key.
**************************************************************************/
__device__ static const char P[] =
{
16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25,
};
/* The combination of the key and the input, before selection. */
__device__ static char preS[48];
/**************************************************************************
* Function: encrypt
*
* Description: Uses DES to encrypt a 64 bit block of data. Requires
* setkey to be invoked with the encryption key before it may
* be used. The results of the encryption are stored in block.
*
* Inputs: char *block
* pointer to 64 character array. Each character represents a
* bit in the data block.
*
* Returns: none
**************************************************************************/
__device__ void encrypt(char *block)
{
    int i, ii, temp, j, k;
    char left[32], right[32]; /* block in two halves */
    char old[32];
    char f[32];
    /* First, permute the bits in the input */
    /* IP and FP (initial/final permutation tables) are defined earlier
     * in this file, outside this function. */
    for(j = 0; j < 32; j++)
        left[j] = block[IP[j] - 1];
    for(;j < 64; j++)
        right[j - 32] = block[IP[j] - 1];
    /* Perform an encryption operation 16 times. */
    for(ii= 0; ii < 16; ii++)
    {
        i = ii;
        /* Save the right array, which will be the new left. */
        for(j = 0; j < 32; j++)
            old[j] = right[j];
        /******************************************************************
         * Expand right to 48 bits using the E selector and
         * exclusive-or with the current key bits.
         ******************************************************************/
        for(j =0 ; j < 48; j++)
            preS[j] = right[E[j] - 1] ^ KS[i][j];
        /******************************************************************
         * The pre-select bits are now considered in 8 groups of 6 bits ea.
         * The 8 selection functions map these 6-bit quantities into 4-bit
         * quantities and the results are permuted to make an f(R, K).
         * The indexing into the selection functions is peculiar;
         * it could be simplified by rewriting the tables.
         ******************************************************************/
        for(j = 0; j < 8; j++)
        {
            temp = 6 * j;
            /* The bit shifts interleave row/column bits on purpose
             * (the "peculiar" 0-origin S-box indexing noted above). */
            k = S[j][(preS[temp + 0] << 5) +
                (preS[temp + 1] << 3) +
                (preS[temp + 2] << 2) +
                (preS[temp + 3] << 1) +
                (preS[temp + 4] << 0) +
                (preS[temp + 5] << 4)];
            temp = 4 * j;
            /* Unpack the 4-bit S-box output into f, MSB first. */
            f[temp + 0] = (k >> 3) & 01;
            f[temp + 1] = (k >> 2) & 01;
            f[temp + 2] = (k >> 1) & 01;
            f[temp + 3] = (k >> 0) & 01;
        }
        /******************************************************************
         * The new right is left ^ f(R, K).
         * The f here has to be permuted first, though.
         ******************************************************************/
        for(j = 0; j < 32; j++)
            right[j] = left[j] ^ f[P[j] - 1];
        /* Finally, the new left (the original right) is copied back. */
        for(j = 0; j < 32; j++)
            left[j] = old[j];
    }
    /* The output left and right are reversed. */
    for(j = 0; j < 32; j++)
    {
        temp = left[j];
        left[j] = right[j];
        right[j] = temp;
    }
    /* The final output gets the inverse permutation of the very original. */
    for(j = 0; j < 64; j++)
    {
        i = FP[j];
        if (i < 33)
            block[j] = left[FP[j] - 1];
        else
            block[j] = right[FP[j] - 33];
    }
}
/**************************************************************************
* Function: crypt
*
* Description: Clone of Unix crypt(3) function.
*
* Inputs: char *pw
* pointer to 8 character encryption key (user password)
* char *salt
* pointer to 2 character salt used to modify the DES results.
*
* Returns: Pointer to static array containing the salt concatenated
* on to the encrypted results. Same as stored in passwd file.
**************************************************************************/
__global__ void crypt(char* pw, char* salt, char* trip) {
    /*
     * DES-hash pw with the 2-character salt and write the standard
     * 13-character crypt(3) result (salt + 11 hash chars + NUL) into trip.
     *
     * NOTE(review): setkey/encrypt work on file-scope __device__ state, so
     * this kernel is only safe when launched with a single thread —
     * confirm the launch configuration.
     */
    int i, j, temp;
    char c,
    block[66]; /* 1st store key, then results */
    char iobuf[16]; /* encrypted results */
    for(i = 0; i < 66; i++)
        block[i] = 0;
    /* break pw into 64 bits: 7 data bits per character, the 8th (parity)
     * slot is skipped by the extra i++ in the outer loop */
    for(i = 0, c = *pw; c && (i < 64); i++)
    {
        for(j = 0; j < 7; j++, i++)
            block[i] = (c >> (6 - j)) & 01;
        pw++;
        c = *pw;
    }
    /* set key based on pw */
    setkey(block);
    for(i = 0; i < 66; i++)
        block[i] = 0;
    for(i = 0; i < 2; i++)
    {
        /* store salt at beginning of results */
        c = *salt++;
        iobuf[i] = c;
        /* map the salt char from the ./0-9A-Za-z alphabet to 0..63 */
        if(c > 'Z')
            c -= 6;
        if(c > '9')
            c -= 7;
        c -= '.';
        /* use salt to effect the E-bit selection */
        for(j = 0; j < 6; j++)
        {
            if((c >> j) & 01)
            {
                temp = E[6 * i + j];
                E[6 * i +j] = E[6 * i + j + 24];
                E[6 * i + j + 24] = temp;
            }
        }
    }
    /* call DES encryption 25 times using pw as key and initial data = 0 */
    for(i = 0; i < 25; i++)
        encrypt(block);
    /* format encrypted block for standard crypt(3) output */
    for(i=0; i < 11; i++)
    {
        /* pack 6 bits into one char, then re-encode into ./0-9A-Za-z */
        c = 0;
        for(j = 0; j < 6; j++)
        {
            c <<= 1;
            c |= block[6 * i + j];
        }
        c += '.';
        if(c > '9')
            c += 7;
        if(c > 'Z')
            c += 6;
        iobuf[i + 2] = c;
    }
    iobuf[i + 2] = '\0';
    /* prevent premature NULL terminator */
    if(iobuf[1] == '\0')
        iobuf[1] = iobuf[0];
    /* BUG FIX: the original did `trip = iobuf;`, which only reassigned the
     * by-value pointer parameter to a soon-dead local buffer — the caller
     * never received the hash. Copy the 13 chars plus NUL instead. */
    for(i = 0; i < 14; i++)
        trip[i] = iobuf[i];
    return;
}
|
21,618 | /* Lab2Matrix.cu
*
* Created on: 29 Feb 2020
* Author: sc01716
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define BLOCK_SIZE 16   // threads per block edge (16x16 = 256 threads/block)
// Matrices are stored in row-major order
// (element (r, c) lives at elements[r * width + c])
typedef struct {
    int width;        // number of columns
    int height;       // number of rows
    float* elements;  // row-major data; host or device pointer by context
} Matrix;
// Naive GEMM kernel: one thread computes one element of C = A * B.
// Launch with a 2D grid; threads beyond the matrix edges simply exit.
__global__ void MatrixMultKern(const Matrix A, const Matrix B, const Matrix C) {
    // Column index into C and B, row index into C and A.
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    // Bounds guard: the grid may overhang the matrices.
    if (r >= A.height || c >= B.width)
        return;
    // Dot product of row r of A with column c of B.
    float acc = 0;
    for (int k = 0; k < A.width; ++k)
        acc += A.elements[r * A.width + k] * B.elements[k * B.width + c];
    C.elements[r * C.width + c] = acc;
}
// Matrix multiplication - Host Code
// Copies A and B to the device, launches MatrixMultKern, copies C back,
// and prints the kernel's elapsed time. Dimensions need NOT be multiples
// of BLOCK_SIZE: the grid is sized with ceiling division and the kernel
// bounds-checks each thread.
void MatrixMult(const Matrix h_A, const Matrix h_B, Matrix h_C) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Load A and B into device memory
    Matrix d_A;
    d_A.width = h_A.width;
    d_A.height = h_A.height;
    size_t size = h_A.width * h_A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, h_A.elements, size, cudaMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = h_B.width;
    d_B.height = h_B.height;
    size = h_B.width * h_B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, h_B.elements, size, cudaMemcpyHostToDevice);
    // Allocate C in device memory
    Matrix d_C;
    d_C.width = h_C.width;
    d_C.height = h_C.height;
    size = h_C.width * h_C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);
    // Invoke kernel. Ceil-division so partial tiles at the edges are
    // covered (the old exact division silently dropped them whenever a
    // dimension was not a multiple of BLOCK_SIZE).
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((d_B.width + dimBlock.x - 1) / dimBlock.x,
                 (d_A.height + dimBlock.y - 1) / dimBlock.y);
    cudaEventRecord(start);
    MatrixMultKern<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    cudaEventRecord(stop);
    // Kernel launches don't return errors; fetch any launch error explicitly.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Kernel launch failed: %s\n", cudaGetErrorString(err));
    // Read C from device to host (synchronizes with the kernel)
    cudaMemcpy(h_C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // (fixed format string: the newline used to split the unit onto its
    // own line mid-word)
    printf("Elapsed time was: %f milliseconds\n", milliseconds);
    // Release events (previously leaked) and device memory
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Entry point. Usage: <prog> A_height A_width B_width
// Builds random A and B, multiplies on the GPU, prints the top-left
// corner (up to 16x16) of each matrix.
int main(int argc, char* argv[]) {
    // Validate the command line (previously argv was dereferenced blindly).
    if (argc < 4) {
        printf("Usage: %s A_height A_width B_width\n", argv[0]);
        return 1;
    }
    Matrix A, B, C;
    // Read dimensions of A and B
    A.height = atoi(argv[1]);
    A.width = atoi(argv[2]);
    B.height = A.width;   // inner dimensions must agree
    B.width = atoi(argv[3]);
    if (A.height <= 0 || A.width <= 0 || B.width <= 0) {
        printf("Matrix dimensions must be positive integers\n");
        return 1;
    }
    A.elements = (float*) malloc(A.width * A.height * sizeof(float));
    B.elements = (float*) malloc(B.width * B.height * sizeof(float));
    C.height = A.height;
    C.width = B.width;
    C.elements = (float*) malloc(C.width * C.height * sizeof(float));
    // Fill A with values in {0,1,2} and B with values in {0,1}
    for (int i = 0; i < A.height; i++)
        for (int j = 0; j < A.width; j++)
            A.elements[i * A.width + j] = (float) (rand() % 3);
    for (int i = 0; i < B.height; i++)
        for (int j = 0; j < B.width; j++)
            B.elements[i * B.width + j] = (float) (rand() % 2);
    MatrixMult(A, B, C);
    // Print only the top-left corner of each matrix, clamped to the actual
    // dimensions (the fixed 16x16 loops used to read out of bounds for
    // matrices smaller than 16 in either dimension).
    int aRows = A.height < 16 ? A.height : 16;
    int aCols = A.width < 16 ? A.width : 16;
    for (int i = 0; i < aRows; i++) {
        for (int j = 0; j < aCols; j++)
            printf("%f ", A.elements[i * A.width + j]);
        printf("\n");
    }
    printf("\n");
    int bRows = B.height < 16 ? B.height : 16;
    int bCols = B.width < 16 ? B.width : 16;
    for (int i = 0; i < bRows; i++) {
        for (int j = 0; j < bCols; j++)
            printf("%f ", B.elements[i * B.width + j]);
        printf("\n");
    }
    printf("\n");
    int cRows = C.height < 16 ? C.height : 16;
    int cCols = C.width < 16 ? C.width : 16;
    for (int i = 0; i < cRows; i++) {
        for (int j = 0; j < cCols; j++)
            printf("%f ", C.elements[i * C.width + j]);
        printf("\n");
    }
    printf("\n");
    printf("%d ", C.width);
    // Free host memory (previously leaked)
    free(A.elements);
    free(B.elements);
    free(C.elements);
    return 0;
}
|
21,619 | #include <cstdio>
#include <cstdlib>
#include <vector>
// Returns true iff vec[0..n) is sorted in non-decreasing order; also
// prints the verdict (1/0). Fixed: with size_t n, the old `k < n - 1`
// underflowed for n == 0 and scanned ~2^64 elements; an empty sequence
// is now correctly reported as sorted.
bool check(int*vec, size_t n)
{
    bool res = true;
    for (size_t k = 0; k + 1 < n; k++)
    {
        res = res & (vec[k] <= vec[k + 1]);
    }
    printf("%d\n", res);
    return res;
}
// Single-block GPU counting sort. Thread i owns bucket value i:
//   1. count occurrences of i in key[0..n),
//   2. inclusive prefix-sum the counts (Hillis-Steele scan via a/b),
//   3. scatter value i into its slice of key.
// Must be launched as <<<1, range>>>: __syncthreads() only synchronizes
// one block, and every thread must reach each barrier.
// Fixes vs. the original: thread 0 read a[-1] out of bounds when
// computing its output offset, and a __syncthreads() sat inside a loop
// with divergent per-thread trip counts (undefined behavior). The
// leftover per-thread debug printf was also removed; note bucket[] now
// holds the counts after the kernel instead of being decremented to 0.
__global__ void bucketSort(int *key, int *bucket, int *a, int *b, int n, int range)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // 1. Count occurrences of value i.
    int count = 0;
    for (int j = 0; j < n; j++)
        if (key[j] == i)
            count++;
    bucket[i] = count;
    a[i] = count;
    b[i] = count;
    __syncthreads();
    // 2. Inclusive scan: a[i] = sum of counts for values 0..i.
    for (int j = 1; j < range; j <<= 1)
    {
        b[i] = a[i];
        __syncthreads();
        if (i >= j)
            a[i] += b[i - j];
        __syncthreads();
    }
    // 3. Scatter: value i fills key[start, start + count).
    //    Thread 0 starts at offset 0 (the old code read a[-1] here).
    int start = (i == 0) ? 0 : a[i - 1];
    for (int j = 0; j < count; j++)
        key[start + j] = i;
}
int main()
{
    // Generate n random keys in [0, range), bucket-sort them on the GPU
    // with one thread per possible value, then verify the result.
    int n = 1000;
    int range = 1024; // seem to be an upper limit for a single block
    int *key, *a, *b, *bucket;
    cudaMallocManaged(&key, sizeof(int) * n);
    cudaMallocManaged(&bucket, sizeof(int) * range);
    cudaMallocManaged(&a, sizeof(int) * range);
    cudaMallocManaged(&b, sizeof(int) * range);
    // Fill with random keys and echo the unsorted sequence.
    for (int i = 0; i < n; i++)
    {
        key[i] = rand() % range;
        printf("%d ", key[i]);
    }
    printf("\n");
    // Single block, one thread per bucket value.
    bucketSort<<<1, range>>>(key, bucket, a, b, n, range);
    cudaDeviceSynchronize();
    printf("\n");
    // Echo the sorted sequence and check the ordering.
    for (int i = 0; i < n; i++)
        printf("%d ", key[i]);
    printf("\n");
    check(key, n);
    printf("\n");
    cudaFree(key);
    cudaFree(bucket);
    cudaFree(a);
    cudaFree(b);
}
|
21,620 | #include <iostream>
#include <vector>
#define NUM 512
// Functor that doubles its argument; callable from device code so it can
// be passed by value into a kernel.
class Double
{
public:
    __device__
    float operator()(float val)
    {
        float doubled = 2 * val;
        return doubled;
    }
};
// Applies the functor `func` element-wise to `buf` in place.
// One thread per element, single block only: idx is threadIdx.x alone and
// there is no bounds check, so it must be launched as <<<1, N>>> with
// N equal to the buffer length (as gpu_run below does).
template<typename F>
__global__
void gpu_kernel(float * buf, F func)
{
    int idx = threadIdx.x;
    buf[idx] = func(buf[idx]);
}
// Demo driver: applies the Double functor to NUM ones on the GPU and
// prints the first and last results.
void gpu_run(void)
{
    std::cout << std::endl << "gpu" << std::endl;
    std::vector<float> h_buf(NUM, 1);
    const size_t bytes = NUM * sizeof(float);
    float * d_buf;
    cudaMalloc(&d_buf, bytes);
    cudaMemcpy(d_buf, h_buf.data(), bytes, cudaMemcpyHostToDevice);
    // One thread per element, single block (gpu_kernel has no bounds check).
    gpu_kernel<<<1, NUM>>>(d_buf, Double());
    cudaMemcpy(h_buf.data(), d_buf, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_buf);
    std::cout << " 0:" << h_buf[0] << std::endl;
    std::cout << "511:" << h_buf[511] << std::endl;
}
// Entry point: run the GPU functor demo.
int main()
{
    gpu_run();
    return 0;
}
|
21,621 | /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <cuda_runtime_api.h>
namespace nvinfer1
{
namespace plugin
{
// Scatter raw points into the dense voxel (pillar) grid.
// Launch: one thread per point across the whole batch
// (batch_size * max_num_points threads; see generateVoxels_launch).
// mask[voxel] counts every point landing in that voxel; voxels keeps up to
// max_points_per_voxel points (num_point_values floats each) per voxel.
__global__ void generateVoxels_kernel(
    int max_num_points,
    float *points, unsigned int* points_size,
    float min_x_range, float max_x_range,
    float min_y_range, float max_y_range,
    float min_z_range, float max_z_range,
    float pillar_x_size, float pillar_y_size, float pillar_z_size,
    int grid_y_size, int grid_x_size, int num_point_values,
    int max_points_per_voxel,
    unsigned int *mask, float *voxels)
{
    int point_idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Map the flat thread id to (frame, point-within-frame).
    int batch_idx = point_idx / max_num_points;
    int point_idx_in_frame = point_idx % max_num_points;
    // Skip padding slots beyond this frame's actual point count.
    if(point_idx_in_frame >= points_size[batch_idx]) return;
    float px = points[num_point_values * point_idx];
    float py = points[num_point_values * point_idx + 1];
    float pz = points[num_point_values * point_idx + 2];
    float pw = points[num_point_values * point_idx + 3];
    float pt;
    // Optional 5th channel; only read (and later written) when
    // num_point_values == 5.
    if (num_point_values == 5) {
        pt = points[num_point_values * point_idx + 4];
    }
    // Discard points outside the detection range.
    if(px<min_x_range||px>=max_x_range
        || py<min_y_range||py>=max_y_range
        || pz<min_z_range||pz>=max_z_range) return;
    // (x, y) voxel cell containing this point.
    int voxel_idx = floorf((px - min_x_range)/pillar_x_size);
    int voxel_idy = floorf((py - min_y_range)/pillar_y_size);
    unsigned int voxel_index = (batch_idx * grid_y_size + voxel_idy) * grid_x_size + voxel_idx;
    // Atomically reserve the next slot in this voxel; mask keeps the count.
    unsigned int point_id = atomicAdd(&(mask[voxel_index]), 1);
    // Voxel already full: the point is counted but its payload is dropped.
    if(point_id >= max_points_per_voxel) return;
    float *address = voxels + (voxel_index*max_points_per_voxel + point_id)*num_point_values;
    // atomicExch serves as an atomic store into the reserved slot.
    atomicExch(address+0, px);
    atomicExch(address+1, py);
    atomicExch(address+2, pz);
    atomicExch(address+3, pw);
    if (num_point_values == 5) {
        atomicExch(address+4, pt);
    }
}
// Compact the dense voxel grid into contiguous per-frame pillar lists.
// Launch: one thread per dense voxel across the batch.
// Outputs: voxel_features (points per pillar), voxel_num_points (clamped
// per-pillar counts), coords (one int4 {0, 0, y, x} per pillar), and
// pillar_num (pillars found per frame, grown with atomicAdd).
__global__ void generateBaseFeatures_kernel(
    int batch_size,
    unsigned int *mask, float *voxels,
    int grid_y_size, int grid_x_size,
    unsigned int *pillar_num,
    int max_pillar_num,
    int max_points_per_voxel,
    int num_point_values,
    float *voxel_features,
    unsigned int *voxel_num_points,
    unsigned int *coords)
{
    int voxel_id = blockIdx.x * blockDim.x + threadIdx.x;
    // Decompose the flat voxel id into (frame, y, x).
    int voxel_idx = voxel_id % grid_x_size;
    int voxel_idy = (voxel_id / grid_x_size) % grid_y_size;
    int batch_id = voxel_id / (grid_y_size * grid_x_size);
    if (batch_id >= batch_size) return;
    unsigned int count = mask[voxel_id];
    // Empty voxels produce no pillar.
    if( !(count>0) ) return;
    // mask counted every point that hit the voxel; clamp to capacity.
    count = count<max_points_per_voxel?count:max_points_per_voxel;
    // Claim the next pillar slot for this frame.
    // NOTE(review): current_pillarId is never clamped against
    // max_pillar_num, so the coords write below (stride max_pillar_num)
    // can go out of bounds if a frame has more occupied voxels than
    // max_pillar_num — confirm an upstream guarantee. Also note
    // voxel_num_points/voxel_features use a grid_y_size*grid_x_size
    // per-frame stride while coords uses max_pillar_num; verify this
    // matches the consumers (generateFeatures_kernel*).
    int current_pillarId = 0;
    current_pillarId = atomicAdd(pillar_num + batch_id, 1);
    voxel_num_points[batch_id * grid_y_size * grid_x_size + current_pillarId] = count;
    int4 coord = {0, 0, voxel_idy, voxel_idx};
    ((int4*)coords)[batch_id * max_pillar_num + current_pillarId] = coord;
    // Copy the pillar's points from the dense grid to the compact list.
    for (int i=0; i<count; i++){
        int inIndex = voxel_id*max_points_per_voxel + i;
        int outIndex = (batch_id * grid_x_size * grid_y_size + current_pillarId)*max_points_per_voxel + i;
        if (num_point_values == 4) {
            // 4-float points move as one vectorized float4.
            ((float4*)voxel_features)[outIndex] = ((float4*)voxels)[inIndex];
        }
        else if (num_point_values == 5) {
            for(int k=0; k<5;k++)
                voxel_features[5 * outIndex + k] = voxels[5 * inIndex + k];
        }
    }
}
// Host wrapper: sizes the grid at one thread per point (batch-wide,
// 256 threads per block) and launches generateVoxels_kernel on `stream`.
void generateVoxels_launch(
    int batch_size, int max_num_points,
    float *points, unsigned int* points_size,
    float min_x_range, float max_x_range,
    float min_y_range, float max_y_range,
    float min_z_range, float max_z_range,
    float pillar_x_size, float pillar_y_size, float pillar_z_size,
    int grid_y_size, int grid_x_size, int num_point_values,
    int max_points_per_voxel,
    unsigned int *mask, float *voxels,
    cudaStream_t stream)
{
    const int blockDimX = 256;
    const int totalPoints = batch_size * max_num_points;
    dim3 grid((totalPoints + blockDimX - 1) / blockDimX);
    dim3 block(blockDimX);
    generateVoxels_kernel<<<grid, block, 0, stream>>>(
        max_num_points,
        points, points_size,
        min_x_range, max_x_range,
        min_y_range, max_y_range,
        min_z_range, max_z_range,
        pillar_x_size, pillar_y_size, pillar_z_size,
        grid_y_size, grid_x_size, num_point_values,
        max_points_per_voxel,
        mask, voxels);
}
// Host wrapper: one thread per dense voxel (1024 threads per block);
// launches generateBaseFeatures_kernel on `stream`.
void generateBaseFeatures_launch(
    int batch_size,
    unsigned int *mask, float *voxels,
    int grid_y_size, int grid_x_size,
    unsigned int *pillar_num,
    int max_pillar_num,
    int max_points_per_voxel,
    int num_point_values,
    float *voxel_features,
    unsigned int *voxel_num_points,
    unsigned int *coords,
    cudaStream_t stream)
{
    const int threadsPerBlock = 1024;
    const int totalVoxels = batch_size * grid_y_size * grid_x_size;
    dim3 block(threadsPerBlock);
    dim3 grid((totalVoxels + threadsPerBlock - 1) / threadsPerBlock);
    generateBaseFeatures_kernel<<<grid, block, 0, stream>>>(
        batch_size,
        mask, voxels, grid_y_size, grid_x_size,
        pillar_num,
        max_pillar_num,
        max_points_per_voxel,
        num_point_values,
        voxel_features,
        voxel_num_points,
        coords);
}
// Build augmented per-point feature vectors for each pillar (5-value
// points). Launch: 4 pillars per block, max_points threads per pillar
// (see generateFeatures_launch). Shared buffers are sized for at most
// 64 points per pillar and 11 features per point.
// Per point the output is: the 5 raw values, the offset from the pillar
// mean (x,y,z), and the offset from the pillar center (x,y[,z]).
__global__ void generateFeatures_kernel(
    int batch_size,
    int dense_pillar_num,
    float* voxel_features,
    unsigned int* voxel_num_points,
    unsigned int* coords, unsigned int *params,
    float voxel_x, float voxel_y, float voxel_z,
    float range_min_x, float range_min_y, float range_min_z,
    unsigned int voxel_features_size, unsigned int max_points,
    unsigned int max_voxels,
    float* features)
{
    int warp_size = max_points;
    int pillar_idx = blockIdx.x * 4 + threadIdx.x/warp_size;
    int point_idx = threadIdx.x % warp_size;
    // In case the actual number of points is less than warp_size
    // E.g., warp_size=32, max_points=20
    if (point_idx >= max_points) return;
    int batch_idx = pillar_idx / max_voxels;
    if (batch_idx >= batch_size) return;
    int pillar_idx_in_frame = pillar_idx % max_voxels;
    int dense_pillar_idx = pillar_idx_in_frame + dense_pillar_num * batch_idx;
    int pillar_idx_inBlock = threadIdx.x/warp_size;
    // Limit number of voxels to max_voxels
    unsigned int num_pillars = params[batch_idx] > max_voxels ? max_voxels : params[batch_idx];
    // Update max_voxel to actual number
    if (pillar_idx_in_frame == 0 && point_idx == 0) {
        params[batch_idx] = num_pillars;
    }
    if (pillar_idx_in_frame >= num_pillars) return;
    //load src
    __shared__ float pillarSM[4][64][5]; // up to 64 points per pillar
    __shared__ float4 pillarSumSM[4]; //4*4
    __shared__ int4 cordsSM[4]; //4*4
    __shared__ int pointsNumSM[4]; //4
    __shared__ float pillarOutSM[4][64][11]; // up to 11 features per point
    if (point_idx == 0) {
        pointsNumSM[pillar_idx_inBlock] = voxel_num_points[dense_pillar_idx];
        cordsSM[pillar_idx_inBlock] = ((int4*)coords)[dense_pillar_idx];
        pillarSumSM[pillar_idx_inBlock] = {0,0,0,0};
    }
    for(int k=0; k<5; k++) {
        pillarSM[pillar_idx_inBlock][point_idx][k] = voxel_features[5 * (dense_pillar_idx*max_points + point_idx) + k];
    }
    __syncthreads();
    //calculate sm: accumulate the pillar's (x, y, z) sum in shared memory
    if (point_idx < pointsNumSM[pillar_idx_inBlock]) {
        atomicAdd(&(pillarSumSM[pillar_idx_inBlock].x), pillarSM[pillar_idx_inBlock][point_idx][0]);
        atomicAdd(&(pillarSumSM[pillar_idx_inBlock].y), pillarSM[pillar_idx_inBlock][point_idx][1]);
        atomicAdd(&(pillarSumSM[pillar_idx_inBlock].z), pillarSM[pillar_idx_inBlock][point_idx][2]);
    }
    __syncthreads();
    //feature-mean: this point's offset from the pillar mean
    float4 mean;
    float validPoints = pointsNumSM[pillar_idx_inBlock];
    mean.x = pillarSumSM[pillar_idx_inBlock].x / validPoints;
    mean.y = pillarSumSM[pillar_idx_inBlock].y / validPoints;
    mean.z = pillarSumSM[pillar_idx_inBlock].z / validPoints;
    mean.x = pillarSM[pillar_idx_inBlock][point_idx][0] - mean.x;
    mean.y = pillarSM[pillar_idx_inBlock][point_idx][1] - mean.y;
    mean.z = pillarSM[pillar_idx_inBlock][point_idx][2] - mean.z;
    //calculate offset of the pillar's geometric center
    float x_offset = voxel_x / 2.0f + cordsSM[pillar_idx_inBlock].w * voxel_x + range_min_x;
    float y_offset = voxel_y / 2.0f + cordsSM[pillar_idx_inBlock].z * voxel_y + range_min_y;
    float z_offset = voxel_z / 2.0f + cordsSM[pillar_idx_inBlock].y * voxel_z + range_min_z;
    //feature-offset: this point's offset from the pillar center
    float4 center;
    center.x = pillarSM[pillar_idx_inBlock][point_idx][0] - x_offset;
    center.y = pillarSM[pillar_idx_inBlock][point_idx][1] - y_offset;
    center.z = pillarSM[pillar_idx_inBlock][point_idx][2] - z_offset;
    //store output; slots beyond the valid point count are zero-filled
    if (point_idx < pointsNumSM[pillar_idx_inBlock]) {
        for(int k=0; k<5; k++)
            pillarOutSM[pillar_idx_inBlock][point_idx][k] = pillarSM[pillar_idx_inBlock][point_idx][k];
        pillarOutSM[pillar_idx_inBlock][point_idx][5] = mean.x;
        pillarOutSM[pillar_idx_inBlock][point_idx][5 + 1] = mean.y;
        pillarOutSM[pillar_idx_inBlock][point_idx][5 + 2] = mean.z;
        pillarOutSM[pillar_idx_inBlock][point_idx][5 + 3] = center.x;
        pillarOutSM[pillar_idx_inBlock][point_idx][5 + 4] = center.y;
        if (5 + 5 < voxel_features_size)
            // BUG FIX: the index here was `warp_size + 5`, which overruns
            // the 11-wide feature row whenever warp_size > 5 (e.g. 32
            // points -> index 37). The 5+1..5+4 pattern above shows slot
            // 5 + 5 (= 10) was intended for center.z.
            pillarOutSM[pillar_idx_inBlock][point_idx][5 + 5] = center.z;
    } else {
        for (int k = 0; k < voxel_features_size; k++)
            pillarOutSM[pillar_idx_inBlock][point_idx][k] = 0;
    }
    __syncthreads();
    // Flush this point's feature row from shared memory to global output.
    for(int i = 0; i < voxel_features_size; i ++) {
        int outputSMId = pillar_idx_inBlock*64*11 + point_idx * 11 + i;
        int outputId = pillar_idx*max_points*voxel_features_size + point_idx * voxel_features_size + i;
        features[outputId] = ((float*)pillarOutSM)[outputSMId] ;
    }
}
// 4-value variant of generateFeatures_kernel: each point is a float4, so
// loads move as one vector. Output per point: the 4 raw values, the mean
// offset (x,y,z) and the center offset (x,y,z) — 10 slots of the 11-wide
// shared row. Launch: 4 pillars per block, max_points threads per pillar.
__global__ void generateFeatures_kernel_4x(
    int batch_size,
    int dense_pillar_num,
    float* voxel_features,
    unsigned int* voxel_num_points, unsigned int* coords,
    unsigned int *params,
    float voxel_x, float voxel_y, float voxel_z,
    float range_min_x, float range_min_y, float range_min_z,
    unsigned int voxel_features_size, unsigned int max_points,
    unsigned int max_voxels,
    float* features)
{
    int warp_size = max_points;
    int pillar_idx = blockIdx.x * 4 + threadIdx.x / warp_size;
    int point_idx = threadIdx.x % warp_size;
    // In case the actual number of points is less than warp_size
    // E.g., warp_size=32, max_points=20
    if (point_idx >= max_points) return;
    int batch_idx = pillar_idx / max_voxels;
    if (batch_idx >= batch_size) return;
    int pillar_idx_in_frame = pillar_idx % max_voxels;
    int dense_pillar_idx = pillar_idx_in_frame + dense_pillar_num * batch_idx;
    int pillar_idx_inBlock = threadIdx.x / warp_size;
    // Limit number of voxels to max_voxels
    unsigned int num_pillars = params[batch_idx] > max_voxels ? max_voxels : params[batch_idx];
    // Update max_voxel to actual number
    if (pillar_idx_in_frame == 0 && point_idx == 0) {
        params[batch_idx] = num_pillars;
    }
    if (pillar_idx_in_frame >= num_pillars) return;
    //load src
    __shared__ float4 pillarSM[4][64]; // up to 64 points per pillar
    __shared__ float4 pillarSumSM[4]; //4*4
    __shared__ int4 cordsSM[4]; //4*4
    __shared__ int pointsNumSM[4]; //4
    __shared__ float pillarOutSM[4][64][11]; // up to 11 output features per point
    if (point_idx == 0) {
        pointsNumSM[pillar_idx_inBlock] = voxel_num_points[dense_pillar_idx];
        // NOTE(review): coords is read at pillar_idx here, while the
        // sibling generateFeatures_kernel reads it at dense_pillar_idx —
        // one of the two indexings is likely inconsistent; verify against
        // the coords layout written by generateBaseFeatures_kernel.
        cordsSM[pillar_idx_inBlock] = ((int4*)coords)[pillar_idx];
        pillarSumSM[pillar_idx_inBlock] = {0,0,0,0};
    }
    pillarSM[pillar_idx_inBlock][point_idx] = ((float4*)voxel_features)[dense_pillar_idx*max_points + point_idx];
    __syncthreads();
    //calculate sm: accumulate the pillar's (x, y, z) sum in shared memory
    if (point_idx < pointsNumSM[pillar_idx_inBlock]) {
        atomicAdd(&(pillarSumSM[pillar_idx_inBlock].x), pillarSM[pillar_idx_inBlock][point_idx].x);
        atomicAdd(&(pillarSumSM[pillar_idx_inBlock].y), pillarSM[pillar_idx_inBlock][point_idx].y);
        atomicAdd(&(pillarSumSM[pillar_idx_inBlock].z), pillarSM[pillar_idx_inBlock][point_idx].z);
    }
    __syncthreads();
    //feature-mean: this point's offset from the pillar mean
    float4 mean;
    float validPoints = pointsNumSM[pillar_idx_inBlock];
    mean.x = pillarSumSM[pillar_idx_inBlock].x / validPoints;
    mean.y = pillarSumSM[pillar_idx_inBlock].y / validPoints;
    mean.z = pillarSumSM[pillar_idx_inBlock].z / validPoints;
    mean.x = pillarSM[pillar_idx_inBlock][point_idx].x - mean.x;
    mean.y = pillarSM[pillar_idx_inBlock][point_idx].y - mean.y;
    mean.z = pillarSM[pillar_idx_inBlock][point_idx].z - mean.z;
    //calculate offset of the pillar's geometric center
    float x_offset = voxel_x / 2.0f + cordsSM[pillar_idx_inBlock].w * voxel_x + range_min_x;
    float y_offset = voxel_y / 2.0f + cordsSM[pillar_idx_inBlock].z * voxel_y + range_min_y;
    float z_offset = voxel_z / 2.0f + cordsSM[pillar_idx_inBlock].y * voxel_z + range_min_z;
    //feature-offset: this point's offset from the pillar center
    float4 center;
    center.x = pillarSM[pillar_idx_inBlock][point_idx].x - x_offset;
    center.y = pillarSM[pillar_idx_inBlock][point_idx].y - y_offset;
    center.z = pillarSM[pillar_idx_inBlock][point_idx].z - z_offset;
    //store output; slots beyond the valid point count are zero-filled
    if (point_idx < pointsNumSM[pillar_idx_inBlock]) {
        pillarOutSM[pillar_idx_inBlock][point_idx][0] = pillarSM[pillar_idx_inBlock][point_idx].x;
        pillarOutSM[pillar_idx_inBlock][point_idx][1] = pillarSM[pillar_idx_inBlock][point_idx].y;
        pillarOutSM[pillar_idx_inBlock][point_idx][2] = pillarSM[pillar_idx_inBlock][point_idx].z;
        pillarOutSM[pillar_idx_inBlock][point_idx][3] = pillarSM[pillar_idx_inBlock][point_idx].w;
        pillarOutSM[pillar_idx_inBlock][point_idx][4] = mean.x;
        pillarOutSM[pillar_idx_inBlock][point_idx][5] = mean.y;
        pillarOutSM[pillar_idx_inBlock][point_idx][6] = mean.z;
        pillarOutSM[pillar_idx_inBlock][point_idx][7] = center.x;
        pillarOutSM[pillar_idx_inBlock][point_idx][8] = center.y;
        pillarOutSM[pillar_idx_inBlock][point_idx][9] = center.z;
    } else {
        pillarOutSM[pillar_idx_inBlock][point_idx][0] = 0;
        pillarOutSM[pillar_idx_inBlock][point_idx][1] = 0;
        pillarOutSM[pillar_idx_inBlock][point_idx][2] = 0;
        pillarOutSM[pillar_idx_inBlock][point_idx][3] = 0;
        pillarOutSM[pillar_idx_inBlock][point_idx][4] = 0;
        pillarOutSM[pillar_idx_inBlock][point_idx][5] = 0;
        pillarOutSM[pillar_idx_inBlock][point_idx][6] = 0;
        pillarOutSM[pillar_idx_inBlock][point_idx][7] = 0;
        pillarOutSM[pillar_idx_inBlock][point_idx][8] = 0;
        pillarOutSM[pillar_idx_inBlock][point_idx][9] = 0;
    }
    __syncthreads();
    // Flush this point's feature row from shared memory to global output.
    for(int i = 0; i < voxel_features_size; i ++) {
        int outputSMId = pillar_idx_inBlock*64*11 + point_idx * 11 + i;
        int outputId = pillar_idx*max_points*voxel_features_size + point_idx * voxel_features_size + i;
        features[outputId] = ((float*)pillarOutSM)[outputSMId] ;
    }
}
// Host wrapper for the feature-building kernels: 4 pillars per block with
// max_points threads each; picks the vectorized kernel for 4-value points.
// Returns the cudaError_t of the launch (exits on failure).
int generateFeatures_launch(
    int batch_size,
    int dense_pillar_num,
    float* voxel_features,
    unsigned int* voxel_num_points,
    unsigned int* coords,
    unsigned int *params,
    float voxel_x, float voxel_y, float voxel_z,
    float range_min_x, float range_min_y, float range_min_z,
    unsigned int voxel_features_size, unsigned int max_points,
    unsigned int max_voxels, unsigned int num_point_values,
    float* features,
    cudaStream_t stream)
{
    dim3 grid((batch_size * max_voxels + 3) / 4);
    dim3 block(4 * max_points);
    if (num_point_values == 4) {
        generateFeatures_kernel_4x<<<grid, block, 0, stream>>>(
            batch_size,
            dense_pillar_num,
            voxel_features,
            voxel_num_points,
            coords,
            params,
            voxel_x, voxel_y, voxel_z,
            range_min_x, range_min_y, range_min_z,
            voxel_features_size, max_points,
            max_voxels,
            features);
    } else {
        generateFeatures_kernel<<<grid, block, 0, stream>>>(
            batch_size,
            dense_pillar_num,
            voxel_features,
            voxel_num_points,
            coords,
            params,
            voxel_x, voxel_y, voxel_z,
            range_min_x, range_min_y, range_min_z,
            voxel_features_size, max_points,
            max_voxels,
            features);
    }
    // Surface launch-configuration errors explicitly.
    cudaError_t status = cudaGetLastError();
    if (cudaSuccess != status) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(status));
        exit(-1);
    }
    return status;
}
} // namespace plugin
} // namespace nvinfer1
|
21,622 | #include<bits/stdc++.h>
using namespace std;
// Comparator for std::sort over (row, col, val) triples: orders rows by
// their second field (the column), ascending.
bool sortcol( const int* v1,
const int* v2 ) {
    return v1[1] < v2[1];
}
// Sort the flattened size x 3 triple list by its middle (column) field,
// round-tripping through an array of heap-allocated rows for std::sort.
void my_custom_sort(int * &input1,int size){
    int **rows = new int*[size];
    for (int r = 0; r < size; r++) {
        rows[r] = new int[3];
        rows[r][0] = input1[r * 3 + 0];
        rows[r][1] = input1[r * 3 + 1];
        rows[r][2] = input1[r * 3 + 2];
    }
    sort(rows, rows + size, sortcol);
    // Copy back in sorted order, releasing each row as we go.
    for (int r = 0; r < size; r++) {
        input1[r * 3 + 0] = rows[r][0];
        input1[r * 3 + 1] = rows[r][1];
        input1[r * 3 + 2] = rows[r][2];
        delete[] rows[r];
    }
    delete[] rows;
}
// Comparator for std::sort over (row, col, val) triples: orders rows by
// their first field (the row), ascending.
bool sortcol1( const int* v1,
const int* v2 ) {
    return v1[0] < v2[0];
}
// Sort the flattened size x 3 triple list by its first (row) field,
// using a temporary array of heap-allocated rows for std::sort.
void my_custom_sort2(int * &input1,int size){
    int **rows = new int*[size];
    for (int r = 0; r < size; r++)
        rows[r] = new int[3];
    for (int r = 0; r < size; r++) {
        rows[r][0] = input1[r * 3 + 0];
        rows[r][1] = input1[r * 3 + 1];
        rows[r][2] = input1[r * 3 + 2];
    }
    sort(rows, rows + size, sortcol1);
    // Copy the sorted triples back into the flat array.
    for (int r = 0; r < size; r++) {
        input1[r * 3 + 0] = rows[r][0];
        input1[r * 3 + 1] = rows[r][1];
        input1[r * 3 + 2] = rows[r][2];
    }
    for (int r = 0; r < size; r++)
        delete[] rows[r];
    delete[] rows;
}
// Transpose a COO triple list in place: swap the (row, col) fields of
// each of the `size` triples; values are untouched.
void transpose(int* input, int size){
    for (int i = 0; i < size; i++) {
        int row = input[i * 3];
        input[i * 3] = input[i * 3 + 1];
        input[i * 3 + 1] = row;
    }
}
// GPU analogue of transpose(): each thread swaps the (row, col) fields of
// one triple. The hard-coded 256 assumes 256-thread blocks.
// NOTE(review): the guard compares against `size` as the FLATTENED length
// (3 * triple count), matching how the oddeven kernels below are invoked
// — confirm at the call site, as this kernel is not called in view.
__global__ void transpose_parallel(int * input,int size){
    int tx=(blockIdx.x*256)+threadIdx.x;
    if((tx*3+1)<size){
        int temp=input[tx*3+0];
        input[tx*3+0] = input[tx*3+1];
        input[tx*3+1] = temp;
    }
}
// Element-wise copy of `size` ints from src into dest.
void copy(int* dest, int* src, int size){
    int i = 0;
    while (i < size) {
        dest[i] = src[i];
        i++;
    }
}
// Accumulate val into the (row, col) entry of the triple list if it
// already exists; otherwise append a new triple and grow `size`.
// Linear scan — callers rely on this deduplicating behavior.
void insert(int* output, int row, int col, int val, int &size){
    for (int i = 0; i < size; i++) {
        if (output[i * 3 + 0] == row && output[i * 3 + 1] == col) {
            output[i * 3 + 2] += val;
            return;
        }
    }
    output[size * 3 + 0] = row;
    output[size * 3 + 1] = col;
    output[size * 3 + 2] = val;
    size++;
}
// One phase of GPU odd-even transposition sort over the flattened triple
// list, keyed on each triple's SECOND field (x[...+1], the column).
// I selects the phase: 0 compares triple pairs (2k, 2k+1), 1 compares
// (2k+1, 2k+2); indices below are scaled by 3 ints per triple. n is the
// flattened length (3 * triple count). A swap exchanges all three fields.
// Hard-coded 256 assumes 256-thread blocks.
__global__ void oddeven(int* x,int I,int n)
{
    int id=(blockIdx.x*256)+threadIdx.x;
    if(I==0 && ((id*6+4)< n)){
        if(x[id*6+1]>x[id*6+4]){
            // Swap keys (field 1), then rows (field 0), then values (field 2).
            int X=x[id*6+1];
            x[id*6+1]=x[id*6+4];
            x[id*6+4]=X;
            X=x[id*6];
            x[id*6]=x[id*6+3];
            x[id*6+3]=X;
            X=x[id*6+2];
            x[id*6+2]=x[id*6+5];
            x[id*6+5]=X;
        }
    }
    if(I==1 && ((id*6+7)< n)){
        if(x[id*6+4]>x[id*6+7]){
            int X=x[id*6+4];
            x[id*6+4]=x[id*6+7];
            x[id*6+7]=X;
            X=x[id*6+3];
            x[id*6+3]=x[id*6+6];
            x[id*6+6]=X;
            X=x[id*6+5];
            x[id*6+5]=x[id*6+8];
            x[id*6+8]=X;
        }
    }
}
// One phase of GPU odd-even transposition sort over the flattened triple
// list, keyed on each triple's FIRST field (x[...], the row) — the GPU
// counterpart of sortcol1/my_custom_sort2. Phase I and index scaling as
// in oddeven() above; a swap exchanges all three fields of both triples.
__global__ void oddeven2(int* x,int I,int n)
{
    int id=(blockIdx.x*256)+threadIdx.x;
    if(I==0 && ((id*6+3)< n)){
        if(x[id*6]>x[id*6+3]){
            int X=x[id*6+1];
            x[id*6+1]=x[id*6+4];
            x[id*6+4]=X;
            X=x[id*6];
            x[id*6]=x[id*6+3];
            x[id*6+3]=X;
            X=x[id*6+2];
            x[id*6+2]=x[id*6+5];
            x[id*6+5]=X;
        }
    }
    if(I==1 && ((id*6+6)< n)){
        if(x[id*6+3]>x[id*6+6]){
            int X=x[id*6+4];
            x[id*6+4]=x[id*6+7];
            x[id*6+7]=X;
            X=x[id*6+3];
            x[id*6+3]=x[id*6+6];
            x[id*6+6]=X;
            X=x[id*6+5];
            x[id*6+5]=x[id*6+8];
            x[id*6+8]=X;
        }
    }
}
// Second multiplication pass: GPU-sorts both triplet lists by row
// (size-many odd-even phases each), then accumulates products of
// entries whose (row, col) coincide into a fresh `output` list.
// NOTE(review): list1_d and list2_d are never cudaFree'd — this leaks
// device memory on every call.
// NOTE(review): `output` is allocated with only `vertices` ints, but
// insert() writes 3 ints per distinct result entry — confirm capacity.
// NOTE(review): insert() is called with (row1, row2); since the branch
// requires row1==row2 this records diagonal (row,row) entries and drops
// the column — presumably intentional for the clustering-coefficient
// use in main, but verify.
void multiply2(int* &input1, int* &input2, int* &output,int &size1, int &size2, int &size3, int vertices){
int *list1_d,*list2_d;
//my_custom_sort2(input1, size1);
cudaMalloc(&list1_d,size1*3*sizeof(int));
cudaMemcpy(list1_d,input1,size1*3*sizeof(int),cudaMemcpyHostToDevice);
int gd1,gd2,bd1,bd2;
gd1=size1/(2*256);
gd1=gd1+1;
bd1=256;
gd2=size2/(2*256);
gd2=gd2+1;
bd2=256;
// size1 alternating odd/even phases fully sort the first list.
for(int i=0;i<size1;i++){
//int size=n/2;
oddeven2<<<gd1,bd1>>>(list1_d,i%2,size1*3);
}
//my_custom_sort2(input2, size2);
cudaMalloc(&list2_d,size2*3*sizeof(int));
cudaMemcpy(list2_d,input2,size2*3*sizeof(int),cudaMemcpyHostToDevice);
for(int i=0;i<size2;i++){
//int size=n/2;
oddeven2<<<gd2,bd2>>>(list2_d,i%2,size2*3);
/*cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));*/
}
cudaMemcpy(input1,list1_d,size1*3*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(input2,list2_d,size2*3*sizeof(int),cudaMemcpyDeviceToHost);
output = new int[vertices];
int outputSize=0;
int kOld=0;
int kNew=0;
// Merge-style scan: for each input1 triplet, walk the run of input2
// triplets with the same row and accumulate products on exact matches.
for(int i=0;i<size1;i++){
int ans=0;
int row1= input1[i*3+0];
int col1= input1[i*3+1];
int j;
// Same row as the previous entry: rescan that row's run in input2.
if(i!=0 && row1==input1[(i-1)*3+0])
j=kOld;
else{
j=kNew;
kOld = kNew;
}
while(j<size2){
int row2 = input2[j*3+0];
int col2 = input2[j*3+1];
if(row1==row2 && col1==col2){
insert(output, row1, row2, input1[i*3+2]*input2[j*3+2], outputSize);
j++;
}
else if(row1==row2 && col1!=col2)
j++;
else
break;
}
kNew=j;
}
size3 = outputSize;
}
// First multiplication pass: transposes and sorts input2 on the GPU
// (result mirrored into input1), then accumulates products of entries
// sharing a column into `output` as (row, col, val) triplets, and
// finally hands the result to multiply2 for the second pass.
// list1_d is a caller-visible device buffer used for the GPU work.
void multiply1(int* &input1, int* &input2, int* &output,int* &list1_d ,int &size1, int &size2, int &size3, int vertices){
    // Launch configuration for the transpose kernel.
    int bd, gd;
    if (size1 > 256) {
        gd = size1/256 + 1;
        bd = 256;          // fix: bd was left uninitialized on this path
    } else {
        gd = 1;
        bd = size1;
    }
    cudaMalloc(&list1_d, size1*3*sizeof(int));
    cudaMemcpy(list1_d, input2, size1*3*sizeof(int), cudaMemcpyHostToDevice);
    transpose_parallel<<< gd,bd >>> (list1_d, size1*3);
    // Parallel odd-even transposition sort of the transposed triplets;
    // size1 alternating phases guarantee a fully sorted sequence.
    gd = size1/(2*256) + 1;
    bd = 256;
    for (int i = 0; i < size1; i++) {
        oddeven<<<gd,bd>>>(list1_d, i%2, size1*3);
    }
    printf("Final Ans: ");
    cudaMemcpy(input2, list1_d, size1*3*sizeof(int), cudaMemcpyDeviceToHost);
    // Keep input1 in sync with the transposed/sorted input2.
    copy(input1, input2, size1*3);
    output = new int[3*vertices*vertices];
    int outputSize = 0;
    int kOld = 0;
    int kNew = 0;
    // Merge-style scan: for each input1 triplet, walk the run of input2
    // triplets with the same column and accumulate products.
    for (int i = 0; i < size1; i++) {
        int row1 = input1[i*3+0];
        int col1 = input1[i*3+1];
        int j;
        // Same column as the previous entry: rescan that column's run.
        if (i != 0 && col1 == input1[(i-1)*3+1])
            j = kOld;
        else {
            j = kNew;
            kOld = kNew;
        }
        while (j < size2) {
            int row2 = input2[j*3+0];
            int col2 = input2[j*3+1];
            if (col1 == col2) {
                insert(output, row1, row2, input1[i*3+2]*input2[j*3+2], outputSize);
                j++;
            }
            else
                break;
        }
        kNew = j;
    }
    size3 = outputSize;
    multiply2(output, input2, input1, size3, size2, size1, vertices);
}
// Reads an edge list (first line: vertex count, edge count; remaining
// lines: endpoint pairs), builds symmetric triplet lists, multiplies
// them on the GPU, and prints an average clustering-coefficient-style
// value plus the elapsed GPU time.
// NOTE(review): vertex ids are used directly as degree[] indices, so
// the input must use ids in [0, vertices) — confirm the file format.
// NOTE(review): list1/list2/list3/degree and the device buffer list1_d
// are never freed; the pointers are also reassigned inside
// multiply1/multiply2, so the original heap blocks leak.
int main(int argc, char** argv){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
ifstream infile(argv[1]);
int *list1,*list2,*list3,*list1_d;
int* degree;
int size1, size2, size3;
int a,b,vertices,edges,f=0, index1=0, index2=0;
while(infile>>a>>b){
// First pair is the header: vertex and edge counts.
if(f==0){
vertices=a;
edges=b;
list1=new int[6*edges];
list2=new int[6*edges];
degree=new int[vertices];
for(int i=0;i<vertices;i++)
degree[i] = 0;
f=1;
}
else{
// Store each undirected edge twice: (a,b,1) and (b,a,1).
list1[index1*3+0]=a;
list1[index1*3+1]=b;
list1[index1*3+2]=1;
index1++;
list1[index1*3+0]=b;
list1[index1*3+1]=a;
list1[index1*3+2]=1;
index1++;
list2[index2*3+0]=a;
list2[index2*3+1]=b;
list2[index2*3+2]=1;
index2++;
list2[index2*3+0]=b;
list2[index2*3+1]=a;
list2[index2*3+2]=1;
index2++;
degree[a]++;
degree[b]++;
}
}
size1= size2=2*edges;
//auto start = high_resolution_clock::now();
float milliseconds;
// Time the full multiply pipeline with CUDA events.
cudaEventRecord(start);
multiply1(list1,list2, list3,list1_d, size1, size2, size3, vertices);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds,start,stop);
//multiply2(list3,list2, list1, size3, size2, size1, vertices);
//auto stop = high_resolution_clock::now();
//auto duration = duration_cast<milliseconds>(stop - start);
float cc=0;
// Accumulate per-vertex contributions from the diagonal entries that
// multiply2 wrote back into list1.
for(int i=0;i<size1;i++){
if(list1[i*3+0]==list1[i*3+1]){
if(degree[list1[i*3+0]]>=2){
cc=cc+((float(list1[i*3+2]/2))/((degree[list1[i*3+0]]*(degree[list1[i*3+0]]-1))/2));
}
}
}
//printf("sdfg\n");
cc=cc/vertices;
printf("%f\n",cc);
printf("%f\n",milliseconds);
//cout << duration.count() << endl;
return 0;
}
21,623 |
#include<stdio.h>
#include <cuda.h>
// Fills a[0..N-1] with pseudo-random ints in [0, 20000).
void random_ints(int* a, int N)
{
    for (int k = 0; k < N; ++k) {
        a[k] = rand() % 20000;
    }
}
// Writes the identity sequence 0..N-1 into a (a sorted haystack for the
// binary search below).
void add_array(int* a, int N)
{
    int k = 0;
    while (k < N) {
        a[k] = k;
        ++k;
    }
}
// Parallel lookup: thread `index` binary-searches key b[index] in the
// sorted array a[0..sizeofa) and writes hit/miss into c[index].
// No bounds guard on `index`: the launch must supply exactly one thread
// per element of b (main uses <<<M,1>>>).
__global__ void binary_search(int* a, int* b, bool* c, int sizeofa) //kernal function
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Debug trace: serializes warps; remove for timing runs.
printf(" %d\n", index);
int key = b[index];
// Classic [min, max) bisection; loop ends when the window collapses.
int min = 0, max = sizeofa;
int mid = sizeofa / 2;
while (min != mid)
{
if (key == a[mid])
{
break;
}
else if (key < a[mid])
{
min = min;
max = mid;
}
else {
min = mid;
max = max;
}
mid = (min + max) / 2;
}
// Final probe covers both loop-exit paths (found vs. window empty).
if (key == a[mid])
c[index] = true;
else
c[index] = false;
printf(" %d %d %d %d\n", index, key, a[mid],c[index]);
}
// Host driver: builds a sorted array 0..N-1, searches M random keys on
// the GPU (one single-thread block per key, matching the kernel's
// missing bounds guard), and reports hit/miss per key.
int main()
{
    int N = 10000; //size of given array
    int M = 1000; //Number of searching element
    size_t size = N * sizeof(int);
    size_t size2 = M * sizeof(int);
    // Host buffers.
    int* vector1 = (int*)malloc(size);
    int* vector2 = (int*)malloc(size2);
    bool* vector3 = (bool*)malloc(M * sizeof(bool));
    add_array(vector1,N);      // sorted haystack 0..N-1
    random_ints(vector2,M);    // keys in [0, 20000): roughly half miss
    // Device buffers.
    int *d_vector1;
    int *d_vector2;
    bool *d_vector3;
    cudaMalloc(& d_vector1, size);
    cudaMalloc(& d_vector2, size2);
    cudaMalloc(& d_vector3, M*sizeof(bool));
    cudaMemcpy(d_vector1,vector1, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_vector2,vector2, size2, cudaMemcpyHostToDevice);
    binary_search<<<M,1>>>(d_vector1, d_vector2, d_vector3,N);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(vector3,d_vector3, M * sizeof(bool), cudaMemcpyDeviceToHost);
    for (int i = 0; i < M; i++)
    {
        if(vector3[i]==true)
            printf("%d is present in array\n",vector2[i]);
        else
            printf("%d is not present in array\n", vector2[i]);
    }
    // Fix: release device and host memory (the original leaked all six
    // allocations).
    cudaFree(d_vector1);
    cudaFree(d_vector2);
    cudaFree(d_vector3);
    free(vector1);
    free(vector2);
    free(vector3);
    return 0;
}
|
#include <iostream>
#include <cuda_runtime.h>
#define DATA_SIZE 1048576
#define THREAD_NUM 256
#define BLOCK_NUM 32
// Selects the first CUDA device with compute capability >= 1.0 and
// makes it current. Returns false (with a console message) when no
// usable device exists.
bool initCUDA()
{
    int count;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        std::cout<<"There is no device."<<std::endl;
        return false;
    }
    int chosen = -1;
    for (int dev = 0; dev < count && chosen < 0; dev++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, dev) == cudaSuccess && prop.major >= 1)
            chosen = dev;
    }
    if (chosen < 0) {
        std::cout<<"There is no device supporting CUDA 1.x/n"<<std::endl;
        return false;
    }
    cudaSetDevice(chosen);
    return true;
}
// Fills number[0..size-1] with pseudo-random digits in [0, 10).
void generateNumbers(int* number, int size)
{
    for (int k = 0; k < size; ++k)
        number[k] = rand() % 10;
}
// Per-block partial sum of squares over num[0..DATA_SIZE).
// Each thread strides the input (stride THREAD_NUM*BLOCK_NUM) into
// shared[tid]; an interleaved tree reduction (offset/mask doubling)
// then folds the block's THREAD_NUM partials into shared[0].
// Requires THREAD_NUM to be a power of two and dynamic shared memory of
// THREAD_NUM ints. time[bid] / time[bid+BLOCK_NUM] record the block's
// start/stop clock() ticks for timing.
__global__
static void sumOfSquares(int* num, int* result, clock_t* time)
{
extern __shared__ int shared[];
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int offset = 1, mask = 1;
//const int size = DATA_SIZE/THREAD_NUM;
if(tid == 0) time[bid] = clock();
shared[tid] = 0;
for(int i=bid*THREAD_NUM+tid;i<DATA_SIZE;i+=THREAD_NUM*BLOCK_NUM){
shared[tid] += num[i]*num[i];
}
__syncthreads();
// Interleaved reduction: a thread participates while (tid & mask)==0;
// the trailing __syncthreads() orders each round's writes before the
// next round's reads.
while(offset<THREAD_NUM){
if((tid & mask) == 0){
shared[tid] += shared[tid+offset];
}
offset += offset;
mask = offset+mask;
__syncthreads();
}
if(tid == 0){
time[bid+BLOCK_NUM] = clock();
result[bid] = shared[0];
}
}
// CPU reference: returns the sum of data[i]^2 over the full DATA_SIZE
// array (int accumulation, matching the GPU kernel).
int sumOfSquared(int* data){
    int total = 0;
    for (int k = 0; k < DATA_SIZE; ++k)
        total += data[k] * data[k];
    return total;
}
// Host driver: fills DATA_SIZE random digits, sums their squares on the
// GPU (BLOCK_NUM partial sums plus per-block clock() timing), then
// cross-checks against a CPU reference.
int main()
{
    if(!initCUDA()) return 0;
    std::cout<<"CUDA initialized"<<std::endl;
    int data[DATA_SIZE];   // ~4 MB on the stack; fine on typical hosts
    generateNumbers(data, DATA_SIZE);
    int* gpudata, *result;
    clock_t* time;
    cudaMalloc((void**)&gpudata, sizeof(int)*DATA_SIZE);
    cudaMalloc((void**)&result, sizeof(int)*BLOCK_NUM);
    cudaMalloc((void**)&time, sizeof(clock_t)*BLOCK_NUM*2);
    cudaMemcpy(gpudata, data, sizeof(int)*DATA_SIZE, cudaMemcpyHostToDevice);
    // One int of shared memory per thread for the block reduction.
    sumOfSquares<<<BLOCK_NUM,THREAD_NUM,THREAD_NUM*sizeof(int)>>>(gpudata, result, time);
    int sum[BLOCK_NUM];    // only BLOCK_NUM partials are produced
    clock_t time_used[BLOCK_NUM*2];
    cudaMemcpy(sum, result, sizeof(int)*BLOCK_NUM, cudaMemcpyDeviceToHost);
    cudaMemcpy(time_used, time, sizeof(clock_t)*BLOCK_NUM*2, cudaMemcpyDeviceToHost);
    cudaFree(gpudata);
    cudaFree(result);
    cudaFree(time);
    int final_sum = 0;
    for(int i=0;i<BLOCK_NUM;i++){
        final_sum += sum[i];
    }
    clock_t timeUsed = 0;  // fix: was read uninitialized via +=
    for(int i=0;i<BLOCK_NUM;i++){
        timeUsed += (time_used[i+BLOCK_NUM]-time_used[i]);
    }
    std::cout<<"sum (GPU): "<<final_sum<<"; time:"<<timeUsed<<std::endl;
    clock_t start = clock();
    int sumCpu = sumOfSquared(data);
    clock_t usedTime = clock() - start;
    std::cout<<"sum (CPU): "<<sumCpu<<"; time:"<<usedTime<<std::endl;
    return 0;
}
/**************************************
***************************************
* Code Can be compiled using --> nvcc kernel5.cu -lcurand if the cuRand lib is the envirement PATH
* else use nvcc kernel5.cu -L</path/to/the/lib> -lcurand
***************************************
**************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math.h>
// Monte-Carlo estimation of pi: each thread draws points in the unit
// square and counts those inside the quarter circle; the per-block
// totals are tree-reduced in shared memory and written to
// results[blockIdx.x].
// Requires: blockDim.x a power of two; dynamic shared memory of
// blockDim.x ints; states sized gridDim.x*blockDim.x.
__global__ void MC_test(unsigned int seed,curandState *states,unsigned int numsim,unsigned int *results)
{
    extern __shared__ int sdata[];
    int nthreads = gridDim.x * blockDim.x;
    int tx = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + tx;
    curandState *state = states + idx;
    unsigned int innerpoint = 0;
    float x, y, l2norm2;
    // Fix: seed with the GLOBAL thread id. Seeding with tx gave every
    // block an identical random stream, duplicating samples.
    curand_init(seed, idx, 0, state);
    // Grid-stride over the simulations (fix: start at idx, not tx).
    for (unsigned int i = idx; i < numsim; i += nthreads) {
        x = curand_uniform(state);
        y = curand_uniform(state);
        l2norm2 = x * x + y * y;
        if (l2norm2 < 1.0f)
        {
            innerpoint++;
        }
    }
    sdata[tx] = innerpoint;
    __syncthreads();
    // Tree reduction; the barrier INSIDE the loop is required to order
    // each round's writes before the next round's reads (the original
    // had no barrier between rounds — a data race).
    for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) {
        if (tx < s) {
            sdata[tx] = sdata[tx] + sdata[tx + s];
        }
        __syncthreads();
    }
    if (tx == 0) {
        results[blockIdx.x] = sdata[0];
    }
}
// Runs one pi estimation with `numsim` samples on 2 blocks of 1024
// threads and prints the estimate plus its absolute error.
void caller(unsigned int numsim){
    static curandState *states=NULL;
    unsigned int *results;
    unsigned int seed=rand();
    float pi=0;
    float r_pi= 3.14159265358979323846;
    dim3 block;
    dim3 grid;
    block.x=1<<10;
    grid.x=2;//=(numsim +block.x -1)/block.x; //ceil((float)numsim/(float)(block.x));
    printf(" \n grid %d block %d ",grid.x,block.x);
    cudaMallocManaged(&states,sizeof(curandState)*block.x * grid.x);
    cudaMallocManaged(&results,2*sizeof(unsigned int));
    results[0]=0;
    results[1]=0;
    // Dynamic shared memory: one int per thread for the block reduction.
    MC_test<<<grid , block, block.x*sizeof(unsigned int)>>>(seed,states,numsim,results);
    // Managed memory: must synchronize before the host reads results.
    cudaDeviceSynchronize();
    pi=4*(float)(results[0]+results[1])/(float)(numsim);
    printf(":: sims= %d, MC_pi= %f , error= %f \t",numsim,pi,abs(pi-r_pi));
    cudaFree(states);
    cudaFree(results);   // fix: results was leaked on every call
}
// Runs pi estimations with sample counts 2^1 .. 2^30.
// Fix: the original looped i up to 49 and computed 1<<i, which is
// undefined behavior on 32-bit int for i >= 31; the shift is now capped.
int main(){
    unsigned int N=50;
    for (int i = 1; i < (int)N && i < 31; i++){
        caller(1u << i);
    }
    printf("\n");
    return 0;
}
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Computes a unique global thread id for a 2-D grid of 2-D blocks
// (row-major over blocks, then threads) and prints the element of
// `data` that id maps to.
__global__ void unique_gid_calculation_2d_2d(int *data) {
  int tid = blockDim.x * threadIdx.y + threadIdx.x;
  int threads_per_block = blockDim.x * blockDim.y;
  int block_offset = blockIdx.x * threads_per_block;
  int threads_per_grid_row = threads_per_block * gridDim.x;
  int row_offset = blockIdx.y * threads_per_grid_row;
  int gid = tid + block_offset + row_offset;
  printf("blockIdx.x: %d, blockIdx.y: %d, threadIdx.x: %d, gid: %d, - data: "
         "%d \n",
         blockIdx.x, blockIdx.y, tid, gid, data[gid]);
}
// Copies 16 ints to the device and launches a 2x2 grid of 2x2 blocks
// (exactly 16 threads) that each print the element they own.
int main() {
  const int array_size = 16;
  const int array_byte_size = sizeof(int) * array_size;
  int h_data[] = {23, 9, 4, 53, 65, 12, 1, 33, 87, 45, 23, 12, 342, 56, 44, 99};
  for (int idx = 0; idx < array_size; idx++) {
    printf("%d ", h_data[idx]);
  }
  printf("\n \n");
  int *d_data = nullptr;
  cudaMalloc((void **)&d_data, array_byte_size);
  cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
  dim3 block(2, 2);
  dim3 grid(2, 2);
  unique_gid_calculation_2d_2d<<<grid, block>>>(d_data);
  cudaDeviceSynchronize();  // wait for device-side printf to complete
  cudaDeviceReset();        // flush output and tear down the context
  return 0;
}
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<time.h>
#include <stdio.h>
#include <stdlib.h>
#define max(a,b) (((a)>(b))?(a):(b))
//Costos de la funcion SW
#define indel -1
#define match 2
#define mismatch -1
#define TILE_WIDTH 8
#define NUM_THREADS 32
const int arraySize = 65536;
// Fix: parenthesized. The original `#define Width arraySize+1` expanded
// unparenthesized, so expressions such as `Row*Width` actually computed
// `Row*arraySize + 1`.
#define Width (arraySize+1)
// Fix: the original `arraySize+1 * arraySize+1` evaluated (by operator
// precedence) to 2*arraySize+1, grossly undersizing every score-matrix
// buffer. The true (arraySize+1)^2 element count overflows 32-bit int,
// so it is computed in size_t. NOTE(review): at arraySize==65536 this is
// a ~17 GB matrix — confirm arraySize is meant to be this large.
size_t SIZE = (size_t)(arraySize + 1) * (arraySize + 1);
size_t SIZED = SIZE * sizeof(int);
cudaError_t SWHelper(int (*c)[arraySize+1], const char *a, const char *b, size_t size);
cudaError_t SWHelperL(int* c, const char *a, const char *b, size_t size);
// One wavefront step of the GROWING phase of Smith-Waterman on the 2-D
// score matrix c: thread t computes cell (i, j) = (t+1, *k - t) of
// anti-diagonal *k from its north (gap), west (gap) and north-west
// (match/mismatch) neighbours, clamped at 0 (local alignment).
// Launched <<<1, diag>>> once per diagonal; assumes earlier diagonals
// are final and row/column 0 remain zero.
__global__ void SmithWKernelExpand(int (*c)[arraySize+1], const char *a, const char *b, const int *k)
{
int i = threadIdx.x+1;
int j = ((*k)-i)+1;
int north=c[i][(j)-1]+indel; //Indel
int west=c[i-1][j]+indel;
int northwest;
if (((int) a[i-1])==((int)b[(j)-1]))
northwest=c[i-1][(j)-1]+match; //Match
else
northwest=c[i-1][(j)-1]+mismatch; //Mismatch
c[i][j] = max(max(north, west),max(northwest,0));
}
// SHRINKING-phase counterpart of SmithWKernelExpand: for diagonals past
// the main anti-diagonal, row index i starts at (*k - arraySize) + 1 so
// the (i, j) pair stays inside the matrix as the wavefront narrows.
__global__ void SmithWKernelShrink(int (*c)[arraySize+1], const char *a, const char *b, const int *k)
{
int i = threadIdx.x+((*k)-arraySize)+1;
int j = ((*k)-i)+1;
int north=c[i][(j)-1]+indel; //Indel
int west=c[i-1][j]+indel;
int northwest;
if (((int) a[i-1])==((int)b[(j)-1]))
northwest=c[i-1][(j)-1]+match; //Match
else
northwest=c[i-1][(j)-1]+mismatch; //Mismatch
c[i][j] = max(max(north, west),max(northwest,0));
}
// Linear-memory variant of SmithWKernelExpand: c is the score matrix
// stored row-major with row stride arraySize+1. Fix: the original
// flattened the 2-D indices incorrectly — the west cell read
// c[i*(arraySize+1)-1+j] (i.e. c[i][j-1] shifted) instead of c[i-1][j],
// and the mismatch branch read c[i-1+(j)-1]. Indices now mirror the
// 2-D kernel exactly.
__global__ void SmithWKernelExpandL(int *c, const char *a, const char *b, const int *k){
    int i = threadIdx.x+1;
    int j = ((*k)-i)+1;
    const int W = arraySize + 1;                   // row stride
    int north = c[i*W + (j-1)] + indel;            // gap (north = c[i][j-1])
    int west  = c[(i-1)*W + j] + indel;            // gap (west = c[i-1][j])
    int northwest;
    if (((int) a[i-1])==((int)b[(j)-1]))
        northwest = c[(i-1)*W + (j-1)] + match;    // match (c[i-1][j-1])
    else
        northwest = c[(i-1)*W + (j-1)] + mismatch; // mismatch (c[i-1][j-1])
    c[i*W + j] = max(max(north, west),max(northwest,0));
}
// Linear-memory variant of SmithWKernelShrink (diagonals past the main
// anti-diagonal). Fix: the original mis-flattened the west and
// north-west neighbours (c[i*(arraySize+1)-1+...]); indices now mirror
// the 2-D kernel exactly.
__global__ void SmithWKernelShrinkL(int *c, const char *a, const char *b, const int *k)
{
    int i = threadIdx.x+((*k)-arraySize)+1;
    int j = ((*k)-i)+1;
    const int W = arraySize + 1;                   // row stride
    int north = c[i*W + (j-1)] + indel;            // gap (north = c[i][j-1])
    int west  = c[(i-1)*W + j] + indel;            // gap (west = c[i-1][j])
    int northwest;
    if (((int) a[i-1])==((int)b[(j)-1]))
        northwest = c[(i-1)*W + (j-1)] + match;    // match (c[i-1][j-1])
    else
        northwest = c[(i-1)*W + (j-1)] + mismatch; // mismatch (c[i-1][j-1])
    c[i*W + j] = max(max(north, west),max(northwest,0));
}
// Dumps the full (arraySize+1) x (arraySize+1) score matrix,
// tab-separated, one matrix row per output line.
void print(int c[arraySize+1][arraySize+1]){
    for (int r = 0; r < arraySize+1; r++) {
        for (int col = 0; col < arraySize+1; col++) {
            printf("%d \t", c[r][col]);
        }
        printf("\n");
    }
}
// Dumps the linear score matrix (row stride arraySize+1), tab-separated.
// Fix: the original indexed c[i*arraySize+1 + j], which by operator
// precedence reads c[i*arraySize + 1 + j] — the wrong element for every
// row past the first.
void printL(int *c){
    for (int i = 0; i < arraySize+1; i++) {
        for (int j = 0; j < arraySize+1; j++) {
            printf("%d \t", c[i*(arraySize+1) + j]);
        }
        printf("\n");
    }
}
//matriz de entrada, i y j (salida) posicion de mayor valor
// Scans matrix c in TILE_WIDTH x TILE_WIDTH tiles looking for the
// maximum element, reporting its position through i/j.
// NOTE(review): i and j are int& on a __global__ kernel — they bind to
// HOST memory and cannot legally be written from device code; all
// threads also race on them with no atomics. The tile is written as
// sub_matriz[ty][tx] but read as [tx][ty] (transposed), and i is
// assigned Row*Width (a flattened offset, not a row index). Treat the
// output as unreliable until this kernel is redesigned (e.g. an atomic
// argmax into a device buffer).
__global__ void MaximosTiled(int *c,int &i,int &j)
{
__shared__ int sub_matriz[TILE_WIDTH][TILE_WIDTH];
//__shared__ int Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int max_local = 0;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
int Pvalue = 0;
//Row*Width + ph*TILE_WIDTH + tx
// Slide the tile window across the row dimension in phases.
for (int ph = 0; ph < Width/TILE_WIDTH; ++ph) {
// Load this phase's tile into shared memory (guarded at the edges).
if ((Row< Width) && (ph*TILE_WIDTH+tx)< Width && ((ph*TILE_WIDTH+ty)<Width && Col<Width))
sub_matriz[ty][tx] = c[Row*Width + ph*TILE_WIDTH + tx];
__syncthreads();
// Track the per-thread running maximum and its (claimed) position.
if(max_local < sub_matriz[tx][ty]){
max_local = sub_matriz[tx][ty];
i=Row*Width;
j=ph*TILE_WIDTH + tx;
}
__syncthreads();
}
}
// Finds the maximum score cell via the MaximosTiled kernel, then walks
// the linear score matrix backwards printing the local alignment.
// NOTE(review): MaximosTiled takes i/j by host reference, so maxi/maxj
// are never actually written by the GPU (they stay 0) — see the kernel's
// review notes. There is also no cudaDeviceSynchronize() between the
// asynchronous launch and reading the results.
// NOTE(review): the mismatch branch below indexes c[i-1*Width+(j)-1],
// which by precedence is c[i - Width + j - 1], not c[(i-1)*Width+j-1].
void traceback_tiled(int *c, char a[], char b[]){
int j=0,i=0;
int maxi=0,maxj=0,max=0;
int *c_d;
// Allocate the matrix on the device.
cudaMalloc((void **)&c_d, SIZED);
// Upload the score matrix.
cudaMemcpy(c_d, c, SIZED, cudaMemcpyHostToDevice);
// Launch configuration.
// Threads per block.
dim3 threadsPerBlock(32,32);
// Blocks per grid.
dim3 blocksPerGrid(800,800);
threadsPerBlock.x = NUM_THREADS;
threadsPerBlock.y = NUM_THREADS;
blocksPerGrid.x = ceil(double(Width) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(Width) / double(threadsPerBlock.y));
MaximosTiled <<<blocksPerGrid, threadsPerBlock>>>(c_d,maxi,maxj);
cudaFree(c_d);
i=maxi;
j=maxj;
printf("The optimal local alignment starts at index %d for a, and index %d for b.\n", i,j);
// Walk back until a zero score (local alignment start).
while (c[i*Width+j]!=0 && i>=0 && j>=0 ){
printf("\n");
if (c[i*Width+j]==c[(i-1)*Width+(j)-1]+match){ //From match
i--;
j--;
printf("%c -- %c", a[i], b[j]);
}
else if (c[i*Width+j]==c[i-1*Width+(j)-1]+mismatch){ //From mismatch
i--;
j--;
printf("%c -- %c", a[i], b[j]);
}
else if (c[i*Width+j]==c[i*Width+(j)-1]+indel){ //North
j--;
printf("- -- %c", b[j]);
}
else{ //Else has to be from West
i--;
printf("%c -- -", a[i]);
}
}
printf("\n\nThe optimal local alignment ends at index %d for a, and index %d for b.\n", i,j);
}
// CPU traceback on the 2-D score matrix: scans for the global maximum,
// then walks back (match/mismatch diagonal, north, or west moves)
// printing the aligned character pairs until a zero score is reached.
// NOTE(review): when i or j reaches 0 while c[i][j]!=0, the diagonal
// comparisons read c[-1][...] before the i>=0/j>=0 guards can stop the
// loop — out-of-bounds for a true boundary cell; confirm row/col 0 are
// always zero.
void traceback(int c[arraySize+1][arraySize+1], char a[], char b[]){
int j=0,i=0;
int maxi=0,maxj=0,max=0;
// Exhaustive scan for the best-scoring cell.
for (i = 0; i < arraySize+1; i++) {
for (j = 0; j < arraySize+1; j++) {
if(c[i][j]>max){
maxi=i;
maxj=j;
max=c[i][j];
}
}
}
i=maxi;
j=maxj;
printf("The optimal local alignment starts at index %d for a, and index %d for b.\n", i,j);
while (c[i][j]!=0 && i>=0 && j>=0 ){
printf("\n");
if (c[i][j]==c[i-1][(j)-1]+match){ //From match
i--;
j--;
printf("%c -- %c", a[i], b[j]);
}
else if (c[i][j]==c[i-1][(j)-1]+mismatch){ //From mismatch
i--;
j--;
printf("%c -- %c", a[i], b[j]);
}
else if (c[i][j]==c[i][(j)-1]+indel){ //North
j--;
printf("- -- %c", b[j]);
}
else{ //Else has to be from West
i--;
printf("%c -- -", a[i]);
}
}
printf("\n\nThe optimal local alignment ends at index %d for a, and index %d for b.\n", i,j);
}
// Generates two random DNA strings of length arraySize, scores them
// with the linear-memory GPU Smith-Waterman (SWHelperL), and prints the
// best local alignment plus timing.
int main()
{
    char b[arraySize];
    char a[arraySize];
    int i=0;
    // Generate the random sequences.
    srand (time(NULL));
    printf("\nString a is: ");
    for(i=0;i<arraySize;i++)
    {
        int gen1=rand()%4;
        switch(gen1)
        {
        case 0:a[i]='a';
        break;
        case 1: a[i]='c';
        break;
        case 2: a[i]='g';
        break;
        case 3: a[i]='t';
        }
        printf("%c ", a[i]);
    }
    printf("\nString b is: ");
    for(i=0;i<arraySize;i++)
    {
        int gen1=rand()%4;
        switch(gen1)
        {
        case 0:b[i]='a';
        break;
        case 1: b[i]='c';
        break;
        case 2: b[i]='g';
        break;
        case 3: b[i]='t';
        }
        printf("%c ", b[i]);
    }
    printf("\nOkay, generated the string \n");
    // Fix: the original also declared `int c[arraySize+1][arraySize+1]`
    // on the STACK (~16 TB at arraySize==65536) and printed that
    // uninitialized array after the GPU run — a guaranteed stack
    // overflow, and never used by the GPU path. Removed.
    int *h_c = (int *)malloc(SIZED);
    clock_t start=clock();
    // Run the linear-memory SW helper.
    cudaError_t cudaStatus = SWHelperL(h_c, a, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "SWHelper failed!");
        return 1;
    }
    clock_t end=clock();
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    // NOTE(review): traceback_tiled launches a kernel AFTER
    // cudaDeviceReset(), implicitly re-creating the context — consider
    // moving the reset to the end of main.
    traceback_tiled(h_c,a,b);
    printf("\n\nEnter any number to exit.");
    printf("\n\nTotal time taken is %f seconds\n",(double)(end-start)/CLOCKS_PER_SEC);
    int x;
    scanf("%d", &x);
    free(h_c);   // fix: h_c was leaked
    return 0;
}
// Funcion de soporte para SW
// Drives the 2-D-array Smith-Waterman kernels: copies the sequences and
// score matrix to the device, sweeps all 2*size-1 anti-diagonals (one
// kernel launch per diagonal: expand phase then shrink phase), and
// copies the finished matrix back into c. Returns the last CUDA status.
cudaError_t SWHelper(int (*c)[arraySize+1], const char *a, const char *b, size_t size)
{
    char *dev_a;
    char *dev_b;
    int (*dev_c)[arraySize+1] = {0};
    int j = 0;          // fix: was declared `int (*j)=0` (a pointer) and
                        // memcpy'd as if it were an int
    int *dev_j;         // device copy of the current diagonal index
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
    }
    // Allocate GPU buffers.
    cudaStatus = cudaMalloc((void**)&dev_c, (size+1) * (size+1) * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(char));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(char));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    cudaStatus = cudaMalloc((void**)&dev_j, sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    // Copy inputs from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    cudaStatus = cudaMemcpy(dev_j, &j, sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    cudaStatus = cudaMemcpy(dev_c, c, (size+1) * (size+1) * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    int i=0;
    clock_t start1=clock();
    // Expanding phase: diagonal i has i cells, one thread per cell.
    for (i=1; i<size+1; i++){
        cudaStatus = cudaMemcpy(dev_j, &i, sizeof(int), cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");  // fix: stray printf argument removed
        }
        SmithWKernelExpand<<<1, i>>>(dev_c, dev_a, dev_b, dev_j);
    }
    // Shrink phase: diagonals narrow from size-1 cells down to 1.
    for (int k=size-1; k>0; k--, i++){
        cudaStatus = cudaMemcpy(dev_j, &i, sizeof(int), cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
        }
        SmithWKernelShrink<<<1, k>>>(dev_c, dev_a, dev_b, dev_j);
    }
    clock_t end1=clock();
    printf("\n\nKernel Time taken is %f seconds\n",(double)(end1-start1)/CLOCKS_PER_SEC);
    // cudaDeviceSynchronize waits for the kernels to finish and surfaces
    // any asynchronous execution errors.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching SmithWKernel!\n", cudaStatus);
    }
    // Copy the finished matrix back to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, (size+1) * (size+1) * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_j);   // fix: dev_j was leaked
    return cudaStatus;
}
// Linear-memory twin of SWHelper: same anti-diagonal sweep, but the
// score matrix is a flat row-major buffer (row stride arraySize+1).
// Returns the last CUDA status.
cudaError_t SWHelperL(int* c, const char *a, const char *b, size_t size)
{
    char *dev_a;
    char *dev_b;
    int *dev_c;
    int j = 0;          // fix: was declared `int (*j)=0` (a pointer) and
                        // memcpy'd as if it were an int
    int *dev_j;         // device copy of the current diagonal index
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
    }
    // Allocate GPU buffers.
    cudaStatus = cudaMalloc((void**)&dev_c, (size+1) * (size+1) * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(char));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(char));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    cudaStatus = cudaMalloc((void**)&dev_j, sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    // Copy inputs from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    cudaStatus = cudaMemcpy(dev_j, &j, sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    cudaStatus = cudaMemcpy(dev_c, c, (size+1) * (size+1) * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    int i=0;
    clock_t start1=clock();
    // Expanding phase: diagonal i has i cells, one thread per cell.
    for (i=1; i<size+1; i++){
        cudaStatus = cudaMemcpy(dev_j, &i, sizeof(int), cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");  // fix: stray printf argument removed
        }
        SmithWKernelExpandL<<<1, i>>>(dev_c, dev_a, dev_b, dev_j);
    }
    // Shrink phase: diagonals narrow from size-1 cells down to 1.
    for (int k=size-1; k>0; k--, i++){
        cudaStatus = cudaMemcpy(dev_j, &i, sizeof(int), cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
        }
        SmithWKernelShrinkL<<<1, k>>>(dev_c, dev_a, dev_b, dev_j);
    }
    clock_t end1=clock();
    printf("\n\nKernel Time taken is %f seconds\n",(double)(end1-start1)/CLOCKS_PER_SEC);
    // cudaDeviceSynchronize waits for the kernels to finish and surfaces
    // any asynchronous execution errors.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching SmithWKernel!\n", cudaStatus);
    }
    // Copy the finished matrix back to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, (size+1) * (size+1) * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_j);   // fix: dev_j was leaked
    return cudaStatus;
}
|
// Exemplo para o curso de Super Computacao
// Criado por: Luciano P. Soares (10 de Abril de 2018)
#include <stdio.h>
#include <stdlib.h>
//#include <cuda.h>
//#include <cuda_runtime.h>
/* Informacoes da GPU */
/* Device information dump: prints grid/block limits, SM count, clock
   rate and warp size for every CUDA device found. */
int main() {
    int dev_count = 0;
    cudaGetDeviceCount(&dev_count);
    printf("Numero de devices (GPU) = %d\n\n", dev_count );
    cudaDeviceProp dev_prop;
    for (int dev = 0; dev < dev_count; ++dev) {
        printf("\tDevice (%d)\n", dev);
        cudaGetDeviceProperties(&dev_prop, dev);
        printf("\t\tNumero maximo de Bloco\n");
        printf("\t\t\t Dimensao maxima em x = %d, y = %d, z = %d\n", dev_prop.maxGridSize[0],dev_prop.maxGridSize[1],dev_prop.maxGridSize[2] );
        printf("\t\tNumero maximo de Threads por Bloco = %d\n", dev_prop.maxThreadsPerBlock );
        printf("\t\t\t Dimensao maxima em x = %d, y = %d, z = %d\n", dev_prop.maxThreadsDim[0],dev_prop.maxThreadsDim[1],dev_prop.maxThreadsDim[2] );
        printf("\t\tNumero maximo de Streaming Multiprocessors (SMs) = %d\n", dev_prop.multiProcessorCount );
        printf("\t\tFrequencia de Clock = %d\n", dev_prop.clockRate );
        printf("\t\tTamanho do Warp = %d\n", dev_prop.warpSize );
    }
    return 0;
}
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <iostream>
#include <iomanip>
#include <stdio.h>
using namespace std;
// One skip-list cell in the flat array layout used below: level L of key
// k lives at index (k-1) + L*N.
struct Node{
int key;        // stored key; -1 marks an empty/unused cell
int nextIdx;    // flat index of the next node on the same level
int nextLevel;  // flat index of this key's node one level below (-1 if none)
};
// Places each shuffled key at its home slot on level 0 of the flat skip
// list: key k goes to sl[k-1] and points at slot k as its successor.
// Launch: one thread per element of data; keys are assumed to be 1..N.
__global__ void assign(Node *sl, Node *data)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int k = data[i].key;
sl[k - 1].key = k;
sl[k - 1].nextIdx = k;
}
// Builds the upper skip-list levels: every 2^level-th slot is promoted,
// its copy on level `level` linking 2^level ahead on the same level and
// down to the level below. Launch: one thread per level-0 slot.
__global__ void connect(Node *sl, int N)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int level = 1;
// i = 2^level; slot id is promoted to every level whose stride divides it.
for (int i = 2; i < N+1; i = i * 2)
{
if (id%i == 0)
{
int newid = id + level*N;        // flat index of this key on `level`
sl[newid].key = sl[id].key;
sl[newid].nextIdx = newid + i;   // next promoted slot on this level
sl[newid].nextLevel = newid - N; // same key, one level down
}
level++;
}
}
// Returns end - start as a normalized timespec (tv_nsec in [0, 1e9)).
struct timespec diff(timespec start, timespec end)
{
    struct timespec out;
    long ns = end.tv_nsec - start.tv_nsec;
    if (ns < 0) {
        // Borrow one second when the nanosecond part underflows.
        out.tv_sec = end.tv_sec - start.tv_sec - 1;
        out.tv_nsec = 1000000000 + ns;
    } else {
        out.tv_sec = end.tv_sec - start.tv_sec;
        out.tv_nsec = ns;
    }
    return out;
}
// Debug dump of the flat skip list: prints the key, nextIdx and
// nextLevel fields of all N*MAX_LEVEL cells, one level per output row.
void print(Node *sl, int N, int MAX_LEVEL)
{
cout << "Index:" << endl;
for (int i = 0; i<N*MAX_LEVEL; i++)
{
cout << setw(4) << sl[i].key;
if ((i + 1) % N == 0)
cout << endl;
}
printf("NextIndex:\n");
for (int i = 0; i<N*MAX_LEVEL; i++)
{
cout << setw(4) << sl[i].nextIdx;
if ((i + 1) % N == 0)
cout << endl;
}
printf("NextLevel:\n");
for (int i = 0; i<N*MAX_LEVEL; i++)
{
cout << setw(4) << sl[i].nextLevel;
if ((i + 1) % N == 0)
cout << endl;
}
}
// Benchmark: for growing data sizes (N doubles each outer iteration),
// builds a GPU skip list 10 times from a shuffled 1..N key set using
// unified memory, timing allocation + build + free with
// CLOCK_PROCESS_CPUTIME_ID, and prints per-loop and average times.
// NOTE(review): cudaGetLastError after each launch catches only launch
// errors; kernel execution errors would surface at the next
// synchronizing call (here, implicitly at cudaFree).
int main()
{
int N=1024*1024, MAX_LEVEL=21;
Node *sl, *data;
double time_used, sum=0;
cudaError_t err = cudaSuccess;
struct timespec start, end, temp;
int loop;
for(MAX_LEVEL=21;MAX_LEVEL<30;MAX_LEVEL++)
{
for(loop=1;loop<11;loop++)
{
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start);
err = cudaMallocManaged(&sl, N * sizeof(Node) * MAX_LEVEL);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to malloc sl in loop %d : %s\n", loop, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMallocManaged(&data, N * sizeof(Node));
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to malloc data in loop %d : %s\n", loop, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Keys 1..N in order, then shuffled by 1000 random swaps.
for (int i = 0; i < N; i++)
{
data[i].key =i+1;
}
int random = 1000;
while (random--) {
int i = rand() % N;
int j = rand() % N;
int tmp = data[i].key;
data[i].key = data[j].key;
data[j].key = tmp;
}
// Mark every skip-list cell empty before the build.
for (int i = 0; i < MAX_LEVEL * N; i++) {
sl[i].key = -1;
sl[i].nextLevel = -1;
sl[i].nextIdx = -1;
}
// One thread per key; N is a multiple of 1024 by construction.
int block = N/1024;
assign <<< block, 1024 >>> (sl,data);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to assign in loop %d : %s\n", loop, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
connect <<< block,1024 >>> (sl, N);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to connect in loop %d : %s\n", loop, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaFree(sl);
cudaFree(data);
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &end);
temp = diff(start, end);
time_used = 1000 * (temp.tv_sec + (double)temp.tv_nsec / 1000000000.0);
sum += time_used;
cout << "loop: " << loop << " time: " << time_used << endl;
// cout << sum << endl;
}
cout << "Data:" << N << endl << "Maxlevel: " << MAX_LEVEL << endl << sum/10 << endl << endl;
N = N *2;
sum=0;
}
return 0;
}
|
#include <iostream>
#include <cstdlib>
// Writes each thread's global index into a[gid]. No bounds guard: the
// launch must supply exactly one thread per element.
__global__ void func(int* a) {
  const auto gid = threadIdx.x + blockIdx.x * blockDim.x;
  a[gid] = gid;
}
// Verifies a[i] == i for every i in [0, size); on the first mismatch,
// reports the source location and exits with status 1.
void check(const int* a,
           const int size) {
  for (int idx = 0; idx < size; ++idx) {
    if (a[idx] == idx) continue;
    std::cerr << "Error occurs (a[i] != i) at " << __FILE__ << " " << __LINE__ << "\n";
    std::exit(1);
  }
}
// Fills a device array with each thread's global index, copies it back,
// and verifies a[i] == i on the host.
int main() {
  const int tb_size = 128;   // threads per block
  const int gr_size = 1000;  // blocks
  const auto size = tb_size * gr_size;
  int* a_h = new int[size];
  int* a_d = nullptr;
  cudaMalloc((void**)&a_d, size * sizeof(int));
  func<<<gr_size, tb_size>>>(a_d);
  // Blocking copy: also synchronizes with the kernel.
  cudaMemcpy(a_h, a_d,
             size * sizeof(int),
             cudaMemcpyDeviceToHost);
  check(a_h, size);
  cudaFree(a_d);
  delete [] a_h;
}
|
//nvcc -o lab5_31 lab5_31.cu
/*Author:Pedro Silva*/
/*3. Implemente um programa em CUDA que devolva a transposta de uma matriz
Teste para vários tamanhos da matriz.*/ |
#include "includes.h"
// filename: eeTanh.cu
// a simple CUDA kernel to square the elements of a matrix
extern "C" // ensure function name to be exactly "eeTanh"
{
}
// Element-wise absolute error, in place: A[idx] = |A[idx] - Y[idx]| for
// an N x M matrix addressed as idx = j*N + i (i is the fast dimension).
// Expects a 2-D launch covering i in [0,N) (x) and j in [0,M) (y).
__global__ void absErr(int N, int M, float *A, float *Y)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < N && j < M)
    {
        const int idx = j*N + i;
        // Round-to-nearest subtraction, then absolute value.
        A[idx] = fabsf(__fsub_rn(A[idx], Y[idx]));
    }
}
#include "includes.h"
#define K 3
#define BLCH 8
#define BLCW 32
// Convolution-style accumulation: for each of imgN images, thread
// (idX, idY) accumulates an nF x nF window product of img and filter f
// into out at the corresponding convH x convW position.
// NOTE(review): inm1/ind1 are incremented at the TOP of the image loop,
// so image 0 is read/written at offset imgW*imgH / convW*convH rather
// than 0 — confirm this one-frame offset is intended.
// NOTE(review): out is accumulated with += and is assumed pre-zeroed by
// the caller — TODO confirm. Parameters bh/bw are unused here.
__global__ void compute_gpu(float *img, float *f, float * out, int bh, int bw, int imgH, int imgW, int imgN, int nF, int convH, int convW){
int idY = blockDim.y * blockIdx.y + threadIdx.y;
int idX = blockDim.x * blockIdx.x + threadIdx.x;
int inm1, inm2, inm3, inm4, inf, ind1, ind2, ind3;
inm1 = 0;
inf = 0;
ind1 = 0;
for (int mi = 0; mi < imgN; mi++){
ind1 += convW * convH;
inm1 += imgW * imgH;
if (idX < convH && idY < convW){
// Base offsets for this thread's row/column within image mi.
ind2 = ind1 + convW * idX;
inm2 = inm1 + imgW * idX;
ind3 = ind2 + idY;
inm3 = inm2 + idY;
// Accumulate the nF x nF filter window.
for (int fi = 0; fi < nF; fi++){
inm4 = inm3 + imgW * fi;
inf = ind3*nF*nF + fi*nF;
for (int fj = 0; fj < nF; fj++){
out[ind3] += img[inm4+fj] * f[inf+fj];
}
}
}
}
}
#include "includes.h"
// Forward pass of one 256-wide fully-connected layer: adds
// layer1[offset-1][i] * synH[i][j] into layer1[offset][j], where layers are
// stored back-to-back in layer1 at a stride of 256 floats.
// atomicAdd serializes the contributions of every i to each output j.
// NOTE(review): no bounds guard -- the launch grid must cover exactly
// 256 x 256 threads over (i, j); confirm at the call site.
__global__ void FpropH(float* layer1, const float* synH, const int offset)
{
int i = blockDim.x*blockIdx.x + threadIdx.x; //256
int j = blockDim.y*blockIdx.y + threadIdx.y; //256
atomicAdd(&layer1[256*offset + j], layer1[256*(offset-1) + i] * synH[i*256 + j]);
//__syncthreads();
//if (i == 0)
// layerH[j] = layer1[j];
}
21,635 | // IFF-6/11 Nerijus Dulke L4a
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <string>
using namespace std;
// masyvu skaicius
const int N = 4;
// automobiliu skaicius masyve
const int K = 10;
// maksimalus pavadinimo simboliu skaicius
const int MAX_ILGIS = 15;
// A car record with a fixed-width name buffer sized for N concatenated names
// (plus NUL), so the same type serves as input element and combined result.
struct Automobilis {
public:
	char pavadinimas[N * MAX_ILGIS + 1];
	int galia;
	double kuroSanaudos;
	// Host constructor: blank-pad the entire name field.
	// Fix: the original memset covered only N*MAX_ILGIS - 1 bytes, leaving
	// the final name byte uninitialized.
	__host__ Automobilis() : galia(0), kuroSanaudos(0.0) {
		memset(pavadinimas, ' ', N * MAX_ILGIS);
		pavadinimas[N * MAX_ILGIS] = '\0';
	}
	// Device constructor used by the kernel to build a combined record;
	// 'pavadinimas' must point at N*MAX_ILGIS readable chars.
	// Fix: NUL-terminate -- the field is later printed as a C string.
	__device__ Automobilis(char pavadinimas[], int galia, double kuroSanaudos) {
		for (int i = 0; i < N * MAX_ILGIS; i++)
		{
			this->pavadinimas[i] = pavadinimas[i];
		}
		this->pavadinimas[N * MAX_ILGIS] = '\0';
		this->galia = galia;
		this->kuroSanaudos = kuroSanaudos;
	}
	// Trivial destructor, callable from host and device.
	__host__ __device__ ~Automobilis() {};
};
// Kernel: one thread per column (K threads). Sums the 'galia' (power) and
// 'kuroSanaudos' (fuel consumption) fields of the car at the same column
// across all N arrays, concatenates the N names, and stores the combined
// record into rezultatai[index] via the device constructor.
__global__ void sudeti(Automobilis* automobiliai, Automobilis* rezultatai) {
// Thread index selects the column within each of the N arrays.
int index = threadIdx.x;
int galia = 0;
double kuroSanaudos = 0.0;
char pavadinimai[N * MAX_ILGIS];
for (int i = 0; i < N; i++)
{
// Data is flattened: element (i, index) of the matrix lives at
// [i * K + index] in the linear array.
galia += automobiliai[i * K + index].galia;
kuroSanaudos += automobiliai[i * K + index].kuroSanaudos;
for (int j = 0; j < MAX_ILGIS; j++)
{
pavadinimai[MAX_ILGIS * i + j] = automobiliai[i * K + index].pavadinimas[j];
}
}
rezultatai[index] = Automobilis(pavadinimai, galia, kuroSanaudos);
}
// Flattens the N x K matrix, copies it to the GPU, runs 'sudeti' with K
// threads and copies the K combined records back into 'rezultatai'.
// Returns the last CUDA status; resources are released on every path.
cudaError_t vykdyti(Automobilis** duomenu_matrica, Automobilis* rezultatai) {
	cudaError_t status;
	// Device pointers. Fix: the original allocated host arrays with 'new'
	// into these pointers and then overwrote them with cudaMalloc, leaking
	// the host allocations on every call.
	Automobilis* device_rezultatai = NULL;
	Automobilis* device_duomenys = NULL;
	// Flatten the 2-D matrix into one contiguous host array.
	Automobilis* duomenu_masyvas = new Automobilis[K * N];
	for (int i = 0; i < N; i++)
	{
		for (int j = 0; j < K; j++)
		{
			duomenu_masyvas[i * K + j] = duomenu_matrica[i][j];
		}
	}
	// Select the GPU device.
	status = cudaSetDevice(0);
	if (status != cudaSuccess) {
		fprintf(stderr, "Ivyko klaida pasirenkant GPU");
		goto Error;
	}
	// Allocate device memory for inputs and results.
	status = cudaMalloc((void**)&device_duomenys, N * K * sizeof(Automobilis));
	if (status != cudaSuccess) {
		fprintf(stderr, "Ivyko klaida paskiriant atminti");
		goto Error;
	}
	status = cudaMalloc((void**)&device_rezultatai, K * sizeof(Automobilis));
	if (status != cudaSuccess) {
		fprintf(stderr, "Ivyko klaida paskiriant atminti");
		goto Error;
	}
	// Copy inputs to the GPU.
	status = cudaMemcpy(device_duomenys, duomenu_masyvas, N * K * sizeof(Automobilis), cudaMemcpyHostToDevice);
	if (status != cudaSuccess) {
		fprintf(stderr, "Ivyko klaida kopijuojant i GPU");
		goto Error;
	}
	status = cudaMemcpy(device_rezultatai, rezultatai, K * sizeof(Automobilis), cudaMemcpyHostToDevice);
	if (status != cudaSuccess) {
		fprintf(stderr, "Ivyko klaida kopijuojant i GPU");
		goto Error;
	}
	// One block of K threads: one thread per result column.
	sudeti<<<1, K>>>(device_duomenys, device_rezultatai);
	// Launches are asynchronous; query the launch status explicitly.
	status = cudaGetLastError();
	if (status != cudaSuccess) {
		fprintf(stderr, "Ivyko klaida vykdant sudeti");
		goto Error;
	}
	// Wait for the kernel to finish.
	status = cudaDeviceSynchronize();
	if (status != cudaSuccess) {
		fprintf(stderr, "Ivyko klaida sinchronizuojant");
		goto Error;
	}
	// Copy results back to the host.
	status = cudaMemcpy(rezultatai, device_rezultatai, K * sizeof(Automobilis), cudaMemcpyDeviceToHost);
	if (status != cudaSuccess) {
		fprintf(stderr, "Ivyko klaida kopijuojant i CPU");
		goto Error;
	}
	// Shared cleanup for success and error paths.
	Error:
	delete[] duomenu_masyvas;
	cudaFree(device_duomenys);
	cudaFree(device_rezultatai);
	return status;
}
// Reads N arrays of K cars from the fixed input file. Each record appears to
// be "name power consumption" -- confirm against the data file. The K-element
// row arrays are allocated here and ownership passes to the caller.
void skaityti(Automobilis** automobiliai) {
ifstream F("IFF_6_11_Dulke_Nerijus_L4.txt");
string pavadinimas;
for (int i = 0; i < N; i++)
{
Automobilis* automobiliai_temp = new Automobilis[K];
F.ignore();
for (int j = 0; j < K; j++)
{
F >> pavadinimas;
// Copy the name into the fixed-width field (already blank-padded by ctor).
for (unsigned int k = 0; k < pavadinimas.length(); k++)
{
automobiliai_temp[j].pavadinimas[k] = pavadinimas[k];
}
F >> automobiliai_temp[j].galia >> automobiliai_temp[j].kuroSanaudos;
F.ignore();
}
automobiliai[i] = automobiliai_temp;
}
F.close();
}
// Writes the N input arrays to the result file as ASCII tables
// (name | power | fuel consumption). Truncates/creates the file.
void spausdintiDuomenis(Automobilis** automobiliai) {
ofstream F("IFF_6_11_Dulke_Nerijus_L4a_rez.txt");
for (int i = 0; i < N; i++)
{
// Table header for array i.
F << " ----- Automobiliu masyvas Nr. " << (i + 1) << " ----------" << endl;
F << " |" << string(MAX_ILGIS, '-') << "|" << string(12, '-') << "|" << string(13, '-') << "|" << endl;
F << " |" << setw(MAX_ILGIS) << left << "Pavadinimas" << setw(13) << left << "|Galia" << setw(9) << left << "|Kuro sanaudos|" << endl;
F << " |" << string(MAX_ILGIS, '-') << "|" << string(12, '-') << "|" << string(13, '-') << "|" << endl;
for (int j = 0; j < K; j++) {
F << setw(3) << left << (j + 1) << "|";
// Print exactly MAX_ILGIS chars: per-entry names are not NUL-terminated.
for (int k = 0; k < MAX_ILGIS; k++) F << automobiliai[i][j].pavadinimas[k];
F << "|" << setw(12) << left << automobiliai[i][j].galia << "|";
F << setw(13) << left << fixed << setprecision(2) << automobiliai[i][j].kuroSanaudos << "|" << endl;
}
F << " |" << string(MAX_ILGIS, '-') << "|" << string(12, '-') << "|" << string(13, '-') << "|" << endl;
F << endl;
}
}
// Appends the K combined GPU results to the result file as an ASCII table
// (concatenated names | summed power | summed fuel consumption).
void spausdintiRezultatus(Automobilis* automobiliai) {
ofstream F("IFF_6_11_Dulke_Nerijus_L4a_rez.txt", ios::app);
int lineNr = 1;
F << " ************" << endl;
F << " Rezultatai" << endl;
F << " ************" << endl;
F << " |" << string((N * MAX_ILGIS), '-') << "|" << string(12, '-') << "|" << string(13, '-') << "|" << endl;
F << " |" << setw(N * MAX_ILGIS) << left << "Sujungti pavadinimai" << setw(13) << left << "|Galia" << setw(9) << left << "|Kuro sanaudos|" << endl;
F << " |" << string((N * MAX_ILGIS), '-') << "|" << string(12, '-') << "|" << string(13, '-') << "|" << endl;
for (int i = 0; i < K; i++) {
// Relies on the combined name being NUL-terminated (device ctor).
F << setw(3) << left << lineNr++ << "|" << setw(N * MAX_ILGIS) << left << automobiliai[i].pavadinimas;
F << "|" << setw(12) << left << automobiliai[i].galia << "|";
F << setw(13) << left << fixed << setprecision(2) << automobiliai[i].kuroSanaudos << "|" << endl;
}
F << " |" << string((N * MAX_ILGIS), '-') << "|" << string(12, '-') << "|" << string(13, '-') << "|" << endl;
F.close();
}
// Reads the input matrix, runs the GPU summation and prints both the inputs
// and the results to the report file.
int main() {
	// 2-D input: N arrays of K cars each (rows allocated by skaityti()).
	Automobilis** automobiliai = new Automobilis*[N];
	Automobilis* rezultatai = new Automobilis[K];
	skaityti(automobiliai);
	// Run the main GPU routine and check for errors.
	cudaError_t status = vykdyti(automobiliai, rezultatai);
	if (status != cudaSuccess) {
		fprintf(stderr, "Ivyko klaida");
		// Fix: release the row arrays too; deleting only the pointer
		// array leaked every row allocated in skaityti().
		for (int i = 0; i < N; i++) delete[] automobiliai[i];
		delete[] automobiliai;
		delete[] rezultatai;
		return 1;
	}
	// Print inputs and results.
	spausdintiDuomenis(automobiliai);
	spausdintiRezultatus(rezultatai);
	// Release memory, including each row (leaked in the original).
	for (int i = 0; i < N; i++) delete[] automobiliai[i];
	delete[] automobiliai;
	delete[] rezultatai;
	return 0;
}
|
21,636 | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<sys/time.h>
// Returns wall-clock time in seconds (microsecond resolution) from
// gettimeofday; prints a diagnostic if the syscall fails.
double rtclock(void)
{
    struct timeval tv;
    struct timezone tz;
    int rc = gettimeofday(&tv, &tz);
    if (rc != 0) printf("Error return from gettimeofday: %d", rc);
    return tv.tv_sec + tv.tv_usec * 1.0e-6;
}
//================ Perform matrix transpose multiply on GPU ===========================
// Computes CDevice = A^T * A: element (i, j) = sum_k A[k][i] * A[k][j] for a
// row-major matrixSize x matrixSize matrix. Adjacent threads (varying i)
// read adjacent addresses d_A[k*matrixSize + i], so global loads coalesce.
// NOTE(review): no bounds guard -- the launch grid must cover exactly
// matrixSize x matrixSize threads (true in main(): dim=1024, 32x32 blocks).
__global__ void MatrixTransposeMultiplyDevice(double *d_A, double *CDevice, int matrixSize) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k;
double sum = 0.0;
for(k = 0; k < matrixSize; k++) {
sum += d_A[ k * matrixSize + i ] * d_A [ k * matrixSize + j];
}
CDevice [ i * matrixSize + j ] = sum;
}
//======================================================================================
//================ Perform matrix transpose multiply on CPU ============================
// CPU reference for CHost = A^T * A on a row-major dim x dim matrix:
// CHost[row][col] = sum_k A[k][row] * A[k][col].
void MatrixMultiplyHost(double *A, double *CHost, int dim) {
	for (int row = 0; row < dim; ++row) {
		for (int col = 0; col < dim; ++col) {
			double acc = 0.0;
			for (int k = 0; k < dim; ++k)
				acc += A[k * dim + row] * A[k * dim + col];
			CHost[row * dim + col] = acc;
		}
	}
}
//======================================================================================
//================== Validate if two matrices are the same =============================
// Compares two dim x dim matrices element-wise.
// Fix: the original used exact '!=' on doubles; CPU and GPU results of the
// same floating-point computation legitimately differ in the last bits
// (different summation order / FMA contraction), so compare with a mixed
// absolute + relative tolerance instead.
// Returns 1 when every pair matches within tolerance, else 0.
int MatrixTransposeHostValidate(double *A, double *C, int dim)
{
	for (int i = 0; i < dim; i++)
		for (int j = 0; j < dim; j++) {
			double a = A[i * dim + j];
			double c = C[i * dim + j];
			if (fabs(a - c) > 1e-9 + 1e-6 * fabs(c)) {
				return 0;
			}
		}
	return 1;
}
//======================================================================================
//================================== Print the matrix ==================================
// Prints a row-major dim x dim matrix to stdout, one row per line.
void printMatrix(double *A, int dim) {
	for (int r = 0; r < dim; ++r) {
		for (int c = 0; c < dim; ++c)
			printf("%f ", A[r * dim + c]);
		printf("\n");
	}
}
//======================================================================================
//============================= Initialize the matrix ==================================
// Fills A (dim x dim) with pseudo-random values in [1, 2] using rand();
// call srand() beforehand for reproducibility.
void initMatrix(double *A, int dim) {
	for (int r = 0; r < dim; ++r)
		for (int c = 0; c < dim; ++c)
			A[r * dim + c] = (float)(rand()/(float)RAND_MAX) + 1;
}
//======================================================================================
// Aborts on any CUDA failure; errors are otherwise silent.
#define CUDA_OK(call)                                                   \
    do {                                                                \
        cudaError_t err_ = (call);                                      \
        if (err_ != cudaSuccess) {                                      \
            fprintf(stderr, "CUDA error %s:%d: %d\n",                   \
                    __FILE__, __LINE__, (int)err_);                     \
            exit(1);                                                    \
        }                                                               \
    } while (0)
// Compares CPU vs GPU throughput for C = A^T * A on a 1024 x 1024 matrix,
// then validates the GPU result against the CPU reference.
int main(void) {
	//============================= Host Code ===================================
	double *A, *CHost, *CD;
	double GFLOPS_host;
	int dim = 1024;
	const double totalOperations = (1.0*dim*dim)*(dim*2);
	size_t memSize = sizeof(double) * dim * dim;
	// Host buffers: input, CPU result, copy of the GPU result.
	A = (double *) malloc(memSize);
	CHost = (double *) malloc(memSize);
	CD = (double *) malloc(memSize);
	// Fix: check host allocations before using them.
	if (A == NULL || CHost == NULL || CD == NULL) {
		fprintf(stderr, "host malloc failed\n");
		exit(1);
	}
	initMatrix(A, dim);
	double start_time, end_time;
	printf("**************************************************\n");
	printf("Matrix multiply on Host(CPU)\n");
	start_time = rtclock();
	MatrixMultiplyHost(A, CHost, dim);
	end_time = rtclock();
	double time_diff = end_time - start_time;
	printf("Time taken for matrix multiplication on CPU (sec) = %.5f\n", time_diff);
	GFLOPS_host = totalOperations / (1.0e9*time_diff);
	printf("GFLOPS/sec in Host = %f\n", GFLOPS_host);
	printf("**************************************************\n");
	printf("\n");
	//============================= Device Code ===================================
	double GFLOPS_device;
	double *d_A, *CDevice;
	// dim is a multiple of tpb, so the grid covers the matrix exactly
	// (the kernel has no bounds guard).
	int tpb = 32;
	int nblocks = dim/tpb;
	start_time = rtclock();
	// Fix: every CUDA call is now checked.
	CUDA_OK(cudaMalloc( (void**) &d_A, memSize));
	CUDA_OK(cudaMalloc( (void**) &CDevice, memSize));
	CUDA_OK(cudaMemcpy(d_A, A, memSize, cudaMemcpyHostToDevice));
	dim3 dimGrid(nblocks, nblocks);
	dim3 dimBlock(tpb, tpb);
	MatrixTransposeMultiplyDevice<<< dimGrid, dimBlock>>>(d_A, CDevice, dim);
	CUDA_OK(cudaGetLastError());   // launch-configuration errors surface here
	// The blocking copy also synchronizes with the kernel.
	CUDA_OK(cudaMemcpy(CD, CDevice, memSize, cudaMemcpyDeviceToHost));
	end_time = rtclock();
	printf("**************************************************\n");
	printf("Matrix multiply on Device(GPU)\n");
	double time_diff_device = end_time - start_time;
	printf("Time taken for matrix multiplication transpose in GPU with %-2d block(s) of %-4d threads (sec) = %.5f\n", nblocks, tpb, time_diff_device);
	GFLOPS_device = totalOperations / (1.0e9*time_diff_device);
	printf("GFLOPS/sec in Device = %f\n", GFLOPS_device);
	printf("**************************************************\n");
	printf("\n");
	//=========================================================================
	// Verify results between the CPU and GPU
	if(!MatrixTransposeHostValidate(CD, CHost, dim))
		fprintf(stderr, "Wrong results for matrix multiplication on GPU\n");
	// Free memory
	cudaFree(d_A);
	cudaFree(CDevice);
	free(A);
	free(CHost);
	free(CD);
}
|
21,637 | #include<stdio.h>
#include<iostream>
extern "C" {
// Kernel defined in a separate translation unit: adds two device vectors.
__global__ void GPU_add(
int n,
int* d_a,
int* d_b
);
// Host wrapper: launches GPU_add on 1 block of 1024 threads.
// d_a / d_b must be DEVICE pointers with at least n elements.
void calling_routine_c (
int n,
int* d_a,
int* d_b
)
{
//printf("cuda c stream = %lld\n",streamid);
// Call the cuda kernel:
GPU_add<<<1,1024>>>(
n,
d_a,
d_b
);
// Fix: launches are asynchronous and report configuration errors only via
// cudaGetLastError(); without this check a failed launch is silent.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("GPU_add launch failed: %s\n", cudaGetErrorString(err));
printf("Completed an add kernel\n");
} // end calling routine
} // extern "C"
|
21,638 | #include "includes.h"
// Naive dense matrix multiply: c = a * b for N x N int matrices (row-major).
// One thread per output element; the guard makes partial edge blocks safe.
// NOTE(review): N is not defined in this file -- presumably a macro from
// "includes.h"; confirm it matches the allocated matrix dimension.
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
int val = 0;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < N)
{
// Dot product of row 'row' of a with column 'col' of b.
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
21,639 | /*
* SumSquares.cu
*
* Copyright 2021 mike <mike@fedora33>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*
*
*/
#include <stdio.h>
#include <math.h>
#include <cuda.h>
// For each i < N, writes into d_results[i] the LARGEST perfect square from
// d_squares (sorted ascending: 0, 1, 4, ..., root^2) that divides i.
// The scan stops at x == 1, and 1 divides everything, so every i > 3 gets at
// least 1; values i <= 3 map to themselves.
// NOTE(review): d_results must be zero/ignorable for untouched slots --
// nothing is written when i >= N (the guard) -- confirm callers size N+1.
__global__ void kernel(ulong* d_squares, const ulong n_squares, ulong* d_results, ulong N) {
ulong i = threadIdx.x + (blockIdx.x * blockDim.x);
if(i < N) {
// scan in reverse the squares array
// save first square which divides i in results[i]
if(i > 3) {
for(int x = n_squares-1; x > 0; x -= 1) {
if((i % d_squares[x]) == 0) {
d_results[i] = d_squares[x];
break;
}
} // for...
} else {
d_results[i] = i;
}
} //
}
// Builds the table of perfect squares up to sqrt(N), runs the kernel over
// all i <= N and copies the per-number results back to the host.
int main(int argc, char **argv)
{
	cudaError_t error_id;
	// Problem size and the list of perfect squares 0..root^2.
	ulong N = 1024*1024*2*2*2;
	ulong root_max = (ulong)floor(sqrt((double)N));
	const ulong n_squares = root_max + 1;
	// Fix: a variable-length stack array is non-standard C++; use the heap.
	ulong *h_squares = (ulong*)malloc(sizeof(ulong)*n_squares);
	if(h_squares == NULL) {
		printf("malloc failed\n");
		exit(1);
	}
	for(ulong x = 0; x < n_squares; x += 1) h_squares[x] = x*x;
	// Allocate host results array
	ulong *h_results = (ulong*)malloc(sizeof(ulong)*(N+1));
	if(h_results == NULL) {
		printf("malloc failed\n");
		exit(1);
	}
	// Allocate memory on device for 'squares'
	ulong *d_squares;
	error_id = cudaMalloc((void**)&d_squares, sizeof(ulong)*n_squares);
	if(error_id != cudaSuccess) {
		printf("cudaMalloc squares failed with %d\n", error_id);
		exit(1);
	}
	// Copy squares to device
	error_id = cudaMemcpy(d_squares, h_squares, sizeof(ulong)*n_squares,
		cudaMemcpyHostToDevice);
	if(error_id != cudaSuccess) {
		printf("cudaMemcpy squares to device failed with %d\n", error_id);
		exit(1);
	}
	// Allocate memory on device for N results
	ulong *d_results;
	error_id = cudaMalloc((void**)&d_results, sizeof(ulong)*(N+1));
	if(error_id != cudaSuccess) {
		printf("cudaMalloc results failed with %d\n", error_id);
		exit(1);
	}
	// Launch configuration: enough 1024-thread blocks to cover N+1 items.
	const ulong Nthreads = 1024; // max number threads/block
	const ulong Nblocks = (N/Nthreads)+1;
	dim3 grid_size=(Nblocks); dim3 block_size=Nthreads;
	kernel<<<grid_size, block_size>>>(d_squares, n_squares, d_results, (N+1));
	// Fix: surface launch errors; they were silently ignored before.
	error_id = cudaGetLastError();
	if(error_id != cudaSuccess) {
		printf("kernel launch failed with %d\n", error_id);
		exit(1);
	}
	// The blocking copy below also waits for the kernel to finish.
	error_id = cudaMemcpy(h_results, d_results, sizeof(ulong)*(N+1),
		cudaMemcpyDeviceToHost);
	if(error_id != cudaSuccess) {
		printf("cudaMemcpy to host failed with %d\n", error_id);
		exit(1);
	}
	// Cleanup. Fix: h_results was malloc'd host memory -- the original
	// passed it to cudaFree(), which cannot release host allocations.
	cudaFree(d_squares);
	cudaFree(d_results);
	free(h_results);
	free(h_squares);
	return 0;
}
|
21,640 | #include "cuda.h" // Unix系统下调用CUDA使用"cuda.h",Win系统下调用CUDA使用"cuda.runtime"
#include "stdio.h" // 标准输入输出,后面调用函数printf
#define N 10000
// Element-wise add of two N x N float matrices (C = A + B); one thread per
// element, addressed as row i (y dimension) and column j (x dimension).
// The printf calls are for tracing only -- device printf serializes threads
// and is far too slow for real workloads.
// NOTE(review): no bounds guard; the 100x100 thread launch in main() covers
// only a corner of the N x N (10000 x 10000) matrices -- confirm intent.
__global__ void vectorAdd(float *A, float *B, float *C)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
C[i*N+j] = A[i*N+j] + B[i*N+j];
printf("======================================================\n");
printf("blockIdx.x:%d\tblockIdx.y:%d\n", blockIdx.x, blockIdx.y);
printf("blockDim.x:%d\tblockDim.y:%d\n", blockDim.x, blockDim.y);
printf("threadIdx.x:%d\tthreadIdx.y:%d\n", threadIdx.x, threadIdx.y);
printf("i:%d j:%d C[i*N+j]:C[%2d]:%f\n", i, j, i*N+j, C[i*N+j]);
}
// Allocates three N x N device matrices, launches vectorAdd over a 100x100
// thread range, then waits for completion and releases the buffers.
int main()
{
	// Select GPU device index (starting at 0); on multi-GPU machines use
	// 'nvidia-smi' to list the available devices.
	int gpuDeviceIdx = 0;
	cudaSetDevice(gpuDeviceIdx);
	// Device buffers, allocated with cudaMalloc.
	// Fix: compute the byte count in size_t; the int expression could
	// overflow for larger N.
	float *Ad, *Bd, *Cd;
	size_t size = (size_t)N * N * sizeof(float);
	cudaMalloc((void**)&Ad, size);
	cudaMalloc((void**)&Bd, size);
	cudaMalloc((void**)&Cd, size);
	// Launch configuration: 10x10 blocks of 10x10 threads each.
	dim3 bpg(10, 10); // blocks per grid
	dim3 tpb(10, 10); // threads per block
	// Run the kernel (__global__ function) on the GPU.
	vectorAdd<<<bpg, tpb>>>(Ad, Bd, Cd);
	// Fix: launches are asynchronous -- surface launch errors and wait for
	// the kernel to finish before freeing the buffers it is still using.
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess)
		printf("vectorAdd launch failed: %s\n", cudaGetErrorString(err));
	cudaDeviceSynchronize();
	// Release device memory.
	cudaFree(Ad);
	cudaFree(Bd);
	cudaFree(Cd);
	return 0;
}
|
21,641 | //thrust
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
extern "C"
// Sorts the histogram descending (carrying _reference along), then derives
// in _order the permutation obtained by re-sorting a copy of the reordered
// _reference ascending. All three arrays are written back to the host.
void cuda_HistogramOrder(unsigned int* _histogramData, unsigned int* _reference, unsigned int* _order, unsigned int _size)
{
	unsigned int* d_histogramData_ptr;
	unsigned int* d_tmp_ptr;
	unsigned int* d_reference_ptr;
	unsigned int* d_order_ptr;
	unsigned int mem_size = sizeof(unsigned int)*_size;
	cudaMalloc(&d_histogramData_ptr, mem_size);
	cudaMalloc(&d_tmp_ptr, mem_size);
	cudaMalloc(&d_reference_ptr, mem_size);
	cudaMalloc(&d_order_ptr,mem_size);
	cudaMemcpy(d_histogramData_ptr, _histogramData, mem_size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_reference_ptr, _reference, mem_size, cudaMemcpyHostToDevice);
	// NOTE(review): d_order is seeded from _reference, not _order -- looks
	// intentional (ranks derived from reference ids); confirm with caller.
	cudaMemcpy(d_order_ptr, _reference, mem_size, cudaMemcpyHostToDevice);
	thrust::device_ptr<unsigned int> d_histogramData(d_histogramData_ptr);
	thrust::device_ptr<unsigned int> d_reference(d_reference_ptr);
	thrust::device_ptr<unsigned int> d_order(d_order_ptr);
	thrust::device_ptr<unsigned int> d_tmp(d_tmp_ptr);
	// Descending sort of the histogram, carrying the reference ids along.
	thrust::sort_by_key(d_histogramData, d_histogramData + _size, d_reference, thrust::greater<unsigned int>());
	cudaMemcpy(d_tmp_ptr, d_reference_ptr, mem_size, cudaMemcpyDeviceToDevice);
	// Ascending sort of the ids, carrying their new positions along.
	thrust::sort_by_key(d_tmp, d_tmp + _size, d_order);
	cudaMemcpy(_histogramData, d_histogramData_ptr, mem_size, cudaMemcpyDeviceToHost);
	cudaMemcpy(_reference, d_reference_ptr, mem_size, cudaMemcpyDeviceToHost);
	cudaMemcpy(_order, d_order_ptr, mem_size, cudaMemcpyDeviceToHost);
	cudaFree(d_histogramData_ptr);
	cudaFree(d_reference_ptr);
	cudaFree(d_tmp_ptr);
	// Fix: d_order_ptr was never released -- leaked on every call.
	cudaFree(d_order_ptr);
}
extern "C"
// Binary search in the ascending array _data[0.._size); returns the index of
// _value, or 0 when not found (indistinguishable from a hit at index 0 --
// kept for compatibility with existing callers).
// Fix: the original used unsigned bounds, so 'end = middle - 1' with
// middle == 0 wrapped to UINT_MAX and caused out-of-bounds reads (and the
// same wrap occurred for _size == 0). Signed bounds avoid the underflow.
unsigned int binarySearch(unsigned int* _data, unsigned int _value, unsigned int _size)
{
	long long lo = 0, hi = (long long)_size - 1;
	while (lo <= hi)
	{
		long long middle = lo + (hi - lo) / 2;
		if (_data[middle] == _value)
			return (unsigned int)middle;
		if (_value > _data[middle])
			lo = middle + 1;
		else
			hi = middle - 1;
	}
	return 0;
}
21,642 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Kernel: ans = m1 * m2 for square n x n int matrices (row-major).
// Thread (x, y) computes column x, row y; the guard makes partial edge
// blocks safe when the grid overshoots n.
__global__ void gpuMatmult(int* m1, int* m2, int* ans, int n){
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	if (col >= n || row >= n) return;
	int acc = 0;
	for (int k = 0; k < n; k++)
		acc += m1[row * n + k] * m2[k * n + col];
	ans[row * n + col] = acc;
}
// Reads two matrices from comma-separated files, multiplies them on the GPU
// and writes the result to matres.txt. Usage: ./prog m1.txt m2.txt
int main(int argc, char const *argv[])
{
	double timeGPU;
	FILE *f1, *f2, *f3;
	int *h_m1, *h_m2, *h_ans, *d_m1, *d_m2, *d_ans;
	int m1Row, m1Col, m2Row, m2Col;
	// Argument check.
	if (argc != 3){
		printf("Cantidad de parametros incorrecta!!\n");
	}else{
		// Open input/output files. Fix: check the handles before use.
		f1 = fopen(argv[1],"r");
		f2 = fopen(argv[2],"r");
		f3 = fopen("matres.txt","w");
		if (f1 == NULL || f2 == NULL || f3 == NULL){
			printf("Error abriendo archivos\n");
			return 1;
		}
		// Matrix dimensions.
		fscanf(f1, "%d", &m1Row); fscanf(f1, "%d", &m1Col);
		fscanf(f2, "%d", &m2Row); fscanf(f2, "%d", &m2Col);
		size_t m1_size = m1Row * m1Col * sizeof(int);
		size_t m2_size = m2Row * m2Col * sizeof(int);
		// Fix: the product matrix is m1Row x m2Col (was m1Col * m2Row --
		// identical only for square matrices).
		size_t ans_size = m1Row * m2Col * sizeof(int);
		// Host allocations.
		h_m1 = (int *)malloc(m1_size);
		h_m2 = (int *)malloc(m2_size);
		h_ans = (int *)malloc(ans_size);
		// Read the matrices. Fix: a row-major element (i, j) lives at
		// i * numCols + j; the original indexed with the row count, which
		// is wrong for non-square inputs.
		for (int i = 0; i < m1Row; i++){
			for (int j = 0; j < m1Col; j++){
				fscanf(f1, "%d", &h_m1[i * m1Col + j]);
				getc(f1);// skip the commas (,)
			}
		}
		for (int k = 0; k < m2Row; k++){
			for (int l = 0; l < m2Col; l++){
				fscanf(f2, "%d", &h_m2[k * m2Col + l]);
				getc(f2);// skip the commas (,)
			}
		}
		// Device allocations.
		if (cudaSuccess != cudaMalloc((void **) &d_m1, m1_size))
			printf("Error asignando memoria para d_m1\n");
		if (cudaSuccess != cudaMalloc((void **) &d_m2, m2_size))
			printf("Error asignando memoria para d_m2\n");
		if (cudaSuccess != cudaMalloc((void **) &d_ans, ans_size))
			printf("Error asignando memoria para d_ans\n");
		// Host -> device copies.
		if (cudaSuccess != cudaMemcpy(d_m1, h_m1, m1_size, cudaMemcpyHostToDevice))
			printf("Error copiando datos a d_m1\n");
		if (cudaSuccess != cudaMemcpy(d_m2, h_m2, m2_size, cudaMemcpyHostToDevice))
			printf("Error copiando datos a d_m2\n");
		int size = m1Row;// matrix dimension (both assumed square here)
		// Launch configuration: 32x32 blocks, grid rounded up to cover size.
		dim3 blockDim(32,32);
		dim3 gridDim((int)ceil((float)size/blockDim.x), (int)ceil((float)size/blockDim.y));
		clock_t startGPU = clock();
		gpuMatmult<<<gridDim, blockDim>>>(d_m1, d_m2, d_ans, m1Row);
		if (cudaSuccess != cudaGetLastError())
			printf("Error en el llamado al kernel\n");
		// Device -> host copy (blocking, so it also waits for the kernel).
		if (cudaSuccess != cudaMemcpy(h_ans, d_ans, ans_size, cudaMemcpyDeviceToHost))
			printf("Error copiando datos desde d_ans a h_ans\n");
		timeGPU = ((double)(clock() - startGPU))/CLOCKS_PER_SEC;
		printf("m1(%d x %d), m2(%d x %d)\n",m1Row,m1Col,m2Row,m2Col);
		printf("GPU tiempo = %.6f segundos\n",timeGPU);
		// Write the result file; overwrite each row's trailing comma.
		for (int i = 0; i < m1Row; i++) {
			for (int j = 0; j < m2Col; j++) {
				fprintf(f3, "%d," ,h_ans[i * m2Col + j]);
			}
			fseek(f3, -1, SEEK_END);
			fprintf(f3, "\n");
		}
		// Fix: close the files (they were left open) and free d_ans -- the
		// original called cudaFree on the HOST pointer h_ans.
		fclose(f1); fclose(f2); fclose(f3);
		free(h_m1); free(h_m2); free(h_ans);
		cudaFree(d_m1); cudaFree(d_m2); cudaFree(d_ans);
	}
	return 0;
}
21,643 | /* declare a 1d array and copy it to each block
each thread find the maximum of the 1d array. This will be replaced by the inner loop in HW
use reduce sum up the mymaximum found by each thread
finally each block return a sum of maximum.
parallel_max_each_chunk<<<dimGrid,dimBlock,(n+numthreadsBlock)*sizeof(float)>>>(dmaxarr, darr, n);
how to run:
./a.out n num_blocks
It seems you can request less memory
*/
#include <stdio.h>
#include <cuda.h>
__global__ void parallel_max_each_chunk(float *dmaxarr, float * darr, int n);
// Driver: builds arr[i] = i/2, copies it to the device and launches one
// block per chunk. Each block returns the SUM of the per-thread maxima.
// Usage: ./a.out n num_blocks
int main(int argc, char **argv) {
	// Fix: guard against missing arguments (atoi(argv[1]) crashed before).
	if (argc < 3) {
		printf("usage: %s n num_blocks\n", argv[0]);
		return 1;
	}
	// Generate the 1-D input array.
	int n = atoi(argv[1]);
	float *arr = (float*) malloc(n*sizeof(float));
	int i;
	for (i = 0; i < n; i++) {
		arr[i] = (float)i/2.0f;
	}
	const int numthreadsBlock = 8;
	int numChunk = atoi(argv[2]);
	float *maxarr = (float *)malloc(numChunk * sizeof(float));
	// Device buffers.
	float *darr, * dmaxarr;
	cudaMalloc((void **)&darr, n*sizeof(float));
	cudaMalloc((void **)&dmaxarr, numChunk*sizeof(float));
	cudaMemcpy(darr, arr, n*sizeof(float), cudaMemcpyHostToDevice);
	dim3 dimGrid(numChunk,1);
	dim3 dimBlock(numthreadsBlock,1,1);
	// Shared memory: n floats for the data copy + one slot per thread.
	parallel_max_each_chunk<<<dimGrid,dimBlock,(n+numthreadsBlock)*sizeof(float)>>>(dmaxarr, darr, n);
	// Fix: cudaThreadSynchronize() is deprecated; use the supported call.
	cudaDeviceSynchronize();
	cudaMemcpy(maxarr, dmaxarr, numChunk*sizeof(float), cudaMemcpyDeviceToHost);
	// Check the results: each thread's max is (n-1)/2, summed over 8 threads.
	bool judge = true;
	for (i=0; i < numChunk; i++) {
		printf("%d sum of max %f\n ", i, maxarr[i]);
		judge = judge && ( (n-1)*numthreadsBlock/2.0 == maxarr[i]);
	}
	printf("\n--------correct or wrong---------\n");
	printf(judge ? "right\n": "wrong\n");
	// Check the exit state of the CUDA code.
	cudaError_t error = cudaGetLastError();
	if (error !=cudaSuccess) {
		printf("CUDA error: %s\n", cudaGetErrorString(error));
	}
	// Fix: release host and device buffers (all were leaked before).
	cudaFree(darr);
	cudaFree(dmaxarr);
	free(arr);
	free(maxarr);
	return 0;
}
// Each block copies darr into shared memory, every thread scans the whole
// copy for its maximum, and a tree reduction SUMS the per-thread maxima into
// dmaxarr[blockIdx.x]. Requires (n + blockDim.x) * sizeof(float) dynamic
// shared memory and a power-of-two blockDim.x.
__global__ void parallel_max_each_chunk(float *dmaxarr, float * darr, int n) {
	int tid = threadIdx.x;
	// Single dynamic shared buffer: the first n floats hold the block's
	// copy of darr, the next blockDim.x floats hold per-thread maxima.
	// Fix: the original declared TWO extern __shared__ arrays, which alias
	// the SAME base address in CUDA -- the maxima overwrote the data.
	extern __shared__ float sdata[];
	float *mymaxval = sdata + n;
	int chunkSize = (n + blockDim.x - 1) / blockDim.x;
	for (int j = 0; j < chunkSize; j++) {
		int idx = tid * chunkSize + j;
		if (idx < n)
			sdata[idx] = darr[idx];
	}
	__syncthreads();
	// Each thread scans the whole shared array for its maximum.
	// Fix: the accumulator was an int, truncating fractional values.
	float mymax = 0.0f;
	for (int j = 0; j < n; j++) {
		if (mymax < sdata[j]) { mymax = sdata[j]; }
	}
	mymaxval[tid] = mymax;
	// Fix: barrier before any thread reads a peer's slot in the reduction.
	__syncthreads();
	// Tree reduction summing the per-thread maxima (host expects a SUM).
	for (int s = blockDim.x/2; s > 0; s >>= 1) {
		if (tid < s) {
			mymaxval[tid] += mymaxval[tid+s];
		}
		__syncthreads();
	}
	if (tid == 0) {
		dmaxarr[blockIdx.x] = mymaxval[0];
	}
}
|
21,644 | #include <stdio.h>
#define BLOCK_SIZE 256
// One explicit diffusion step over the 1-D rod: each slice becomes the mean
// of its two neighbours; the endpoints average themselves with their single
// neighbour. Launch with >= numSlices threads in BLOCK_SIZE-wide blocks.
__global__ void calculateNext(double* oldCylinder, double* newCylinder, const unsigned long long int numSlices) {
	const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
	if (idx >= numSlices) return;
	const int left  = (idx == 0) ? idx : idx - 1;
	const int right = (idx == numSlices - 1) ? idx : idx + 1;
	newCylinder[idx] = (oldCylinder[left] + oldCylinder[right]) / 2.0;
}
// Zero-fills the cylinder, then writes concentrations[k] at each impulse
// position impulses[k]. One thread per slice, guarded at the tail.
__global__ void initializeArray(double* cylinder, const unsigned long long int numSlices, const int numImpulses, const unsigned long long int* impulses, const double* concentrations) {
	const int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
	if (idx >= numSlices) return;
	double value = 0.0;
	for (int k = 0; k < numImpulses; k++) {
		if (idx == impulses[k]) {
			value = concentrations[k];
			break;
		}
	}
	cylinder[idx] = value;
}
// Aborts with "CUDA <action> failed, exiting..." when a call did not
// succeed; keeps gpuCalculate's call sites compact.
static void gpuCheckOrDie(cudaError_t result, const char* action) {
	if (result != cudaSuccess) {
		fprintf(stderr, "CUDA %s failed, exiting...\n", action);
		exit(EXIT_FAILURE);
	}
}
// Simulates 1-D diffusion over numSlices slices for totalTime steps,
// seeding the initial state from (impulses, concentrations), and returns
// the concentration at desiredPoint. Exits the process on any CUDA error.
extern "C" double gpuCalculate(const unsigned long long int numSlices, const unsigned long long int totalTime, const unsigned long long int desiredPoint, const int numImpulses, const unsigned long long int* impulses, const double* concentrations) {
	double* oldCylinder;
	double* newCylinder;
	double* temp;
	unsigned long long int* deviceImpulses;
	double* deviceConcentrations;
	// Device allocations (two buffers for ping-pong stepping).
	gpuCheckOrDie(cudaMalloc((void**) &oldCylinder, numSlices * sizeof(double)), "Malloc");
	gpuCheckOrDie(cudaMalloc((void**) &newCylinder, numSlices * sizeof(double)), "Malloc");
	gpuCheckOrDie(cudaMalloc((void**) &deviceImpulses, numImpulses * sizeof(unsigned long long int)), "Malloc");
	gpuCheckOrDie(cudaMalloc((void**) &deviceConcentrations, numImpulses * sizeof(double)), "Malloc");
	// Copy the impulse description to the device.
	gpuCheckOrDie(cudaMemcpy(deviceImpulses, impulses, numImpulses * sizeof(unsigned long long int), cudaMemcpyHostToDevice), "Memcpy");
	gpuCheckOrDie(cudaMemcpy(deviceConcentrations, concentrations, numImpulses * sizeof(double), cudaMemcpyHostToDevice), "Memcpy");
	// Enough BLOCK_SIZE-wide blocks to cover every slice.
	dim3 dimBlock(BLOCK_SIZE);
	unsigned long long int gridSize = ceil(numSlices / (double) BLOCK_SIZE);
	dim3 dimGrid(gridSize);
	initializeArray<<<dimGrid, dimBlock>>>(oldCylinder, numSlices, numImpulses, deviceImpulses, deviceConcentrations);
	// Fix: launches are asynchronous; check them explicitly.
	gpuCheckOrDie(cudaGetLastError(), "kernel launch");
	// Time stepping with buffer ping-pong (kernels in one stream run in
	// order, so no explicit sync is needed between steps).
	for (unsigned long long int i = 0; i < totalTime; i++) {
		calculateNext<<<dimGrid, dimBlock>>>(oldCylinder, newCylinder, numSlices);
		temp = oldCylinder;
		oldCylinder = newCylinder;
		newCylinder = temp;
	}
	gpuCheckOrDie(cudaGetLastError(), "kernel launch");
	// Blocking copy of the single requested slice (also syncs the device).
	double answer;
	gpuCheckOrDie(cudaMemcpy(&answer, &oldCylinder[desiredPoint], sizeof(double), cudaMemcpyDeviceToHost), "Memcpy");
	gpuCheckOrDie(cudaFree(oldCylinder), "free");
	gpuCheckOrDie(cudaFree(newCylinder), "free");
	gpuCheckOrDie(cudaFree(deviceImpulses), "free");
	gpuCheckOrDie(cudaFree(deviceConcentrations), "free");
	return answer;
}
21,645 | #include <iostream>
#include <fstream>
#include <string.h>
#include <sys/time.h>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
using namespace std;
//Poner esto a 1 para imprimir los resultados
// Returns wall-clock time in seconds with microsecond resolution.
double cpuSecond(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
// Returns the largest element of 'vector' (N elements), or 0 when N <= 0.
// Fix: the original seeded the running maximum with 0, so an all-negative
// array incorrectly returned 0; seed with the first element instead.
int maximo_local(int* vector,int N){
    if (N <= 0) return 0;
    int maximo = vector[0];
    for(int i = 1; i < N; i++){
        if(vector[i] > maximo)
            maximo = vector[i];
    }
    return maximo;
}
// Interleaved-pair max reduction per block; datos[0] ends with the block's
// maximum, stored into device_salida[blockIdx.x]. Requires blockDim.x ints
// of dynamic shared memory and a power-of-two blockDim.x.
__global__ void kernel_reduccion_intervalos(int* device_vector,int* device_salida){
    extern __shared__ int datos[];
    int tid = threadIdx.x; // thread number within the block
    int posicion = blockIdx.x * blockDim.x + threadIdx.x;
    datos[tid] = device_vector[posicion];
    // Fix: barrier so every element is loaded before any thread reads a
    // peer's slot (missing in the original).
    __syncthreads();
    // Fix: the original loop condition was 'i > blockDim.x', which never
    // executes; it must be 'i < blockDim.x'. Also compare the pair at
    // datos[index] / datos[index + i], not datos[tid].
    for(int i = 1; i < blockDim.x; i *= 2){
        int index = 2 * i * tid;
        if(index + i < blockDim.x){
            if(datos[index] < datos[index + i]){
                datos[index] = datos[index + i];
            }
        }
        __syncthreads();
    }
    // Fix: only thread 0 publishes the result; the original let every
    // thread race on the read-check-write of device_salida.
    if(tid == 0 && device_salida[blockIdx.x] == 0){
        device_salida[blockIdx.x] = datos[0];
    }
}
// Sequential-addressing max reduction per block; datos[0] ends with the
// block's maximum, stored into device_salida[blockIdx.x]. Requires
// blockDim.x ints of dynamic shared memory and a power-of-two blockDim.x.
__global__ void kernel_reduccion_secuencial(int* device_vector,int* device_salida){
    extern __shared__ int datos[];
    int tid = threadIdx.x; // thread number within the block
    int posicion = blockIdx.x * blockDim.x + threadIdx.x;
    datos[tid] = device_vector[posicion];
    __syncthreads();
    // Fix: the original compared against datos[tid+1] instead of
    // datos[tid+i], so it only ever looked one slot away.
    for(int i = blockDim.x/2; i > 0; i >>= 1){
        if(tid < i){
            if(datos[tid] < datos[tid + i]){
                datos[tid] = datos[tid + i];
            }
        }
        __syncthreads();
    }
    // Fix: only thread 0 publishes the result (race in the original).
    if(tid == 0 && device_salida[blockIdx.x] == 0){
        device_salida[blockIdx.x] = datos[0];
    }
}
// Final warp-level stage of the max reductions: fully unrolled over the last
// 64 shared-memory slots, executed by threads 0..31 only. 'volatile' forces
// each read/write through shared memory so the unrolled steps see each
// other's results without __syncthreads().
// NOTE(review): this relies on implicit warp-synchronous execution, which is
// no longer guaranteed on Volta+ (independent thread scheduling) -- confirm
// the target architecture or insert __syncwarp() between the steps.
__device__ void desenrrollado_reduce_32(volatile int* datos, int tid){
if(datos[tid] < datos[tid+32]) datos[tid] = datos[tid+32];
if(datos[tid] < datos[tid+16]) datos[tid] = datos[tid+16];
if(datos[tid] < datos[tid+8]) datos[tid] = datos[tid+8];
if(datos[tid] < datos[tid+4]) datos[tid] = datos[tid+4];
if(datos[tid] < datos[tid+2]) datos[tid] = datos[tid+2];
if(datos[tid] < datos[tid+1]) datos[tid] = datos[tid+1];
}
// Max reduction with the last warp unrolled (desenrrollado_reduce_32);
// datos[0] ends with the block's maximum. Requires blockDim.x ints of
// dynamic shared memory and a power-of-two blockDim.x >= 64.
__global__ void kernel_reduccion_desenrrollado_parcial(int* device_vector,int* device_salida){
    extern __shared__ int datos[];
    int tid = threadIdx.x; // thread number within the block
    int posicion = blockIdx.x * blockDim.x + threadIdx.x;
    datos[tid] = device_vector[posicion];
    // Fix: barrier after the load (missing in the original).
    __syncthreads();
    // Fix: compare against datos[tid+i], not datos[tid+1].
    for(int i = blockDim.x/2; i > 32; i >>= 1){
        if(tid < i){
            if(datos[tid] < datos[tid + i]){
                datos[tid] = datos[tid + i];
            }
        }
        __syncthreads();
    }
    // Last 64 slots reduced warp-synchronously by threads 0..31.
    if(tid < 32) desenrrollado_reduce_32(datos,tid);
    // Fix: only thread 0 publishes the result (race in the original).
    if(tid == 0 && device_salida[blockIdx.x] == 0){
        device_salida[blockIdx.x] = datos[0];
    }
}
// Fully unrolled max reduction (sized for blocks up to 2048 threads, though
// current hardware caps blockDim.x at 1024, making the first branch dead).
// Shared memory: blockDim.x ints (3rd launch parameter).
// Original bugs fixed: no __syncthreads() after the shared load, a missing
// barrier in the >=2048 branch, and a racy per-block result write.
__global__ void kernel_reduccion_desenrrollado_total(int* device_vector,int* device_salida){
    extern __shared__ int datos[];
    int tid = threadIdx.x;                                  // thread id within the block
    int posicion = blockIdx.x * blockDim.x + threadIdx.x;   // global element index
    datos[tid] = device_vector[posicion];
    __syncthreads();   // loads must be visible before the first comparison step
    if(blockDim.x >= 2048){
        if(tid < 1024 && datos[tid] < datos[tid + 1024]){
            datos[tid] = datos[tid + 1024];
        }
        __syncthreads();   // this barrier was missing in the original
    }
    if(blockDim.x >= 1024){
        if(tid < 512 && datos[tid] < datos[tid + 512]){
            datos[tid] = datos[tid + 512];
        }
        __syncthreads();
    }
    if(blockDim.x >= 512){
        if(tid < 256 && datos[tid] < datos[tid + 256]){
            datos[tid] = datos[tid + 256];
        }
        __syncthreads();
    }
    if(blockDim.x >= 256){
        if(tid < 128 && datos[tid] < datos[tid + 128]){
            datos[tid] = datos[tid + 128];
        }
        __syncthreads();
    }
    if(blockDim.x >= 128){
        if(tid < 64 && datos[tid] < datos[tid + 64]){
            datos[tid] = datos[tid + 64];
        }
        __syncthreads();
    }
    if(tid < 32) desenrrollado_reduce_32(datos,tid);
    __syncthreads();
    // Publish one value per block; thread 0 only.
    if(tid == 0 && device_salida[blockIdx.x] == 0){
        device_salida[blockIdx.x] = datos[0];
    }
}
// Driver: fills a vector with random values in [1, N], runs the selected
// max-reduction kernel, and reduces the per-block results on the host.
// Usage: ./program <kernel id 0-3> <problem size N> <blocks per grid>
int main(int argc, char* argv[]){
    bool imprimir = false;
    if(argc < 4){
        cout << "Sintaxis: ./program <Numero de kernel a ejecutar> <Tamaño del problema> <Numero de bloques> " << endl;
        exit(-1);
    }
    // CUDA device detection / error checking
    int devID;
    cudaError_t error_cuda;
    error_cuda = cudaGetDevice(&devID);
    if(error_cuda != cudaSuccess){
        cout << "Error. No hay tarjeta grafica nvdia o no esta instalado el driver" << endl;
        exit(-1);
    }
    cudaDeviceProp propiedades;
    cudaGetDeviceProperties(&propiedades, devID);
    if(imprimir)
        cout << "Device " << devID << " " << propiedades.name << " con capacidad computacional: " << propiedades.major << "." << propiedades.minor << endl;
    // Kernel to run
    int kernel = atoi(argv[1]);
    // Problem size and grid shape
    int N = atoi(argv[2]);
    int bloques_por_grid = atoi(argv[3]);
    // Threads per block via ceiling division. The original wrapped an
    // already-truncated integer division in ceil(), which never rounded up.
    // NOTE: hebras must not exceed the device's max threads per block.
    int hebras = (N + bloques_por_grid - 1) / bloques_por_grid;
    // Pad the device buffer to a whole grid of threads so tail threads read
    // valid memory; the padding is 0, below every generated value (>= 1).
    int total_elems = hebras * bloques_por_grid;
    int device_memory = total_elems * sizeof(int);
    int* vector = new int[N];
    int* resultado = new int[N];
    int* device_vector;
    int* device_salida;
    // Device allocations
    error_cuda = cudaMalloc( (void**) &device_vector, device_memory);
    if(error_cuda != cudaSuccess){
        cout << "No se ha podido reservar memoria para el vector <device_vector>" << endl;
        exit(-1);
    }
    error_cuda = cudaMalloc( (void**) &device_salida, bloques_por_grid*sizeof(int));
    if(error_cuda != cudaSuccess){
        cout << "No se ha podido reservar memoria para el vector <device_salida>" << endl;
        exit(-1);
    }
    cudaMemset(device_vector, 0, device_memory);   // zero the tail padding
    // Fill with random values in [1, N] and remember the true maximum
    int numero_random = 0;
    int maximo = -1;
    srand(time(NULL));
    for(int i = 0; i < N; i++){
        numero_random = rand() % N + 1;
        if(numero_random > maximo)
            maximo = numero_random;
        vector[i] = numero_random;
        resultado[i] = 0;
    }
    error_cuda = cudaMemcpy(device_vector,vector,N*sizeof(int), cudaMemcpyHostToDevice);
    if(error_cuda != cudaSuccess){
        cout << "No se pudo copair el contenido de <vector> a el vector device <device_vector>" << endl;
        exit(-1);
    }
    // Zero every slot of the per-block output vector
    error_cuda = cudaMemcpy(device_salida,resultado, bloques_por_grid*sizeof(int), cudaMemcpyHostToDevice);
    if(error_cuda != cudaSuccess){
        cout << "No se pudo copair el contenido de <resultado> a el vector device <device_salida>" << endl;
        exit(-1);
    }
    double tiempo = cpuSecond();
    // Dynamic shared memory must hold one int per THREAD in the block
    // (the kernels index datos[threadIdx.x]); the original sized it per grid.
    size_t smem = sizeof(int)*hebras;
    switch(kernel){
        case 0:
            kernel_reduccion_intervalos<<<bloques_por_grid,hebras,smem>>>(device_vector, device_salida);
            break;
        case 1:
            kernel_reduccion_secuencial<<<bloques_por_grid,hebras,smem>>>(device_vector, device_salida);
            break;
        case 2:
            kernel_reduccion_desenrrollado_parcial<<<bloques_por_grid,hebras,smem>>>(device_vector, device_salida);
            break;
        case 3:
            kernel_reduccion_desenrrollado_total<<<bloques_por_grid,hebras,smem>>>(device_vector, device_salida);
            break;
    }
    // Kernel launches are asynchronous: wait before stopping the clock.
    cudaDeviceSynchronize();
    tiempo = cpuSecond() - tiempo;
    cudaMemcpy(resultado, device_salida, bloques_por_grid*sizeof(int), cudaMemcpyDeviceToHost);
    // Final host-side reduction over the per-block maxima
    int max = maximo_local(resultado,bloques_por_grid);
    if(imprimir){
        cout << "Kernel: " << kernel << " Hebras: " << hebras << " Bloques: " << bloques_por_grid << " Tiempo: " << tiempo << endl;
        cout << "El maximo obtenido al rellenar el ciclo es: " << maximo << endl;
        cout << "El maximo obtenido al usar reduccion es: " << max << endl;
    }else{
        cout << tiempo << " ";
    }
    /*
    Algoritmo secuencial
    double tiempo = cpuSecond();
    int max = maximo_local(vector,N);
    tiempo = cpuSecond() - tiempo;
    cout << tiempo << " ";
    */
    cudaFree(device_salida);
    cudaFree(device_vector);
    // new[] must be released with delete[], not free()
    delete [] vector;
    delete [] resultado;
}
|
21,646 | /*------------------------------------------------------------------------------
# File Name : matrix_multiply_double.cu
#
# Author : Ki-Hwan Kim (wbkifun@korea.ac.kr)
#
# Written date : 2010. 8. 17
# Modify date :
#
# Copyright : GNU GPL
#
# Description :
# CUDA example
# Matrix Multiplication C=AxB
# This cuda kernel is not optimized.
------------------------------------------------------------------------------*/
#include <stdlib.h>
#include <stdio.h>
// Reference CPU implementation of C = A x B for n x n row-indexed matrices.
__host__ void matrix_multiply_cpu(int n, double **a, double **b, double **c) {
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            double acc = 0;
            for (int k = 0; k < n; k++)
                acc += a[row][k] * b[k][col];
            c[row][col] = acc;
        }
    }
}
// One thread per output element of C = A x B (n x n, row-major).
// A bounds guard is added so launches whose grid overshoots n do not write
// past the buffers (the original relied on n being an exact multiple of the
// block size).
__global__ void matrix_multiply_gpu(int n, double *a, double *b, double *c) {
    int j = blockIdx.x*blockDim.x + threadIdx.x;   // column
    int i = blockIdx.y*blockDim.y + threadIdx.y;   // row
    if (i >= n || j >= n)
        return;
    double tc = 0;
    for (int k=0; k<n; k++) tc += a[i*n+k]*b[k*n+j];
    c[i*n+j] = tc;
}
// Multiplies two 512x512 double matrices on GPU and CPU and compares results.
// Fixes: verification now checks |diff| (the original only flagged positive
// differences), the D2H progress message had its direction backwards, all
// host/device allocations are released, and main returns a value.
int main() {
    int n=512; // n x n matrix; must be a multiple of 16
    int i,j;
    printf("Matrix Multiplication C=AxB (%dx%d) (double precision)\n", n,n);
    // Allocate matrices in the host memory (contiguous storage + row tables)
    double **a, **b, **c_cpu, **c_gpu;
    a = (double **) malloc(n*sizeof(double *));
    a[0] = (double *) malloc(n*n*sizeof(double));
    for (i=0; i<n; i++) a[i] = a[0] + i*n;
    b = (double **) malloc(n*sizeof(double *));
    b[0] = (double *) malloc(n*n*sizeof(double));
    for (i=0; i<n; i++) b[i] = b[0] + i*n;
    c_cpu = (double **) calloc(n, sizeof(double *));
    c_cpu[0] = (double *) calloc(n*n, sizeof(double));
    for (i=0; i<n; i++) c_cpu[i] = c_cpu[0] + i*n;
    c_gpu = (double **) calloc(n, sizeof(double *));
    c_gpu[0] = (double *) calloc(n*n, sizeof(double));
    for (i=0; i<n; i++) c_gpu[i] = c_gpu[0] + i*n;
    // Initialize the matrix a, b
    for (i=0; i<n*n; i++) {
        a[0][i] = (i/111)*(i%11)*0.1;
        b[0][i] = (i/113)*(i%13)*0.1;
    }
    // Allocate matrices in the device memory
    double *a_dev, *b_dev, *c_dev;
    cudaMalloc ( (void**) &a_dev, n*n*sizeof(double) );
    cudaMalloc ( (void**) &b_dev, n*n*sizeof(double) );
    cudaMalloc ( (void**) &c_dev, n*n*sizeof(double) );
    // Copy the a, b matrices from host to device
    cudaMemcpy ( a_dev, a[0], n*n*sizeof(double), cudaMemcpyHostToDevice );
    cudaMemcpy ( b_dev, b[0], n*n*sizeof(double), cudaMemcpyHostToDevice );
    cudaMemcpy ( c_dev, c_gpu[0], n*n*sizeof(double), cudaMemcpyHostToDevice );
    // CUDA Kernel execution: 16x16 thread blocks tiling the n x n output
    dim3 dimBlock(16,16,1);
    dim3 dimGrid(n/16,n/16);
    printf("GPU Kernel Execution...\n");
    matrix_multiply_gpu <<<dimGrid,dimBlock>>> (n, a_dev, b_dev, c_dev);
    // Copy the c matrix from device to host (implicitly synchronizes)
    printf("Copy the c matrix from device to host...\n");
    cudaMemcpy (c_gpu[0], c_dev, n*n*sizeof(double), cudaMemcpyDeviceToHost);
    // CPU Function execution
    printf("CPU Function Execution...\n");
    matrix_multiply_cpu(n, a, b, c_cpu);
    // Verify two results
    printf("Verify the results...");
    double v;
    int err=0;
    for (i=0; i<n; i++) {
        for (j=0; j<n; j++) {
            v = c_cpu[i][j] - c_gpu[i][j];
            // Magnitude check: the original `v > 0.01` missed cases where
            // the GPU result was larger than the CPU result.
            if (v > 0.01 || v < -0.01) {
                printf("c[%d][%d]=%g\n", i, j, v);
                err = 1;
            }
        }
    }
    if (err == 0) printf("OK!\n");
    // Release device and host memory (originally leaked)
    cudaFree(a_dev); cudaFree(b_dev); cudaFree(c_dev);
    free(a[0]); free(a);
    free(b[0]); free(b);
    free(c_cpu[0]); free(c_cpu);
    free(c_gpu[0]); free(c_gpu);
    return err;
}
|
21,647 | /*
Name: Daniyal Manair
Student Number: 20064993
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <vector>
#include <stdio.h>
#include <random>
#include <algorithm>
#include <chrono>
#include <map>
// One thread computes one element of C = A * B for square N x N matrices.
__global__ void MatrixMulGPU(float* A, float* B, float* C, const int N) {
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard: the grid may overshoot the matrix on partial tiles.
    if (row >= (unsigned int)N || col >= (unsigned int)N)
        return;
    unsigned int idx = row * N + col;
    C[idx] = 0.0;
    for (int i = 0; i < N; i++)
        C[idx] += A[row * N + i] * B[i * N + col];
}
// Single-thread GPU baseline: one thread computes the whole N x N product
// (intended to be launched <<<1, 1>>>). The original re-checked
// (row < N && col < N) inside the loops, which is always true for the
// loop-bounded indices; the dead test has been removed.
__global__ void MatrixMulGPUSingle(float* A, float* B, float* C, const int N) {
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            C[row * N + col] = 0.0;
            for (int k = 0; k < N; k++)
                C[row * N + col] += A[row * N + k] * B[k * N + col];
        }
    }
}
// Fill an N x N matrix with pseudo-random values in [0.0, 25.5].
void initialData(float* matrix, const int N){
    const int total = N * N;
    for (int cell = 0; cell < total; ++cell)
        matrix[cell] = (float)(rand() & 0xFF) / 10.0f;
}
// CPU reference multiply: accumulates A*B into C.
// C is expected to be zero-initialised by the caller.
void MatrixMulCPU(float* A, float* B, float* C, const int N){
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            for (int k = 0; k < N; k++)
                C[row * N + col] += A[row * N + k] * B[k * N + col];
        }
    }
}
// Compare CPU and GPU outputs element-wise; report the first mismatch
// (beyond a fixed epsilon) or print that the test passed.
void checkResult(float* CPU, float* GPU, const int N) {
    const double epsilon = 1.0E-8;
    const int total = N * N;
    for (int i = 0; i < total; ++i){
        if (abs(CPU[i] - GPU[i]) > epsilon){
            printf("CPU %f GPU %f ", CPU[i], GPU[i]);
            printf("Arrays do not match.\n\n");
            return;
        }
    }
    printf("Test PASSED\n\n");
}
// Print the flattened matrix as "[a,b,...]"; the trailing "\b" backspace
// erases the final comma before the closing bracket.
void printArr(float* matrix, const int N) {
    printf("[");
    const int total = N * N;
    for (int i = 0; i < total; ++i)
        printf("%f,", matrix[i]);
    printf("\b]\n");
}
// Runs one GPU matrix multiply (single-thread kernel when blockSize == 0,
// otherwise a blockSize x blockSize tiled launch), checks the output against
// CPUResult, and returns the kernel time in milliseconds.
// Fixes: the cudaEvent_t pair was leaked on every call (now destroyed), and
// memset received a float 0.0 where an int is expected.
float GPUtest(float* C_A, float* C_B, float* CPUResult, const int blockSize, const int N){
    // Initialize variables
    cudaEvent_t gStart, gEnd;
    float timeDuration;
    float *G_A, *G_B, *G_C, *GPUResult;
    size_t size = N * N * sizeof(float);
    // Initialize GPU variables
    cudaMalloc((void**)&G_A, size);
    cudaMalloc((void**)&G_B, size);
    cudaMalloc((void**)&G_C, size);
    GPUResult = (float*)malloc(size);
    memset(GPUResult, 0, size);
    cudaEventCreate(&gStart);
    cudaEventCreate(&gEnd);
    // Copy over the data
    cudaMemcpy(G_A, C_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(G_B, C_B, size, cudaMemcpyHostToDevice);
    // Perform GPU comparison
    if (blockSize == 0){
        cudaEventRecord(gStart);
        MatrixMulGPUSingle <<<1, 1>>> (G_A, G_B, G_C, N);
        cudaEventRecord(gEnd);
    } else {
        // Ceiling division so partial tiles are still covered
        int numBlocks = N / blockSize;
        if (N % blockSize) numBlocks++;
        dim3 block(blockSize, blockSize, 1);
        dim3 grid(numBlocks, numBlocks, 1);
        cudaEventRecord(gStart);
        MatrixMulGPU <<<grid, block >>> (G_A, G_B, G_C, N);
        cudaEventRecord(gEnd);
    }
    cudaEventSynchronize(gEnd);
    cudaEventElapsedTime(&timeDuration, gStart, gEnd);
    cudaMemcpy(GPUResult, G_C, size, cudaMemcpyDeviceToHost);
    checkResult(CPUResult, GPUResult, N);
    // Release all per-call resources
    cudaEventDestroy(gStart);
    cudaEventDestroy(gEnd);
    cudaFree(G_A);
    cudaFree(G_B);
    cudaFree(G_C);
    free(GPUResult);
    return timeDuration;
}
// Benchmarks an N x N multiply on the CPU and on the GPU over several block
// sizes, printing results and appending them to machineProblem3.csv.
// Fix: fopen was unchecked — a NULL stream would have crashed in fprintf.
void computeMatrix(const int N) {
    // Initial prints
    printf("------------------------------------------------------------------------\n\n");
    printf("%dx%d matrix multiplication.\n\n", N, N);
    // Initialize Host variables
    float *C_A, *C_B, *C_C;
    size_t size = N * N * sizeof(float);
    FILE *fp;
    // Initialize space
    C_A = (float*)malloc(size);
    C_B = (float*)malloc(size);
    C_C = (float*)malloc(size);
    fp=fopen("machineProblem3.csv","a");
    if (fp == NULL) {
        printf("Could not open machineProblem3.csv for appending.\n");
        free(C_A); free(C_B); free(C_C);
        return;
    }
    // Set with random data
    initialData(C_A, N);
    initialData(C_B, N);
    memset(C_C, 0, size);   // MatrixMulCPU accumulates, so C must start at zero
    // Serial Test CPU
    auto cStart = std::chrono::high_resolution_clock::now();
    MatrixMulCPU(C_A, C_B, C_C, N);
    auto cEnd = std::chrono::high_resolution_clock::now();
    auto timeElapse = (std::chrono::duration_cast<std::chrono::microseconds>(cEnd - cStart).count())/1000.0;
    printf("The CPU took %f to perform the computation.\n\n", timeElapse);
    fprintf(fp,"%d,CPU,0,%f\n",N,timeElapse);
    // Test Complete parallel Computation (block size 0 = single-thread kernel)
    int blockSizes [] = {0, 2, 4, 10, 20, 25};
    float timeDuration;
    for (int i = 0; i < 6; i++){
        timeDuration = GPUtest(C_A, C_B, C_C, blockSizes[i], N);
        printf("The GPU took %f to perform the computation with block size %d.\n", timeDuration, blockSizes[i]);
        fprintf(fp,"%d,GPU,%d,%f\n",N,blockSizes[i],timeDuration);
    }
    // Free all the memory
    free(C_A);
    free(C_B);
    free(C_C);
    fclose(fp);
    cudaDeviceReset();
}
// Measures H2D and D2H transfer time for two N x N float matrices using CUDA
// events, printing both measurements and appending them to the transfer CSV.
void transferTimes(const int N) {
    printf("------------------------------------------------------------------------\n\n");
    printf("Currently transfering between %dx%d matrixs\n", N, N);
    FILE *fp = fopen("machineProblem3_transfer.csv", "a");
    // Host/device buffers and timing events
    size_t size = N * N * sizeof(float);
    float elapsed = 0;
    float *C_A = (float*)malloc(size);
    float *C_B = (float*)malloc(size);
    float *G_A, *G_B;
    cudaMalloc((void**)&G_A, size);
    cudaMalloc((void**)&G_B, size);
    cudaEvent_t evStart, evEnd;
    cudaEventCreate(&evStart);
    cudaEventCreate(&evEnd);
    // Populate input matrices
    initialData(C_A, N);
    initialData(C_B, N);
    // Host -> device
    cudaEventRecord(evStart);
    cudaEventSynchronize(evStart);
    cudaMemcpy(G_A, C_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(G_B, C_B, size, cudaMemcpyHostToDevice);
    cudaEventRecord(evEnd);
    cudaEventSynchronize(evEnd);
    cudaEventElapsedTime(&elapsed, evStart, evEnd);
    printf("Transfered %dx%d matrix from CPU to GPU in %fms\n", N, N, elapsed);
    fprintf(fp, "%d,0,%f\n", N, elapsed);
    // Device -> host
    cudaEventRecord(evStart);
    cudaEventSynchronize(evStart);
    cudaMemcpy(C_A, G_A, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(C_B, G_B, size, cudaMemcpyDeviceToHost);
    cudaEventRecord(evEnd);
    cudaEventSynchronize(evEnd);
    cudaEventElapsedTime(&elapsed, evStart, evEnd);
    cudaEventDestroy(evStart);
    cudaEventDestroy(evEnd);
    printf("Transfered %dx%d matrix from GPU to CPU in %fms\n", N, N, elapsed);
    fprintf(fp, "%d,1,%f\n", N, elapsed);
    cudaFree(G_A);
    cudaFree(G_B);
    free(C_A);
    free(C_B);
    fclose(fp);
}
// Creates both CSV files fresh with header rows, then benchmarks each matrix
// width for compute time and transfer time.
int main(){
    FILE *resultsFile = fopen("machineProblem3.csv","w");
    FILE *transferFile = fopen("machineProblem3_transfer.csv","w");
    fprintf(resultsFile,"matrixSize,processor,blockSize,time\n");
    fprintf(transferFile,"matrixSize,case,time\n");
    fclose(resultsFile);
    fclose(transferFile);
    const int matrixWidths [] = {100, 200, 500, 1000, 1500, 5000};
    for (int w = 0; w < 6; ++w){
        computeMatrix(matrixWidths[w]);
        transferTimes(matrixWidths[w]);
    }
    printf("------------------------------------------------------------------------\n\n");
    return 0;
}
|
21,648 | #include <iostream>
#include <algorithm>
#include <vector>
#include <iterator>
#include <fstream>
#include <math.h>
#include <unistd.h>
#include <stdlib.h>
#include <string>
#include <stdio.h>
#include <cmath>
#include<sys/stat.h>
#include<ctime>
#include <cuda_runtime.h>
#include<thrust/reduce.h>
#include<cuda_runtime.h>
#include<thrust/sort.h>
#include<thrust/device_ptr.h>
#include<thrust/device_vector.h>
#include<thrust/host_vector.h>
#include<thrust/copy.h>
#include<thrust/execution_policy.h>
#include<thrust/scan.h>
using namespace std;
#define thrustSortBlockSize 4000000000
#define bucketNum 10
// A directed edge stored as a pair of 32-bit node ids.
struct edge{
    int src;
    int dst;
};
// Device-side ordering for thrust::sort: ascending by src, then by dst.
struct cmpStruc{
    __device__ bool operator () (const edge &a, const edge &b){
        return (a.src < b.src) || (a.src == b.src && a.dst < b.dst) ;
    }
}cmp;
// Growable array of edges with manual memory management.
// Fix: addEdge doubled the capacity unconditionally, so a vector whose init()
// was never called (capcity == 0) stayed at capacity 0 and wrote out of
// bounds; growth now starts at 1. Pointers are also nulled after clear().
class edgeVector{
public:
    unsigned int capcity;   // allocated slots
    unsigned int esize;     // used slots
    edge *Edges;
    edgeVector(){esize = 0; capcity = 0; Edges = NULL;}
    // Pre-allocate s slots (call before adding edges for best performance).
    void init(unsigned int s) { Edges = new edge [s]; capcity = s; esize = 0; return ;}
    // Append one edge, growing the buffer geometrically when full.
    void addEdge(edge * E){
        if(esize >= capcity) {
            capcity = (capcity == 0) ? 1 : capcity * 2;
            edge* tmpEdges = new edge [capcity];
            memcpy(tmpEdges,Edges,sizeof(edge)*esize);
            delete [] Edges;
            Edges = tmpEdges;
        }
        memcpy(Edges+esize,E,sizeof(edge));
        esize ++;
    }
    // Release storage; the vector may be re-init()ed afterwards.
    void clear() {delete [] Edges; Edges = NULL; capcity = 0; esize = 0; return ;}
};
unsigned int *edgeOffset;
int *edgeRow;
int *adjLength;
edge *Edges;
clock_t start_, end_;
// Reads a binary edge list, orients/relabels edges by degree, strips
// degree<=1 nodes, bucket-sorts the edges on the GPU with thrust, and builds
// a CSR (edgeOffset/edgeRow/adjLength globals). Returns false on unreadable
// input or an oversized sort bucket.
// Fixes: the open check used fin.bad() (a failed open sets failbit, which
// bad() does not report); "fould" typo in the error message; the OpenMP
// pragma was misspelled "parallen" (silently ignored) and, once active, the
// loop needs a reduction on wipedEdges to avoid a data race.
bool preProcess(const char *fileName, unsigned int &_edgeNum, unsigned &_nodeNum)
{
    //get file size
    ifstream fin1(fileName,ios::in|ios::binary);
    fin1.seekg(0,ios::end);
    streampos Size = fin1.tellg();
    fin1.close();
    long int size = Size;
    cout << "the size of input file is " << size << " Byte. " << endl;
    unsigned int edgeNum = size/(sizeof(int)*2);
    Edges = new edge [edgeNum];
    //read data
    ifstream fin(fileName, std::ios::binary);
    if (!fin) {
        cout << "File not found!" << endl;
        return false;
    }
    cout << "start read data... ..." << endl;
    fin.read((char *)Edges,sizeof(edge)*edgeNum);
    fin.close();
    cout << "end read data" << endl;
    //pre work
    //find node number: each of divideNum slices computes a partial max id
    int divideNum = 100;
    unsigned int *maxNodeIDs = new unsigned int [divideNum];
    memset(maxNodeIDs,0,sizeof(unsigned int)*divideNum);
    #pragma omp parallel for
    for (int d = 0; d < divideNum; d++) {
        unsigned int step = edgeNum/divideNum;
        unsigned int s = d*step;
        unsigned int e = (d+1)*step;
        if (d == divideNum - 1)
            e = edgeNum;
        for(unsigned int i = s; i < e; i ++)
        {
            if (Edges[i].src > maxNodeIDs[d])
                maxNodeIDs[d] = Edges[i].src;
            if (Edges[i].dst > maxNodeIDs[d])
                maxNodeIDs[d] = Edges[i].dst;
        }
    }
    unsigned int maxNodeID = maxNodeIDs[0];
    for (int i = 1; i < divideNum; i ++)
        if (maxNodeIDs[i] > maxNodeID)
            maxNodeID = maxNodeIDs[i];
    cout << "get max nodeid" << endl;
    unsigned nodeNum = maxNodeID + 1;
    delete [] maxNodeIDs;
    //cal degrees (serial: both endpoints of every edge are incremented)
    int * degreeRecord = new int[nodeNum];
    memset(degreeRecord,0,sizeof(int)*nodeNum);
    //#############################################
    for (unsigned int i = 0; i < edgeNum; i++)
    {
        degreeRecord[Edges[i].src]++;
        degreeRecord[Edges[i].dst]++;
    }
    // Orient each edge from the lower-degree endpoint (ties broken by id)
    #pragma omp parallel for
    for (unsigned int i = 0; i < edgeNum; i ++) {
        unsigned int src = Edges[i].src;
        unsigned int dst = Edges[i].dst;
        if (degreeRecord[src] > degreeRecord[dst] || (degreeRecord[src] == degreeRecord[dst] && src < dst)) {
            Edges[i].src = dst;
            Edges[i].dst = src;
        }
    }
    // Prefix-count nodes of degree <= 1 so remaining ids can be compacted
    int * toBeMini = new int[nodeNum];
    int wipedEdges = 0;
    memset(toBeMini,0,sizeof(int)*nodeNum);
    int totalMinied = 0;
    for (unsigned int i = 0; i < nodeNum; i ++) {
        if (degreeRecord[i] <= 1) {
            totalMinied ++;
        }
        toBeMini[i] = totalMinied;
    }
    // Drop edges touching degree<=1 nodes and remap the surviving ids.
    // reduction(+:wipedEdges) keeps the shared counter race-free.
    #pragma omp parallel for reduction(+:wipedEdges)
    for (unsigned int i = 0; i < edgeNum; i++) {
        unsigned int src = Edges[i].src;
        unsigned int dst = Edges[i].dst;
        if (degreeRecord[src] <= 1) {
            Edges[i].src = -1;
            wipedEdges ++;
            continue;
        }
        if (degreeRecord[dst] <= 1) {
            Edges[i].dst = -1;
            wipedEdges ++;
            continue;
        }
        if (src > 0) {
            Edges[i].src = src - toBeMini[src-1];
        }
        if (dst > 0)
            Edges[i].dst = dst - toBeMini[dst-1];
    }
    nodeNum = nodeNum - totalMinied;
    delete [] toBeMini;
    delete [] degreeRecord;
    cout << "end rearrange dst and src" << endl;
    //######################################
    /*#pragma omp parallel for
    for (unsigned int i = 0; i < edgeNum; i++) {
        unsigned int src = Edges[i].src;
        unsigned int dst = Edges[i].dst;
        if (src < dst) {
            Edges[i].src = dst;
            Edges[i].dst = src;
        }
    }*/
    //#########################################
    //sort edges
    //************sort edges && get nodeNum********
    edgeVector * edgeBucket = new edgeVector [bucketNum];
    for (int i = 0; i < bucketNum; i ++)
        edgeBucket[i].init(edgeNum/bucketNum);
    unsigned bucketStep = (nodeNum + bucketNum - 1)/bucketNum;
    for (unsigned int i = 0; i < edgeNum; i ++)
    {
        if (Edges[i].src == -1)
            continue;
        int bucketID = Edges[i].src/bucketStep;
        edgeBucket[bucketID].addEdge(Edges+i);
    }
    cout << "end pust edges in bucket" << endl;
    unsigned int *bucketEdgeOffset = new unsigned int [bucketNum];
    bucketEdgeOffset[0] = 0;
    for (int i = 0; i < bucketNum-1; i ++) {
        unsigned int bucketSize = edgeBucket[i].esize;
        if (bucketSize > thrustSortBlockSize/sizeof(edge)) {
            cout << "bucket " << i << "size is " << bucketSize << ", it's too large!" << endl;
            return false;
        }
        bucketEdgeOffset[i+1] = bucketEdgeOffset[i] + bucketSize;
    }
    // Sort each bucket on the GPU, then copy the sorted edges back
    for (int i = 0; i < bucketNum; i++) {
        thrust::device_vector<edge> D (edgeBucket[i].Edges, edgeBucket[i].Edges+edgeBucket[i].esize);
        thrust::sort(D.begin(),D.begin()+edgeBucket[i].esize,cmp);
        thrust::copy(D.begin(),D.begin()+edgeBucket[i].esize,edgeBucket[i].Edges);
    }
    cout << "end sort edges in GPU " << endl;
    for(int i = 0; i < bucketNum; i ++) {
        memcpy(Edges+bucketEdgeOffset[i],edgeBucket[i].Edges,sizeof(edge)*edgeBucket[i].esize);
    }
    cout << "end copy result to Edges" << endl;
    delete [] bucketEdgeOffset;
    for (int i = 0; i < bucketNum; i ++)
        edgeBucket[i].clear();
    delete [] edgeBucket;
    //************end sort edges && get nodeNum********
    edgeNum = edgeNum - wipedEdges;//************************************************
    //unsigned int nodeNum = Edges[edgeNum-1].src + 1;
    edgeOffset = new unsigned int [nodeNum+2];
    edgeOffset[0] = 0;
    edgeRow = new int [edgeNum+1];
    adjLength = new int[nodeNum+1];
    memset(adjLength,0,sizeof(int)*(nodeNum+1));
    unsigned int nodePos = 0;
    unsigned int edgePos = 0;
    edge * edgePtr;
    int formerSrc = -1,formerDst = -1;
    start_ = clock();
    // Merge duplicate edges and build CSR in one pass over the sorted list
    // for (int i = 0; i < edgeNum; i++)
    //     printf("%d %d\n",Edges[i].src,Edges[i].dst);
    for (unsigned int i = 0; i < edgeNum; i++)
    {
        edgePtr = Edges + i;
        if (edgePtr->src == -1 || edgePtr->dst == -1)
            continue;
        if (edgePtr->src == edgePtr->dst) {
            // Self loop: advance the offset table but emit no edge
            formerSrc = edgePtr->src;
            formerDst = edgePtr->dst;
            int curSrc = edgePtr->src;
            for (unsigned j = nodePos + 1; j <= curSrc; j++) {
                edgeOffset[j] = edgePos;
                adjLength[j-1] = edgeOffset[j]-edgeOffset[j-1];
            }
            nodePos = curSrc;
            continue;
        }
        if ((i > 0) && (edgePtr->src == formerSrc)) {
            //TODO find a more efficienty way
            if(edgePtr->dst == formerDst){
                continue;   // duplicate edge
            }
            edgeRow[edgePos++] = edgePtr->dst;
            formerDst = edgePtr->dst;
            continue;
        }
        int curSrc = edgePtr->src;
        for (unsigned j = nodePos + 1; j <= curSrc; j++) {
            edgeOffset[j] = edgePos;
            adjLength[j-1] = edgeOffset[j]-edgeOffset[j-1];
        }
        nodePos = curSrc;
        edgeRow[edgePos++] = edgePtr->dst;
        formerSrc = edgePtr->src;
        formerDst = edgePtr->dst;
    }
    // Close out offsets for trailing nodes with no outgoing edges
    for (unsigned i = nodePos + 1; i < nodeNum; i ++) {
        edgeOffset[i] = edgePos;
        adjLength[i-1] = edgeOffset[i] - edgeOffset[i-1];
    }
    end_ = clock();
    cout << "merge and make csr use " << (double)1000*(end_-start_)/CLOCKS_PER_SEC << " ms." << endl;
    edgeOffset[nodeNum] = edgePos;
    edgeOffset[nodeNum+1] = edgePos + 1;
    adjLength[nodeNum-1] = edgeOffset[nodeNum] - edgeOffset[nodeNum-1];
    adjLength[nodeNum] = 1024;     // sentinel entry past the last real node
    edgeRow[edgePos] = nodeNum;    // sentinel edge
    cout << "csr built, edgeNum is "<< edgePos<< ", the node num is " << nodeNum << ", origin egde num is " << edgeNum << endl;
    //TODO remove empty node in edgeOffset
    int maxDegreeStored = 0;
    for (unsigned int i = 0; i < nodeNum; i ++)
        if (adjLength[i] > maxDegreeStored)
            maxDegreeStored = adjLength[i];
    cout << "The max stored degree is " << maxDegreeStored << endl;
    _edgeNum = edgeOffset[nodeNum];
    _nodeNum = nodeNum;
    delete [] Edges;
    return true;
}
|
21,649 | #include "people_allocation.cuh"
#include <assert.h>
#include <algorithm>
#define NUM_CELLS 1024 // 1回に扱うセルの数
/**
 * Inner product of two length-numComponents float vectors (device helper).
 */
__device__
float dot(float* preference, float* feature, int numComponents) {
    float acc = 0.0;
    for (int k = 0; k < numComponents; ++k)
        acc += preference[k] * feature[k];
    return acc;
}
/**
 * GPU kernel: one thread per user; computes the dot-product score of that
 * user's preference vector against every cell's feature vector, staging
 * cell features through shared memory in NUM_CELLS-sized chunks.
 */
__global__
void cudaComputeScoreKernel(int numComponents, int numUsers, float* preferences, int numCells, float* features, float* results) {
// Unique thread id (launched as <<<1, numUsers>>> from allocate_people).
int idx = blockDim.x * blockIdx.x + threadIdx.x;
// Copy this user's preference vector into a per-thread array.
// NOTE(review): assumes numComponents <= 10 — not checked here; confirm at the call site.
float preference[10];
for (int i = 0; i < numComponents; ++i) {
preference[i] = preferences[idx * numComponents + i];
}
__shared__ float sFeatures[NUM_CELLS];
int numIterations = ceil((float)numCells / NUM_CELLS);
for (int iter = 0; iter < numIterations; ++iter) {
// Stage a chunk of features into shared memory.
// NOTE(review): sFeatures holds NUM_CELLS floats, but the write index
// i * numComponents + k reaches NUM_CELLS * numComponents, overrunning the
// shared array whenever numComponents > 1; the features/results offsets
// (iter * NUM_CELLS) also appear unscaled by numComponents. Verify the
// intended memory layout before relying on these results.
for (int i = idx; i < NUM_CELLS; i += numUsers) {
for (int k = 0; k < numComponents; ++k) {
sFeatures[i * numComponents + k] = features[iter * NUM_CELLS + i * numComponents + k];
}
}
__syncthreads();
// Score each staged cell for this user.
for (int i = 0; i < NUM_CELLS; ++i) {
results[iter * NUM_CELLS + idx * numCells + i] = dot(preference, &sFeatures[i * numComponents], numComponents);
}
}
}
/**
 * Computes each user's score for each cell from the users' preference
 * vectors and the cells' property (feature) vectors, storing the score
 * matrix in *results and printing it.
 *
 * @param preferences per-user preference vectors
 * @param features    per-cell property vectors
 * @param results     out-parameter; receives a newly allocated
 *                    numCells * numUsers float array (caller owns it)
 */
void allocate_people(vector<vector<float> > preferences, vector<vector<float> > features, float** results) {
assert(preferences[0].size() == features[0].size());
int numUsers = preferences.size();
int numCells = features.size();
int numComponents = preferences[0].size();
// Device memory for the preference vectors
float* devPreferences;
cudaMalloc((void**)&devPreferences, sizeof(float) * numComponents * numUsers);
// Device memory for the property vectors
float* devFeatures;
cudaMalloc((void**)&devFeatures, sizeof(float) * numComponents * numCells);
// Device memory for the result matrix
float* devResults;
cudaMalloc((void**)&devResults, sizeof(float) * numCells * numUsers);
// Flatten and transfer the preference vectors to the device
vector<float> arrayPreferences(numUsers * numComponents);
for (int i = 0; i < numUsers; ++i) {
copy(preferences[i].begin(), preferences[i].end(), arrayPreferences.begin() + i * numComponents);
}
cudaMemcpy(devPreferences, arrayPreferences.data(), sizeof(float) * numComponents * numUsers, cudaMemcpyHostToDevice);
// Flatten and transfer the property vectors to the device
vector<float> arrayFeatures(numCells * numComponents);
for (int i = 0; i < numCells; ++i) {
copy(features[i].begin(), features[i].end(), arrayFeatures.begin() + i * numComponents);
}
cudaMemcpy(devFeatures, arrayFeatures.data(), sizeof(float) * numComponents * numCells, cudaMemcpyHostToDevice);
// Launch the kernel: one thread per user, single block.
// NOTE(review): numUsers must not exceed the device's max threads per block;
// no launch/error check is performed here — the blocking cudaMemcpy below is
// the only synchronization point.
cudaComputeScoreKernel<<<1, numUsers>>>(numComponents, numUsers, devPreferences, numCells, devFeatures, devResults);
// Allocate the host-side result buffer
*results = new float[numCells * numUsers];
// Copy the results back to the host (implicitly waits for the kernel)
cudaMemcpy(*results, devResults, sizeof(float) * numCells * numUsers, cudaMemcpyDeviceToHost);
// Print the score matrix
for (int i = 0; i < numUsers; ++i) {
for (int j = 0; j < numCells; ++j) {
printf("%lf, ", (*results)[i * numCells + j]);
}
printf("\n");
}
// Release device memory
cudaFree(devPreferences);
cudaFree(devFeatures);
cudaFree(devResults);
}
|
21,650 | // 系统头文件
#include <stdlib.h>
#include <stdio.h>
// cuda头文件
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#define N 10
#define GRID_SIZE 32
#define BLOCK_SIZE 16
// One thread per element of c = a * b (width x width, row-major).
// Original bugs fixed: the kernel ignored blockIdx, so every block recomputed
// the same top-left tile, and there was no bounds guard even though the
// launch uses 16x16 blocks for a 10x10 problem — threads with tx/ty >= width
// wrote past the end of the buffers.
__global__ void matrixMultiplication(float *a, float *b, float *c, int width) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= width || col >= width)
        return;
    float pvalue = 0;
    for (int k = 0; k < width; ++k) {
        float melement = a[row * width + k];
        float nelement = b[k * width + col];
        pvalue += melement * nelement;
    }
    c[row * width + col] = pvalue;
}
// Fill the vector with pseudo-random values.
// Note: srand(1) is reseeded on every call, so successive calls produce
// identical sequences (behavior preserved from the original).
void randomInit(float* data, unsigned int size) {
    srand(1);
    for (unsigned int idx = 0; idx < size; ++idx)
        data[idx] = rand() / (float) 100000000;
}
// Host-side main: multiplies two random N x N matrices on the GPU and
// prints the result.
int main(void) {
float *aH, *bH, *cH, *aD, *bD, *cD;
int mem_size = N * N * sizeof(float);
// Allocate host memory for the A, B, C matrices
aH = (float*) malloc(mem_size);
bH = (float*) malloc(mem_size);
cH = (float*) malloc(mem_size);
// Allocate device memory for the A, B, C matrices
cudaMalloc((void**) &aD, mem_size);
cudaMalloc((void**) &bD, mem_size);
cudaMalloc((void**) &cD, mem_size);
// Initialize the host-side A and B matrices
// (randomInit reseeds srand(1) each call, so A and B receive identical data)
randomInit(aH, N * N);
randomInit(bH, N * N);
// Copy host A, B to device A, B
cudaMemcpy(aD, aH, mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(bD, bH, mem_size, cudaMemcpyHostToDevice);
// Kernel launch dimensions.
// NOTE(review): 32x32 blocks of 16x16 threads vastly exceeds the 10x10
// problem (N = 10, BLOCK_SIZE = 16) — confirm the kernel bounds-checks its
// indices, otherwise out-of-range threads write past the N*N buffers.
dim3 dimGrid(GRID_SIZE, GRID_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// Launch the GPU kernel
matrixMultiplication <<< dimGrid, dimBlock >>> (aD, bD, cD, N);
// Copy the result matrix C from device back to host
cudaMemcpy(cH, cD, mem_size, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
printf("%.2f\t", cH[i * N + j]);
}
printf("\n");
}
free(aH);
free(bH);
free(cH);
cudaFree(aD);
cudaFree(bD);
cudaFree(cD);
}
|
21,651 | #include<iostream>
using namespace std;
// Device-side greeting kernel (device printf is buffered until a sync).
__global__ void hello() {
    printf("Hello world from device\n");
}
int main() {
    // Launch one device thread, greet from the host, then synchronize so the
    // buffered device printf is flushed before exit.
    hello<<<1, 1>>>();
    cout << "Hello world from host" << endl;
    cudaDeviceSynchronize();
    return 0;
}
|
21,652 | #include <cuda.h>
#include <math.h>
// External-force term: out = 2 * (img1 - img2) * grad, element-wise over a
// rows x columns image (row-major, indexed i + j*columns).
// The bounds guard was commented out in the original; it is re-enabled so
// grids that overshoot the image no longer write past the buffers.
__global__ void extf( double *out, double *img1, double *img2, double *grad, int rows, int columns )
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // column
    int j = blockIdx.y * blockDim.y + threadIdx.y;   // row
    if (i >= columns || j >= rows)
        return;
    out[i + j * columns] = 2 * (img1[i + j * columns] - img2[i + j * columns]) * grad[i + j * columns];
}
// Internal-force term: 5-point discrete Laplacian of f, skipping the
// one-pixel border (note: indexed i*rows + j, unlike extf's i + j*columns).
__global__ void intf(
    double *out,
    double *f,
    int rows,
    int columns)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    bool interior = (i >= 1) && (j >= 1) && (i < columns - 1) && (j < rows - 1);
    if (interior) {
        int c = i * rows + j;
        out[c] = -4 * f[c] + f[c - rows] + f[c + rows] + f[c - 1] + f[c + 1];
    }
}
// Element-wise sum out = in1 + in2 over the region excluding a two-pixel
// border (indexed i*rows + j).
__global__ void add(
    double *out,
    double *in1,
    double *in2,
    int rows,
    int columns)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= 2 && j >= 2 && i < columns - 2 && j < rows - 2) {
        int c = i * rows + j;
        out[c] = in1[c] + in2[c];
    }
}
// Combined force update: out = rho * (extf + lambda * intf), one-pixel
// border excluded (indexed i*rows + j).
__global__ void d_f(
    double *out,
    double *intf,
    double *extf,
    double rho,
    double lambda,
    int rows,
    int columns)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= 1 && j >= 1 && i < columns - 1 && j < rows - 1) {
        int c = i * rows + j;
        out[c] = rho * (extf[c] + lambda * intf[c]);
    }
}
21,653 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
//host
extern float *Hy, coe_Hy, dt, dz;
extern int size_space, size_Hy;
const float PI = 3.141592653589793f;
const float mu = (4 * PI)*1e-7f;
//device
extern float *dev_Hy, *dev_Ex;
void Hy_init_malloc(int );
void Hy_init_assignValue(int );
void Hy_checkout();
void Hy_transfer_host_device();
void Hy_transfer_device_host();
// Initialize the Hy magnetic-field component for a 1-D space of the given
// size: records the size in the global size_Hy, allocates the host/device
// buffers, then zero-fills the host field and computes coe_Hy.
void Hy_init(int space_size)
{
size_Hy = space_size;
Hy_init_malloc(size_Hy);
Hy_init_assignValue(size_Hy);
}
// Allocate matching host and device buffers for the Hy field (size floats).
void Hy_init_malloc(int size)
{
    // Host-side field
    Hy = static_cast<float *>(malloc(sizeof(float) * size));
    // Device-side mirror
    cudaMalloc(&dev_Hy, sizeof(float) * size);
}
// Zero the host-side Hy field and precompute the update coefficient
// coe_Hy = dt / (mu * dz).
void Hy_init_assignValue(int size)
{
    for (int idx = 0; idx < size; ++idx)
        Hy[idx] = 0.f;
    coe_Hy = dt / (mu * dz);
}
// Debug dump of the Hy field: size, coefficient, and tab-separated values.
void Hy_checkout(int size)
{
    cout << "Hy: size = " << size << endl;
    cout << "coe_Hy = " << coe_Hy;
    cout << "Hy: ";
    for (int idx = 0; idx < size; ++idx)
        cout << Hy[idx] << "\t";
    cout << endl;
}
// Copy the first n elements of the host Hy field into device memory.
void Hy_transfer_host_device(int n)
{
    cudaMemcpy(dev_Hy, Hy, n * sizeof(float), cudaMemcpyHostToDevice);
}
// Copy the first n elements of the device Hy field back to host memory.
void Hy_transfer_device_host(int n)
{
    cudaMemcpy(Hy, dev_Hy, n * sizeof(float), cudaMemcpyDeviceToHost);
}
// FDTD magnetic-field update: Hy[i] -= coe_Hy * (Ex[i+1] - Ex[i]).
// NOTE(review): the loop is serial over the whole space and performs a
// read-modify-write of dev_Hy, so this kernel is only correct when launched
// with a single thread (<<<1,1>>>); any additional thread would apply the
// full update again. It also reads dev_Ex[i + 1] up to i = size_space - 1,
// so dev_Ex must hold at least size_space + 1 elements — confirm at the
// allocation site.
__global__ void Hy_cmp_kernel(float* dev_Hy, float * dev_Ex, float coe_Hy, int size_space)
{
int i;
for (i = 0; i < size_space; i++){
dev_Hy[i] = dev_Hy[i] - (coe_Hy)*(dev_Ex[i + 1] - dev_Ex[i]);
//test
//dev_Hy[i] = i*10.0;
}
}
21,654 | #include "includes.h"
// Element-wise activation over a length-len vector, one thread per element.
// NOTE(review): despite the name, this computes the "softsign" function
// x / (1 + |x|), NOT tanh; the formula is preserved since callers may rely
// on it. Fixed: the literal is now 1.0f and fabsf() is used so the math
// stays in single precision (the original 1.0 promoted every element to
// double).
__global__ void tanh_f32 (float* vector, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        float tmp = vector[idx];
        output[idx] = tmp / (1.0f + fabsf(tmp));
    }
}
21,655 | #include "includes.h"
// Chebyshev solver step: u += p over the inner (non-halo) region of a
// 2-D grid stored with halo_depth ghost cells on every side.
__global__ void cheby_calc_u( const int x_inner, const int y_inner, const int halo_depth, const double* p, double* u)
{
    const int gid = threadIdx.x + blockIdx.x*blockDim.x;
    if(gid < x_inner*y_inner)
    {
        // Full row width including halo cells on both sides
        const int row_width = x_inner + 2*halo_depth;
        // Translate the linear inner-region id into a padded-array index
        const int inner_col = gid % x_inner;
        const int inner_row = gid / x_inner;
        const int halo_offset = halo_depth*(row_width + 1);
        const int index = halo_offset + inner_col + inner_row*row_width;
        u[index] += p[index];
    }
}
21,656 | //nvcc -arch=sm_30 -lcufft fft_batched.cu
#include <cuda.h>
#include <cufft.h>
#include <stdio.h>
#include <math.h>
#define DATASIZE 8
#define BATCH 3
#define GRID_DIMENSION 3
#define BLOCK_DIMENSION 3
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA runtime error with its source location; optionally exit with
// the error code. Fix: the format string ended in "%dn" — the intended
// newline escape was missing its backslash.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
__global__ void conjugate(long int nelem, cufftComplex *conj);
/********/
/* MAIN */
/********/
int main ()
{
    // Batched 1D real-to-complex FFT demo: BATCH transforms of length
    // DATASIZE, followed by a kernel that prints the conjugated spectrum.
    // --- Host side input data allocation and initialization
    cufftReal *hostInputData = (cufftReal*)malloc(DATASIZE*BATCH*sizeof(cufftReal));
    if (hostInputData == NULL) { fprintf(stderr, "host malloc failed\n"); return 1; }
    int grid_size = GRID_DIMENSION;
    int block_size = BLOCK_DIMENSION;
    dim3 DimGrid(grid_size, grid_size, grid_size);
    dim3 DimBlock(block_size, block_size, block_size);
    for (int i=0; i<BATCH; i++)
        for (int j=0; j<DATASIZE; j++){
            hostInputData[i*DATASIZE + j] = (cufftReal)((i + 1) + j);
            printf("hostInputData[%d]=%f\n",i*DATASIZE + j,hostInputData[i*DATASIZE + j]);
        }
    // --- Device side input data allocation and initialization
    cufftReal *deviceInputData;
    gpuErrchk(cudaMalloc((void**)&deviceInputData, DATASIZE * BATCH * sizeof(cufftReal)));
    // fixed: this H2D copy was previously unchecked
    gpuErrchk(cudaMemcpy(deviceInputData, hostInputData, DATASIZE * BATCH * sizeof(cufftReal), cudaMemcpyHostToDevice));
    // --- Host side output data allocation
    cufftComplex *hostOutputData = (cufftComplex*)malloc((DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex));
    if (hostOutputData == NULL) { fprintf(stderr, "host malloc failed\n"); return 1; }
    // --- Device side output data allocation
    cufftComplex *deviceOutputData;
    cufftComplex *fft_conj;
    gpuErrchk(cudaMalloc((void**)&deviceOutputData, (DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex)));
    gpuErrchk(cudaMalloc((void**)&fft_conj, (DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex)));
    // --- Batched 1D FFTs
    cufftHandle handle;
    int rank = 1;                                     // --- 1D FFTs
    int n[] = { DATASIZE };                           // --- Size of the Fourier transform
    int istride = 1, ostride = 1;                     // --- Distance between two successive input/output elements
    int idist = DATASIZE, odist = (DATASIZE / 2 + 1); // --- Distance between batches
    int inembed[] = { 0 };                            // --- Input size with pitch (ignored for 1D transforms)
    int onembed[] = { 0 };                            // --- Output size with pitch (ignored for 1D transforms)
    int batch = BATCH;                                // --- Number of batched executions
    printf("idist = %d\n", idist);
    printf("odist = %d\n", odist);
    printf("n = %d\n", n[0]);
    cufftPlanMany(&handle, rank, n,
                  inembed, istride, idist,
                  onembed, ostride, odist, CUFFT_R2C, batch);
    cufftExecR2C(handle, deviceInputData, deviceOutputData);
    gpuErrchk(cudaMemcpy(fft_conj, deviceOutputData, (DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex), cudaMemcpyDeviceToDevice));
    conjugate <<< DimGrid, DimBlock >>> ((DATASIZE / 2 + 1) * BATCH, fft_conj );
    gpuErrchk(cudaGetLastError()); // fixed: launch errors went unreported
    // --- Device->Host copy of the results
    gpuErrchk(cudaMemcpy(hostOutputData, deviceOutputData, (DATASIZE / 2 + 1) * BATCH * sizeof(cufftComplex), cudaMemcpyDeviceToHost));
    for (int i=0; i<BATCH; i++)
        for (int j=0; j<(DATASIZE / 2 + 1); j++)
            printf("Batch = %i j= %i real %f imag %f\n", i, j, hostOutputData[i*(DATASIZE / 2 + 1) + j].x, hostOutputData[i*(DATASIZE / 2 + 1) + j].y);
    cufftDestroy(handle);
    gpuErrchk(cudaFree(deviceOutputData));
    gpuErrchk(cudaFree(deviceInputData));
    gpuErrchk(cudaFree(fft_conj));
    // fixed: host buffers were leaked
    free(hostInputData);
    free(hostOutputData);
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return EXIT_SUCCESS;
}
__global__ void conjugate(long int nelem, cufftComplex *conj)
{
    // Flatten the 3-D block/thread coordinates into one global thread id.
    int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
    int localId = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    int blockId = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    int uniqueid = localId + blockId * threadsPerBlock;
    // Only prints the negated imaginary part; the buffer is not modified.
    if (uniqueid < nelem){
        printf("Unique ID = %d - conj = %f\n", uniqueid, conj[uniqueid].y*-1);
    }
}
|
21,657 | #include "includes.h"
__global__ void addInc(unsigned int* deviceInput, unsigned int* deviceOutput, int eleCnt, unsigned int* deviceInc)
{
    // Scan post-pass: each block owns 2*blockDim.x elements and adds its
    // per-block increment from deviceInc to both elements of each thread.
    int inc = deviceInc[blockIdx.x];
    int first = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    int second = first + blockDim.x;
    if (first < eleCnt)
        deviceOutput[first] = deviceInput[first] + inc;
    if (second < eleCnt)
        deviceOutput[second] = deviceInput[second] + inc;
}
21,658 | #include "includes.h"
/**
 * Program that mimics the behaviour of gpdt for evaluating a
 * kernel over a series of variable-sized vectors using CUDA.
 * Compile with:
 * nvcc -o simil_gpdt_si_cuda simil_gpdt_si_cuda.cu
 * Run with:
 * ./simil_gpdt_si_cuda [num vectors] [num components] [num rows to compute] [kernel type] [degree(int)/sigma(float)]
 **/
using namespace std;
/**
 * Kernel that computes the squared Euclidean norm of each sparse
 * vector. (The original comment described filling the vectors with
 * random numbers between 0 and 99, which belongs to a different helper.)
 **/
__global__ void Kernel_norme(float *Vd, float *Nd, int *Vp, int *Vnp, int N, int C, int nr_max_val)
{
    // One thread per sparse vector: accumulate the squared Euclidean norm of
    // vector x using only its Vnp[x] non-zero component indices from Vp.
    long int x = threadIdx.x + blockIdx.x * blockDim.x;
    if(x < N)
    {
        float acc = 0;
        int nnz = Vnp[x];
        for(int i = 0; i < nnz; i++)
        {
            int pos = Vp[x * nr_max_val + i];
            float v = Vd[x * C + pos];
            acc += v * v;
        }
        Nd[x] = acc;
    }
}
21,659 | // kernel to convert from OpenCV channel representation to channel-first
// see: https://docs.opencv.org/2.4/doc/tutorials/core/how_to_scan_images/how_to_scan_images.html#how-the-image-matrix-is-stored-in-the-memory
constexpr size_t BLOCK_SIZE = 1024U;
#include <cuda_runtime.h>
#include <math_constants.h>
#include <array>
__global__ void nhwc2nchwKernel(const unsigned char* __restrict__ source, float* __restrict__ dest,
                                int channelSize, int channelsNum, int rowElems, int rowSize)
{
    // Convert interleaved (NHWC, possibly row-padded) uchar data into planar
    // (NCHW) float in [0, 1]. One thread per (pixel, channel) pair.
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // fixed: no bounds guard — required now that the launcher rounds the
    // grid up to cover the tail
    if (idx >= channelSize * channelsNum) return;
    int offset = idx / channelsNum;   // pixel index within a channel plane
    int channel = idx % channelsNum;
    // what would the row be if we didn't have any padding
    int row = idx / rowElems;
    int col = idx % rowElems;
    // actual element - skip padding
    int sourceIdx = row * rowSize + col;
    dest[channelSize * channel + offset] = (float) source[sourceIdx] / 255.f;
}
// we expect all memory to already reside on device so no need to allocate anything
void nhwc2nchw(const unsigned char * source, float * dest, int channelSize,
               int channelsNum, int rowElems, int rowSize, cudaStream_t Stream)
{
    // fixed: floor division silently dropped up to BLOCK_SIZE-1 trailing
    // elements when the total was not a multiple of BLOCK_SIZE
    const int total = channelSize * channelsNum;
    const int nBlocks = (total + BLOCK_SIZE - 1) / BLOCK_SIZE;
    nhwc2nchwKernel<<<nBlocks, BLOCK_SIZE, 0, Stream>>>(
        source, dest, channelSize, channelsNum, rowElems, rowSize);
}
template<typename scalar_t>
__global__ void normalizeChannelKernel(scalar_t* __restrict__ source,
        size_t channel_stride, scalar_t mean, scalar_t std)
{
    // In-place standardisation of one channel plane: x <- (x - mean) / std.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= channel_stride) return;
    source[i] = (source[i] - mean) / std;
}
template<typename scalar_t, size_t n_ch>
void normalize_image_chw(scalar_t* image, size_t ch_stride, const std::array<scalar_t, n_ch> &mean,
        const std::array<scalar_t, n_ch> &std, cudaStream_t Stream)
{
    // Launch one per-channel standardisation kernel on the given stream.
    // fixed: floor division left the tail beyond the last full block
    // unprocessed; ceil-div is safe because the kernel bounds-checks.
    const int nBlocks = (ch_stride + BLOCK_SIZE - 1) / BLOCK_SIZE;
    for (size_t ch=0; ch < n_ch; ++ch)
    {
        normalizeChannelKernel<scalar_t><<<nBlocks, BLOCK_SIZE, 0, Stream>>>(
            &image[ch*ch_stride], ch_stride, mean[ch], std[ch]);
    }
}
template void normalize_image_chw<float, 3ul>(float*, size_t, std::array<float, 3ul> const&,
    std::array<float, 3ul> const&, cudaStream_t);
template<typename scalar_t, typename intergral_t>
__global__ void argmax_chw_Kernel(const scalar_t* __restrict__ source,
    intergral_t* __restrict__ output, const size_t channel_stride, const size_t n_classes)
{
    // Per-pixel argmax over planar class-score channels: scan the planes and
    // record the index of the highest-scoring class.
    const int pixel = threadIdx.x + blockIdx.x * blockDim.x;
    if (pixel >= channel_stride) return;
    intergral_t winner = 0;
    scalar_t winner_score = source[pixel];
    for (size_t cls=1; cls<n_classes; ++cls)
    {
        const scalar_t score = source[pixel + cls*channel_stride];
        if (score > winner_score)
        {
            winner_score = score;
            winner = cls;
        }
    }
    output[pixel] = winner;
}
template<typename scalar_t, typename intergral_t>
void argmax_chw(const scalar_t* input, intergral_t* output,
        size_t n_classes, size_t ch_stride, cudaStream_t Stream)
{
    // Per-pixel argmax over class planes.
    // fixed: floor division left the tail beyond the last full block
    // unprocessed; ceil-div is safe because the kernel bounds-checks.
    const int nBlocks = (ch_stride + BLOCK_SIZE - 1) / BLOCK_SIZE;
    argmax_chw_Kernel<scalar_t, intergral_t><<<nBlocks, BLOCK_SIZE, 0, Stream>>>(
        input, output, ch_stride, n_classes);
}
template void argmax_chw<float, unsigned char>(
    const float*, unsigned char*, size_t, size_t, cudaStream_t);
template<typename intergral_t, size_t n_classes>
__global__ void seg_image_Kernel(const intergral_t* __restrict__ argmax_image,
    u_char* __restrict__ rgb_image, const u_char* __restrict__ colour_map, size_t image_size)
{
    // Paint each pixel with the RGB colour of its class id, reading the
    // (n_classes x 3) colour map from a shared-memory copy.
    const int offset = threadIdx.x + blockIdx.x * blockDim.x;
    __shared__ u_char smem_colour_map[n_classes * 3U];
    // First n_classes*3 threads of the block stage the colour map
    // (assumes blockDim.x >= n_classes*3 — holds for BLOCK_SIZE = 1024).
    if (threadIdx.x < n_classes * 3U)
    {
        smem_colour_map[threadIdx.x] = colour_map[threadIdx.x];
    }
    __syncthreads();  // every thread reaches the barrier (no early return above)
    if (offset < image_size)
    {
        const intergral_t class_id = argmax_image[offset];
        // Interleaved RGB output: 3 bytes per pixel.
        for (size_t ch=0U; ch<3U; ++ch)
        {
            rgb_image[3U * offset + ch] = smem_colour_map[3U * class_id + ch];
        }
    }
}
template<typename intergral_t, size_t n_classes>
void seg_image(const intergral_t* argmax_image, u_char* rgb_image, const u_char* colour_map,
        size_t image_size, cudaStream_t Stream)
{
    // Colourise a class-id image into interleaved RGB.
    // fixed: floor division left the tail beyond the last full block
    // unprocessed; ceil-div is safe because the kernel bounds-checks.
    const int nBlocks = (image_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    seg_image_Kernel<intergral_t, n_classes><<<nBlocks, BLOCK_SIZE, 0, Stream>>>(
        argmax_image, rgb_image, colour_map, image_size);
}
template void seg_image<u_char, 19>(
    const u_char*, u_char*, const u_char*, size_t, cudaStream_t);
template<typename scalar_t>
__global__ void flow_image_Kernel(const scalar_t* __restrict__ flow_image,
    u_char* __restrict__ rgb_image, size_t image_size)
{
    // Render a 2-channel optical-flow field (planar: x plane followed by the
    // y plane) as interleaved RGB using an HSV-style mapping:
    // hue = flow direction, saturation = scaled magnitude.
    const int offset = threadIdx.x + blockIdx.x * blockDim.x;
    constexpr float scale_factor = 8.f;
    constexpr float max_flow = 256.f;
    if (offset < image_size)
    {
        const scalar_t flow_x = flow_image[offset];
        const scalar_t flow_y = flow_image[offset + image_size];
        const scalar_t mag = sqrt(pow(flow_x, 2.f) + pow(flow_y, 2.f));
        // atan2 in [-pi, pi], shifted to [0, 2*pi)
        const scalar_t h = atan2(flow_y, flow_x) + CUDART_PI_F;
        const scalar_t s = min(max(mag * scale_factor / max_flow, 0.f), 1.f);
        // NOTE(review): with scale_factor = 8 and s <= 1 this clamp always
        // yields v == 1; possibly (1.f - s) was intended — confirm.
        const scalar_t v = min(max(scale_factor - s, 0.f), 1.f);
        // Standard HSV -> RGB: C = chroma, X = intermediate, m = offset.
        const scalar_t C = v * s;
        const scalar_t X = C * (1.f - abs(fmodf(h / (CUDART_PI_F/3.f), 2.f) - 1.f));
        const scalar_t m = v - C;
        scalar_t r = 0;
        scalar_t g = 0;
        scalar_t b = 0;
        // Select the RGB ordering from the 60-degree hue sector.
        if(h >= 0.f && h < CUDART_PI_F/3.f){
            r = C,g = X,b = 0;
        }
        else if(h >= CUDART_PI_F/3.f && h < 2.f*CUDART_PI_F/3.f) {
            r = X,g = C,b = 0;
        }
        else if(h >= 2.f*CUDART_PI_F/3.f && h < CUDART_PI_F) {
            r = 0,g = C,b = X;
        }
        else if(h >= CUDART_PI_F && h < 4.f*CUDART_PI_F/3.f) {
            r = 0,g = X,b = C;
        }
        else if(h >= 4.f*CUDART_PI_F/3.f && h < 5.f*CUDART_PI_F/3.f) {
            r = X,g = 0,b = C;
        }
        else {
            r = C,g = 0,b = X;
        }
        // Interleaved RGB output scaled to [0, 255].
        rgb_image[3U * offset] = static_cast<u_char>((r+m)*255.f);
        rgb_image[3U * offset + 1] = static_cast<u_char>((g+m)*255.f);
        rgb_image[3U * offset + 2] = static_cast<u_char>((b+m)*255.f);
    }
}
template<typename scalar_t>
void flow_image(const scalar_t* flow_image, u_char* rgb_image,
        size_t image_size, cudaStream_t Stream)
{
    // Render an optical-flow field as RGB.
    // fixed: floor division left the tail beyond the last full block
    // unprocessed; ceil-div is safe because the kernel bounds-checks.
    const int nBlocks = (image_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    flow_image_Kernel<<<nBlocks, BLOCK_SIZE, 0, Stream>>>(
        flow_image, rgb_image, image_size);
}
template void flow_image<float>(const float*, u_char*, size_t, cudaStream_t);
__global__ void exampleDevice(float * d){
    // Write each thread's global linear index into its own slot of d.
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    d[gid] = gid;
}
extern "C" void exampleHost(float * h, int blockDim, int threadDim){
    // Fill h with 0..blockDim*threadDim-1 computed on the GPU.
    float * d;
    cudaMalloc((void**)&d, blockDim * threadDim*sizeof(float));
    exampleDevice<<<blockDim, threadDim>>>(d);
    // blocking copy also synchronises with the kernel above
    cudaMemcpy(h, d, blockDim*threadDim*sizeof(float),cudaMemcpyDeviceToHost);
    cudaFree(d);  // fixed: the device buffer was leaked on every call
}
|
21,661 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define DEFAULT_ROW 16384
#define DEFAULT_COL 16384
// time stamp function in seconds
double getTimeStamp() {
    // Wall-clock time in seconds with microsecond resolution.
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double) tv.tv_usec/1000000 + tv.tv_sec;
}
// host side matrix addition
// host side matrix addition
void h_addmat(float *A, float *B, float *C, int nx, int ny){
    // CPU reference: elementwise C = A + B over an ny x nx matrix
    // stored row-major (flat indexing is equivalent to the row walk).
    const int total = nx * ny;
    for (int idx = 0; idx < total; idx++){
        C[idx] = A[idx] + B[idx];
    }
}
//host side matrix comparison
//host side matrix comparison
int h_compareResult(float *h_C, float *d_C, int noElems){
    // Return 1 at the first mismatch between CPU and GPU results, 0 if equal.
    for (int i = 0; i < noElems; i++){
        if (h_C[i] != d_C[i]){
#ifdef DEBUG
            printf("the i = %d\n", i);
            printf("the data of CPU is %.6f\n", h_C[i]);
            printf("the data of GPU is %.6f\n", d_C[i]);
#endif
            return 1;
        }
    }
    return 0;
}
// device-side matrix addition
// device-side matrix addition
__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
    // One thread per element; guard the partial blocks at the right/bottom edge.
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (col < nx && row < ny) {
        int idx = row*nx + col;
        C[idx] = A[idx] + B[idx];
    }
}
void initData(float* add, int noElems){
    // Fill the buffer with pseudo-random floats uniformly drawn from [0, 5].
    const float a = 5.0;
    for (int i = 0; i < noElems; i++){
        add[i] = ((float)rand()/(float)(RAND_MAX)) * a;
    }
}
int main(int argc, char* argv[]){
    // Matrix-addition benchmark: computes C = A + B on both CPU and GPU,
    // verifies they agree and reports transfer/kernel timings.
    // Usage: prog <nx> <ny>
    if(argc != 3){
        printf("Error: wrong number of argument\n");
        exit(0);
    }
    int nx = atoi(argv[1]);  // columns
    int ny = atoi(argv[2]);  // rows
    int noElems = nx * ny;
    int bytes = noElems * sizeof(float);
#ifdef DEBUG
    printf("the input row # is %d\n",nx);
    printf("the input col # is %d\n",ny);
    printf("the noElems is %d\n",noElems);
    printf("the bytes is %d\n",bytes);
#endif
    // padding
    // alloc memeory host-side
    float *h_A;
    float *h_B;
    float *h_dC;// = (float*) malloc(bytes); //gpu result
    float *h_hC = (float*) malloc(bytes); // host result
    //float *h_dC;
    // pinned host allocations make the H2D/D2H transfers faster
    cudaHostAlloc((void**)&h_A, bytes, 0);
    cudaHostAlloc((void**)&h_B, bytes, 0);
    cudaHostAlloc((void**)&h_dC, bytes,0);
    // init matrices with random data
    initData(h_A, noElems);
    initData(h_B, noElems);
    //alloc memeory device-side
    float *d_A, *d_B, *d_C;
    cudaMalloc( &d_A, bytes);
    cudaMalloc( &d_B, bytes);
    cudaMalloc( &d_C, bytes);
    // check result
    h_addmat( h_A, h_B, h_hC, nx, ny ) ;
    //computing minimal dimension of block size y according to the spec
    // (keeps the grid y-dimension within the 65535 limit)
    int min_blocky = 1;
    while ((ny + min_blocky-1)/min_blocky > 65535){
        min_blocky ++;
    }
    int block_x, block_y = min_blocky;
    if (nx < 1024){
        // if input nx is smaller than 1024
        block_x = nx;
        while (block_x > 32 && block_x %32 !=0){
            // make the block_x in multiple of 32 (warp size)
            block_x --;
        }
    }
    else{
        block_x = 1024;
    }
    while (block_x * block_y > 1024){
        // check if the total number of thread in a block exceed 1024 or not, if yes, subtract block x by 32
        if (block_x -32 > 0) block_x = block_x - 32;
        else block_x --;
    }
    //printf("the final block size is x = %d and y = %d \n",block_x, block_y);
    //printf("the final grid dimension is x = %d and y = %d \n",(nx + block_x-1)/block_x, (ny + block_y-1)/block_y);
    double timeStampA = getTimeStamp() ;
    //transfer data to dev
    cudaMemcpy( d_A, h_A, bytes, cudaMemcpyHostToDevice) ;
    cudaMemcpy( d_B, h_B, bytes, cudaMemcpyHostToDevice) ;
    // note that the transfers would be twice as fast if h_A and h_B
    double timeStampB = getTimeStamp() ;
    // invoke Kernel
    dim3 block( block_x, min_blocky ) ; // you will want to configure this
    dim3 grid( (nx + block.x-1)/block.x, (ny + block.y-1)/block.y ) ;
#ifdef DEBUG
    printf("the final block size is x = %d and y = %d \n",block.x, block.y);
    printf("the final grid dimension is x = %d and y = %d \n",(nx + block.x-1)/block.x, (ny + block.y-1)/block.y);
#endif
    f_addmat<<<grid, block>>>( d_A, d_B, d_C, nx, ny ) ;
    cudaDeviceSynchronize() ;  // wait for the kernel so the timing is accurate
    double timeStampC = getTimeStamp() ;
    //copy data back
    //printf("before copy back\n");
    cudaMemcpy( h_dC, d_C, bytes, cudaMemcpyDeviceToHost ) ;
    //printf("after copy back\n");
    double timeStampD = getTimeStamp() ;
    // free GPU resources
    cudaFree( d_A ) ; cudaFree( d_B ) ; cudaFree( d_C ) ;
    //cudaDeviceReset() ;
    // h_dC == h+hC???
#ifdef DEBUG
    float *ptr;
    ptr = h_dC;
    int n = 0;
    ptr = ptr + n;
    printf("the data of GPU at index %d before comparison is %.6f\n", n,*(ptr));
#endif
    if (h_compareResult(h_hC,h_dC,noElems) == 1){
        printf("the two results don't match\n");
    }
    else{
        // total, H2D transfer, kernel, D2H transfer times in seconds
        printf("totoal= %.6f CPU_GPU_transfer = %.6f kernel =%.6f GPU_CPU_transfer= %.6f\n",timeStampD - timeStampA,timeStampB - timeStampA, timeStampC - timeStampB, timeStampD - timeStampC );
        //printf("CPU_GPU_transfer_time = %.6f\n",timeStampB - timeStampA );
        //printf("kernel_time = %.6f\n",timeStampC - timeStampB );
        //printf("GPU_CPU_transfer_time = %.6f\n",timeStampD - timeStampC );
    }
    cudaFreeHost(h_A);
    cudaFreeHost(h_B);
    cudaFreeHost(h_dC);
    free(h_hC);
    cudaDeviceReset() ;
}
|
21,662 | /*
* The Game of Life
*
* a cell is born, if it has exactly three neighbours
* a cell dies of loneliness, if it has less than two neighbours
* a cell dies of overcrowding, if it has more than three neighbours
* a cell survives to the next generation, if it does not die of loneliness
* or overcrowding
*
* In this version, a 2D array of ints is used. A 1 cell is on, a 0 cell is off.
* The game plays a number of steps (given by the input), printing to the screen each time. 'x' printed
* means on, space means off.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* strlen in read_file_flat */
#include <math.h>
typedef unsigned char bool_t;
typedef unsigned char cell_t;
#define TILE_SIZE 8
#define KERNEL_SIZE 3
#define SHARED_MEMORY_SIZE (TILE_SIZE + KERNEL_SIZE - 1)
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
    // Report the failing CUDA call's location; exit unless abort is disabled.
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
cell_t *allocate_board_flat(int flat_size, int outer_grid_size) {
    // Allocate the padded board and zero its one-cell border; the interior is
    // left uninitialised and must be filled by the caller (read_file_flat).
    cell_t *board = (cell_t *) malloc(sizeof(cell_t) * flat_size);
    if (board == NULL) {  // fixed: a failed malloc used to crash later
        fprintf(stderr, "Failed to allocate board (%d cells)\n", flat_size);
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < outer_grid_size; ++i) {
        // Fill first row
        board[i] = 0;
        // Fill last row
        board[(outer_grid_size - 1) * outer_grid_size + i] = 0;
        // Fill left column
        board[i * outer_grid_size] = 0;
        // Fill right column
        board[i * outer_grid_size + (outer_grid_size - 1)] = 0;
    }
    return board;
}
__global__ void playKernelSMPitched(const cell_t *d_board, cell_t *d_newboard, size_t pitch, int inner_size, int outer_size) {
    // One Game-of-Life generation over a pitched board with a 1-cell zero
    // border. Each TILE_SIZE x TILE_SIZE block cooperatively stages a
    // SHARED_MEMORY_SIZE^2 halo tile, then each thread applies the rules to
    // its own cell.
    unsigned short bx = blockIdx.x;
    unsigned short by = blockIdx.y;
    unsigned short tx = threadIdx.x;
    unsigned short ty = threadIdx.y;
    // Calculate the row and col for the output array
    unsigned short row_g = by * TILE_SIZE + ty + (KERNEL_SIZE / 2);
    unsigned short col_g = bx * TILE_SIZE + tx + (KERNEL_SIZE / 2);
    __shared__ cell_t neighbors_ds[SHARED_MEMORY_SIZE][SHARED_MEMORY_SIZE];
    unsigned short idx_inner_x = tx + (KERNEL_SIZE / 2);
    unsigned short idx_inner_y = ty + (KERNEL_SIZE / 2);
    unsigned short blockIndex = ty + tx * TILE_SIZE;
    // Using unsigned short reduces the duration of each kernel by ~100 us (~930 us to ~830 us)
    // Cooperative load: the TILE_SIZE^2 threads stripe across the larger tile.
    for (unsigned short incr = blockIndex; incr < SHARED_MEMORY_SIZE * SHARED_MEMORY_SIZE; incr += TILE_SIZE * TILE_SIZE) {
        unsigned short ry = incr % SHARED_MEMORY_SIZE;
        unsigned short rx = incr / SHARED_MEMORY_SIZE;
        unsigned short gy = ry + by * TILE_SIZE;
        unsigned short gx = rx + bx * TILE_SIZE;
        // Required to avoid accessing out of bounds
        if (gy < outer_size && gx < outer_size) {
            neighbors_ds[ry][rx] = d_board[gy * pitch + gx];
        }
    }
    // Required so we don't fill the outer padded grid
    if (row_g > inner_size || col_g > inner_size) {
        return;
    }
    // Sync threads now, no need to wait for the threads that exit
    __syncthreads();
    // Count live neighbours: sum the 3x3 window then subtract the centre.
    unsigned short a = 0;
    for (unsigned short j = 0; j < KERNEL_SIZE; ++j) {
        for (unsigned short i = 0; i < KERNEL_SIZE; ++i) {
            a += neighbors_ds[j + idx_inner_y - (KERNEL_SIZE / 2)][i + idx_inner_x - (KERNEL_SIZE / 2)];
        }
    }
    a -= neighbors_ds[idx_inner_y][idx_inner_x];
    // Conway rules: keep on 2, alive on 3, dead otherwise.
    if (a == 2)
        d_newboard[row_g * pitch + col_g] = neighbors_ds[idx_inner_y][idx_inner_x];
    if (a == 3)
        d_newboard[row_g * pitch + col_g] = 1;
    if (a < 2)
        d_newboard[row_g * pitch + col_g] = 0;
    if (a > 3)
        d_newboard[row_g * pitch + col_g] = 0;
}
/* print the life board */
void print_flat(cell_t *board, int inner_size, int outer_size) {
    // Render only the inner cells ('x' = alive, ' ' = dead), skipping the
    // one-cell halo border on every side.
    const int pad = KERNEL_SIZE / 2;
    for (int row = 0; row < inner_size; row++) {
        for (int col = 0; col < inner_size; col++) {
            printf("%c", board[(row + pad) * outer_size + (col + pad)] ? 'x' : ' ');
        }
        printf("\n");
    }
}
/* read a file into the life board */
void read_file_flat(FILE *f, cell_t *board, int inner_size, int outer_size) {
    // Read inner_size lines of 'x'/' ' cells into the interior of the padded
    // board. Short or missing lines are treated as all-dead cells.
    int i, j;
    size_t len;
    char *s = (char *) malloc(inner_size + 10);
    if (s == NULL) {
        fprintf(stderr, "Failed to allocate line buffer\n");
        exit(EXIT_FAILURE);
    }
    for (j = 0; j < inner_size; j++) {
        /* get a string; fixed: fgets was unchecked, so EOF reused a stale buffer */
        if (fgets(s, inner_size + 10, f) != NULL) {
            size_t sl = strlen(s);
            /* fixed: strlen(s) - 1 underflowed on an empty line */
            len = (sl > 0) ? sl - 1 : 0;
        } else {
            len = 0;
        }
        /* copy the string to the life board */
        for (i = 0; i < inner_size; i++) {
            board[(j + (KERNEL_SIZE / 2)) * outer_size + (i + (KERNEL_SIZE / 2))] = i < len ? s[i] == 'x' : 0;
        }
    }
    free(s);  /* fixed: the line buffer was leaked */
}
int main(int argc, char *argv[]) {
    // Game of Life driver: reads "<size> <steps>" plus the board from stdin,
    // runs `steps` generations on the GPU with pitched memory, prints the
    // final board. Double-buffers by swapping kernel arguments instead of
    // copying between generations.
    // Host variables
    int size, flat_size, steps, i, grid_size, outer_grid_size;
    FILE *f_in;
    cell_t *h_prev;
    bool_t writeOutput = 1, evenSteps;
    size_t pitch;
    // Device variables
    cell_t *d_prev, *d_next;
    f_in = stdin;
    // Read the input file and write its content in the host array
    fscanf(f_in, "%d %d", &size, &steps);
    // Create a border around the grid to avoid dealing with boundary conditions
    outer_grid_size = size + (2 * (KERNEL_SIZE / 2));
    flat_size = outer_grid_size * outer_grid_size;
    evenSteps = steps % 2 == 0;
    h_prev = allocate_board_flat(flat_size, outer_grid_size);
    read_file_flat(f_in, h_prev, size, outer_grid_size);
    fclose(f_in);
    grid_size = int(ceil((float) size / TILE_SIZE));
    dim3 dimGrid(grid_size, grid_size, 1);
    // In our case, a TILE_SIZE of 8 gives the best results, with 16 and 32 being slightly slower
    dim3 dimBlock(TILE_SIZE, TILE_SIZE, 1);
    // Allocate device arrays
    // NOTE(review): `pitch` is overwritten by the second cudaMallocPitch; the
    // code relies on both allocations receiving the same pitch (same
    // width/height) — confirm this assumption holds on the target device.
    gpuErrchk(cudaMallocPitch((void **) &d_prev, &pitch, outer_grid_size * sizeof(cell_t), outer_grid_size));
    gpuErrchk(cudaMallocPitch((void **) &d_next, &pitch, outer_grid_size * sizeof(cell_t), outer_grid_size));
    // Copy the data from the host array to the device array
    gpuErrchk(cudaMemcpy2D(d_prev, pitch,
                           h_prev, outer_grid_size * sizeof(cell_t),
                           outer_grid_size * sizeof(cell_t), outer_grid_size,
                           cudaMemcpyHostToDevice));
    for (i = 0; i < int(ceil((float) steps / 2)); i++) {
        // printf("Step: %d\n", 2 * i);
        // Instead of using cudaMemcpy and a buffer or swapping pointers,
        // run the same kernel with the variables inverted
        playKernelSMPitched<<<dimGrid, dimBlock>>>(d_prev, d_next, pitch, size, outer_grid_size);
        if (evenSteps || (2 * i + 1) < steps) {
            // printf("Step: %d\n", 2 * i + 1);
            playKernelSMPitched<<<dimGrid, dimBlock>>>(d_next, d_prev, pitch, size, outer_grid_size);
        }
    }
    // Copy data back from the device array to the host array
    // (even step count ends in d_prev, odd in d_next)
    gpuErrchk(cudaMemcpy2D(h_prev, outer_grid_size * sizeof(cell_t),
                           evenSteps ? d_prev : d_next, pitch,
                           outer_grid_size * sizeof(cell_t), outer_grid_size,
                           cudaMemcpyDeviceToHost));
    // Deallocate device arrays
    gpuErrchk(cudaFree(d_next));
    gpuErrchk(cudaFree(d_prev));
    if (writeOutput) {
        print_flat(h_prev, size, outer_grid_size);
    }
    free(h_prev);
    return EXIT_SUCCESS;
}
|
21,663 |
/* Includes, system */
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
/* DEVICE CODE */
__global__ void primer_kernel(){
    // Intentionally empty: launched only to exercise a kernel launch.
}
/* HOST CODE*/
int main(int argc, char** argv)
{
    // Probe for CUDA devices (driver API) and launch a trivial kernel.
    int DeviceCount = 0;
    /* Initialize CUDA */
    if (cuInit(0) != 0){
        printf("ERROR de inicializacion\n");
        exit(0);
    }
    cuDeviceGetCount(&DeviceCount);
    if (DeviceCount == 0){
        printf("ERROR ningun dispositivo soporta CUDA\n");
        exit(0);
    }
    primer_kernel<<<1,1,0,0>>>();
    /* fixed: without a synchronize the process could exit before the kernel
       ran, and any launch error went unreported */
    cudaDeviceSynchronize();
    printf("Se dispone de %d unidade(s) GPU.\n",DeviceCount);
    return 0;
}
|
21,664 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define INF 1073741824
#define BLOCK_SZ 16
// small
#define SMALL_M 1024
#define SMALL_N 256
#define SMALL_K 8
#define SMALL_B 64
// middle
#define MIDDLE_M 4096
#define MIDDLE_N 1024
#define MIDDLE_K 16
#define MIDDLE_B 256
// large
#define LARGE_M 16384
#define LARGE_N 4096
#define LARGE_K 32
#define LARGE_B 1024
int m; // nodes
int n; // dimensions
int k; // k-nearest
// input sample file
// input sample file: "<m> <n> <k>" header followed by m*n integers.
// Sets the globals m (nodes), n (dimensions), k (k-nearest) as a side effect.
int* load(const char *input)
{
    FILE *file = fopen(input, "r");
    if (!file) {
        fprintf(stderr, "Error: no such input file \"%s\"\n", input);
        exit(1);
    }
    // load m, n, k — fixed: the header read was unchecked
    if (fscanf(file, "%d%d%d", &m, &n, &k) != 3) {
        fprintf(stderr, "Error: malformed header in \"%s\"\n", input);
        exit(1);
    }
    // allocate memory — fixed: malloc failure was unchecked
    int *data = (int*)malloc(sizeof(int) * m * n);
    if (!data) {
        fprintf(stderr, "Error: out of memory\n");
        exit(1);
    }
    // load data — fixed: each element read was unchecked
    int i;
    for (i = 0; i < m * n; i++) {
        if (fscanf(file, "%d", data + i) != 1) {
            fprintf(stderr, "Error: malformed data in \"%s\"\n", input);
            exit(1);
        }
    }
    fclose(file);
    return data;
}
__global__ void distances_small(int *data, int *dis)
{
    // Pairwise squared Euclidean distances between all SMALL_M points of
    // dimension SMALL_N, tiled through shared memory. The z components of the
    // thread/block index split each BLOCK_SZ tile / SMALL_B grid into quadrants.
    int tx = threadIdx.x + (threadIdx.z / 2) * BLOCK_SZ / 2;
    int ty = threadIdx.y + (threadIdx.z % 2) * BLOCK_SZ / 2;
    int i = BLOCK_SZ * (blockIdx.x + (blockIdx.z / 2) * SMALL_B / 2) + tx;
    int j = BLOCK_SZ * (blockIdx.y + (blockIdx.z % 2) * SMALL_B / 2) + ty;
    __shared__ int matA[BLOCK_SZ][BLOCK_SZ];
    __shared__ int matB[BLOCK_SZ][BLOCK_SZ];
    int tmp1;
    int tmp2 = 0;
    for (int k = 0; k < SMALL_N; k += BLOCK_SZ) {
        // load sub matrix to shared memory (every thread loads, even those
        // that skip the computation, so both barriers are uniform)
        matA[tx][ty] = data[i * SMALL_N + (k + ty)];
        matB[tx][ty] = data[j * SMALL_N + (k + tx)];
        __syncthreads();
        if (i < j) { // compute partial sum
            for (int w = 0; w < BLOCK_SZ; w++) {
                tmp1 = matA[tx][w] - matB[w][ty];
                tmp2 += tmp1 * tmp1;
            }
        }
        __syncthreads();
    }
    // write both symmetric entries; diagonal is INF so a point is never
    // reported as its own neighbour
    if (i < j) {
        dis[i * SMALL_M + j] = dis[j * SMALL_M + i] = tmp2;
    } else if (i == j) {
        dis[i * SMALL_M + j] = INF;
    }
}
__global__ void distances_middle(int *data, int *dis)
{
    // Same tiled pairwise squared-distance computation as distances_small,
    // specialised for the MIDDLE_* problem size macros.
    int tx = threadIdx.x + (threadIdx.z / 2) * BLOCK_SZ / 2;
    int ty = threadIdx.y + (threadIdx.z % 2) * BLOCK_SZ / 2;
    int i = BLOCK_SZ * (blockIdx.x + (blockIdx.z / 2) * MIDDLE_B / 2) + tx;
    int j = BLOCK_SZ * (blockIdx.y + (blockIdx.z % 2) * MIDDLE_B / 2) + ty;
    __shared__ int matA[BLOCK_SZ][BLOCK_SZ];
    __shared__ int matB[BLOCK_SZ][BLOCK_SZ];
    int tmp1;
    int tmp2 = 0;
    for (int k = 0; k < MIDDLE_N; k += BLOCK_SZ) {
        // load sub matrix to shared memory (uniform loads keep the barriers safe)
        matA[tx][ty] = data[i * MIDDLE_N + (k + ty)];
        matB[tx][ty] = data[j * MIDDLE_N + (k + tx)];
        __syncthreads();
        if (i < j) { // compute partial sum
            for (int w = 0; w < BLOCK_SZ; w++) {
                tmp1 = matA[tx][w] - matB[w][ty];
                tmp2 += tmp1 * tmp1;
            }
        }
        __syncthreads();
    }
    // symmetric write; INF on the diagonal
    if (i < j) {
        dis[i * MIDDLE_M + j] = dis[j * MIDDLE_M + i] = tmp2;
    } else if (i == j) {
        dis[i * MIDDLE_M + j] = INF;
    }
}
__global__ void distances_large(int *data, int *dis)
{
    // Same tiled pairwise squared-distance computation as distances_small,
    // specialised for the LARGE_* problem size macros.
    int tx = threadIdx.x + (threadIdx.z / 2) * BLOCK_SZ / 2;
    int ty = threadIdx.y + (threadIdx.z % 2) * BLOCK_SZ / 2;
    int i = BLOCK_SZ * (blockIdx.x + (blockIdx.z / 2) * LARGE_B / 2) + tx;
    int j = BLOCK_SZ * (blockIdx.y + (blockIdx.z % 2) * LARGE_B / 2) + ty;
    __shared__ int matA[BLOCK_SZ][BLOCK_SZ];
    __shared__ int matB[BLOCK_SZ][BLOCK_SZ];
    int tmp1;
    int tmp2 = 0;
    for (int k = 0; k < LARGE_N; k += BLOCK_SZ) {
        // load sub matrix to shared memory (uniform loads keep the barriers safe)
        matA[tx][ty] = data[i * LARGE_N + (k + ty)];
        matB[tx][ty] = data[j * LARGE_N + (k + tx)];
        __syncthreads();
        if (i < j) { // compute partial sum
            for (int w = 0; w < BLOCK_SZ; w++) {
                tmp1 = matA[tx][w] - matB[w][ty];
                tmp2 += tmp1 * tmp1;
            }
        }
        __syncthreads();
    }
    // symmetric write; INF on the diagonal
    if (i < j) {
        dis[i * LARGE_M + j] = dis[j * LARGE_M + i] = tmp2;
    } else if (i == j) {
        dis[i * LARGE_M + j] = INF;
    }
}
__global__ void sort_small(int *dis, int *result)
{
    // One thread per node i: select its SMALL_K nearest neighbours by
    // repeated linear min-scans over its distance row, masking found
    // entries to INF.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int tmp, idx;
    for (int j = 0; j < SMALL_K; j++) { // find j-th nearest neighbor
        tmp = INF;
        idx = i * SMALL_M;  // fixed: idx was read uninitialised if the row is all INF
        for (int l = i * SMALL_M; l < (i + 1) * SMALL_M; l++) {
            if (dis[l] < tmp) {
                tmp = dis[l];
                idx = l;
            }
        }
        result[i * SMALL_K + j] = idx % SMALL_M;
        dis[idx] = INF;
    }
}
__global__ void sort_middle(int *dis, int *result)
{
    // Same k-nearest selection as sort_small, for the MIDDLE_* sizes.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int tmp, idx;
    for (int j = 0; j < MIDDLE_K; j++) { // find j-th nearest neighbor
        tmp = INF;
        idx = i * MIDDLE_M;  // fixed: idx was read uninitialised if the row is all INF
        for (int l = i * MIDDLE_M; l < (i + 1) * MIDDLE_M; l++) {
            if (dis[l] < tmp) {
                tmp = dis[l];
                idx = l;
            }
        }
        result[i * MIDDLE_K + j] = idx % MIDDLE_M;
        dis[idx] = INF;
    }
}
__global__ void sort_large(int *dis, int *result)
{
    // Same k-nearest selection as sort_small, for the LARGE_* sizes.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int tmp, idx;
    for (int j = 0; j < LARGE_K; j++) { // find j-th nearest neighbor
        tmp = INF;
        idx = i * LARGE_M;  // fixed: idx was read uninitialised if the row is all INF
        for (int l = i * LARGE_M; l < (i + 1) * LARGE_M; l++) {
            if (dis[l] < tmp) {
                tmp = dis[l];
                idx = l;
            }
        }
        result[i * LARGE_K + j] = idx % LARGE_M;
        dis[idx] = INF;
    }
}
void knn_small(int *data, int *result, float *timer)
{
    // KNN pipeline for the SMALL_* problem size: upload the data, compute the
    // full distance matrix, select k nearest per node, download the result.
    // Elapsed time in milliseconds is returned through *timer.
    int *d_data, *d_result, *d_dis;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMalloc((void**)&d_data, sizeof(int) * m * n);
    cudaMalloc((void**)&d_result, sizeof(int) * m * k);
    cudaMalloc((void**)&d_dis, sizeof(int) * m * m);
    cudaMemcpy(d_data, data, sizeof(int) * m * n, cudaMemcpyHostToDevice);
    distances_small<<<dim3(SMALL_B / 2, SMALL_B / 2, 4), dim3(BLOCK_SZ / 2, BLOCK_SZ / 2, 4)>>>(d_data, d_dis);
    cudaStreamSynchronize(0);
    sort_small<<<SMALL_B, BLOCK_SZ>>>(d_dis, d_result);
    cudaMemcpy(result, d_result, sizeof(int) * m * k, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(timer, start, stop);
    // fixed: device buffers and timing events were leaked
    cudaFree(d_data);
    cudaFree(d_result);
    cudaFree(d_dis);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
void knn_middle(int *data, int *result, float *timer)
{
    // Same pipeline as knn_small, for the MIDDLE_* problem size.
    int *d_data, *d_result, *d_dis;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMalloc((void**)&d_data, sizeof(int) * m * n);
    cudaMalloc((void**)&d_result, sizeof(int) * m * k);
    cudaMalloc((void**)&d_dis, sizeof(int) * m * m);
    cudaMemcpy(d_data, data, sizeof(int) * m * n, cudaMemcpyHostToDevice);
    distances_middle<<<dim3(MIDDLE_B / 2, MIDDLE_B / 2, 4), dim3(BLOCK_SZ / 2, BLOCK_SZ / 2, 4)>>>(d_data, d_dis);
    cudaStreamSynchronize(0);
    sort_middle<<<MIDDLE_B, BLOCK_SZ>>>(d_dis, d_result);
    cudaMemcpy(result, d_result, sizeof(int) * m * k, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(timer, start, stop);
    // fixed: device buffers and timing events were leaked
    cudaFree(d_data);
    cudaFree(d_result);
    cudaFree(d_dis);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
void knn_large(int *data, int *result, float *timer)
{
    // Same pipeline as knn_small, for the LARGE_* problem size.
    int *d_data, *d_result, *d_dis;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMalloc((void**)&d_data, sizeof(int) * m * n);
    cudaMalloc((void**)&d_result, sizeof(int) * m * k);
    cudaMalloc((void**)&d_dis, sizeof(int) * m * m);
    cudaMemcpy(d_data, data, sizeof(int) * m * n, cudaMemcpyHostToDevice);
    distances_large<<<dim3(LARGE_B / 2, LARGE_B / 2, 4), dim3(BLOCK_SZ / 2, BLOCK_SZ / 2, 4)>>>(d_data, d_dis);
    cudaStreamSynchronize(0);
    sort_large<<<LARGE_B, BLOCK_SZ>>>(d_dis, d_result);
    cudaMemcpy(result, d_result, sizeof(int) * m * k, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(timer, start, stop);
    // fixed: device buffers and timing events were leaked
    cudaFree(d_data);
    cudaFree(d_result);
    cudaFree(d_dis);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
int main(int argc, char **argv)
{
    // Driver: load the dataset, dispatch to the size-specialised KNN
    // pipeline, print each node's k nearest neighbours and the elapsed time.
    if (argc != 2) {
        fprintf(stderr, "Usage: %s input_file\n", argv[0]);
        exit(1);
    }
    // input (sets the globals m, n, k)
    int *data = load(argv[1]);
    int *result = (int*)malloc(sizeof(int) * m * k);
    float timer;
    // the kernels are compile-time specialised, so m must match exactly
    if (m == SMALL_M) {
        knn_small(data, result, &timer);
    } else if (m == MIDDLE_M) {
        knn_middle(data, result, &timer);
    } else if (m == LARGE_M) {
        knn_large(data, result, &timer);
    } else {
        fprintf(stderr, "unsupported m: %d\n", m);
        exit(1);
    }
    // output: one line per node, k neighbour indices each
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < k; j++) {
            printf("%d ", result[i * k + j]);
        }
        printf("\n");
    }
    if (m == SMALL_M) {
        printf("SMALL:%f\n", timer);
    } else if (m == MIDDLE_M) {
        printf("MIDDLE:%f\n", timer);
    } else if (m == LARGE_M) {
        printf("LARGE:%f\n", timer);
    }
    free(data);
    free(result);
    return 0;
}
|
21,665 | //
// This code is based on code from:
// https://en.cppreference.com/w/cpp/algorithm/reduce
// and
//
// This code uses a GPU and thrust to perform a reduction
//
#include <iostream>
#include <chrono>
#include <vector>
#include <numeric>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
int main(int argc,char *argv[])
{
    // Sum n ones on the GPU with thrust::reduce and report the elapsed time.
    if (argc!=2) {
        std::cout << "Usage: sample n " << std::endl;
        return -1;
    }
    const int n = atoi(argv[1]);
    thrust::host_vector<int> host_ones(n,1);
    {
        auto start = std::chrono::high_resolution_clock::now();
        // copy to the device, then reduce there with an initial value of 0
        thrust::device_vector<int> dev = host_ones;
        const int total = thrust::reduce(dev.begin(), dev.end(), 0, thrust::plus<int>());
        auto stop = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double, std::milli> elapsed = stop - start;
        std::cout << std::fixed << "thrust::reduce " << total
                  << " took " << elapsed.count() << " ms\n";
    }
}
|
21,666 | #include "includes.h"
__global__ void initializeElementsTo(int initialValue, int *a, int N)
{
    // Fill kernel: a[i] = initialValue for every in-range global index.
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= N) return;
    a[gid] = initialValue;
}
21,667 | #include <stdio.h>
// Message stored in device memory; each kernel thread prints one character.
__device__ const char *STR = "deNet is ON!\n";
// Length of STR; a host const char, usable in device code as a compile-time constant.
const char STR_LENGTH = 13;
// Kernel: thread t prints character t % STR_LENGTH of STR, one per line.
__global__ void deNet()
{
    const int pos = threadIdx.x % STR_LENGTH;
    printf("%c\n", STR[pos]);
}
// Launch a single block with one thread per character, then wait for the
// device so the printf output is flushed before the process exits.
int main(void)
{
    const int num_blocks = 1;
    const int num_threads = STR_LENGTH;
    deNet<<<num_blocks, num_threads>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
21,668 | #include <stdio.h>
#include <assert.h>
#include <iostream>
#define N 2048 * 2048 // Number of elements in each vector
// Report (but do not abort on) a CUDA runtime error; returns the code
// unchanged so the call can be chained around any CUDA API invocation.
inline cudaError_t checkCuda(cudaError_t result)
{
  if (result == cudaSuccess)
    return result;
  fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
  //assert(result == cudaSuccess);
  return result;
}
// Kernel: c[i] = 2*a[i] + b[i] over all N elements, via a grid-stride loop
// so any launch geometry covers the full range.
__global__ void saxpy(int * a, int * b, int * c)
{
    const int start = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int i = start; i < N; i += step)
        c[i] = 2 * a[i] + b[i];
}
// Host reference implementation: c[i] = 2*a[i] + b[i] for all N elements.
void saxpy_s(int * a, int * b, int * c)
{
    for (int idx = 0; idx < N; ++idx)
        c[idx] = 2 * a[idx] + b[idx];
}
// Fill all N entries of a with the given value.
void init_vector(int value, int *a){
    for (int idx = 0; idx < N; ++idx)
        a[idx] = value;
}
// Driver: allocates three managed vectors, computes c = 2a + b on the GPU,
// and prints the first and last five results as a sanity check.
int main()
{
  int *a, *b, *c;
  const int size = N * sizeof (int); // total bytes per vector
  int deviceId;
  int numberOfSMs;
  cudaGetDevice(&deviceId);
  cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
  // Managed allocations are reachable from both host and device.
  cudaMallocManaged(&a, size);
  cudaMallocManaged(&b, size);
  cudaMallocManaged(&c, size);
  // Prefetch to the CPU ahead of the host-side initialization...
  cudaMemPrefetchAsync(a, size, cudaCpuDeviceId);
  cudaMemPrefetchAsync(b, size, cudaCpuDeviceId);
  cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
  init_vector(2, a);
  init_vector(1, b);
  init_vector(0, c);
  // ...then back to the GPU before the kernel touches the data.
  cudaMemPrefetchAsync(a, size, deviceId);
  cudaMemPrefetchAsync(b, size, deviceId);
  cudaMemPrefetchAsync(c, size, deviceId);
  const int threads_per_block = 256;
  const int number_of_blocks = (N + threads_per_block - 1) / threads_per_block;
  //std::cout << number_of_blocks << std::endl;
  saxpy <<< number_of_blocks, threads_per_block >>> ( a, b, c );
  checkCuda(cudaGetLastError());      // launch-configuration errors
  checkCuda(cudaDeviceSynchronize()); // asynchronous execution errors
  //saxpy_s(a,b,c);
  // Print out the first and last 5 values of c for a quality check
  for (int i = 0; i < 5; ++i)
    printf("c[%d] = %d, ", i, c[i]);
  printf ("\n");
  for (int i = N - 5; i < N; ++i)
    printf("c[%d] = %d, ", i, c[i]);
  printf ("\n");
  checkCuda(cudaFree( a ));
  checkCuda(cudaFree( b ));
  checkCuda(cudaFree( c ));
}
|
21,669 | /* Voxel sampling GPU implementation
* Author Zhaoyu SU
* All Rights Reserved. Sep., 2019.
*/
#include <stdio.h>
#include <iostream>
#include <float.h>
#include <vector>
// Map a flat point id to its batch index via the per-batch prefix offsets in
// accu_list (accu_list[b] is the first point id of batch b). Ids at or beyond
// the final recorded offset fall through to the last batch.
__device__ inline int get_batch_id(int* accu_list, int batch_size, int id) {
    int b = 0;
    while (b < batch_size - 1) {
        if (id >= accu_list[b] && id < accu_list[b + 1])
            return b;
        ++b;
    }
    return batch_size - 1;
}
// Kernel: scatter input points into a dense (batch, w, l, h) voxel grid,
// recording in output_idx the id of the FIRST point to land in each voxel.
// Launch: 1D grid, one thread per input point.
// count_buffer[voxel] counts arrivals via atomicAdd — the caller must zero it
// before the launch for the "first point wins" logic to hold.
// NOTE(review): the quantized grid coordinates are not clamped to
// [0, output_w/l/h), so input_coors outside the grid would index voxel memory
// out of bounds — presumably the op guarantees in-range coordinates; verify.
__global__ void dense_voxelization_idx_gpu_kernel(int batch_size, int input_point_num,
                                                  float resolution_w, float resolution_l, float resolution_h,
                                                  int output_w, int output_l, int output_h,
                                                  const float* input_coors,
                                                  const int* input_num_list,
                                                  int* input_accu_list,
                                                  int* count_buffer,
                                                  int* output_idx) {
    const int output_voxel_size = output_w * output_l * output_h;
    int point_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (point_id < input_point_num) {
        // Quantize the point's (x, y, z) into integer voxel coordinates.
        int center_grid_coor_x = (int)floor(input_coors[point_id*3 + 0] / resolution_w);
        int center_grid_coor_y = (int)floor(input_coors[point_id*3 + 1] / resolution_l);
        int center_grid_coor_z = (int)floor(input_coors[point_id*3 + 2] / resolution_h);
        // Which batch this point belongs to (from the prefix-offset list).
        int batch_id = get_batch_id(input_accu_list, batch_size, point_id);
        // Flattened [batch][x][y][z] voxel index.
        int voxel_idx = batch_id * output_voxel_size + center_grid_coor_x * output_l * output_h + center_grid_coor_y * output_h + center_grid_coor_z;
        // Atomically claim a slot; only the first arrival (count == 0) records its id.
        int count = atomicAdd(&count_buffer[voxel_idx], 1);
        if (count < 1) {
            output_idx[voxel_idx] = point_id;
        }
    }
}
// Kernel: copy the feature vector of each voxel's representative point (the
// one recorded by the idx kernel) into the dense output tensor. Voxels that
// received no points are left untouched.
// Launch: 1D grid, one thread per output voxel.
__global__ void dense_voxelization_features_gpu_kernel(int batch_size, int channels,
                                                       int output_w, int output_l, int output_h,
                                                       const float* input_features,
                                                       float* output_features,
                                                       int* count_buffer,
                                                       int* output_idx) {
    const int total_voxels = batch_size * output_w * output_l * output_h;
    const int voxel_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (voxel_id >= total_voxels)
        return;
    if (count_buffer[voxel_id] <= 0)
        return;
    const int point_id = output_idx[voxel_id];
    for (int c = 0; c < channels; c++) {
        output_features[voxel_id * channels + c] = input_features[point_id * channels + c];
        // output_features[voxel_id * channels + c] = 1.;
    }
}
// Kernel: backward pass — each occupied voxel forwards its per-channel
// gradient to the source point recorded in output_idx.
// Launch: 1D grid, one thread per output voxel.
// NOTE(review): assumes output_idx holds a negative sentinel for empty voxels;
// the forward idx kernel only writes occupied voxels, so confirm the op setup
// initializes output_idx to -1.
__global__ void dense_voxelization_grad_gpu_kernel(int batch_size, int channels,
                                                   int output_w, int output_l, int output_h,
                                                   const float* output_features_grad,
                                                   const int* output_idx,
                                                   float* input_features_grad) {
    int voxel_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (voxel_id < batch_size * output_w * output_l * output_h) {
        int point_id = output_idx[voxel_id];
        if (point_id >= 0) {
            for (int c = 0; c < channels; c++) {
                // BUG FIX: the gradient tensor has the same [voxel, channel] layout
                // as the forward output, so the voxel offset must be scaled by
                // `channels`. The old code read output_features_grad[voxel_id + c],
                // which walked into neighboring voxels' gradients.
                input_features_grad[point_id * channels + c] = output_features_grad[voxel_id * channels + c];
            }
        }
    }
}
// Host launcher for the forward dense-voxelization pass: first scatter point
// ids into voxels, then gather each voxel's representative features.
// Block sizes come from the occupancy calculator, one query per kernel.
void dense_voxelization_gpu_launcher(int batch_size, int input_point_num, int channels,
                                     std::vector<float> resolution, std::vector<int> output_size,
                                     const float* input_coors,
                                     const float* input_features,
                                     const int* input_num_list,
                                     int* input_accu_list,
                                     int* count_buffer,
                                     float* output_features,
                                     int* output_idx) {
    if (batch_size * input_point_num <= 0) {
        printf("BevProjectionOp ERROR: Invalid CUDA input dimensions: [%d, %d]\n", batch_size, input_point_num);
        return;
    }
    const int voxel_count = batch_size * output_size[0] * output_size[1] * output_size[2];
    int blockSize = 0;   // block size suggested by the occupancy calculator
    int minGridSize = 0; // minimum grid for full occupancy (not used directly)
    // Pass 1: one thread per input point.
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dense_voxelization_idx_gpu_kernel, 0, input_point_num);
    int gridSize = (input_point_num + blockSize - 1) / blockSize;
    dense_voxelization_idx_gpu_kernel<<<gridSize, blockSize>>>(batch_size, input_point_num,
                                                               resolution[0], resolution[1], resolution[2],
                                                               output_size[0], output_size[1], output_size[2],
                                                               input_coors,
                                                               input_num_list,
                                                               input_accu_list,
                                                               count_buffer,
                                                               output_idx);
    // Pass 2: one thread per output voxel.
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dense_voxelization_features_gpu_kernel, 0, voxel_count);
    gridSize = (voxel_count + blockSize - 1) / blockSize;
    dense_voxelization_features_gpu_kernel<<<gridSize, blockSize>>>(batch_size, channels,
                                                                    output_size[0], output_size[1], output_size[2],
                                                                    input_features,
                                                                    output_features,
                                                                    count_buffer,
                                                                    output_idx);
}
// Host launcher for the backward pass: one thread per output voxel routes its
// gradient back to the owning point.
void dense_voxelization_grad_gpu_launcher(int batch_size, int channels, std::vector<int> output_size,
                                          const float* output_features_grad,
                                          const int* output_idx,
                                          float* input_features_grad) {
    const int voxel_count = batch_size * output_size[0] * output_size[1] * output_size[2];
    int blockSize = 0;   // block size suggested by the occupancy calculator
    int minGridSize = 0; // minimum grid for full occupancy (not used directly)
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dense_voxelization_grad_gpu_kernel, 0, voxel_count);
    const int gridSize = (voxel_count + blockSize - 1) / blockSize;
    dense_voxelization_grad_gpu_kernel<<<gridSize, blockSize>>>(batch_size, channels,
                                                                output_size[0], output_size[1], output_size[2],
                                                                output_features_grad,
                                                                output_idx,
                                                                input_features_grad);
}
|
21,670 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include "cuda_runtime.h"
#include <chrono>
using namespace std;
using namespace std::chrono;
#define Bsize_addition 256
#define Bsize_minimum 128
// Block-wide sum reduction. Each block loads up to two elements per thread
// into shared memory, reduces them in log2(blockDim.x) steps, and writes its
// partial sum back to d_V[blockIdx.x] for the host to combine.
// Launch: dynamic shared memory of blockDim.x * sizeof(float);
// blockDim.x must be a power of two for the halving loop to cover all slots.
__global__ void reduceSum( float * d_V, int N ) {
    extern __shared__ float sdata[];
    const int tid = threadIdx.x;
    const int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    // BUG FIX: the second element (i + blockDim.x) was read whenever i < N,
    // which runs past the end of d_V when N is not a multiple of
    // 2 * blockDim.x. Guard each of the two loads independently.
    float v = 0.0f;
    if ( i < N )
        v = d_V[i];
    if ( i + blockDim.x < N )
        v += d_V[i + blockDim.x];
    sdata[tid] = v;
    __syncthreads();
    // Tree reduction in shared memory.
    for( int s = blockDim.x/2; s > 0; s >>= 1 ) {
        if( tid < s )
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    // Thread 0 publishes this block's partial sum.
    if( tid == 0 )
        d_V[blockIdx.x] = sdata[0];
}
// Driver: reads N, fills a vector with ones, runs one GPU reduction pass, and
// combines the per-block partial sums on the host (expected result: N).
int main( int argc, char *argv[] ) {
    cout << "////////////////////////////////////" << endl;
    cout << "/// Suma de vector por reducción ///" << endl;
    cout << "////////////////////////////////////" << endl;
    int N;
    cout << endl << "Introduce el número de elementos del vector: ";
    cin >> N;
    // Host and device buffers.
    float *h_V;
    float *d_V;
    h_V = (float*) malloc( N * sizeof( float ) );
    cudaMalloc( (void **) &d_V, sizeof(float) * N );
    // Initialize with ones so the expected sum equals N.
    for( int i = 0; i < N; i++ )
        h_V[i] = (float) 1;
    cudaMemcpy( d_V, h_V, sizeof(float) * N, cudaMemcpyHostToDevice );
    // Each thread consumes two elements, so the grid is sized by the number
    // of element PAIRS. BUG FIX: the old computation used integer N / 2,
    // which dropped the final element whenever N was odd; (N + 1) / 2 rounds
    // the pair count up, and the ceil-division covers a ragged last block.
    dim3 threadsPerBlock( 32 );
    int numPairs = (N + 1) / 2;
    dim3 numBlocks( (numPairs + threadsPerBlock.x - 1) / threadsPerBlock.x, 1 );
    int smemSize = threadsPerBlock.x * sizeof( float );
    // Timing around launch + completion.
    high_resolution_clock::time_point tantes, tdespues;
    duration<double> transcurrido;
    tantes = high_resolution_clock::now();
    reduceSum <<< numBlocks, threadsPerBlock, smemSize >>> ( d_V, N );
    cudaDeviceSynchronize();
    tdespues = high_resolution_clock::now();
    transcurrido = duration_cast<duration<double>> ( tdespues - tantes );
    // Bring back the per-block partial sums and finish on the host.
    cudaMemcpy( h_V, d_V, N * sizeof( float ), cudaMemcpyDeviceToHost );
    // BUG FIX: the accumulator was an int, truncating the float partials;
    // accumulate in float instead.
    float suma = 0;
    for( unsigned int i = 0; i < numBlocks.x; i++ )
        suma += h_V[i];
    cout << endl << "Resultado: " << suma << " / " << h_V[0] << endl;
    cout << "El tiempo empleado es " << transcurrido.count() << " segundos." <<
            endl << endl;
    // FIX: release the buffers (the original leaked both).
    free( h_V );
    cudaFree( d_V );
}
|
21,671 | #include <stdio.h>
#include <stdlib.h>
/*
* In CUDA it is necessary to define block sizes
* The grid of data that will be worked on is divided into blocks
*/
#define BLOCK_SIZE 512
#define gpuErrchk(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
// Print a CUDA error with its source location and, unless abort is false,
// terminate the process with the error code. Used via the gpuErrchk() macro.
inline void gpuAssert(cudaError_t code, const char *file, int line,
                      bool abort = true) {
  if (code == cudaSuccess)
    return;
  fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file,
          line);
  if (abort)
    exit(code);
}
// Kernel: element-wise product result[x] = distance[x] * force[x].
// One thread per element, guarded against the ragged final block.
__global__ void cu_dotProduct(long long *distance_array_d,
                              long long *force_array_d,
                              long long *result_array_d, long long max) {
  const long long idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
  if (idx >= max)
    return;
  result_array_d[idx] = distance_array_d[idx] * force_array_d[idx];
}
// Kernel: fill force_array_d with a triangle profile — values ramp up
// 1..max/2 over the first half and back down over the second half.
// BUG FIX: the original wrote force_array_d[x] unconditionally in the else
// branch, so threads in the final partial block with x >= max wrote past the
// end of the allocation. All writes are now guarded by x < max.
__global__ void cu_gen_force_array(long long *force_array_d, long long max) {
  long long x = blockIdx.x * BLOCK_SIZE + threadIdx.x;
  if (x >= max)
    return;
  long long half_vectors = max / 2;
  if (x < half_vectors) {
    force_array_d[x] = x + 1;
  } else {
    force_array_d[x] = half_vectors + (half_vectors - x);
  }
}
// Kernel: fill distance_array_d with the repeating pattern 1..10.
// BUG FIX: the original had no bounds check at all, so the final partial
// block wrote past the end of the allocation; guard with x < max.
__global__ void cu_gen_distance_array(long long *distance_array_d,
                                      long long max) {
  long long x = blockIdx.x * BLOCK_SIZE + threadIdx.x;
  if (x >= max)
    return;
  distance_array_d[x] = (x + 1) % 10;
  if (distance_array_d[x] == 0) {
    distance_array_d[x] = 10;
  }
}
// Called from driver program. Handles running GPU calculation
extern "C" void gpu_dotProduct(long long *result_array, long long num_vectors) {
long long *distance_array_d;
long long *force_array_d;
long long *result_array_d;
// allocate space in the device
cudaMalloc((void **)&distance_array_d, sizeof(long long) * num_vectors);
cudaMalloc((void **)&force_array_d, sizeof(long long) * num_vectors);
cudaMalloc((void **)&result_array_d, sizeof(long long) * num_vectors);
// set execution configuration
dim3 dimblock(BLOCK_SIZE);
dim3 dimgrid(ceil((long double)num_vectors / BLOCK_SIZE));
cu_gen_force_array<<<dimgrid, dimblock>>>(force_array_d, num_vectors);
cu_gen_distance_array<<<dimgrid, dimblock>>>(distance_array_d, num_vectors);
cu_dotProduct<<<dimgrid, dimblock>>>(distance_array_d, force_array_d,
result_array_d, num_vectors);
// transfer results back to host
cudaMemcpy(result_array, result_array_d, sizeof(long long) * num_vectors,
cudaMemcpyDeviceToHost);
// release the memory on the GPU
cudaFree(distance_array_d);
cudaFree(force_array_d);
cudaFree(result_array_d);
}
|
21,672 | #include <stdio.h>
#include "includes/utils.cuh"
inline int _ConvertSMVer2Cores(int major, int minor);
// Allocate `size` bytes of device memory into *pointer; on failure, print the
// CUDA error string and terminate the process.
void AllocateCudaMem(float **pointer, int size) {
  cudaError_t err = cudaMalloc((void **)pointer, size);
  if (err == cudaSuccess)
    return;
  fprintf(stderr, "Failed to allocate device memory (error code %s)!\n",
          cudaGetErrorString(err));
  exit(EXIT_FAILURE);
}
// Query the current CUDA device and report two sizing figures:
//   *maxThreadsPerBlock     — hardware limit on threads per block
//   *workingThreadsPerBlock — CUDA cores per SM for this architecture
// NOTE(review): despite the name, the second out-parameter receives the
// cores-per-SM value from _ConvertSMVer2Cores, not a per-block thread count —
// confirm that this is what callers expect.
void GetDeviceInfo(int *maxThreadsPerBlock, int *workingThreadsPerBlock) {
  int devid;
  cudaDeviceProp deviceProp;
  cudaGetDevice(&devid);
  cudaGetDeviceProperties(&deviceProp, devid);
  *maxThreadsPerBlock = deviceProp.maxThreadsPerBlock;
  *workingThreadsPerBlock =
      _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
/*
 * Copied & adapted from "helper_cuda.h" in the CUDA samples: map a device's
 * SM version (major.minor) to its number of CUDA cores per SM. Unknown
 * (newer) architectures fall back to the last known table entry.
 */
inline int _ConvertSMVer2Cores(int major, int minor) {
  // Table keyed by 0xMm (hex: M = SM major version, m = SM minor version).
  struct SMtoCores {
    int SM;
    int Cores;
  };
  static const SMtoCores kCoresPerSM[] = {
      {0x30, 192}, // Kepler Generation (SM 3.0) GK10x class
      {0x32, 192}, // Kepler Generation (SM 3.2) GK10x class
      {0x35, 192}, // Kepler Generation (SM 3.5) GK11x class
      {0x37, 192}, // Kepler Generation (SM 3.7) GK21x class
      {0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class
      {0x52, 128}, // Maxwell Generation (SM 5.2) GM20x class
      {0x53, 128}, // Maxwell Generation (SM 5.3) GM20x class
      {0x60, 64},  // Pascal Generation (SM 6.0) GP100 class
      {0x61, 128}, // Pascal Generation (SM 6.1) GP10x class
      {0x62, 128}, // Pascal Generation (SM 6.2) GP10x class
      {0x70, 64},  // Volta Generation (SM 7.0) GV100 class
      {-1, -1}};
  const int key = (major << 4) + minor;
  int i = 0;
  for (; kCoresPerSM[i].SM != -1; ++i) {
    if (kCoresPerSM[i].SM == key)
      return kCoresPerSM[i].Cores;
  }
  // Unknown SM version: reuse the most recent known architecture's count.
  return kCoresPerSM[i - 1].Cores;
}
|
21,673 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Launch configuration: 16 single-thread blocks, so the greeting lines below
// may appear in any order across runs.
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
// Kernel: each thread reports which block it lives in.
__global__ void hello()
{
    printf("Hello world! I'm a thread in block %d\n", blockIdx.x);
}
// Launch the kernel, wait for completion so the device printf output is
// flushed, then print a closing message.
int main()
{
    hello <<< NUM_BLOCKS, BLOCK_WIDTH >>> ();
    cudaDeviceSynchronize();
    printf("That's all!\n");
    return 0;
}
21,674 | const int threadsPerBlock = 256;
extern "C" __global__ void dotProductFloat(const float* A, const float* B, float* C, size_t size)
{
__shared__ float sumBuffer[threadsPerBlock];
int sumBufferIdx = threadIdx.x;
int stride = blockDim.x * gridDim.x;
float strideSum = 0;
for (int cellIdx = blockIdx.x * blockDim.x + threadIdx.x; cellIdx < size; cellIdx += stride)
{
strideSum = strideSum + A[cellIdx] * B[cellIdx];
}
sumBuffer[sumBufferIdx] = strideSum;
__syncthreads();
for (int i = blockDim.x / 2; i != 0; i /= 2)
{
if (sumBufferIdx < i)
{
sumBuffer[sumBufferIdx] += sumBuffer[sumBufferIdx + i];
}
__syncthreads();
}
if (threadIdx.x == 0)
{
C[blockIdx.x] = sumBuffer[0];
}
}
extern "C" __global__ void dotProductFloat2(const float2* A, const float2* B, float* C, size_t size)
{
__shared__ float sumBuffer[threadsPerBlock];
int sumBufferIdx = threadIdx.x;
int stride = blockDim.x * gridDim.x;
float strideSum = 0;
for (int cellIdx = blockIdx.x * blockDim.x + threadIdx.x; cellIdx < size; cellIdx += stride)
{
strideSum += A[cellIdx].x * B[cellIdx].x;
strideSum += A[cellIdx].y * B[cellIdx].y;
}
sumBuffer[sumBufferIdx] = strideSum;
__syncthreads();
for (int i = blockDim.x / 2; i != 0; i /= 2)
{
if (sumBufferIdx < i)
{
sumBuffer[sumBufferIdx] += sumBuffer[sumBufferIdx + i];
}
__syncthreads();
}
if (threadIdx.x == 0)
{
C[blockIdx.x] = sumBuffer[0];
}
}
extern "C" __global__ void dotProductFloat4(const float4 * A, const float4 * B, float* C, size_t size)
{
__shared__ float sumBuffer[threadsPerBlock];
int sumBufferIdx = threadIdx.x;
int stride = blockDim.x * gridDim.x;
float strideSum = 0;
for (int cellIdx = blockIdx.x * blockDim.x + threadIdx.x; cellIdx < size; cellIdx += stride)
{
strideSum += A[cellIdx].x * B[cellIdx].x;
strideSum += A[cellIdx].y * B[cellIdx].y;
strideSum += A[cellIdx].z * B[cellIdx].z;
strideSum += A[cellIdx].w * B[cellIdx].w;
}
sumBuffer[sumBufferIdx] = strideSum;
__syncthreads();
for (int i = blockDim.x / 2; i != 0; i /= 2)
{
if (sumBufferIdx < i)
{
sumBuffer[sumBufferIdx] += sumBuffer[sumBufferIdx + i];
}
__syncthreads();
}
if (threadIdx.x == 0)
{
C[blockIdx.x] = sumBuffer[0];
}
} |
21,675 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
// 2-D position; used for both the fixed beacon locations and trail samples.
struct point
{
    float x;
    float y;
};
// Distances from one trail sample to the three beacons a, b, c.
struct dist
{
    float da;
    float db;
    float dc;
};
// Euclidean length of the vector (fx, fy).
// FIX: use hypotf, which avoids the intermediate overflow/underflow of the
// naive sqrt(fx*fx + fy*fy) form and stays in single precision (the original
// called the double-precision sqrt on a float expression).
float eucli(float fx, float fy)
{
    return hypotf(fx, fy);
}
// Kernel: trilaterate each trail sample from its distances to the three fixed
// beacons a, b, c, writing the recovered (x, y) into d_trail.
// BUG FIX: the original looped EVERY thread over all NUM samples, so each of
// the launched threads redundantly recomputed and rewrote the entire output.
// The loop is now grid-strided so the samples are partitioned across threads;
// the computed values are identical.
__global__ void trilaterate(struct point a, struct point b, struct point c, struct dist *d_set, struct point *d_trail, int NUM)
{
    // Beacon coordinates and their squares are loop-invariant.
    float a1Sq = a.x * a.x, a2Sq = b.x * b.x, a3Sq = c.x * c.x, b1Sq = a.y * a.y, b2Sq = b.y * b.y, b3Sq = c.y * c.y;
    float r1Sq, r2Sq, r3Sq, denom1, numer1, denom2, numer2;
    float a1 = a.x, a2 = b.x, a3 = c.x, b1 = a.y, b2 = b.y, b3 = c.y;
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < NUM; i += stride)
    {
        r1Sq = d_set[i].da * d_set[i].da;
        r2Sq = d_set[i].db * d_set[i].db;
        r3Sq = d_set[i].dc * d_set[i].dc;
        // Closed-form 2-D trilateration: solve for y first, then x.
        numer1 = (a2 - a1) * (a3Sq + b3Sq - r3Sq) + (a1 - a3) * (a2Sq + b2Sq - r2Sq) + (a3 - a2) * (a1Sq + b1Sq - r1Sq);
        denom1 = 2 * (b3 * (a2 - a1) + b2 * (a1 - a3) + b1 * (a3 - a2));
        d_trail[i].y = numer1/denom1;
        numer2 = r2Sq - r1Sq + a1Sq - a2Sq + b1Sq - b2Sq - 2 * (b1 - b2) * d_trail[i].y;
        denom2 = 2 * (a1 - a2);
        d_trail[i].x = numer2/denom2;
    }
}
// Driver: builds a random walk of NUM trail points, precomputes each point's
// distances to three fixed beacons, recovers the points on the GPU via
// trilateration, and prints 4-sample averages of the original and recovered
// trails plus the elapsed time.
int main(int argc, char *argv[])
{
    cudaEvent_t start, stop;
    float etime;
    int i, j=0;
    float fx, fy, gx, gy, z = 5.0;
    int NUM;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    // Expect exactly one argument: the number of trail points.
    if (argc != 2)
    {
        printf("Check you arguments!\n");
        exit(1);
    }
    // Fixed beacon positions.
    struct point a, b, c;
    a.x = 1.67; a.y = 2.58;
    b.x = 3.74; b.y = 2.08;
    c.x = 5.12; c.y = 3.95;
    struct point init;
    init.x = 3.12;
    init.y = 4.27;
    NUM = atoi(argv[1]);
    struct point trail[NUM], avg_trail[(NUM/4)], ret_avg_trail[(NUM/4)];
    struct point *d_trail, *h_trail;
    trail[0] = init;
    // Random walk: each step adds a random offset in [0, z) (rounded to two
    // decimals) to the previous point.
    srand(time(NULL));
    for(i=1; i<NUM; i++)
    {
        gx = ((float)rand()/(float)(RAND_MAX)) * z;
        gx = floorf(gx * 100) / 100;
        gy = ((float)rand()/(float)(RAND_MAX)) * z;
        gy = floorf(gy * 100) / 100;
        trail[i].x = (floorf(trail[i-1].x * 100 + 0.5) / 100) + gx;
        trail[i].y = (floorf(trail[i-1].y * 100 + 0.5) / 100) + gy;
    }
    // Average the host-side trail in groups of four for later comparison.
    for(i=0; i<(NUM/4); i++)
    {
        avg_trail[i].x = (trail[j].x + trail[j+1].x + trail[j+2].x + trail[j+3].x) / 4;
        avg_trail[i].y = (trail[j].y + trail[j+1].y + trail[j+2].y + trail[j+3].y) / 4;
        j += 4;
    }
    printf("\nAvg. Random Trail at Host\n");
    for(i=0; i<(NUM/4); i++)
    {
        printf("(%f, %f)\n", avg_trail[i].x, avg_trail[i].y);
    }
    // Distances from every trail point to the three beacons (kernel input).
    struct dist *set;
    size_t size = NUM * sizeof(struct dist);
    set = (struct dist *)malloc(size);
    size_t sz = NUM * sizeof(struct point);
    h_trail = (struct point *)malloc(sz);
    for(i=0; i<NUM; i++)
    {
        fx = trail[i].x - a.x;
        fy = trail[i].y - a.y;
        set[i].da = eucli(fx, fy);
        fx = trail[i].x - b.x;
        fy = trail[i].y - b.y;
        set[i].db = eucli(fx, fy);
        fx = trail[i].x - c.x;
        fy = trail[i].y - c.y;
        set[i].dc = eucli(fx, fy);
    }
    struct dist *d_set;
    cudaMalloc((void **) &d_set, size);
    cudaMalloc((void **) &d_trail, sz);
    cudaMemcpy(d_set, set, sizeof(struct dist)*NUM, cudaMemcpyHostToDevice);
    // Launch geometry from device properties: one block per SM, one warp each.
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, 0);
    int nBlocks = devProp.multiProcessorCount;
    int blockSize = devProp.warpSize;
    printf("\nU: %d\n", nBlocks);
    printf("\nV: %d\n", blockSize);
    trilaterate <<< nBlocks, blockSize >>> (a, b, c, d_set, d_trail, NUM);
    // The blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_trail, d_trail, sizeof(struct point)*NUM, cudaMemcpyDeviceToHost);
    // Average the device-recovered trail in the same groups of four.
    j=0;
    for(i=0; i<(NUM/4); i++)
    {
        ret_avg_trail[i].x = (h_trail[j].x + h_trail[j+1].x + h_trail[j+2].x + h_trail[j+3].x) / 4;
        ret_avg_trail[i].y = (h_trail[j].y + h_trail[j+1].y + h_trail[j+2].y + h_trail[j+3].y) / 4;
        j += 4;
    }
    printf("\nAvg. Generated Trail at Device\n");
    for(i=0; i<(NUM/4); i++)
    {
        printf("(%f, %f)\n", ret_avg_trail[i].x, ret_avg_trail[i].y);
    }
    printf("\n");
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&etime, start, stop);
    printf("Time elapsed: %f ms\n", etime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(set);
    cudaFree(d_set);
    cudaFree(d_trail);
    // BUG FIX: h_trail was allocated with malloc() but released with
    // cudaFree(), which is invalid for host pointers; use free().
    free(h_trail);
    return 0;
}
|
21,676 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <cuda_runtime.h>
// Compare host and GPU result arrays element-wise against a small absolute
// tolerance; print the first mismatch, or a success message if all agree.
// BUG FIX: the original used abs() on the float difference. With only the C
// headers this file includes, that resolves to the integer abs(), truncating
// the difference toward zero and silently accepting any mismatch smaller
// than 1.0. fabs() performs the intended floating-point comparison
// (requires <math.h>, added to this file's includes).
void checkResult(float *hostRef, float *gpuRef, const int N){
    double epsilon = 1.0E-8;
    bool match = 1;
    for (int i = 0; i < N; ++i) {
        if (fabs(hostRef[i] - gpuRef[i]) > epsilon){
            match = 0;
            printf("Array do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
            break;
        }
    }
    if (match){
        printf("Array match.\n\n");
    }
}
// Fill ip[0..size) with pseudo-random floats in [0, 25.5], re-seeding the
// generator from the current time (runs are therefore not reproducible).
void initialData(float *ip, int size){
    time_t now;
    srand((unsigned) time(&now));
    for (int k = 0; k < size; ++k)
        ip[k] = (float)(rand() & 0xFF) / 10.0f;
}
// Fill ip[0..size) with its own indices: ip[i] = i.
void initialInt(int *ip, int size){
    for (int k = 0; k < size; ++k)
        ip[k] = k;
}
// Host reference: element-wise C = A + B over N floats.
void sumArraysOnHost(float *A, float *B, float *C, const int N){
    for (int k = 0; k < N; ++k)
        C[k] = A[k] + B[k];
}
// Kernel: element-wise C = A + B; one thread per element with a tail guard.
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
// Print an ny-by-nx integer matrix, one row per line, %3d-width cells.
void printMatrix(int *C, const int nx, const int ny){
    printf("\nMatrix: (%d,%d)\n", nx, ny);
    int *row = C;
    for (int r = 0; r < ny; ++r) {
        for (int col = 0; col < nx; ++col)
            printf("%3d", row[col]);
        printf("\n");
        row += nx;
    }
    printf("\n");
}
// Debug kernel: each thread of a 2D launch prints its thread/block indices,
// its global (ix, iy) coordinate, and the matrix value stored there.
__global__ void printThreadIndex(int *A, const int nx, const int ny){
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    const unsigned int idx = iy * nx + ix;
    printf("thread_id (%d,%d) block_id (%d,%d) coordinate(%d,%d)"
           "global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x,
           blockIdx.y, ix, iy, idx, A[idx]);
}
// Host reference: element-wise sum of two nx-by-ny matrices, row by row.
void sumMatrixOnHost2D(float *A, float *B, float *C, const int nx, const int ny){
    for (int iy = 0; iy < ny; ++iy) {
        const int rowStart = iy * nx;
        for (int ix = 0; ix < nx; ++ix)
            C[rowStart + ix] = A[rowStart + ix] + B[rowStart + ix];
    }
}
// Kernel: element-wise sum of two nx-by-ny matrices. 2D launch, one thread
// per element, guarded on both axes for ragged edge blocks.
__global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, int nx, int ny){
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= nx || iy >= ny)
        return;
    const int idx = iy * nx + ix;
    C[idx] = A[idx] + B[idx];
}
// Kernel: initialize A[idx] = idx for an nx-by-ny matrix (2D launch, both
// axes guarded).
__global__ void initialDataOnGPU(float *A, int nx, int ny){
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= nx || iy >= ny)
        return;
    const int idx = iy * nx + ix;
    A[idx] = idx;
}
// Driver: initializes two 16384x16384 float matrices on the GPU, sums them
// with a (16,16)-block 2D kernel, repeats the sum on the host, and compares
// the two results with checkResult.
int main() {
    // Select device 0 explicitly.
    int dev = 0;
    cudaSetDevice(dev);
    // Matrix dimensions: 2^14 x 2^14 (~268M elements, ~1 GiB per float matrix).
    int nx = 1<<14;
    int ny = 1<<14;
    int nxy = nx * ny;
    printf("Vector size %d\n", nxy);
    size_t nBytes = nxy * sizeof(float);
    // Host buffers: inputs plus one reference result per side.
    float *h_A, *h_B, *hostRef, *gpuRef;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);
    // memset(hostRef, 0, nBytes);
    // memset(gpuRef, 0, nBytes);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, nBytes);
    cudaMalloc((void **)&d_B, nBytes);
    cudaMalloc((void **)&d_C, nBytes);
    // 2D launch: 16x16 threads per block, ceil-divided grid on both axes.
    int dimx = 16;
    int dimy = 16;
    dim3 block(dimx, dimy);
    dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y);
    double iStart, iElaps;
    iStart = cpuSecond();
    // initialData(h_A, nxy);
    // initialData(h_B, nxy);
    // Initialize the inputs directly on the device (A[idx] = B[idx] = idx).
    // NOTE(review): this timing covers only the asynchronous launches; the
    // following cudaMemcpy calls are what actually wait for completion.
    initialDataOnGPU<<<grid, block>>>(d_A, nx, ny);
    initialDataOnGPU<<<grid, block>>>(d_B, nx, ny);
    iElaps = cpuSecond() - iStart;
    printf("initData use %.6f\n", iElaps);
    // Copy the device-initialized inputs back so the host sum uses the same data.
    cudaMemcpy(h_A, d_A, nBytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_B, d_B, nBytes, cudaMemcpyDeviceToHost);
    iStart = cpuSecond();
    sumMatrixOnGPU2D<<<grid, block>>>(d_A, d_B, d_C, nx, ny);
    // Synchronize so the timing below covers the full kernel execution.
    cudaDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("Execution configuration <<<(%d,%d), (%d,%d)>>>\n", grid.x, grid.y, block.x, block.y);
    printf("GPU sum use %.6f\n", iElaps);
    cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
    // Host-side reference sum over the same inputs, timed for comparison.
    iStart = cpuSecond();
    sumMatrixOnHost2D(h_A, h_B, hostRef, nx, ny);
    iElaps = cpuSecond() - iStart;
    printf("Host sum use %.3f\n", iElaps);
    // Element-wise comparison of the host and device results.
    checkResult(hostRef, gpuRef, nxy);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
    return 0;
}
21,677 | #include <iostream>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
using namespace std;
#define ARRAY_SIZE 128
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))
/* Define const pointer parameters (since a const pointer's value cannot change, each must be initialized at declaration) */
// Kernel: record, for every launched thread, its block index, thread index,
// warp index within the block, and flattened global thread id.
// Expects each output array to hold one slot per launched thread.
__global__ void what_is_my_id(unsigned int * const block,
                              unsigned int * const thread,
                              unsigned int * const warp,
                              unsigned int * const calc_thread)
{
    /* Global thread id = block index * block size + offset within the block */
    const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    block[tid] = blockIdx.x;
    thread[tid] = threadIdx.x;
    /* Warp index derived from the built-in warpSize */
    warp[tid] = threadIdx.x / warpSize;
    calc_thread[tid] = tid;
}
/* Host-side destination arrays, one slot per launched thread
 * (ARRAY_SIZE = 128 = 2 blocks * 64 threads, matching the launch in main) */
unsigned int cpu_block[ARRAY_SIZE];
unsigned int cpu_thread[ARRAY_SIZE];
unsigned int cpu_warp[ARRAY_SIZE];
unsigned int cpu_calc_thread[ARRAY_SIZE];
// Launch what_is_my_id over 2 blocks of 64 threads, copy the four id arrays
// back to the host, release the device buffers, and print one line per thread.
int main(void)
{
    /* Total thread count = 2 * 64 = 128 = ARRAY_SIZE */
    const unsigned int num_blocks = 2;
    const unsigned int num_threads = 64;
    // Device-side output arrays.
    unsigned int * gpu_block;
    unsigned int * gpu_thread;
    unsigned int * gpu_warp;
    unsigned int * gpu_cal_thread;
    cudaMalloc((void **)&gpu_block, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void **)&gpu_thread, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void **)&gpu_warp, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void **)&gpu_cal_thread, ARRAY_SIZE_IN_BYTES);
    what_is_my_id<<<num_blocks, num_threads>>>(gpu_block, gpu_thread, gpu_warp, gpu_cal_thread);
    // The blocking copies below also synchronize with the kernel.
    cudaMemcpy(cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(cpu_warp, gpu_warp, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(cpu_calc_thread, gpu_cal_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
    cudaFree(gpu_block);
    cudaFree(gpu_thread);
    cudaFree(gpu_warp);
    cudaFree(gpu_cal_thread);
    // Report every thread's recorded identity.
    for (unsigned int i = 0; i < ARRAY_SIZE; i++)
    {
        printf("Calculated Thread: %d - Block: %d - Warp %d - Thread %d \n",
               cpu_calc_thread[i], cpu_block[i], cpu_warp[i], cpu_thread[i]);
    }
}
|
21,678 | __global__ void kern() {
// do nothing
}
// Launch a single thread of the no-op kernel, then exit immediately.
int main() {
    kern <<< 1, 1 >>> ();
    return 0;
}
21,679 | /*
*
* CUDA Example
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <iostream>
#include <cuda_runtime.h>
using namespace std;
// Host reference: add 5.0f to each of the first N elements of a.
void incrementArrayOnHost(float *a, int N)
{
    for (int idx = 0; idx < N; ++idx)
        a[idx] += 5.f;
}
// Kernel: add 5.0f to each of the first N elements of a.
// BUG FIX: the original ignored its N parameter and wrote a[idx]
// unconditionally, so any launch whose total thread count exceeded N wrote
// past the end of the array. Guard the access with idx < N.
__global__ void incrementArrayOnDevice(float *a, int N)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < N)
        a[idx] = a[idx]+5.f;
}
// Driver: increments an array of 1024 floats by 5 on both host and device and
// asserts that the two results agree element-wise.
int main(void)
{
    float *a_h, *b_h; // pointers to host memory
    float *a_d;       // pointer to device memory
    int i, N = 1024;
    size_t size = N*sizeof(float);
    // allocate arrays on host
    a_h = (float *)malloc(size);
    b_h = (float *)malloc(size);
    cudaSetDevice(0);
    // allocate array on device
    if (cudaMalloc((void **) &a_d, size) != cudaSuccess)
        cout << "error in cudaMalloc" << endl;
    // initialization of host data
    for (i=0; i<N; i++) a_h[i] = (float)i;
    // copy data from host to device
    if (cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice) != cudaSuccess)
        cout << "error in cudaMemcpy" << endl;
    // do calculation on host
    incrementArrayOnHost(a_h, N);
    // do calculation on device: 2 blocks of N/2 threads cover all N elements
    incrementArrayOnDevice <<< 2, N/2 >>> (a_d, N);
    // FIX: cudaThreadSynchronize() is long deprecated; use the equivalent
    // cudaDeviceSynchronize() instead.
    cudaDeviceSynchronize();
    // Retrieve result from device and store in b_h
    if (cudaMemcpy(b_h, a_d, size, cudaMemcpyDeviceToHost) != cudaSuccess)
        cout << "error in cudaMemcpy" << endl;
    // check results: host and device must agree exactly
    for (i=0; i<N; i++) assert(a_h[i] == b_h[i]);
    // cleanup
    free(a_h);
    free(b_h);
    cudaFree(a_d);
    return 0;
}
|
21,680 | #include "includes.h"
// Kernel: add each element's flattened index to it, x[idx] += idx.
// The flat index is computed from a 2D launch as iy * NUM_OF_X_THREADS + ix.
// NOTE(review): NUM_OF_X_THREADS comes from includes.h and must equal the
// launch's total thread count along x (gridDim.x * blockDim.x) for the
// flattening to be collision-free — confirm at the call site.
__global__ void inputKernel(float *x, int N)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = iy * NUM_OF_X_THREADS + ix;
    // Tail guard: only the first N flattened slots are touched.
    if (idx < N)
        x[idx] = x[idx] + (float)idx;
}
21,681 | #include <iostream>
#include <vector>
#include <cmath>
#include <chrono>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
// #if __CUDA_ARCH__ < 600
// __device__ double atomicAdd(double* address, double val)
// {
// unsigned long long int* address_as_ull =
// (unsigned long long int*)address;
// unsigned long long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(address_as_ull, assumed,
// __double_as_longlong(val +
// __longlong_as_double(assumed)));
// // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
// } while (assumed != old);
// return __longlong_as_double(old);
// }
// #endif
#define CUDA_CALL( call ) \
{ \
cudaError_t err = call; \
if ( cudaSuccess != err) \
fprintf(stderr, "CUDA error for %s in %d of %s : %s.\n", #call , __LINE__ , __FILE__ ,cudaGetErrorString(err));\
}
// __global__ void print_GPU(int* x){
// printf("[GPU] (int) = %d\n", *x);
// }
// Kernel: atomically accumulate every element of b[0..N) into the scalar *a.
// Uses the hardware double-precision atomicAdd (requires SM60+; a CAS-based
// fallback for older devices is kept commented out earlier in this file).
__global__ void vectorAdd(double* a, double* b, size_t N)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if ( tid >= N )
        return;
    atomicAdd(a, b[tid]);
}
// Driver: sums a vector of ten 1.0s into a single device scalar with
// vectorAdd and prints the result (expected output: 10).
int main()
{
    size_t N = 10;
    double a = 0;
    vector<double> b(N,1);
    double* d_a;
    double* d_b;
    CUDA_CALL( cudaMalloc( (void**)&d_a, sizeof(double) ) );
    CUDA_CALL( cudaMalloc( (void**)&d_b, sizeof(double) * N ) );
    CUDA_CALL( cudaMemcpy( d_a, &a, sizeof(double), cudaMemcpyHostToDevice ) );
    CUDA_CALL( cudaMemcpy( d_b, &b[0], sizeof(double) * N, cudaMemcpyHostToDevice ) );
    vectorAdd<<<1,10>>>(d_a, d_b, N);
    // The blocking copy below synchronizes with the kernel before a is read.
    CUDA_CALL( cudaMemcpy( &a, d_a, sizeof(double), cudaMemcpyDeviceToHost ) );
    cout << a << endl;
    // FIX: check the frees like every other call (they were bare), and drop
    // the trailing cudaDeviceSynchronize() that ran AFTER the frees, where it
    // no longer guarded any outstanding work.
    CUDA_CALL( cudaFree( d_a ) );
    CUDA_CALL( cudaFree( d_b ) );
}
|
21,682 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// input: features (n,c), idx (n,3), weight (n,3)
// output: probs (n,c)
// Kernel: weighted k-NN feature interpolation.
// For sample i and channel j, accumulate
//   sum_k features[idx[i,k], j] * weight[i,k]
// into probs[i, j]. Note the "+=": probs must be pre-initialized by the
// caller, and repeated launches keep accumulating.
// Launch mapping: blockIdx.x strides over samples, threadIdx.x over channels.
__global__ void interpolate_kernel(int n, int c, int k_num, float *features, float *weight, int *idx, float *probs) {
    for (int sample = blockIdx.x; sample < n; sample += gridDim.x) {
        for (int ch = threadIdx.x; ch < c; ch += blockDim.x) {
            float acc = 0;
            for (int k = 0; k < k_num; k++) {
                const int neighbor = idx[sample*k_num + k];
                const float w = weight[sample*k_num + k];
                acc += features[neighbor*c + ch] * w;
            }
            probs[sample*c + ch] += acc;
            //printf("%f\t", tmp_prob);
        }
    }
}
extern "C" void interpolateLauncher(int n_host, int m_host, int c_host, float *features_host, int *idx_host, float *weight_host, float *probs_host){
//int *n_dev, *c_dev;
float *weight, *features, *probs;
int *idx;
cudaError_t error;
cudaMalloc((void**)&weight, sizeof(float)* n_host*3);
cudaMalloc((void**)&idx, sizeof(int)* n_host*3);
cudaMalloc((void**)&features, sizeof(float)* m_host*c_host);
cudaMalloc((void**)&probs, sizeof(float)* n_host*c_host);
cudaMemcpy(weight, weight_host, sizeof(float)* n_host*3, cudaMemcpyHostToDevice);
cudaMemcpy(idx, idx_host, sizeof(int)* n_host*3, cudaMemcpyHostToDevice);
cudaMemcpy(features, features_host, sizeof(float)* m_host*c_host, cudaMemcpyHostToDevice);
cudaMemcpy(probs, probs_host, sizeof(float)*n_host*c_host, cudaMemcpyHostToDevice);
dim3 grid(32768, 1, 1), block(c_host, 3, 1);
interpolate_kernel<<<grid, block>>>(n_host, c_host, 3, features, weight, idx, probs);
error = cudaDeviceSynchronize();
if(error != cudaSuccess){
printf("code: %d, reason: %s\n",error,cudaGetErrorString(error));
}
cudaMemcpy(probs_host, probs, sizeof(float)*n_host*c_host, cudaMemcpyDeviceToHost);
cudaFree(weight);
cudaFree(features);
cudaFree(probs);
cudaFree(idx);
}
// Iterative label filtering: runs interpolate_kernel iter_num times, each
// pass accumulating the k-NN weighted feature sums into probs on the device.
// Device buffers are allocated once, reused across all iterations and freed
// at the end.  Requires c_host <= 1024 (one thread per channel per block).
extern "C" void filterLauncher(int n_host, int c_host, int k_num, int iter_num, float *features_host, int *idx_host, float *weight_host, float *probs_host){
    float *d_weight, *d_features, *d_probs;
    int *d_idx;
    const size_t weightBytes  = sizeof(float) * n_host * k_num;
    const size_t idxBytes     = sizeof(int)   * n_host * k_num;
    const size_t featureBytes = sizeof(float) * n_host * c_host;
    const size_t probBytes    = sizeof(float) * n_host * c_host;
    cudaMalloc((void**)&d_weight, weightBytes);
    cudaMalloc((void**)&d_idx, idxBytes);
    cudaMalloc((void**)&d_features, featureBytes);
    cudaMalloc((void**)&d_probs, probBytes);
    cudaMemcpy(d_weight, weight_host, weightBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_idx, idx_host, idxBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_features, features_host, featureBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_probs, probs_host, probBytes, cudaMemcpyHostToDevice);
    dim3 grid(32768, 1, 1), block(c_host, 1, 1);
    for (int iter = 0; iter < iter_num; iter++) {
        interpolate_kernel<<<grid, block>>>(n_host, c_host, k_num, d_features, d_weight, d_idx, d_probs);
        cudaError_t status = cudaDeviceSynchronize();
        if (status != cudaSuccess) {
            printf("code: %d, reason: %s\n", status, cudaGetErrorString(status));
        }
    }
    cudaMemcpy(probs_host, d_probs, probBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_weight);
    cudaFree(d_features);
    cudaFree(d_probs);
    cudaFree(d_idx);
}
|
21,683 | // Compile: nvcc -o kmeans_gpu kmeans_gpu.cu
#include <math.h>
#include <stdlib.h>
#include <malloc.h>
#include <ctype.h>
#include <sys/time.h>
#include <time.h>
#define BLOCK_SIZE 16   // threads per block dimension (16x16 = 256 threads/block)
#define GRID_SIZE 256   // maximum blocks per grid dimension
#define uchar unsigned char
__constant__ int d_k;          // number of clusters; set via cudaMemcpyToSymbol
__constant__ int d_pixelCount; // total pixels (width*height); set via cudaMemcpyToSymbol
// Assigns every pixel to its nearest centroid in RGB space.
// One thread per pixel (flat 2-D grid of 2-D blocks).
// FIX: the original computed sqrtf(powf(...)) in floats; squared integer
// distance gives the same argmin (sqrt is monotonic) with exact arithmetic,
// avoiding both transcendental cost and float-rounding misassignments.
// Also initializes `index` so d_k == 0 cannot store an indeterminate value.
__global__ void assignClusters(uchar *d_imageR, uchar *d_imageG, uchar *d_imageB, int *d_assignedClusters,
uchar *d_clusterR, uchar *d_clusterG, uchar *d_clusterB){
    int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
    if (threadID < d_pixelCount){
        int best = 0;   // squared distance of the current best cluster
        int index = 0;
        for (int i = 0; i < d_k; i++){
            // uchar operands promote to int, so the squares are exact.
            int dr = d_imageR[threadID] - d_clusterR[i];
            int dg = d_imageG[threadID] - d_clusterG[i];
            int db = d_imageB[threadID] - d_clusterB[i];
            int dist = dr * dr + dg * dg + db * db;
            // Strict '<' keeps the first of tied clusters, like the original.
            if (dist < best || i == 0){
                best = dist;
                index = i;
            }
        }
        d_assignedClusters[threadID] = index;
    }
}
// Accumulates, per cluster, the sums of the R/G/B channels and the member
// pixel count.  Atomics are required because many pixels map to one cluster.
__global__ void sumClusters(uchar *d_imageR, uchar *d_imageG, uchar *d_imageB, int *d_assignedClusters,
int *d_sumR, int *d_sumG, int *d_sumB, int *d_clusterSize){
    int tid = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
    if (tid >= d_pixelCount)
        return;
    int cluster = d_assignedClusters[tid];
    atomicAdd(&d_sumR[cluster], (int)d_imageR[tid]);
    atomicAdd(&d_sumG[cluster], (int)d_imageG[tid]);
    atomicAdd(&d_sumB[cluster], (int)d_imageB[tid]);
    atomicAdd(&d_clusterSize[cluster], 1);
}
// Resets the per-cluster accumulators before a summing pass.
// Intended for a single-block launch: thread t clears cluster t's slots,
// so the block must contain at least d_k threads.
__global__ void clearClusterInfo(int *d_sumR, int *d_sumG, int *d_sumB, int *d_clusterSize){
    int tid = threadIdx.x + threadIdx.y * blockDim.x;
    if (tid >= d_k)
        return;
    d_sumR[tid] = 0;
    d_sumG[tid] = 0;
    d_sumB[tid] = 0;
    d_clusterSize[tid] = 0;
}
// Recomputes each cluster's centroid as the integer mean of its members.
// Single-block launch; thread t handles cluster t.  An empty cluster keeps a
// divisor of 1; since its sums were cleared to 0, its centroid becomes (0,0,0).
__global__ void calculateCentroids(uchar *d_clusterR, uchar *d_clusterG, uchar *d_clusterB,
int *d_sumR, int *d_sumG, int *d_sumB, int *d_clusterSize){
    int tid = threadIdx.x + threadIdx.y * blockDim.x;
    if (tid < d_k) {
        int members = d_clusterSize[tid];
        if (members == 0)
            members = 1;   // avoid division by zero for empty clusters
        d_clusterR[tid] = d_sumR[tid] / members;
        d_clusterG[tid] = d_sumG[tid] / members;
        d_clusterB[tid] = d_sumB[tid] / members;
    }
}
/* Prints "Error: <message>" to stderr and terminates the process with exit
 * status 1.  Never returns. */
void error(char const *message){
fprintf(stderr, "Error: %s\n", message);
exit(1);
}
/* Parses the header of a binary PPM (P6) file: the "P6" magic, any '#'
 * comment lines, then width, height and the maximum channel value.
 * On success *width / *height are filled in and fp is left at the first byte
 * of pixel data; any malformed input exits via error().
 * NOTE(review): the final fscanf's return value is unchecked -- a truncated
 * header can leave width/height/maxval uninitialized before the maxval test.
 */
void readPPMHeader(FILE *fp, int *width, int *height){
char ch;
int maxval;
// Magic number must be exactly "P6" (raw, binary PPM).
if (fscanf(fp, "P%c\n", &ch) != 1 || ch != '6')
error("file is not in ppm raw format (P6)");
/* skip comments */
ch = getc(fp);
while (ch == '#'){
do {
ch = getc(fp);
} while (ch != '\n'); /* read to the end of the line */
ch = getc(fp);
}
if (!isdigit(ch)) error("cannot read header information from ppm file");
ungetc(ch, fp); /* put that digit back */
/* read the width, height, and maximum value for a pixel */
fscanf(fp, "%d%d%d\n", width, height, &maxval);
if (maxval != 255) error("image is not true-color (24 bit); read failed");
}
/* Writes a binary PPM (P6) image from separate R/G/B planes.
 * FIX: the file is now opened in "wb" -- P6 pixel data is binary, and text
 * mode ("w") corrupts the byte stream on platforms that translate line
 * endings (e.g. Windows).  The emitted bytes are unchanged. */
void writePPMImage(uchar *imageR, uchar *imageG, uchar *imageB, int width, int height, char const *filename){
    int pixelCount = width*height;
    FILE *fp = fopen(filename, "wb");
    if (!fp) error("cannot open file for writing");
    fprintf(fp, "P6\n%d %d\n%d\n", width, height, 255);
    for (int i=0; i<pixelCount; i++){
        // One interleaved RGB triple per pixel.
        uchar px[3] = { imageR[i], imageG[i], imageB[i] };
        fwrite(px, sizeof(uchar), 3, fp);
    }
    fclose(fp);
}
/* De-interleaves a packed RGBRGB... buffer of `size` bytes into separate
 * R, G and B planes (size/3 pixels each). */
void uploadImage(uchar *image, int size, uchar *imageR, uchar *imageG, uchar *imageB){
    int pixel = 0;
    for (int i = 0; i < size; i += 3, pixel++){
        imageR[pixel] = image[i];
        imageG[pixel] = image[i + 1];
        imageB[pixel] = image[i + 2];
    }
}
/*
 * Entry point.  Usage: kmeans_gpu <input.ppm> <k> <numIter> [output.ppm]
 * Loads a binary PPM, runs k-means color quantization on the GPU for
 * numIter iterations, prints "pixelCount,k,numIter,seconds", and writes the
 * quantized image if an output file was given.
 */
int main(int argc, char *argv[]) {
    // Validate the command line before dereferencing argv (was unchecked).
    if (argc < 4)
        error("usage: kmeans_gpu <input.ppm> <k> <numIter> [output.ppm]");
    char* inputFile = argv[1];
    int k = atoi(argv[2]);
    int numIter = atoi(argv[3]);
    char* outputFile;
    if (argc == 5)
        outputFile = argv[4];
    // clearClusterInfo/calculateCentroids run in a single 16x16 block, so at
    // most BLOCK_SIZE*BLOCK_SIZE centroids can be handled.
    if (k <= 0 || k > BLOCK_SIZE * BLOCK_SIZE)
        error("k must be between 1 and 256");
    int width, height;
    // FIX: "rb" -- P6 pixel data is binary; text mode corrupts it on
    // platforms that translate line endings.
    FILE *fp = fopen(inputFile, "rb");
    if (!fp) error("cannot open input file");
    readPPMHeader(fp, &width, &height);
    int pixelCount = width*height;
    uchar *image = (uchar*)malloc(pixelCount*3);
    // FIX: verify the whole image was actually read (was unchecked).
    if (fread(image, 1, pixelCount*3, fp) != (size_t)pixelCount*3)
        error("truncated pixel data in ppm file");
    fclose(fp);
    // Host/device buffers: one plane per channel (SoA layout).
    uchar *imageR, *imageG, *imageB, *clusterR, *clusterG, *clusterB;
    int *assignedClusters;
    uchar *d_imageR, *d_imageG, *d_imageB, *d_clusterR, *d_clusterG, *d_clusterB;
    int *d_assignedClusters, *d_sumR, *d_sumG, *d_sumB, *d_clusterSize;
    int imageSize = sizeof(uchar)*pixelCount;
    int centroidsSize = sizeof(int)*k;
    imageR = (uchar*)malloc(imageSize);
    imageG = (uchar*)malloc(imageSize);
    imageB = (uchar*)malloc(imageSize);
    uploadImage(image, pixelCount*3, imageR, imageG, imageB);
    free(image);
    clusterR = (uchar*)calloc(sizeof(uchar), k);
    clusterG = (uchar*)calloc(sizeof(uchar), k);
    clusterB = (uchar*)calloc(sizeof(uchar), k);
    assignedClusters = (int*)malloc(sizeof(int)*pixelCount);
    /* initial random centroids */
    srand (time(NULL));
    for (int i=0; i<k; i++){
        clusterR[i] = rand() % 256;
        clusterG[i] = rand() % 256;
        clusterB[i] = rand() % 256;
    }
    cudaMalloc(&d_imageR, imageSize);
    cudaMalloc(&d_imageG, imageSize);
    cudaMalloc(&d_imageB, imageSize);
    cudaMalloc(&d_assignedClusters, sizeof(int)*pixelCount);
    cudaMalloc(&d_clusterR, sizeof(uchar)*k);
    cudaMalloc(&d_clusterG, sizeof(uchar)*k);
    cudaMalloc(&d_clusterB, sizeof(uchar)*k);
    cudaMalloc(&d_sumR, centroidsSize);
    cudaMalloc(&d_sumG, centroidsSize);
    cudaMalloc(&d_sumB, centroidsSize);
    cudaMalloc(&d_clusterSize, centroidsSize);
    cudaMemcpy(d_imageR, imageR, imageSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_imageG, imageG, imageSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_imageB, imageB, imageSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_clusterR, clusterR, sizeof(uchar)*k, cudaMemcpyHostToDevice);
    cudaMemcpy(d_clusterG, clusterG, sizeof(uchar)*k, cudaMemcpyHostToDevice);
    cudaMemcpy(d_clusterB, clusterB, sizeof(uchar)*k, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(d_k, &k, sizeof(int));
    cudaMemcpyToSymbol(d_pixelCount, &pixelCount, sizeof(int));
    int BLOCK_X, BLOCK_Y;
    // FIX: ceiling division in integers.  The old ceil(width/BLOCK_SIZE)
    // truncated first (integer division), so non-multiple-of-16 dimensions
    // lost their edge pixels.
    BLOCK_X = (width + BLOCK_SIZE - 1) / BLOCK_SIZE;
    BLOCK_Y = (height + BLOCK_SIZE - 1) / BLOCK_SIZE;
    // NOTE: the kernels have no grid-stride loop, so clamping the grid means
    // images wider/taller than GRID_SIZE*BLOCK_SIZE pixels are only partially
    // processed -- a pre-existing limitation of this design.
    if(BLOCK_X > GRID_SIZE)
        BLOCK_X = GRID_SIZE;
    if(BLOCK_Y > GRID_SIZE)
        BLOCK_Y = GRID_SIZE;
    // 2D grid of fixed 16x16 blocks covering the image.
    dim3 dimGRID(BLOCK_X,BLOCK_Y);
    dim3 dimBLOCK(BLOCK_SIZE,BLOCK_SIZE);
    struct timespec stime, etime;
    double t;
    if (clock_gettime(CLOCK_THREAD_CPUTIME_ID , &stime)) {
        fprintf(stderr, "clock_gettime failed");
        exit(-1);
    }
    // Main k-means loop: assign pixels, clear and re-accumulate per-cluster
    // sums, then update the centroids.
    for (int i=0; i<numIter; i++){
        assignClusters<<< dimGRID, dimBLOCK >>> (d_imageR, d_imageG, d_imageB, d_assignedClusters,
                                                 d_clusterR, d_clusterG, d_clusterB);
        clearClusterInfo<<< 1, dimBLOCK >>> (d_sumR, d_sumG, d_sumB, d_clusterSize);
        sumClusters<<< dimGRID, dimBLOCK >>> (d_imageR, d_imageG, d_imageB, d_assignedClusters,
                                              d_sumR, d_sumG, d_sumB, d_clusterSize);
        calculateCentroids<<< 1, dimBLOCK >>> (d_clusterR, d_clusterG, d_clusterB,
                                               d_sumR, d_sumG, d_sumB, d_clusterSize);
    }
    if (clock_gettime(CLOCK_THREAD_CPUTIME_ID , &etime)) {
        fprintf(stderr, "clock_gettime failed");
        exit(-1);
    }
    t = (etime.tv_sec - stime.tv_sec) + (etime.tv_nsec - stime.tv_nsec) / 1000000000.0;
    printf("%d,%d,%d,%lf\n", pixelCount, k, numIter, t); //results
    int *clusterSize = (int*)malloc(sizeof(int)*k);
    cudaMemcpy(clusterSize, d_clusterSize, centroidsSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(clusterR, d_clusterR, sizeof(uchar)*k, cudaMemcpyDeviceToHost);
    cudaMemcpy(clusterG, d_clusterG, sizeof(uchar)*k, cudaMemcpyDeviceToHost);
    cudaMemcpy(clusterB, d_clusterB, sizeof(uchar)*k, cudaMemcpyDeviceToHost);
    // BUG FIX: imageG and imageB previously received d_imageR (copy-paste).
    cudaMemcpy(imageR, d_imageR, imageSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(imageG, d_imageG, imageSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(imageB, d_imageB, imageSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(assignedClusters, d_assignedClusters, sizeof(int)*pixelCount, cudaMemcpyDeviceToHost);
    // Recolor every pixel with its cluster's centroid color.
    for (int i=0; i<pixelCount; i++){
        int cluster = assignedClusters[i];
        imageR[i] = clusterR[cluster];
        imageG[i] = clusterG[cluster];
        imageB[i] = clusterB[cluster];
    }
    if (argc == 5)
        writePPMImage(imageR, imageG, imageB, width, height, outputFile);
    free(imageR);
    free(imageG);
    free(imageB);
    free(clusterR);
    free(clusterG);
    free(clusterB);
    free(assignedClusters);
    free(clusterSize);
    cudaFree(d_imageR);
    cudaFree(d_imageG);
    cudaFree(d_imageB);
    cudaFree(d_assignedClusters);
    cudaFree(d_clusterR);
    cudaFree(d_clusterG);
    cudaFree(d_clusterB);
    cudaFree(d_sumR);
    cudaFree(d_sumG);
    cudaFree(d_sumB);
    cudaFree(d_clusterSize);
}
|
21,684 | #include <stdio.h>
#include <future>
#include <thread>
#include <chrono>
#include <iostream>
#define N 1000000
#define NUM_THREADS_PER_BLOCK 256
#define NUM_BLOCKS_PER_GRID 1024
//#define NUM_BLOCKS_PER_GRID (N + NUM_THREADS_PER_BLOCK-1) / NUM_THREADS_PER_BLOCK;
__constant__ int factor = 0;
// Element-wise scaled add: c[i] = factor * (a[i] + b[i]).
// NOTE(review): there is no "i < n" bounds guard, so the launch grid must
// cover the arrays exactly; any excess thread reads and writes out of bounds.
// NOTE(review): `factor` is a __constant__ initialized to 0, so unless the
// host overwrites it with cudaMemcpyToSymbol every result is 0.
__global__
void vectorAdd(int *a, int *b, int *c) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
c[i] = factor*(a[i] + b[i]);
}
// Element-wise 2-D add via row-pointer tables: c[i][j] = a[i][j] + b[i][j].
// a/b/c must be device arrays of device row pointers (int**), which requires
// a two-level device allocation on the host side.
// NOTE(review): no bounds guard on i or j -- the 2-D launch grid must match
// the matrix dimensions exactly or threads index out of bounds.
__global__
void matrixAdd(int **a,int **b, int**c) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
c[i][j] = a[i][j] + b[i][j];
}
// Per-block partial dot product of a and b (length N): each block reduces
// its threads' partial sums in shared memory and writes one float to
// c[blockIdx.x]; the host must sum those partials for the final result.
// Assumes blockDim.x == NUM_THREADS_PER_BLOCK and is a power of two
// (pairwise tree reduction).
__global__
void dotProduct(float *a, float *b, float *c) {
    __shared__ float partial[NUM_THREADS_PER_BLOCK];
    int lane = threadIdx.x;
    // Grid-stride accumulation of this thread's share of the products.
    float acc = 0;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N; idx += blockDim.x * gridDim.x) {
        acc += a[idx] * b[idx];
    }
    partial[lane] = acc;
    // All partials must be published before the reduction starts.
    __syncthreads();
    // Halving tree reduction over the block's partial sums.
    for (int half = blockDim.x / 2; half != 0; half /= 2) {
        if (lane < half)
            partial[lane] += partial[lane + half];
        __syncthreads();
    }
    if (lane == 0)
        c[blockIdx.x] = partial[0];
}
#define PRINT(x) \
std::cout << #x " = " << x << std::endl
// Prints the given C string to stdout as "ptr = <value>" (endl flushes).
void func(const char* ptr) {
std::cout << "ptr = " << ptr << std::endl;
}
// Demo driver: dumps the properties of every CUDA device, then computes the
// dot product of two N-element vectors on the GPU (per-block partials via
// dotProduct, final sum on the host) and compares it to the closed-form
// 2 * sum of squares.
int main(int argc, char** argv) {
    // start time
    auto startTime = std::chrono::high_resolution_clock::now();
    printf("Hello World\n");
    // get the number of devices
    int numDevices;
    cudaGetDeviceCount(&numDevices);
    PRINT(numDevices);
    cudaDeviceProp prop;
    for (auto i=0 ; i<numDevices; i++) {
        cudaGetDeviceProperties(&prop, i);
        PRINT(prop.name);
        PRINT(prop.totalGlobalMem);
        PRINT(prop.sharedMemPerBlock);
        PRINT(prop.regsPerBlock);
        PRINT(prop.warpSize);
        PRINT(prop.memPitch);
        PRINT(prop.maxThreadsPerBlock);
        PRINT(prop.maxThreadsDim[0]);
        PRINT(prop.maxThreadsDim[1]);
        PRINT(prop.maxThreadsDim[2]);
        PRINT(prop.maxGridSize[0]);
        PRINT(prop.maxGridSize[1]);
        PRINT(prop.maxGridSize[2]);
        PRINT(prop.totalConstMem);
        PRINT(prop.major);
        PRINT(prop.minor);
        PRINT(prop.clockRate);
        PRINT(prop.textureAlignment);
        PRINT(prop.deviceOverlap);
        PRINT(prop.multiProcessorCount);
        PRINT(prop.kernelExecTimeoutEnabled);
        PRINT(prop.integrated);
        PRINT(prop.canMapHostMemory);
        PRINT(prop.computeMode);
        PRINT(prop.maxTexture1D);
        PRINT(prop.maxTexture2D[0]);
        PRINT(prop.maxTexture2D[1]);
        PRINT(prop.maxTexture3D[0]);
        PRINT(prop.maxTexture3D[1]);
        PRINT(prop.maxTexture3D[2]);
        // PRINT(prop.maxTexture2DArray[0]);
        // PRINT(prop.maxTexture2DArray[1]);
        // PRINT(prop.maxTexture2DArray[2]);
        PRINT(prop.concurrentKernels);
    }
    // FIX: h_a and h_b are 4 MB each (N = 1e6 floats); as automatic
    // variables they could overflow the default ~8 MB thread stack.
    // `static` moves them to static storage with no behavioral change.
    static float h_a[N], h_b[N], h_c[NUM_BLOCKS_PER_GRID];
    float *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, N*sizeof(float));
    cudaMalloc(&d_b, N*sizeof(float));
    cudaMalloc(&d_c, NUM_BLOCKS_PER_GRID*sizeof(float));
    // h_a[i] = i, h_b[i] = 2i, so the dot product is 2 * sum(i^2).
    for (auto i=0; i<N; i++) {
        h_a[i] = i;
        h_b[i] = i*2;
    }
    cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N*sizeof(float), cudaMemcpyHostToDevice);
    dotProduct<<<NUM_BLOCKS_PER_GRID, NUM_THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
    // Blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(h_c, d_c, NUM_BLOCKS_PER_GRID*sizeof(float), cudaMemcpyDeviceToHost);
    // Final host-side reduction of the per-block partial sums.
    float sum = 0;
    for (auto i=0; i<NUM_BLOCKS_PER_GRID; i++)
        sum += h_c[i];
    cudaFree(d_c);
    cudaFree(d_a);
    cudaFree(d_b);
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
    printf("Doues GPU version equal CPU version: %.6g = %.6g\n", sum, 2*sum_squares((float)(N-1)));
    // stop time
    auto stopTime = std::chrono::high_resolution_clock::now();
    PRINT((stopTime - startTime).count());
    printf("Goodbye World\n");
}
|
21,685 |
#include <type_traits>
using tt = std::true_type;
using ft = std::false_type;
// Returns x squared plus a compile-time constant (17); exercises a C++11
// <type_traits> facility (std::integral_constant) in host code.
int __host__ static_cuda11_func(int x)
{
    constexpr int offset = std::integral_constant<int, 17>::value;
    return x * x + offset;
}
|
21,686 | #include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <curand_kernel.h>
#include <device_functions.h>
const int digest_size = 256;                    // Keccak-256 digest size in bits
const int digest_size_bytes = digest_size / 8;  // 32 bytes
//cudaEvent_t start, stop;
// 64-bit rotate-left.  All call sites use shift amounts in 1..63, so the
// undefined ">> 64" case for y == 0 never occurs here.
#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
//__device__ __host__ inline const char* chars = "123abcABC";
__device__ const uint64_t RC[24] = {
0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
0x8000000000008080, 0x0000000080000001, 0x8000000080008008
};
__device__ const int r[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
__device__ const int piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
//__device__ __host__ inline void generate_message(char* message, uint64_t tid, int* str_len) {
// int len = 0;
// const int num_chars = 94;
// char str[21];
// while (tid > 0) {
// str[len++] = chars[tid % num_chars];
// tid /= num_chars;
// }
//
// str[len] = '\0';
// memcpy(message, str, len + 1);
// *str_len = len;
//}
// One full Keccak-f[1600] permutation (24 rounds) applied in place to the
// 25-lane (uint64_t) state.  This is the loop form of the original fully
// unrolled code -- each round performs Theta, Rho+Pi, Chi and Iota in the
// same lane order, so the transformation is bit-for-bit identical.
// RC (round constants), r (rotation offsets) and piln (pi permutation) are
// the file-level lookup tables.
__device__ __host__ inline void keccak256(uint64_t state[25]) {
    uint64_t C[5];
    for (int round = 0; round < 24; round++) {
        // Theta: column parities, then fold the mixed parity into each lane.
        for (int x = 0; x < 5; x++)
            C[x] = state[x] ^ state[x + 5] ^ state[x + 10] ^ state[x + 15] ^ state[x + 20];
        for (int x = 0; x < 5; x++) {
            uint64_t d = C[(x + 4) % 5] ^ ROTL64(C[(x + 1) % 5], 1);
            for (int y = 0; y < 25; y += 5)
                state[y + x] ^= d;
        }
        // Rho + Pi: rotate each lane and move it to its permuted position.
        uint64_t cur = state[1];
        for (int t = 0; t < 24; t++) {
            int j = piln[t];
            uint64_t saved = state[j];
            state[j] = ROTL64(cur, r[t]);
            cur = saved;
        }
        // Chi: non-linear mixing within each row of five lanes.
        for (int y = 0; y < 25; y += 5) {
            uint64_t row[5];
            for (int x = 0; x < 5; x++)
                row[x] = state[y + x];
            for (int x = 0; x < 5; x++)
                state[y + x] ^= (~row[(x + 1) % 5]) & row[(x + 2) % 5];
        }
        // Iota: inject the round constant into lane (0,0).
        state[0] ^= RC[round];
    }
}
// Keccak-256 sponge: absorbs `message_len` bytes of `message` at a 136-byte
// rate and squeezes the first `output_len` bytes of the final state into
// `output` (output_len must be <= 200).
// Uses the ORIGINAL Keccak padding (0x01 ... 0x80), not the SHA-3 variant
// (0x06 ...), matching e.g. Ethereum's keccak256.
// FIX: lanes are now loaded with memcpy instead of dereferencing the input
// through a cast to uint64_t* -- the cast is an unaligned, strict-aliasing-
// violating access (UB); memcpy XORs exactly the same bytes.
__device__ __host__ inline void keccak(const char* message, int message_len, unsigned char* output, int output_len) {
    uint64_t state[25];
    uint8_t temp[144];
    int rsize = 136;        // rate in bytes (1088-bit rate for Keccak-256)
    int rsize_byte = 17;    // rate in 64-bit lanes
    memset(state, 0, sizeof(state));
    // Absorb every full rate-sized block.
    for (; message_len >= rsize; message_len -= rsize, message += rsize) {
        for (int i = 0; i < rsize_byte; i++) {
            uint64_t lane;
            memcpy(&lane, message + i * 8, sizeof(lane));
            state[i] ^= lane;
        }
        keccak256(state);
    }
    // Pad and absorb the final partial block.
    memcpy(temp, message, message_len);
    temp[message_len++] = 1;
    memset(temp + message_len, 0, rsize - message_len);
    temp[rsize - 1] |= 0x80;
    for (int i = 0; i < rsize_byte; i++) {
        uint64_t lane;
        memcpy(&lane, temp + i * 8, sizeof(lane));
        state[i] ^= lane;
    }
    keccak256(state);
    // Squeeze: the digest is the leading bytes of the state.
    memcpy(output, state, output_len);
}
//
//__global__ void benchmark(const char* messages, unsigned char* output, int num_messages) {
// const int str_len = 6;
// const int output_len = 32;
// int tid = threadIdx.x + (blockIdx.x * blockDim.x);
// int num_threads = blockDim.x * gridDim.x;
//
// for (; tid < num_messages; tid += num_threads)
// {
// keccak(&messages[tid * str_len], str_len, &output[tid * output_len], output_len);
// }
//}
//
//
//void gpu_init() {
// cudaDeviceProp device_prop;
// int device_count, block_size;
//
// cudaGetDeviceCount(&device_count);
// if (device_count != 1) {
// exit(EXIT_FAILURE);
// }
//
// if (cudaGetDeviceProperties(&device_prop, 0) != cudaSuccess) {
// exit(EXIT_FAILURE);
// }
//
// number_threads = device_prop.maxThreadsPerBlock;
// number_multi_processors = device_prop.multiProcessorCount;
// max_threads_per_mp = device_prop.maxThreadsPerMultiProcessor;
// block_size = (max_threads_per_mp / gcd(max_threads_per_mp, number_threads));
// number_threads = max_threads_per_mp / block_size;
// number_blocks = block_size * number_multi_processors;
// clock_speed = (int)(device_prop.memoryClockRate * 1000 * 1000);
//}
//int gcd(int a, int b) {
// return (a == 0) ? b : gcd(b % a, a);
//}
//char* read_in_messages(char* file_name) {
// FILE* f;
// if (!(f = fopen(file_name, "r")))
// {
// printf("Error opening file %s", file_name);
// exit(1);
// }
//
// char* messages = (char*)malloc(sizeof(char) * num_messages * str_length);
// if (messages == NULL)
// {
// perror("Error allocating memory for list of Strings.\n");
// exit(1);
// }
//
// int index = 0;
// char buf[10];
// while (1) {
// if (fgets(buf, str_length + 1, f) == NULL)
// break;
// buf[strlen(buf) - 1] = '\0';
// memcpy(&messages[index], buf, str_length);
// index += str_length - 1;
// }
//
// return messages;
//}
//
//
//
//
//void runBenchmarks(char* file_name, int num_messages) {
// float h_to_d_time = 0.0;
// float comp_time = 0.0;
// float d_to_h_time = 0.0;
// float total_time = 0.0;
// float elapsed_time;
// int hashes_per_sec;
//
// size_t array_size = sizeof(char) * str_length * num_messages;
// size_t output_size = digest_size_bytes * num_messages;
//
// // Allocate host arrays
// char* h_messages = read_in_messages(file_name);
// unsigned char* h_output = (unsigned char*)malloc(output_size);
//
// char* d_messages;
// unsigned char* d_output;
//
// // Allocate device arrays
// cudaMalloc((void**)&d_messages, array_size);
// cudaMalloc((void**)&d_output, output_size);
//
// int number_runs = 25;
// // Copy Strings from host to device arrays
// for (int j = 0; j < number_runs; j++) {
// benchmark << <number_blocks, number_threads >> > (d_messages, d_output, num_messages);
// }
//
// // Free arrays from memory
// free(h_messages);
// free(h_output);
// cudaFree(d_messages);
// cudaFree(d_output);
//} |
21,687 | #include<stdio.h>
// Kernel definition
// Element-wise vector add: C[i] = A[i] + B[i] for i < N, with a per-thread
// debug printf.
// FIX: the printf previously executed for EVERY launched thread, reading
// A[i]/B[i]/C[i] out of bounds whenever the grid overshoots N; it now sits
// inside the bounds guard with the store.
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
    int i = blockDim.x*blockIdx.x + threadIdx.x;
    if(i<N)
    {
        C[i] = A[i] + B[i];
        printf("In thread-i, we are using value %f + %f = %f\n", A[i], B[i], C[i]);
    }
}
// Fills two N-element vectors with 0..N-1, adds them on the GPU with VecAdd,
// and prints every (a, b, c) triple.
int main()
{
    int N = 1024;
    float* h_A, *h_B, *h_C;
    size_t arrSize = N*sizeof(float);
    h_A = (float*)malloc(arrSize);
    h_B = (float*)malloc(arrSize);
    h_C = (float*)malloc(arrSize);
    for(int i=0;i<N;i++)
        h_A[i] = h_B[i] = i;
    float *d_A,*d_B, *d_C;
    cudaMalloc(&d_A, arrSize);
    cudaMalloc(&d_B, arrSize);
    cudaMalloc(&d_C, arrSize);
    // Copy vectors from host memory to device memory.
    cudaMemcpy(d_A, h_A, arrSize, cudaMemcpyHostToDevice);
    // BUG FIX: d_B previously received h_A (copy-paste); harmless only
    // because h_A and h_B happen to hold identical values here.
    cudaMemcpy(d_B, h_B, arrSize, cudaMemcpyHostToDevice);
    int threadsPerBlock = 256;
    // Ceiling division so a partial tail block is still launched.
    int blocksPerGrid = N/threadsPerBlock;
    if(N%threadsPerBlock) blocksPerGrid++;
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    // Blocking copy synchronizes with the kernel before h_C is read.
    cudaMemcpy(h_C, d_C, arrSize, cudaMemcpyDeviceToHost);
    for(int i=0;i<N;i++)
        printf("%f %f %f\n", h_A[i], h_B[i], h_C[i]);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // FIX: the host buffers were never freed.
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
|
#define TILE_DIM 1024
// Single-block argmin reduction over `vector` (length `length`); writes the
// index of the smallest element to result[0].
// Assumes one block with blockDim.x == TILE_DIM -- TODO confirm: shared
// memory is indexed with threadIdx.x alone and thread 0 emits the answer.
template<typename T>
__device__ void argmin(const T* vector, int* result, const int length) {
__shared__ T partsVals[TILE_DIM];   // per-thread partial minimum values
__shared__ int partsArgs[TILE_DIM]; // indices matching partsVals
int index = threadIdx.x;
// Number of TILE_DIM-wide chunks needed to cover the vector (ceil division).
int partLength = (length + TILE_DIM - 1) / TILE_DIM;
T min;
int argmin;  // note: shadows the function name
// Phase 1: each thread serially scans elements index, index+TILE_DIM, ...
if (index < length) {
min = vector[index];
argmin = index;
}
for (int i = 1; i < partLength; i++) {
int valueIndex = i * TILE_DIM + index;
if (valueIndex < length) {
T value = vector[valueIndex];
if (value < min) {
min = value;
argmin = valueIndex;
}
}
}
// NOTE(review): when index >= length, min/argmin are indeterminate here and
// the stores below write garbage into shared memory; those slots are never
// read because the reduction is clamped to `limit`, but the write is unclean.
partsVals[index] = min;
partsArgs[index] = argmin;
// Phase 2: pairwise tree reduction over the first `limit` partials.
int limit = length < TILE_DIM ? length : TILE_DIM;
for (int d = 1; d < limit; d <<= 1) {
__syncthreads();  // publish the previous level before reading
if (index % (d << 1) == 0) {
int valueIndex = index + d;
if (valueIndex < limit) {
T value = partsVals[valueIndex];
int arg = partsArgs[valueIndex];
// The local `min` always mirrors partsVals[index], so comparing against
// it is equivalent to comparing against the shared slot.
if (value < min) {
min = value;
partsVals[index] = min;
argmin = arg;
partsArgs[index] = argmin;
}
}
}
}
// Thread 0 owns the fully reduced result.
if (index == 0) {
result[0] = argmin;
}
}
21,689 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
using namespace std;
// Fills a 24-element host vector with pseudo-random ints, sorts it on the
// device with Thrust, and copies the sorted data back to the host.
int main() {
    thrust::host_vector<int> host_data(24);
    thrust::generate(host_data.begin(), host_data.end(), rand);
    thrust::device_vector<int> device_data(host_data);
    thrust::sort(device_data.begin(), device_data.end());
    thrust::copy(device_data.begin(), device_data.end(), host_data.begin());
    return 0;
}
|
21,690 | //==============================================================
// Copyright 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <stdio.h>
#include <cuda_runtime.h>
extern void run_util();
// Minimal demo kernel: each launched thread prints one line.  The `n`
// argument is unused.
__global__ void kernel_main(int n) {
printf("kernel_main!\n");
}
// Launches the local demo kernel, then calls run_util() (defined in another
// translation unit -- presumably launches its own kernel; verify there).
int main(){
kernel_main<<<1, 1>>>(1);
cudaDeviceSynchronize();  // wait for the kernel and flush its printf output
run_util();
cudaDeviceSynchronize();  // wait for whatever run_util() enqueued
return 0;
}
|
21,691 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Fall 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
	/** YOUR CODE GOES BELOW **/
	// Grid-stride loop: each thread marks a strided subset of the edges.
	// An edge is kept (1) only when BOTH endpoints are still unmatched
	// (matches[] == -1); otherwise it is filtered out (0).
	int stride = blockDim.x * gridDim.x;
	for (int e = blockDim.x * blockIdx.x + threadIdx.x; e < numEdges; e += stride) {
		bool endpointMatched = (matches[src[e]] != -1) || (matches[dst[e]] != -1);
		keepEdges[e] = endpointMatched ? 0 : 1;
	}
	/** YOUR CODE GOES ABOVE **/
}
|
21,692 | #include <ctime>
#include <iostream>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <cmath>
#include <locale.h>
// --- Brute-force configuration ---
const int PWD_LENGTH = 4;        // candidates are generated in fixed 4-char slots
const int HASH_LENGTH = 64;      // chars of the crypt() output after the salt prefix
const int NUM_BLOCKS = 64;
const int NUM_THREADS = 256;
const int KERNEL_SIZE = NUM_BLOCKS * NUM_THREADS;  // guesses per kernel launch
const int CHARACTER_SET = 94;    // printable ASCII characters 33..126
const int ASCII_OFFSET = 33;     // first printable character ('!')
const size_t PWD_TRY_ARR_MEM_SIZE = (sizeof(char) * PWD_LENGTH) * KERNEL_SIZE;
const size_t PWD_HASH_ARR_MEM_SIZE = (sizeof(char) * HASH_LENGTH) * KERNEL_SIZE;
const size_t COMP_ARR_MEM_SIZE = (sizeof(int) * KERNEL_SIZE);
// Application variables
std::string pwdSalt = "";        // salt prefix parsed from the user-supplied hash
std::string checkFor = "";       // target hash we are trying to match
std::string testPwdStr = "";     // scratch: current candidate password
std::string testHash = "";       // scratch: full crypt() output
bool passwordFound = false;
std::string resultPassword = "";
unsigned long maxPwdAttempts;    // zero-initialized (file scope); set by calcMaxAttempts()
int kernelPasses = 0;            // number of completed kernel rounds
// Timing variables
double totalRunTime = 0;
/**
 * Computes the size of the search space into the maxPwdAttempts global:
 * the sum of CHARACTER_SET^len for every password length 1..PWD_LENGTH,
 * plus (KERNEL_SIZE - 1) so the final partial round of guesses still runs.
 */
__host__
void calcMaxAttempts() {
    for (int len = 1; len <= PWD_LENGTH; len++) {
        maxPwdAttempts += pow(CHARACTER_SET, len);
    }
    maxPwdAttempts += KERNEL_SIZE - 1;
}
/**
 * Take user input and parse into salt and hash.
 *
 * Splits the combined crypt()-style string into its first 12 characters
 * (stored in the pwdSalt global) and the following HASH_LENGTH characters
 * (stored in checkFor), then echoes both to stdout.
 * NOTE(review): assumes `arg` is at least 12 + HASH_LENGTH chars; substr
 * throws std::out_of_range on shorter input -- consider validating.
 */
__host__
void parseInputHash(std::string arg) {
pwdSalt = arg.substr(0,12);
checkFor = arg.substr(12,HASH_LENGTH);
std::cout << "User Entered the Following Password Hash Information...\n";
std::cout << "Salt: " << pwdSalt << "\n";
std::cout << "Hash: " << checkFor << "\n";
}
/**
 * Create hash from brute force password string.
 *
 * Hashes `password` with the global salt via the C library's crypt(),
 * stores the full result in the testHash global, and returns the
 * HASH_LENGTH-character hash portion (the 12-char salt prefix stripped).
 * NOTE(review): crypt() may return NULL on failure (e.g. unsupported salt);
 * assigning NULL to std::string is undefined behavior -- consider checking.
 */
__host__
std::string createHash(std::string password) {
// Create password hash from password try and salt
testHash = crypt((char *) password.c_str(), (char *) pwdSalt.c_str());
return testHash.substr(12,HASH_LENGTH);
}
/**
 * Copies one hash string into its fixed-width HASH_LENGTH slot inside the
 * flat hash array at entry `arrIndx`.
 */
__host__
void copyHashToArr(int arrIndx, std::string hash, char *charArr) {
    // Each entry occupies a contiguous HASH_LENGTH-character slot.
    char *slot = charArr + arrIndx * HASH_LENGTH;
    for (int pos = 0; pos < HASH_LENGTH; pos++) {
        slot[pos] = hash[pos];
    }
}
/**
 * Extracts the password candidate stored at entry `arrIndx` of the flat
 * password array.  Entries are fixed-width PWD_LENGTH slots padded with
 * spaces; the padding is stripped from the returned string.
 */
__host__
std::string getPwdTry(int arrIndx, char *pwdCharArr) {
    const char *slot = pwdCharArr + arrIndx * PWD_LENGTH;
    std::string pwd = "";
    for (int pos = 0; pos < PWD_LENGTH; pos++) {
        char c = slot[pos];
        // Skip the space padding used for unused positions.
        if (c != ' ') {
            pwd += c;
        }
    }
    return pwd;
}
/**
 * Debug helper: prints all KERNEL_SIZE fixed-width entries of a character
 * array, separated by ", ", skipping the space padding inside entries.
 */
__host__
void printArr(int length, char *charArr) {
    for (int i = 0; i < (KERNEL_SIZE * length); i++) {
        // A new entry starts at every multiple of `length` (except i == 0);
        // emit the separator before its first character.
        bool entryStart = (i != 0) && (i % length) == 0;
        if (entryStart) {
            std::cout << ", ";
        }
        if (charArr[i] != ' ') {
            std::cout << charArr[i];
        }
    }
    std::cout << std::endl;
}
/**
 * Generates this round's brute-force password candidates, one per thread.
 * Each thread decodes its globally unique guess number as a base-
 * CHARACTER_SET value: the least significant "digit" lands in the last
 * password position, and leading positions left unused by small guess
 * numbers are blanked with spaces.  (Slot layout assumes PWD_LENGTH == 4.)
 */
__global__
void kernel_createPwdTry(int numPass, char *tryArr) {
    int curThread = blockIdx.x * blockDim.x + threadIdx.x;
    // Unique guess index for this thread across all kernel passes.
    int guess = (numPass * KERNEL_SIZE) + curThread;
    char *slot = &tryArr[curThread * PWD_LENGTH];
    // Last character is always drawn from the character set.
    slot[3] = (guess % CHARACTER_SET) + ASCII_OFFSET;
    guess = guess / CHARACTER_SET;
    // Remaining positions (third, second, first) only while digits remain.
    for (int pos = 2; pos >= 0; pos--) {
        if (guess > 0) {
            slot[pos] = (guess % CHARACTER_SET) + ASCII_OFFSET;
            guess = guess / CHARACTER_SET;
        } else {
            slot[pos] = 32; // Space
        }
    }
}
/**
 * Compares each thread's candidate hash (fixed HASH_LENGTH slot in hashArr)
 * against the target hash `lookingFor`, writing 1 into compares[thread] on a
 * full match and 0 otherwise.
 */
__global__
void kernel_checkHash(char *lookingFor, char *hashArr, int *compares) {
    int curThread = blockIdx.x * blockDim.x + threadIdx.x;
    const char *candidate = &hashArr[curThread * HASH_LENGTH];
    // Assume a match until the first differing character.
    int match = 1;
    for (int i = 0; i < HASH_LENGTH; i++) {
        if (candidate[i] != lookingFor[i]) {
            match = 0;
            break;
        }
    }
    compares[curThread] = match;
}
/**
 * Main sub routine which runs cracking loop.
 *
 * Each pass: (1) a GPU kernel generates KERNEL_SIZE brute-force password
 * candidates, (2) the host hashes each candidate, (3) a GPU kernel compares
 * every candidate hash against the target hash, (4) the host scans the
 * compare flags for a match. Loops until a match is found or maxPwdAttempts
 * is reached. Relies on file-level globals (checkFor, passwordFound,
 * resultPassword, kernelPasses, maxPwdAttempts, testPwdStr, totalRunTime)
 * and helpers (getPwdTry, createHash, copyHashToArr).
 *
 * BUG FIX vs. original: kernel launches are asynchronous, so stopping the
 * clock immediately after <<<...>>> measured only launch overhead. A
 * cudaDeviceSynchronize() now precedes each stop so the reported kernel
 * times reflect actual execution.
 */
void main_sub0() {
    // Time variables
    clock_t start;
    clock_t stop;
    double elapsedTime;
    // Declare host variables
    char *h_pwdTryArr;
    char *h_pwdHashArr;
    char *h_checkingFor;
    int *h_compareArr;
    // Declare device variables
    char *d_pwdTryArr;
    char *d_pwdHashArr;
    char *d_checkingFor;
    int *d_compareArr;
    // Allocate Host memory
    h_pwdTryArr = (char *)malloc(PWD_TRY_ARR_MEM_SIZE);
    h_pwdHashArr = (char *)malloc(PWD_HASH_ARR_MEM_SIZE);
    h_checkingFor = (char *)malloc(HASH_LENGTH * sizeof(char));
    h_compareArr = (int *)malloc(COMP_ARR_MEM_SIZE);
    // Allocate GPU memory
    cudaMalloc((void **) &d_pwdTryArr, PWD_TRY_ARR_MEM_SIZE);
    cudaMalloc((void **) &d_pwdHashArr, PWD_HASH_ARR_MEM_SIZE);
    cudaMalloc((void **) &d_checkingFor, (HASH_LENGTH * sizeof(char)));
    cudaMalloc((void **) &d_compareArr, COMP_ARR_MEM_SIZE);
    // Target hash the compare kernel looks for.
    // NOTE(review): strcpy also writes a trailing NUL — confirm HASH_LENGTH
    // accounts for it (otherwise this writes one byte past h_checkingFor).
    strcpy(h_checkingFor, checkFor.c_str());
    // Copy host to device; contents are fully overwritten by the first
    // kernel_createPwdTry pass, so the initial values do not matter.
    cudaMemcpy(d_pwdTryArr, h_pwdTryArr, PWD_TRY_ARR_MEM_SIZE, cudaMemcpyHostToDevice);
    while ((!passwordFound) && ((kernelPasses * KERNEL_SIZE) < maxPwdAttempts)) {
        printf("Running round %'d of %'d password attempts\n", (kernelPasses + 1), KERNEL_SIZE);
        // Start clock for kernel timing
        start = clock();
        std::cout << "** Running GPU Kernel to Create Brute Force Passwords... ";
        // Run create password kernel
        kernel_createPwdTry<<<NUM_BLOCKS,NUM_THREADS>>>(kernelPasses, d_pwdTryArr);
        // Wait for the kernel so the timing below measures execution,
        // not just the asynchronous launch.
        cudaDeviceSynchronize();
        // Stop clock for kernel timing and output results
        stop = clock();
        elapsedTime = double(stop - start) / CLOCKS_PER_SEC;
        totalRunTime += elapsedTime;
        printf("Took %.5f seconds to run\n", elapsedTime);
        // Copy device to host
        cudaMemcpy(h_pwdTryArr, d_pwdTryArr, PWD_TRY_ARR_MEM_SIZE, cudaMemcpyDeviceToHost);
        // Start clock for host hash creation timing
        start = clock();
        std::cout << "** Running Host Method to Create Hashes... ";
        // Hash every candidate password on the host and pack the hashes
        // into h_pwdHashArr for the GPU compare kernel.
        for (int i = 0; i < (KERNEL_SIZE); i++) {
            // Reset string
            testPwdStr = "";
            testPwdStr = getPwdTry(i, h_pwdTryArr);
            copyHashToArr(i, createHash(testPwdStr), h_pwdHashArr);
        }
        // Stop clock for host hash creation timing and output results
        stop = clock();
        elapsedTime = double(stop - start) / CLOCKS_PER_SEC;
        totalRunTime += elapsedTime;
        printf("Took %.5f seconds to run\n", elapsedTime);
        // Copy host to device
        cudaMemcpy(d_checkingFor, h_checkingFor, (HASH_LENGTH * sizeof(char)), cudaMemcpyHostToDevice);
        cudaMemcpy(d_pwdHashArr, h_pwdHashArr, PWD_HASH_ARR_MEM_SIZE, cudaMemcpyHostToDevice);
        cudaMemcpy(d_compareArr, h_compareArr, COMP_ARR_MEM_SIZE, cudaMemcpyHostToDevice);
        // Start clock for hash comparison kernel timing
        start = clock();
        std::cout << "** Running GPU Kernel to Check Password Hashes... ";
        // Run check hash kernel
        kernel_checkHash<<<NUM_BLOCKS, NUM_THREADS>>>(d_checkingFor, d_pwdHashArr, d_compareArr);
        // Same as above: synchronize before stopping the clock.
        cudaDeviceSynchronize();
        // Stop clock for hash comparison kernel timing and output results
        stop = clock();
        elapsedTime = double(stop - start) / CLOCKS_PER_SEC;
        totalRunTime += elapsedTime;
        printf("Took %.5f seconds to run\n", elapsedTime);
        // Copy device to host
        cudaMemcpy(h_compareArr, d_compareArr, COMP_ARR_MEM_SIZE, cudaMemcpyDeviceToHost);
        // Start clock for host checking for match timing
        start = clock();
        std::cout << "** Checking on Host for hash match... ";
        // Scan the compare flags; on a match, record the winning password.
        for (int i = 0; i < KERNEL_SIZE; i++) {
            if (h_compareArr[i] == 1) {
                passwordFound = true;
                resultPassword = getPwdTry(i, h_pwdTryArr);
            }
        }
        // Stop clock for host checking for match timing and output results
        stop = clock();
        elapsedTime = double(stop - start) / CLOCKS_PER_SEC;
        totalRunTime += elapsedTime;
        printf("Took %.5f seconds to run\n", elapsedTime);
        // Incrememnt kernel pass
        kernelPasses++;
    }
    free(h_pwdTryArr);
    free(h_pwdHashArr);
    free(h_checkingFor);
    free(h_compareArr);
    cudaFree(d_pwdTryArr);
    cudaFree(d_pwdHashArr);
    cudaFree(d_checkingFor);
    cudaFree(d_compareArr);
}
/**
 * Main application.
 *
 * Expects a single 77-character salt+hash argument, parses it, then runs
 * the brute-force cracking loop and reports the outcome and totals.
 */
int main(int argc, char *argv[]) {
    setlocale(LC_NUMERIC, "");
    // Guard clause: a hash argument is mandatory.
    if (argc < 2) {
        std::cout << "!!! ERROR !!! Please enter hash as argument !!!\n";
        return EXIT_FAILURE;
    }
    std::string argument = argv[1];
    // Guard clause: the combined salt+hash must be exactly 77 characters.
    if (argument.length() != 77) {
        std::cout << "!!! ERROR !!! Hash must be 77 characters long !!!\n";
        return EXIT_FAILURE;
    }
    // Parse argument into salt and hash, then size the search space.
    parseInputHash(argument);
    calcMaxAttempts();
    std::cout << "Attempting to crack password...\n";
    main_sub0();
    if (passwordFound) {
        std::cout << "Password found --- " << resultPassword << "\n";
    } else {
        std::cout << "Password not found\n";
    }
    // Display total number of attempts
    printf("%'d attempts processed\n", kernelPasses * KERNEL_SIZE);
    printf("%'.5f total processing time\n", totalRunTime);
    return EXIT_SUCCESS;
}
|
21,693 | //#include <algorithm>
//#include <cassert>
//#include <cstdlib>
//#include <functional>
//#include <iostream>
//#include <vector>
//#include <cuda_runtime.h>
//#include "device_launch_parameters.h"
//#include <random>
//
//using std::cout;
//using std::generate;
//using std::vector;
//
//using namespace std;
//
//#define BLOCK_SIZE 32
//
//__global__ void matrixMulKernel(float* a, float* b, float* c, int N) {
// int gx = blockIdx.x * BLOCK_SIZE + threadIdx.x; // global thread x
// int gy = blockIdx.y * BLOCK_SIZE + threadIdx.y; // global thread y
//
// float sum = 0.f;
//
// for (int r = 0; r < N; r++)
// {
// sum += a[gy * N + r] * b[gx + r * N];
// }
//
// c[gy * N + gx] = sum;
//}
//
//void printMatrix(vector<float> a, int N) {
// for (int i = 0; i < N; i++) {
// for (int j = 0; j < N; j++) {
// cout << a[i * N + j] << " ";
// }
// cout << endl;
// }
// cout << endl;
//}
//
//vector<float> runMatrixMul(int N, vector<float> h_a, vector<float> h_b) {
//
// // Matrix size in bytes
// size_t byteSize = N * N * sizeof(float);
//
// vector<float> h_c(N * N);
//
// //Allocate device memory (device = GPU)
// float* d_a, * d_b, * d_c;
// cudaMalloc(&d_a, byteSize);
// cudaMalloc(&d_b, byteSize);
// cudaMalloc(&d_c, byteSize);
//
// //Copy data to device
// cudaMemcpy(d_a, h_a.data(), byteSize, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, h_b.data(), byteSize, cudaMemcpyHostToDevice);
//
// //Blocks per grid dimension
// int BlkGrdDim = (int)ceil((float)N / BLOCK_SIZE);
//
// //dim3 - cuda int vector https://codeyarns.com/tech/2011-02-16-cuda-dim3.html
// dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
// dim3 blocks(BlkGrdDim, BlkGrdDim);
//
// //Run kernel
// matrixMulKernel <<<blocks, threads>>> (d_a, d_b, d_c, N);
// cudaThreadSynchronize();
//
// //Copy back to host
// cudaMemcpy(h_c.data(), d_c, byteSize, cudaMemcpyDeviceToHost);
//
// //Free memory on device
// cudaFree(d_a);
// cudaFree(d_b);
// cudaFree(d_c);
//
// return h_c;
//}
//
////With timer for kernel analysis
//vector<float> runMatrixMul(int N, vector<float> h_a, vector<float> h_b, float* time) {
//
// //Timer stuff
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// // Matrix size in bytes
// size_t byteSize = N * N * sizeof(float);
//
// vector<float> h_c(N * N);
//
// //Allocate device memory (device = GPU)
// float* d_a, * d_b, * d_c;
// cudaMalloc(&d_a, byteSize);
// cudaMalloc(&d_b, byteSize);
// cudaMalloc(&d_c, byteSize);
//
// //Copy data to device
// cudaMemcpy(d_a, h_a.data(), byteSize, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, h_b.data(), byteSize, cudaMemcpyHostToDevice);
//
// //Blocks per grid dimension
// int BlkGrdDim = (int)ceil((float)N / BLOCK_SIZE);
//
// //dim3 - cuda int vector https://codeyarns.com/tech/2011-02-16-cuda-dim3.html
// dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
// dim3 blocks(BlkGrdDim, BlkGrdDim);
//
// //Start timer here
// cudaEventRecord(start, 0);
//
// //Run kernel
// matrixMulKernel <<<blocks, threads>>> (d_a, d_b, d_c, N);
// cudaThreadSynchronize();
//
// //Stop timer here
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(time, start, stop);
//
// //Copy back to host
// cudaMemcpy(h_c.data(), d_c, byteSize, cudaMemcpyDeviceToHost);
//
// //Free memory on device
// cudaFree(d_a);
// cudaFree(d_b);
// cudaFree(d_c);
//
// return h_c;
//}
//
////CPU side
//int main() {
// ////Matrix size N x N
// //const int N = 256;
// //const int N = 512;
// //const int N = 1024;
// //const int N = 1536;
// //const int N = 2048;
// //const int N = 3072;
// const int N = 4096;
//
// vector<float> time_list;
// int launchIter = 11;
// int warmupLaunches = 1;
//
// //Timer stuff
// float time;
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// //Matrices
// vector<float> h_a(N * N);
// vector<float> h_b(N * N);
//
// for (int i = 0; i < launchIter; i++){
// //Initialize matrices
// generate(h_a.begin(), h_a.end(), []() { return rand() % 100; });
// generate(h_b.begin(), h_b.end(), []() { return rand() % 100; });
//
// ////Start timer here
// //cudaEventRecord(start, 0);
//
// //runMatrixMul(N, h_a, h_b);
//
// ////Stop timer here
// //cudaEventRecord(stop, 0);
// //cudaEventSynchronize(stop);
// //cudaEventElapsedTime(&time, start, stop);
//
// runMatrixMul(N, h_a, h_b, &time);
//
// cout << "True time = " << time << endl;
// time_list.push_back(time);
// }
//
// for (int i = 0; i < warmupLaunches; i++)
// time_list.erase(time_list.begin());
//
// float sumTime = 0;
// for(auto el : time_list)
// {
// sumTime += el;
// }
//
// cout << endl << "Avg time = " << round(sumTime / (launchIter - warmupLaunches)) << endl;
//
// //Event variables destruction (lol)
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
//
// cout << "Done" << endl;
// return 0;
//}
|
21,694 | /* Matrix normalization.
* Compile with "gcc matrixNorm.c"
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
/* Program Parameters */
#define MAXN 10000 /* Matrix size */
int BLOCK_SIZE = 256;
int N=6000;
/* Matrices */
volatile float A[MAXN][MAXN], B[MAXN][MAXN];
float h_a[MAXN][MAXN], h_b[MAXN][MAXN];
/* Initialize A and B: A (and its host mirror h_a) gets random values in
 * [0, ~2), B and h_b are zeroed. Seeds rand() from the wall clock. */
void initialize_inputs() {
    srand((unsigned)time(NULL));
    for (int r = 0; r < N; r++) {
        for (int c = 0; c < N; c++) {
            float v = (float)rand() / 32768.0;
            A[r][c] = v;
            h_a[r][c] = v;
            B[r][c] = 0.0;
            h_b[r][c] = 0.0;
        }
    }
}
/* Kernel function: normalizes one column of A per block (mean 0, stddev 1).
 * Launch with gridDim.x == n and a power-of-two blockDim.x <= 1024 — the
 * tree reductions below index partials[tid + stride] and assume it.
 * Requires n <= MAXN (fullCol is MAXN floats of shared memory).
 *
 * BUG FIX vs. original: a __syncthreads() was missing between reading
 * partials[0] for mu and overwriting partials[] for the sigma pass; a fast
 * thread 0 could store its new partial into partials[0] while another
 * thread was still reading the old value.
 */
__global__ void matrixNorm(float *A, float *B, int n) {
    int col = blockIdx.x;
    int row, stride;
    int tid = threadIdx.x;
    float mu, sigma, partial = 0; // Mean and Standard Deviation
    __shared__ float partials[1024], fullCol[MAXN];
    // Stage the working column in shared memory and accumulate a
    // per-thread partial sum (each thread owns rows tid, tid+blockDim, ...).
    for (row = threadIdx.x; row < n; row += blockDim.x) {
        fullCol[row] = A[row * MAXN + col];
        partial += fullCol[row];
    }
    partials[tid] = partial;
    __syncthreads();
    // Tree reduction for the column sum.
    for (stride = 1; stride < blockDim.x; stride *= 2) {
        if (tid % (2 * stride) == 0) {
            partials[tid] += partials[tid + stride];
        }
        __syncthreads();
    }
    // Every thread reads the reduced sum; compute the mean.
    mu = partials[0] / n;
    // Barrier before partials[] is reused for the variance reduction
    // (see BUG FIX note in the header comment).
    __syncthreads();
    partial = 0;
    // Per-thread partial sum of squared deviations (own rows only, so no
    // cross-thread reads of fullCol).
    for (row = threadIdx.x; row < n; row += blockDim.x) {
        partial += powf(fullCol[row] - mu, 2.0f);
    }
    partials[tid] = partial;
    __syncthreads();
    // Tree reduction for variance * n.
    for (stride = 1; stride < blockDim.x; stride *= 2) {
        if (tid % (2 * stride) == 0) {
            partials[tid] += partials[tid + stride];
        }
        __syncthreads();
    }
    // Standard deviation; no thread writes partials after this point, so
    // no further barrier is needed.
    sigma = sqrtf(partials[0] / n);
    // Normalize the column into B; a constant column (sigma == 0) maps to 0.
    for (row = threadIdx.x; row < n; row += blockDim.x) {
        if (sigma == 0.0f) {
            B[row * MAXN + blockIdx.x] = 0.0f;
        } else {
            B[row * MAXN + blockIdx.x] = (fullCol[row] - mu) / sigma;
        }
    }
}
/* CPU reference: normalize each column of A into B (subtract the column
 * mean, divide by the column standard deviation; constant columns map to 0).
 */
void matrixNormSerial() {
    printf("Computing Serially.\n");
    for (int c = 0; c < N; c++) {
        // Column mean.
        float mean = 0.0;
        for (int r = 0; r < N; r++)
            mean += A[r][c];
        mean /= (float) N;
        // Column standard deviation.
        float stddev = 0.0;
        for (int r = 0; r < N; r++)
            stddev += powf(A[r][c] - mean, 2.0);
        stddev /= (float) N;
        stddev = sqrt(stddev);
        // Normalize the column.
        for (int r = 0; r < N; r++) {
            if (stddev == 0.0)
                B[r][c] = 0.0;
            else
                B[r][c] = (A[r][c] - mean) / stddev;
        }
    }
}
/* Sweep block sizes 16..1024 (powers of two), running the matrixNorm kernel
 * once per size and printing runtime and speedup over the serial baseline.
 *
 * BUG FIX vs. original: it called cudaFreeHost(h_a)/cudaFreeHost(h_b), but
 * h_a/h_b are statically allocated global arrays, not cudaMallocHost
 * allocations — passing them to cudaFreeHost is invalid. Those calls are
 * removed; the CUDA events are now destroyed instead of leaked.
 */
void runTest(float serial) {
    // Allocate memory space on the device
    float *d_a, *d_b;
    cudaMalloc((void **) &d_a, sizeof(float)*MAXN*MAXN);
    cudaMalloc((void **) &d_b, sizeof(float)*MAXN*MAXN);
    // copy matrix A from host to device memory
    cudaMemcpy(d_a, h_a, sizeof(float)*MAXN*MAXN, cudaMemcpyHostToDevice);
    int i;
    // some events to count the execution time
    cudaEvent_t cstart, cstop;
    cudaEventCreate(&cstart);
    cudaEventCreate(&cstop);
    float gpu_elapsed_time_ms;
    // One kernel launch per power-of-two block size.
    for (i = 16; i <= 1024; i *= 2) {
        dim3 dimGrid(N, 1, 1);   // one block per column
        dim3 dimBlock(i, 1, 1);
        cudaEventRecord(cstart, 0);
        matrixNorm<<<dimGrid, dimBlock>>>(d_a, d_b, N);
        cudaEventRecord(cstop, 0);
        // cudaEventSynchronize blocks until the kernel and stop event finish.
        cudaEventSynchronize(cstop);
        // Transfer results from device to host
        cudaMemcpy(h_b, d_b, sizeof(float)*MAXN*MAXN, cudaMemcpyDeviceToHost);
        // compute time elapse on GPU computing
        cudaEventElapsedTime(&gpu_elapsed_time_ms, cstart, cstop);
        printf("Threads = %i, Runtime = %g, Speedup = %g\n", i, (float)gpu_elapsed_time_ms, serial/gpu_elapsed_time_ms);
    }
    cudaFree(d_a);
    cudaFree(d_b);
    // h_a/h_b are static globals — they must NOT be passed to cudaFreeHost.
    cudaEventDestroy(cstart);
    cudaEventDestroy(cstop);
}
/* Time the serial reference implementation with gettimeofday and return the
 * elapsed time in milliseconds. */
float runSerial(){
    struct timeval start, stop; /* Elapsed times using gettimeofday() */
    struct timezone tzdummy;
    unsigned long long runtime;
    /* Start Clock */
    printf("\n---------------------------------------------\n");
    printf("\nStarting clock.\n\n");
    gettimeofday(&start, &tzdummy);
    // Run the CPU baseline normalization.
    matrixNormSerial();
    /* Stop Clock */
    gettimeofday(&stop, &tzdummy);
    // Elapsed microseconds, then reported in milliseconds.
    runtime = (unsigned long long)(stop.tv_sec - start.tv_sec) * 1000000 + (stop.tv_usec - start.tv_usec);
    /* Display timing results */
    printf("Runtime = %g ms.\n", (float)runtime/(float)1000);
    printf("\nStopped clock.");
    printf("\n---------------------------------------------\n");
    return ((float)runtime/(float)1000);
}
/* Entry point: parses [matrixDimension] [numThreadsPerBlock], runs the
 * serial baseline, then either a block-size sweep (BLOCK_SIZE == 0) or a
 * single GPU run, and spot-checks row 100 of the result.
 *
 * BUG FIX vs. original: cudaFreeHost(h_a)/cudaFreeHost(h_b) were called on
 * statically allocated global arrays (never cudaMallocHost'd), which is
 * invalid. Removed; CUDA events are now destroyed instead of leaked.
 */
int main(int argc, char **argv) {
    /* Timing variables */
    if (argc == 3) {
        BLOCK_SIZE = atoi(argv[2]);
    }
    if (argc >= 2) {
        N = atoi(argv[1]);
        if (N < 1 || N > MAXN) {
            printf("N = %i is out of range.\n", N);
            exit(0);
        }
    }
    if (argc == 1) {
        printf("Usage: %s [matrixDimension] [numThreadsPerBlock]\n",
               argv[0]);
        printf("Using defaults matrixDimension=%i, numThreadsPerBlock=%i\n", N, BLOCK_SIZE);
    } else {
        printf("Matrix Size = %i\n", N);
        if (BLOCK_SIZE == 0) {
            printf("Running Test \n\n");
        } else {
            printf("Block Size = %i\n", BLOCK_SIZE);
        }
    }
    /* Initialize A and B */
    initialize_inputs();
    float serial = runSerial();
    if (BLOCK_SIZE == 0) {
        // BLOCK_SIZE 0 means "sweep all block sizes" — runTest exits here.
        runTest(serial);
        exit(0);
    } else {
        // Allocate memory space on the device
        float *d_a, *d_b;
        cudaMalloc((void **) &d_a, sizeof(float)*MAXN*MAXN);
        cudaMalloc((void **) &d_b, sizeof(float)*MAXN*MAXN);
        // copy matrix A from host to device memory
        cudaMemcpy(d_a, h_a, sizeof(float)*MAXN*MAXN, cudaMemcpyHostToDevice);
        // some events to count the execution time
        cudaEvent_t cstart, cstop;
        cudaEventCreate(&cstart);
        cudaEventCreate(&cstop);
        float gpu_elapsed_time_ms;
        dim3 dimGrid(N, 1, 1);           // one block per column
        dim3 dimBlock(BLOCK_SIZE, 1, 1);
        /* Start Clock */
        printf("\n---------------------------------------------\n");
        printf("\nStarting Cuda clock.\n\n");
        cudaEventRecord(cstart, 0);
        matrixNorm<<<dimGrid, dimBlock>>>(d_a, d_b, N);
        cudaEventRecord(cstop, 0);
        // Blocks until the kernel and the stop event complete.
        cudaEventSynchronize(cstop);
        // Transfer results from device to host
        cudaMemcpy(h_b, d_b, sizeof(float)*MAXN*MAXN, cudaMemcpyDeviceToHost);
        // compute time elapse on GPU computing
        cudaEventElapsedTime(&gpu_elapsed_time_ms, cstart, cstop);
        printf("Time elapsed on matrix norm on GPU: %f ms.\n\n", gpu_elapsed_time_ms);
        printf("Runtime = %g ms.\n", (float)gpu_elapsed_time_ms);
        printf("\nStopped clock.");
        printf("\n---------------------------------------------\n");
        printf("Speedup = %g\n", serial/gpu_elapsed_time_ms);
        cudaFree(d_a);
        cudaFree(d_b);
        // h_a/h_b are static globals — do NOT pass them to cudaFreeHost.
        cudaEventDestroy(cstart);
        cudaEventDestroy(cstop);
    }
    int i;
    printf("Spot check for correctness on row 100, cols 0-9: \n");
    for (i = 0; i < 10; i++) {
        printf("B: %5.2f b_h: %5.2f\n", B[100][i], h_b[100][i]);
    }
    exit(0);
} |
// Paint pixels near the line segment defined by points[] = {x1, y1, x2, y2}
// into a planar RGBA image (R plane, then G, B, A, each w*h bytes).
__global__ void plotLines(unsigned char* result, int w, int h, int* points, unsigned char blue, unsigned char green, unsigned char read) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    // Threads outside the image do nothing.
    if (x >= w || y >= h)
        return;
    const int pixel = y * w + x;
    const int plane = w * h;  // size of one color plane
    // Line endpoints.
    const int x1 = points[0];
    const int y1 = points[1];
    const int x2 = points[2];
    const int y2 = points[3];
    // Implicit line equation ax + by + c = 0 through the two endpoints.
    const int a = y1 - y2;
    const int b = x2 - x1;
    const int c = (x1 - x2) * y1 + (y2 - y1) * x1;
    // Signed, UNNORMALIZED distance of this pixel from the line.
    const double diff = a * x + b * y + c;
    // NOTE(review): the asymmetric band (-10, 500) is not divided by the
    // line length, so painted thickness varies with segment length —
    // confirm this is intended.
    if (-10 < diff && diff < 500) {
        result[pixel + 2 * plane] = blue;   // B plane
        result[pixel + 1 * plane] = green;  // G plane
        result[pixel + 0 * plane] = read;   // R plane
        result[pixel + 3 * plane] = 255;    // A plane: fully opaque
    }
    return;
}
|
21,696 | #include <iostream>
#include "cuda_runtime_api.h"
using namespace std;
// Query the first CUDA device and print its compute capability as
// "<major><minor>" (no separator, no newline — suitable for build scripts).
//
// BUG FIX vs. original: `count` was read uninitialized if
// cudaGetDeviceCount failed, and cudaGetDeviceProperties was unchecked, so
// garbage could be printed on failure.
int main(int argc, char ** argv) {
    int count = 0;  // stays 0 if the call below fails
    cudaGetDeviceCount(&count);
    if (count == 0) {
        std::cerr << "Could not find a CUDA device";
        return 1;
    }
    if (count != 1) {
        std::cerr << "Warning: Expected exactly one CUDA device, got " << count;
    }
    cudaDeviceProp prop;
    // Only print the capability if the query actually succeeded.
    if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess) {
        std::cerr << "Could not query CUDA device properties";
        return 1;
    }
    std::cout << prop.major << prop.minor;
    return 0;
} |
21,697 | #include <stdio.h>
/* Device kernel: stores a + b at the device address c. */
__global__ void add (int a, int b, int *c) {
*c=a+b;
}
// Launch a single-thread kernel that adds 3+4 on the device, copy the
// result back, and print it.
int main(int argc, char **argv) {
    int result;
    int *d_result;
    /* Allocate memory on device */
    /* Note, the pointer returned is *not* valid on the host */
    /* and dereferencing it will not work */
    cudaMalloc((void **)&d_result, sizeof(int));
    add<<<1,1>>>(3, 4, d_result);
    // Blocking copy — also synchronizes with the kernel above.
    cudaMemcpy(&result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
    printf("3+4=%d\n", result);
    cudaFree(d_result);
    return 0;
}
|
21,698 | #include <stdio.h>
#define N 256
#define T 4
// Each thread writes its own global index into A, guarded against overrun.
__global__ void vecAdd(int *A){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    A[idx] = idx;
}
// Fill an N-element device array with its indices on the GPU, copy it back,
// and print 20 values per line.
//
// BUG FIX vs. original: the host-to-device copy of the uninitialized array
// `a` is removed — the kernel writes every element (blocks * T == N), so
// the copy was dead work that also trips memory-checking tools.
int main(int argc, char *argv[]){
    int i;
    int blocks = N/T;
    int size = N*sizeof(int);
    int a[N], *devA;
    cudaMalloc( (void**) &devA, size);
    vecAdd<<<blocks, T>>>(devA);
    // Blocking copy — also synchronizes with the kernel.
    cudaMemcpy( a, devA, size, cudaMemcpyDeviceToHost);
    cudaFree(devA);
    for(i=0; i<N; i++){
        if(i != 0 && i%20 == 0) printf("\n");
        printf("%d ", a[i]);
    }
    printf("\n");
    return 0;
}
|
21,699 | #include "includes.h"
using namespace std;
// Naive dense matrix multiply: each thread computes one element of
// C = A * B for N x N row-major matrices.
__global__ void matrixMultiplicationKernel(long* A, long* B, long* C, long N) {
    const long row = (blockIdx.y * blockDim.y) + threadIdx.y;
    const long col = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (row < N && col < N) {
        // Dot product of row `row` of A with column `col` of B.
        long acc = 0;
        for (long k = 0; k < N; k++) {
            acc += A[row * N + k] * B[k * N + col];
        }
        C[row * N + col] = acc;
    }
} |
21,700 | #include <iostream>
#include <string>
#include <iomanip>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// #define imin(a, b) (a<b?a:b)
//定义X,Y,Z各维的长度
const int dimX = 10;
const int dimY = 10;
const int dimZ = 10;
const int SIZE = dimX * dimY * dimZ;
//设置每个线程块中线程数量,此处设置三维一样
const int threadPerBlock = 32;
//设置迭代次数
const int times = 90;
//设置stencil边界处邻居的值
__device__ const float BORDER = 0.0;
int count = 0;
// Number of blocks needed to cover `dim` elements at `threadPerBlock`
// threads per block (ceiling division), avoiding launching excess blocks.
int blockPerGrid(const int dim, const int threadPerBlock)
{
    // One thread per stencil cell for now, so the count cannot be capped.
    return (dim + threadPerBlock - 1) / threadPerBlock;
}
//错误处理
#define CHECK_ERROR(error) checkCudaError(error, __FILE__, __LINE__)
#define CHECK_STATE(msg) checkCudaState(msg, __FILE__, __LINE__)
// Abort with file/line context when a CUDA API call returned an error.
inline void checkCudaError(cudaError_t error, const char *file, const int line)
{
    if (error == cudaSuccess) {
        return;
    }
    std::cerr << "CUDA CALL FAILED:" << file << "( " << line << ")- " << cudaGetErrorString(error) << std::endl;
    exit(EXIT_FAILURE);
}
// Abort with a message and file/line context if the most recent CUDA
// operation (e.g. a kernel launch) left a pending error.
inline void checkCudaState(const char *msg, const char *file, const int line)
{
    cudaError_t error = cudaGetLastError();
    if (error == cudaSuccess) {
        return;
    }
    std::cerr << "---" << msg << " Error---" << std::endl;
    std::cerr << file << "( " << line << ")- " << cudaGetErrorString(error) << std::endl;
    exit(EXIT_FAILURE);
}
// Map an (x, y, z) coordinate to its linear index, with x the
// fastest-varying dimension, then y, then z. Each coordinate is wrapped
// into [0, dim) via the `(v + dim) % dim` form, so values as low as -dim
// are accepted (e.g. x = -1 maps to dimX - 1).
__device__ __host__ int offset(int x, int y, int z)
{
return (((x + dimX) % dimX) + ((y + dimY) % dimY) * dimX + ((z + dimZ) % dimZ) * dimX * dimY);
}
// One 7-point Jacobi stencil step: each in-range thread averages its cell
// with its six axis neighbors (out-of-grid neighbors contribute BORDER).
//
// BUG FIX vs. original: the kernel had no bounds guard even though main()
// launches 32x32 thread blocks over a 10x10x10 domain. Because offset()
// wraps coordinates modulo the dims, out-of-range threads aliased onto
// valid cells and raced with the legitimate writers, corrupting the output.
__global__ void kernel(double *dev_grid_in, double *dev_grid_out)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int z = threadIdx.z + blockIdx.z * blockDim.z;
    // Guard: threads outside the domain must not write (offset() would
    // wrap them onto in-range cells).
    if (x >= dimX || y >= dimY || z >= dimZ) {
        return;
    }
    // Gather the stencil: center plus six neighbors, BORDER at grid edges.
    double center = dev_grid_in[offset(x, y, z)];
    double up = (z < (dimZ - 1)) ? dev_grid_in[offset(x, y, z + 1)] : BORDER;
    double down = (z > 0) ? dev_grid_in[offset(x, y, z - 1)] : BORDER;
    double west = (x > 0) ? dev_grid_in[offset(x - 1, y, z)] : BORDER;
    double east = (x < (dimX - 1)) ? dev_grid_in[offset(x + 1, y, z)] : BORDER;
    double south = (y > 0) ? dev_grid_in[offset(x, y - 1, z)] : BORDER;
    double north = (y < (dimY - 1)) ? dev_grid_in[offset(x, y + 1, z)] : BORDER;
    // Unweighted average of the seven values.
    dev_grid_out[offset(x, y, z)] = (center + up + down + west + east + south + north) * (1.0 / 7.0);
}
// Initialize a grid: boundary cells (any coordinate 0, or dim-1 on its
// axis) are set to 7, interior cells to 0. Also advances the file-level
// `count` once per cell, as the original did.
void init(double *grid, int dimX, int dimY, int dimZ)
{
    for (int z = 0; z < dimZ; ++z) {
        for (int y = 0; y < dimY; ++y) {
            for (int x = 0; x < dimX; ++x) {
                bool boundary = (x * y * z == 0) || (x == dimX - 1) || (y == dimY - 1) || (z == dimZ - 1);
                grid[offset(x, y, z)] = boundary ? 7 : 0;
                count++;
            }
        }
    }
}
// Dump the grid layer by layer (z-slices), one row of x values per line,
// fixed to three decimal places.
void print(double *grid)
{
    for (int layer = 0; layer < dimZ; ++layer) {
        std::cout << layer << ":\n\n";
        for (int row = 0; row < dimY; ++row) {
            for (int col = 0; col < dimX; ++col) {
                std::cout << std::fixed << std::setprecision(3) << grid[offset(col, row, layer)] << "\t";
            }
            std::cout << std::endl;
        }
        std::cout << std::endl;
    }
}
// Print a labeled separator line; when `test` is non-zero, also print its
// value on the following line.
void debug(int test, std::string str)
{
    std::cout << "-----------" << str << "--------------" << std::endl;
    if (test != 0) {
        std::cout << test << std::endl;
    }
}
// Drive `times` Jacobi stencil iterations on the GPU, ping-ponging the two
// device buffers, then print the result grid and the elapsed time.
int main(void)
{
    CHECK_ERROR(cudaSetDevice(0));
    // A block cannot exceed 1024 threads, so the last dimension is fixed at 1.
    dim3 blocks(threadPerBlock, threadPerBlock, 1);
    // NOTE(review): blocks.z == 1 makes grids.z == dimZ blocks of one
    // z-layer each; x/y are over-provisioned 32-wide blocks for a 10-wide
    // domain — the kernel must bounds-guard. Confirm launch shape intended.
    dim3 grids(blockPerGrid(dimX, blocks.x), blockPerGrid(dimY, blocks.y), blockPerGrid(dimZ, blocks.z));
    double *grid_in, *grid_out;
    grid_in = (double *)malloc(SIZE * sizeof(double));
    grid_out = (double *)malloc(SIZE * sizeof(double));
    double *dev_grid_in, *dev_grid_out;
    CHECK_ERROR(cudaMalloc((void**)&dev_grid_in, SIZE * sizeof(double)));
    CHECK_ERROR(cudaMalloc((void**)&dev_grid_out, SIZE * sizeof(double)));
    init(grid_in, dimX, dimY, dimZ);
    init(grid_out, dimX, dimY, dimZ);
    // debug(0, "input");
    // print(grid_in);
    // Time the GPU work (note: this window includes the H2D/D2H copies).
    cudaEvent_t start, stop;
    CHECK_ERROR(cudaEventCreate(&start));
    CHECK_ERROR(cudaEventCreate(&stop));
    CHECK_ERROR(cudaEventRecord(start, 0));
    CHECK_ERROR(cudaEventSynchronize(start));
    CHECK_ERROR(cudaMemcpy(dev_grid_in, grid_in, SIZE * sizeof(double), cudaMemcpyHostToDevice));
    CHECK_ERROR(cudaMemcpy(dev_grid_out, grid_out, SIZE * sizeof(double), cudaMemcpyHostToDevice));
    // Ping-pong: after each step the freshly written buffer becomes the
    // input of the next step, so dev_grid_in always names the latest result.
    for (int i=0; i<times; ++i) {
        kernel<<<grids, blocks>>>(dev_grid_in, dev_grid_out);
        std::swap(dev_grid_in, dev_grid_out);
    }
    cudaDeviceSynchronize();
    CHECK_STATE("kernel call");
    CHECK_ERROR(cudaMemcpy(grid_in, dev_grid_in, SIZE * sizeof(double), cudaMemcpyDeviceToHost));
    // Stop timing and compute the elapsed milliseconds.
    CHECK_ERROR(cudaEventRecord(stop, 0));
    CHECK_ERROR(cudaEventSynchronize(stop));
    float elapsedTime;
    CHECK_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
    debug(0, "output");
    print(grid_in);
    std::cout << "Time elapsed: " << std::fixed << std::setprecision(6) << elapsedTime << " ms" << std::endl;
    CHECK_ERROR(cudaEventDestroy(start));
    CHECK_ERROR(cudaEventDestroy(stop));
    free(grid_in);
    free(grid_out);
    CHECK_ERROR(cudaFree(dev_grid_in));
    CHECK_ERROR(cudaFree(dev_grid_out));
    getchar();
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.