serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
13,801 | #include "includes.h"
__global__ void saxpy(int * a, int * b, int * c)
{
// Elementwise c = 2*a + b. One thread per element; N comes from includes.h.
const int gid = blockDim.x * blockIdx.x + threadIdx.x;
// Guard the grid tail so over-provisioned launches stay in bounds.
if ( gid < N )
c[gid] = 2 * a[gid] + b[gid];
}
__global__ void add_one(int *x) {
// Increment one element per thread. No bounds guard: the launch must
// supply exactly one thread per element of x.
const int i = blockDim.x * blockIdx.x + threadIdx.x;
x[i] += 1;
}
int main() {
// Demo driver for add_one: fill a host array, increment it on the GPU,
// and copy the result back.
int x[256];
// BUG FIX: the original copied an uninitialized host array to the device,
// so the kernel incremented indeterminate values.
for (int i = 0; i < 256; ++i) x[i] = i;
int* x_gpu;
cudaMalloc(&x_gpu, 256 * sizeof(int));
cudaMemcpy(x_gpu, x, 256 * sizeof(int), cudaMemcpyHostToDevice);
add_one<<<1, 256>>>(x_gpu);
// BUG FIX: copy the result back (the original discarded it; the blocking
// memcpy also synchronizes with the kernel) and free the device buffer.
cudaMemcpy(x, x_gpu, 256 * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(x_gpu);
return 0;
}
|
extern "C" __global__ void directCoulombSumReference(float4* atomInfo, int numberOfAtoms, float gridSpacing, int gridSize, float* energyGrid)
{
// One thread per (x,y,z) lattice point: accumulate the Coulomb potential
// contributed by every atom (atomInfo[i].xyz = position, .w = charge)
// into energyGrid at that point.
int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
int zIndex = blockIdx.z*blockDim.z + threadIdx.z;
// BUG FIX: the third clause tested xIndex a second time, so zIndex was
// never bounds-checked and out-of-range z threads wrote past the grid.
if ((xIndex >= gridSize) || (yIndex >= gridSize) || (zIndex >= gridSize))
return;
int outIndex = blockDim.y*gridDim.y * blockDim.x*gridDim.x * zIndex + blockDim.x*gridDim.x * yIndex + xIndex;
float coordX = gridSpacing * xIndex;
float coordY = gridSpacing * yIndex;
float coordZ = gridSpacing * zIndex;
float energyValue = 0.0f;
for (int i = 0; i < numberOfAtoms; i++)
{
float dX = coordX - atomInfo[i].x;
float dY = coordY - atomInfo[i].y;
float dZ = coordZ - atomInfo[i].z;
// rsqrtf: single-precision overload; the original rsqrt promoted to
// double and back for no benefit.
float partialResult = rsqrtf(dX * dX + dY * dY + dZ*dZ);
energyValue += atomInfo[i].w * partialResult;
}
energyGrid[outIndex] += energyValue;
}
|
13,804 | #include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
/* change dimension size as needed */
const int dimension = 32 ;
const int blocksize = 10;
const int K = 1;
struct timeval tv;
__global__ void gpuMM(double *A, double *B, double *C, int N)
{
// Matrix multiplication for NxN matrices C = A*B (row-major).
// Each thread computes a single element of C.
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
// Guard the grid tail so launches that over-provision threads do not
// read or write out of bounds (the original had no guard).
if (row >= N || col >= N)
return;
// BUG FIX: accumulate in double — the original used a float accumulator,
// silently discarding half the precision of the double inputs.
double sum = 0.0;
for (int n = 0; n < N; ++n)
sum += A[row*N+n]*B[n*N+col];
C[row*N+col] = sum;
}
double timestamp()
{
// Wall-clock time in seconds with microsecond resolution.
// Note: fills the file-global `tv`, exactly as the original did.
gettimeofday(&tv, NULL);
return tv.tv_sec + tv.tv_usec / 1000000.0;
}
int main(int argc, char *argv[])
{
// Multiply two random dimension x dimension matrices on the GPU and
// report the kernel time.
int i, j;
double *A, *B, *C, start, end;
double *Ad, *Bd, *Cd;
A = (double*)malloc(dimension*dimension*sizeof(double));
B = (double*)malloc(dimension*dimension*sizeof(double));
C = (double*)malloc(dimension*dimension*sizeof(double));
srand(292);
for(i = 0; i < dimension; i++)
for(j = 0; j < dimension; j++)
{
A[dimension*i+j] = (rand()/(RAND_MAX + 1.0));
B[dimension*i+j] = (rand()/(RAND_MAX + 1.0));
C[dimension*i+j] = 0.0;
}
cudaMalloc( (void**)&Ad, dimension*dimension*sizeof(double) );
cudaMemcpy( Ad, A, dimension*dimension*sizeof(double), cudaMemcpyHostToDevice );
cudaMalloc( (void**)&Bd, dimension*dimension*sizeof(double) );
cudaMemcpy( Bd, B, dimension*dimension*sizeof(double), cudaMemcpyHostToDevice );
cudaMalloc( (void**)&Cd, dimension*dimension*sizeof(double) );
dim3 threadBlock(blocksize,blocksize);
// BUG FIX: the grid was hard-coded to K x K = 1 x 1 blocks, so with
// blocksize = 10 only a 10x10 corner of the 32x32 result was ever
// computed. Use ceiling division so the grid covers the whole matrix.
int gridSide = (dimension + blocksize - 1) / blocksize;
dim3 grid(gridSide, gridSide);
start = timestamp();
gpuMM<<<grid,threadBlock>>>( Ad,Bd,Cd,dimension);
// BUG FIX: kernel launches are asynchronous — without a sync the timer
// measured only the launch, not the computation.
cudaDeviceSynchronize();
end = timestamp();
cudaMemcpy(C,Cd,dimension*dimension*sizeof(double),cudaMemcpyDeviceToHost);
printf("\nsecs:%f\n", end-start);
free(A);
free(B);
free(C);
cudaFree(Ad);
cudaFree(Bd);
cudaFree(Cd);
return 0;
}
|
13,805 | #include <stdio.h>
__global__
void deviceKernel(int *a, int N)
{
// Grid-stride loop: each thread sets a disjoint subset of a[0..N) to 1,
// so any launch geometry covers the whole array.
const int step = blockDim.x * gridDim.x;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += step)
{
a[i] = 1;
}
}
void hostFunction(int *a, int N)
{
// Serial CPU baseline: set every element of a[0..N) to 1.
int *end = a + N;
for (int *p = a; p != end; ++p)
{
*p = 1;
}
}
int main()
{
// Unified-memory experiment: allocate managed memory, then touch it from
// the CPU and/or GPU (toggle the GPU/CPU defines below) and observe page
// migration / faulting behavior with `nsys`.
int N = 2<<24;
size_t size = N * sizeof(int);
int *a;
cudaMallocManaged(&a, size);
#define GPU
#define CPU
#ifdef CPU
// CPU first touch.
hostFunction(a, N);
printf("CPU Execution done\n");
#endif
#ifdef GPU
// GPU touch: size the grid from the current device's max block size.
int dev;
cudaDeviceProp prop;
cudaGetDevice(&dev);
cudaGetDeviceProperties(&prop, dev);
size_t blocks = (N + prop.maxThreadsPerBlock - 1) / prop.maxThreadsPerBlock;
deviceKernel<<<blocks, prop.maxThreadsPerBlock>>>(a, N);
cudaDeviceSynchronize();
printf("GPU Execution done\n");
#endif
cudaFree(a);
}
|
13,806 | /******************************************************************************
* FILE: lakegpu.cu
*
* Group Info:
* agoel5 Anshuman Goel
* kgondha Kaustubh Gondhalekar
* ndas Neha Das
*
* LAST REVISED: 9/19/2017
******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#define VSQR 0.1
#define TSCALE 1.0
#define __DEBUG
#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)
/**************************************
* void __cudaSafeCall(cudaError err, const char *file, const int line)
* void __cudaCheckError(const char *file, const int line)
*
* These routines were taken from the GPU Computing SDK
* (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
**************************************/
extern int tpdt(double *t, double dt, double end_time);
__device__ double fn(double p, double t)
{
// Exponentially decaying forcing term for the pebble disturbance p at time t.
// BUG FIX: expf() is the single-precision overload; applied to double
// arguments it silently truncated to float. Use exp() to keep full
// double precision.
return -exp(-TSCALE * t) * p;
}
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
// Abort with a source-located diagnostic when a CUDA runtime call failed.
// Compiled out entirely unless __DEBUG is defined.
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
do
{
if ( err == cudaSuccess )
break; // call succeeded — nothing to report
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
} while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
// Poll the sticky CUDA error state (e.g. after a kernel launch) and abort
// with a source-located diagnostic if anything failed.
// Compiled out entirely unless __DEBUG is defined.
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
do
{
cudaError_t err = cudaGetLastError();
if ( err == cudaSuccess )
break; // no pending error
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
// More careful checking. However, this will affect performance.
// Comment if not needed.
/*err = cudaThreadSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}*/
} while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
return;
}
//updates the grid state from time t to time t+dt
// Advances the n*n lake grid one step of size dt using a 13-point stencil.
// un/uc/uo = new/current/old surface heights (row-major, n*n doubles);
// pebbles = per-cell forcing amplitudes. All scalar parameters are passed
// as single-element device pointers and dereferenced in-kernel.
// Expected launch: 1-D grid of *n_blocks blocks of *n_threads threads;
// thread 0 of the last block mops up any cells beyond blocks*threads.
__global__ static void evolve(double *un, double *uc, double *uo, double *pebbles, int *n, double *h, double *dt, double *t, int *n_blocks, int *n_threads)
{
int i, j;
const unsigned int tid = threadIdx.x;
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// gid preserves this thread's own cell index; idx is reused below for the
// leftover sweep.
unsigned int gid = idx;
//assuming threads*blocks is not a power of 2,
//the leftover variable stores the excess number of grid points we need to compute
int leftover = (*n * *n) - (*n_threads * *n_blocks);
// Convert the flat index to 2-D (row i, column j) coordinates.
i = idx / *n;
j = idx % *n;
//values at lake edge points are set to zero
// (a two-cell border, since the 13-point stencil reaches two cells out)
if( i == 0 || i == *n - 1 || j == 0 || j == *n - 1 ||
i == *n - 2 || i == 1 || j == *n - 2 || j == 1)
{
un[idx] = 0.;
}
else
{
//compute the 13-point stencil function for every grid point
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(*dt * *dt) *
(( uc[idx-1] // WEST
+ uc[idx+1] // EAST
+ uc[idx + *n] // SOUTH
+ uc[idx - *n] // NORTH
+ 0.25*( uc[idx - *n - 1 ] // NORTHWEST
+ uc[idx - *n + 1 ] // NORTHEAST
+ uc[idx + *n - 1 ] // SOUTHWEST
+ uc[idx + *n + 1 ] // SOUTHEAST
)
+ 0.125*( uc[idx - 2 ] // WESTWEST
+ uc[idx + 2 ] // EASTEAST
+ uc[idx - 2 * *n ] // NORTHNORTH
+ uc[idx + 2 * *n ] // SOUTHSOUTH
)
- 6 * uc[idx])/(*h * *h) + fn(pebbles[idx],*t));
}
//thread 0 in the last block handles computation for the leftover grid points
if (blockIdx.x == *n_blocks - 1 && tid == 0 && leftover > 0)
{
// Sweep the tail cells [n*n - leftover, n*n) serially.
for( idx = *n * *n - 1; idx>= *n * *n - leftover; idx--)
{
i = idx / *n;
j = idx % *n;
if( i == 0 || i == *n - 1 || j == 0 || j == *n - 1 ||
i == *n - 2 || i == 1 || j == *n - 2 || j == 1)
{
un[idx] = 0.;
}
else
{
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(*dt * *dt) *
(( uc[idx-1] // WEST
+ uc[idx+1] // EAST
+ uc[idx + *n] // SOUTH
+ uc[idx - *n] // NORTH
+ 0.25*( uc[idx - *n - 1 ] // NORTHWEST
+ uc[idx - *n + 1 ] // NORTHEAST
+ uc[idx + *n - 1 ] // SOUTHWEST
+ uc[idx + *n + 1 ] // SOUTHEAST
)
+ 0.125*( uc[idx - 2 ] // WESTWEST
+ uc[idx + 2 ] // EASTEAST
+ uc[idx - 2 * *n ] // NORTHNORTH
+ uc[idx + 2 * *n ] // SOUTHSOUTH
)
- 6 * uc[idx])/(*h * *h) + fn(pebbles[idx],*t));
}
}
}
// NOTE(review): __syncthreads() only orders threads within one block; when
// the launch uses multiple blocks, the uc/uo rotation below can race with
// other blocks still reading uc in the stencil above — confirm the intended
// launch geometry makes this safe.
__syncthreads();
//save most recent two time-stamps into uo and uc
uo[gid] = uc[gid];
uc[gid] = un[gid];
//update leftover grid point's timestamps
if (blockIdx.x == *n_blocks - 1 && tid == 0 && leftover > 0)
{
for( idx = *n * *n - 1; idx>= *n * *n - leftover; idx--)
{
uo[idx] = uc[idx];
uc[idx] = un[idx];
}
}
//move the timestamp forward by dt
// NOTE(review): every thread performs this read-modify-write on the shared
// device scalar *t; the host also rewrites t_d each iteration (see run_gpu),
// which appears to mask the race — verify.
(*t) = (*t) + *dt;
}
// simulates the state of the grid after the given time, using a 13-point stencil function
// Simulates the lake state from t=0 to end_time on the GPU using the
// evolve kernel, writing the final surface into u.
//   u  : output buffer (n*n), receives the final grid
//   u0 : grid state at t - dt (initial "old" state)
//   u1 : grid state at t      (initial "current" state)
//   pebbles : per-cell forcing amplitudes
//   n  : grid side length; h : cell spacing; nthreads : threads per block side
// Launch geometry: threads = nthreads^2 per block, blocks = (n/nthreads)^2,
// so blocks*threads == n*n when nthreads divides n; any shortfall is handled
// by the leftover path inside evolve.
void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads)
{
cudaEvent_t kstart, kstop;
float ktime;
/* HW2: Define your local variables here */
// dt chosen relative to the spatial step (CFL-style choice).
double t=0., dt = h / 2.;
int blocks = (int)pow(n / nthreads, 2);
int threads = nthreads * nthreads;
// All kernel parameters — including scalars — are passed through device
// memory, hence the one-element allocations below.
int *blocks_d, *threads_d, *n_d;
double *un_d, *uc_d, *uo_d, *pebs_d, *t_d, *dt_d, *h_d;
if (nthreads > n)
{
printf("Choose threads less than grid dimension\n");
return;
}
//copy host variables to device
cudaMalloc( (void **) &un_d, sizeof(double) * n * n);
cudaMalloc( (void **) &uc_d, sizeof(double) * n * n);
cudaMalloc( (void **) &uo_d, sizeof(double) * n * n);
cudaMalloc( (void **) &pebs_d, sizeof(double) * n * n);
cudaMalloc( (void **) &blocks_d, sizeof(int) * 1 );
cudaMalloc( (void **) &threads_d, sizeof(int) * 1 );
cudaMalloc( (void **) &n_d, sizeof(int) * 1 );
cudaMalloc( (void **) &t_d, sizeof(double) * 1 );
cudaMalloc( (void **) &dt_d, sizeof(double) * 1 );
cudaMalloc( (void **) &h_d, sizeof(double) * 1 );
/* Set up device timers */
CUDA_CALL(cudaSetDevice(0));
CUDA_CALL(cudaEventCreate(&kstart));
CUDA_CALL(cudaEventCreate(&kstop));
/* Start GPU computation timer */
// Note: the H2D copies below are inside the timed region.
CUDA_CALL(cudaEventRecord(kstart, 0));
CUDA_CALL(cudaMemcpy( uc_d, u1, sizeof(double) * n * n, cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy( un_d, u, sizeof(double) * n * n, cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy( uo_d, u0, sizeof(double) * n * n, cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy( pebs_d, pebbles, sizeof(double) * n * n, cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy( blocks_d, &blocks, sizeof(int) * 1, cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy( threads_d, &threads, sizeof(int) * 1, cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy( n_d, &n, sizeof(int) * 1, cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy( h_d, &h, sizeof(double) * 1, cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy( dt_d, &dt, sizeof(double) * 1, cudaMemcpyHostToDevice ));
CUDA_CALL(cudaMemcpy( t_d, &t, sizeof(double) * 1, cudaMemcpyHostToDevice ));
//compute state of the grid over the given time
while(1)
{
evolve<<< blocks, threads >>>(un_d, uc_d, uo_d, pebs_d, n_d, h_d, dt_d, t_d, blocks_d, threads_d);
//exit from the loop if time exceeds final timestamp
// (tpdt advances the host-side clock t by dt; defined elsewhere)
if(!tpdt(&t,dt,end_time))
break;
// Re-sync the device clock to the host clock each step; this blocking
// copy also serializes successive kernel launches.
CUDA_CALL(cudaMemcpy( t_d, &t, sizeof(double) * 1, cudaMemcpyHostToDevice ));
}
CUDA_CALL(cudaMemcpy( u, un_d, sizeof(double) * n * n, cudaMemcpyDeviceToHost ));
/* Stop GPU computation timer */
CUDA_CALL(cudaEventRecord(kstop, 0));
CUDA_CALL(cudaEventSynchronize(kstop));
CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop));
printf("GPU computation: %f msec\n", ktime);
//free resources
cudaFree(un_d);
cudaFree(uc_d);
cudaFree(uo_d);
cudaFree(blocks_d);
cudaFree(threads_d);
cudaFree(pebs_d);
cudaFree(n_d);
cudaFree(t_d);
cudaFree(h_d);
cudaFree(dt_d);
/* timer cleanup */
CUDA_CALL(cudaEventDestroy(kstart));
CUDA_CALL(cudaEventDestroy(kstop));
}
|
13,807 | #include <math.h>
#include <stdio.h>
#include<iostream>
using namespace std;
int main()
{
// Hardy-Cross pipe-network solver: iteratively corrects the flow Q on each
// pipe so that the head loss (c*r*Q^2) around every closed loop sums to
// zero. Nine interior loops (dQ_1..dQ_9) plus two pseudo-loops through the
// 10-unit source (ds1, ds2). The loop-balance residuals are printed at the
// end; all should be ~0 on convergence.
//r value: resistance coefficient of each pipe (named by its end nodes)
double r01= 1;
double r24 = 1;
double r57 = 1;
double r16 = 5;
double r25 = 1;
double r78 = 2;
double r12 = 1;
double r56 = 3;
double r89 = 3;
double r03 = 5;
double r3e = 1;
double r69 = 3;
double r34 = 3;
double r47 = 1;
double r8a = 1;
double rab = 3;
double rad = 2;
double reb = 2;
double rbc = 2;
double rdc = 1;
double rdg = 5;
double r9g = 1;
double rgh = 1;
double ref = 5;
double rcf = 2;
double rfh = 2;
double rS0 = 1;
double rEA = 1;
double rEB = 1;
//Q flow rate: initial guesses satisfying mass balance at each node
double Qin = 2.09457;
double Q01 = Qin / 2.0;
double Q24 = Qin / 8.0;
double Q57 = Qin / 8.0;
double Q16 = Qin / 4.0;
double Q25 = Qin / 8.0;
double Q78 = Qin / 4.0;
double Q12 = Qin / 4.0;
double Q56 = 0;
double Q89 = 0;
double Q03 = Qin / 2.0;
double Q3e = Qin / 2.0;
double Q69 = Qin / 4.0;
double Q34 = 0;
double Q47 = Qin / 8.0;
double Q8a = Qin / 4.0;
double Qab = Qin / 8.0;
double Qad = Qin / 8.0;
double Qeb = 0;
double Qbc = Qin / 8.0;
double Qdc = 0;
double Qdg = Qin / 8.0;
double Q9g = 0;
double Qgh = Qin / 8.0;
double Qef = Qin / 2.0;
double Qcf = Qin / 8.0;
double Qfh = Qin * 5.0 / 8.0;
double QS0 = Qin;
double QEA = Qin / 4.0;
double QEB = Qin * 3.0 / 4.0;
// Per-loop flow corrections for this iteration.
double dQ_1 = 0;
double dQ_2 = 0;
double dQ_3 = 0;
double dQ_4 = 0;
double dQ_5 = 0;
double dQ_6 = 0;
double dQ_7 = 0;
double dQ_8 = 0;
double dQ_9 = 0;
double ds1 = 0;
double ds2 = 0;
// Sign of each flow (direction flag), recomputed every iteration.
int c01 = 1;
int c24 = 1;
int c57 = 1;
int c16 = 1;
int c25 = 1;
int c78 = 1;
int c12 = 1;
int c56 = 1;
int c89 = 1;
int c03 = 1;
int c3e = 1;
int c69 = 1;
int c34 = 1;
int c47 = 1;
int c8a = 1;
int cab = 1;
int cad = 1;
int ceb = 1;
int cbc = 1;
int cdc = 1;
int cdg = 1;
int c9g = 1;
int cgh = 1;
int cef = 1;
int ccf = 1;
int cfh = 1;
int cS0 = 1;
int cEA = 1;
int cEB = 1;
int no_iteration = 10000;
for (int i = 0; i < no_iteration; i++)
{
// Refresh the direction flag of every pipe from the sign of its flow.
c01 = Q01 >= 0 ? 1 : -1;
c24 = Q24 >= 0 ? 1 : -1;
c57 = Q57 >= 0 ? 1 : -1;
c16 = Q16 >= 0 ? 1 : -1;
c25 = Q25 >= 0 ? 1 : -1;
c78 = Q78 >= 0 ? 1 : -1;
c12 = Q12 >= 0 ? 1 : -1;
c56 = Q56 >= 0 ? 1 : -1;
c89 = Q89 >= 0 ? 1 : -1;
c03 = Q03 >= 0 ? 1 : -1;
c3e = Q3e >= 0 ? 1 : -1;
c69 = Q69 >= 0 ? 1 : -1;
c34 = Q34 >= 0 ? 1 : -1;
c47 = Q47 >= 0 ? 1 : -1;
c8a = Q8a >= 0 ? 1 : -1;
cab = Qab >= 0 ? 1 : -1;
cad = Qad >= 0 ? 1 : -1;
ceb = Qeb >= 0 ? 1 : -1;
cbc = Qbc >= 0 ? 1 : -1;
cdc = Qdc >= 0 ? 1 : -1;
cdg = Qdg >= 0 ? 1 : -1;
c9g = Q9g >= 0 ? 1 : -1;
cgh = Qgh >= 0 ? 1 : -1;
cef = Qef >= 0 ? 1 : -1;
ccf = Qcf >= 0 ? 1 : -1;
cfh = Qfh >= 0 ? 1 : -1;
cS0 = QS0 >= 0 ? 1 : -1;
cEA = QEA >= 0 ? 1 : -1;
cEB = QEB >= 0 ? 1 : -1;
// Hardy-Cross correction per loop: dQ = -(sum of signed head losses)
// / (sum of 2*r*|Q| over the SAME edges as the numerator).
dQ_1 = -(c01*r01 *Q01 *Q01 + c12*r12*Q12*Q12 + c24*r24*Q24*Q24 - c34*r34*Q34*Q34 - c03*r03*Q03*Q03)
/ (2 * r01*fabs(Q01) + 2 * r12*fabs(Q12) + 2 * r24*fabs(Q24) + 2 * r34*fabs(Q34) + 2 * r03*fabs(Q03));
dQ_2 = -(c16*r16 *Q16 *Q16 - c56*r56*Q56*Q56 - c25*r25*Q25*Q25 - c12*r12*Q12*Q12)
/ (2 * r16*fabs(Q16) + 2 * r56*fabs(Q56) + 2 * r25*fabs(Q25) + 2 * r12*fabs(Q12));
dQ_3 = -(c25*r25 *Q25 *Q25 + c57*r57*Q57*Q57 - c47*r47*Q47*Q47 - c24*r24*Q24*Q24)
/ (2 * r25*fabs(Q25) + 2 * r57*fabs(Q57) + 2 * r47*fabs(Q47) + 2 * r24*fabs(Q24));
dQ_4 = -(c34*r34 *Q34 *Q34 + c47*r47*Q47*Q47 + c78*r78*Q78*Q78 + c8a*r8a*Q8a*Q8a + cab*rab*Qab*Qab - ceb*reb*Qeb*Qeb - c3e*r3e*Q3e*Q3e)
/ (2 * r34*fabs(Q34) + 2 * r47*fabs(Q47) + 2 * r78*fabs(Q78) + 2 * r8a*fabs(Q8a) + 2 * rab*fabs(Qab) + 2 * reb*fabs(Qeb) + 2 * r3e*fabs(Q3e));
// BUG FIX: loop 5's denominator used r12*fabs(Q12), but pipe 1-2 is not
// in loop 5 — the numerator's edges are {56, 69, 89, 78, 57}, so the
// derivative term must be r69*fabs(Q69).
dQ_5 = -(c56*r56 *Q56 *Q56 + c69*r69*Q69*Q69 - c89*r89*Q89*Q89 - c78*r78*Q78*Q78 - c57*r57*Q57*Q57)
/ (2 * r56*fabs(Q56) + 2 * r69*fabs(Q69) + 2 * r89*fabs(Q89) + 2 * r78*fabs(Q78) + 2 * r57*fabs(Q57));
dQ_6 = -(c89*r89*Q89*Q89 - c9g*r9g*Q9g*Q9g - cdg*rdg*Qdg*Qdg - cad*rad*Qad*Qad - c8a*r8a*Q8a*Q8a)
/ (2 * r89*fabs(Q89) + 2 * r9g*fabs(Q9g) + 2 * rdg*fabs(Qdg) + 2 * rad*fabs(Qad) + 2 * r8a*fabs(Q8a));
dQ_7 = -(cad*rad *Qad *Qad + cdc*rdc*Qdc*Qdc - cbc*rbc*Qbc*Qbc - cab*rab*Qab*Qab)
/ (2 * rad*fabs(Qad) + 2 * rdc*fabs(Qdc) + 2 * rbc*fabs(Qbc) + 2 * rab*fabs(Qab));
dQ_8 = -(ceb*reb *Qeb *Qeb + cbc*rbc*Qbc*Qbc + ccf*rcf*Qcf*Qcf - cef*ref*Qef*Qef)
/ (2 * reb*fabs(Qeb) + 2 * rbc*fabs(Qbc) + 2 * rcf*fabs(Qcf) + 2 * ref*fabs(Qef));
dQ_9 = -(cdg*rdg *Qdg *Qdg + cgh*rgh*Qgh*Qgh - cfh*rfh*Qfh*Qfh - ccf*rcf*Qcf*Qcf - cdc*rdc*Qdc*Qdc)
/ (2 * rdg*fabs(Qdg) + 2 * rgh*fabs(Qgh) + 2 * rfh*fabs(Qfh) + 2 * rcf*fabs(Qcf) + 2 * rdc*fabs(Qdc));
// Pseudo-loops through the fixed 10-unit head source.
ds1 = -(-10 + cS0*rS0 *QS0 *QS0 +c01*r01 *Q01 *Q01 + c16*r16*Q16*Q16 + c69*r69*Q69*Q69 + cEA*rEA*QEA*QEA)
/ (2 * rS0*fabs(QS0) + 2 * r01*fabs(Q01) + 2 * r16*fabs(Q16) + 2 * r69*fabs(Q69) + 2 * rEA*fabs(QEA));
ds2 = -(-10 + cS0*rS0 *QS0 *QS0 + c03*r03 *Q03 *Q03 + c3e*r3e*Q3e*Q3e + cef*ref*Qef*Qef + cfh*rfh*Qfh*Qfh + cEB*rEB*QEB*QEB)
/ (2 * rS0*fabs(QS0) + 2 * r03*fabs(Q03) + 2 * r3e*fabs(Q3e) + 2 * ref*fabs(Qef) + 2 * rfh*fabs(Qfh) + 2 * rEB*fabs(QEB));
// Apply the corrections of every loop that contains each pipe.
Q01 = Q01 + dQ_1 + ds1;
Q24 = Q24 + dQ_1 - dQ_3;
Q57 = Q57 + dQ_3 - dQ_5;
Q16 = Q16 + dQ_2 + ds1;
Q25 = Q25 - dQ_2 + dQ_3;
Q78 = Q78 + dQ_4 - dQ_5;
Q12 = Q12 + dQ_1 - dQ_2;
Q56 = Q56 - dQ_2 + dQ_5;
Q89 = Q89 - dQ_5 + dQ_6;
Q03 = Q03 - dQ_1 + ds2;
Q3e = Q3e - dQ_4 + ds2;
Q69 = Q69 + dQ_5 + ds1;
Q34 = Q34 - dQ_1 + dQ_4;
Q47 = Q47 - dQ_3 + dQ_4;
Q8a = Q8a +dQ_4 - dQ_6;
Qab = Qab + dQ_4 - dQ_7;
Qad = Qad + dQ_7 - dQ_6;
Qeb = Qeb - dQ_4 + dQ_8;
Qbc = Qbc - dQ_7 + dQ_8;
Qdc = Qdc + dQ_7 - dQ_9;
Qdg = Qdg - dQ_6 + dQ_9;
Q9g = Q9g - dQ_6;
Qgh = Qgh + dQ_9;
Qef = Qef - dQ_8 + ds2;
Qcf = Qcf + dQ_8 - dQ_9;
Qfh = Qfh - dQ_9 + ds2;
QS0 = QS0 + ds1 + ds2;
QEA = QEA + ds1;
QEB = QEB + ds2;
}
// Final signed head loss of each pipe.
double P01 = c01*r01* Q01*Q01;
double P24 = c24*r24*Q24*Q24;
double P57 = c57*r57*Q57*Q57;
double P16 = c16*r16*Q16*Q16;
double P25 = c25*r25*Q25*Q25;
double P78 = c78*r78*Q78*Q78;
double P12 = c12*r12*Q12*Q12;
double P56 = c56*r56*Q56*Q56;
double P89 = c89*r89*Q89*Q89;
double P03 = c03*r03*Q03*Q03;
double P3e = c3e*r3e*Q3e*Q3e;
double P69 = c69*r69*Q69*Q69;
double P34 = c34*r34*Q34*Q34;
double P47 = c47*r47*Q47*Q47;
double P8a = c8a*r8a*Q8a*Q8a;
double Pab = cab*rab*Qab*Qab;
double Pad = cad*rad*Qad*Qad;
double Peb = ceb*reb*Qeb*Qeb;
double Pbc = cbc*rbc*Qbc*Qbc;
double Pdc = cdc*rdc*Qdc*Qdc;
double Pdg = cdg*rdg*Qdg*Qdg;
double P9g = c9g*r9g*Q9g*Q9g;
double Pgh = cgh*rgh*Qgh*Qgh;
double Pef = cef*ref*Qef*Qef;
double Pcf = ccf*rcf*Qcf*Qcf;
double Pfh = cfh*rfh*Qfh*Qfh;
double PS0 = cS0*rS0*QS0*QS0;
double PEA = cEA*rEA*QEA*QEA;
double PEB = cEB*rEB*QEB*QEB;
// Loop residuals: each should approach zero at convergence.
std::cout << P01 + P12 + P24 - P34 - P03<<endl;
std::cout << P16 - P56 - P25 - P12 << endl;
std::cout << P25 + P57 - P47 - P24 << endl;
std::cout << P34 + P47 + P78 + P8a + Pab - Peb - P3e << endl;
std::cout << P56 + P69 - P78 - P89 - P57 << endl;
std::cout << P89 - P9g - Pdg - Pad -P8a<< endl;
std::cout << Pad + Pdc - Pbc - Pab << endl;
std::cout << Peb + Pbc + Pcf - Pef << endl;
std::cout << Pdg + Pgh - Pfh - Pcf - Pdc << endl;
std::cout << -10 + PS0 + P01 + P16 + P69 + PEA << endl;
std::cout << -10 + PS0 + P03 + P3e + Pef + Pfh + PEB << endl;
return 0;
}
|
13,808 | #include "includes.h"
__global__ void calcLoss(float *err, float *output, unsigned int Y, const int N)
{
// Residual of a one-hot target: err[idx] = target(idx) - output[idx],
// where the target is 1 at the label index Y and 0 elsewhere.
// Each thread handles the contiguous slice [N*pos/totalPos, N*(pos+1)/totalPos).
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int totalPos = blockDim.x * gridDim.x;
const int begin = N * pos / totalPos;
const int end = N * (pos + 1) / totalPos;
for (int idx = begin; idx < end; ++idx) {
err[idx] = ((Y == idx ? 1.0f : 0.0f) - output[idx]);
}
}
13,809 | #include <cstdlib>
#include <iostream>
#include <stdio.h>
#include <ctime>
#include <cmath>
#include <cfloat>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <cuda.h>
#include <curand_kernel.h>
using namespace std;
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
int numberOfPoints = 0;
int numberOfClusters = 0;
__global__ void setup_kernel(curandState *state) {
// Initialize one RNG state per thread: a distinct seed (7 + id), the
// thread id as the sequence number, and no offset.
const int id = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(7 + id, id, 0, &state[id]);
}
__global__ void generate_normal_kernel(curandState *state, float *xPoints, float *yPoints) {
// Draw one standard-normal (x, y) pair per thread.
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Work on a register-resident copy of the RNG state for speed, then
// write the advanced state back to global memory.
curandState rng = state[tid];
xPoints[tid] = curand_normal(&rng);
yPoints[tid] = curand_normal(&rng);
state[tid] = rng;
}
__device__ __host__
float compute_distance(float x1,float x2,float y1,float y2) {
// Euclidean distance between (x1, y1) and (x2, y2).
const float dx = x2 - x1;
const float dy = y2 - y1;
return sqrtf(dx * dx + dy * dy);
}
__global__
void mapFunction(int * map_data_cluster_index, float *xPoints, float *xCentroids, float *yPoints, float *yCentroids, int numberOfClusters) {
// Assignment step: label point j with the index of its nearest centroid.
const int j = blockIdx.x * blockDim.x + threadIdx.x;
int best = 0;
float bestDistance = FLT_MAX;
for (int c = 0; c < numberOfClusters; c++) {
const float d = compute_distance(xPoints[j], xCentroids[c], yPoints[j], yCentroids[c]);
if (d < bestDistance) {
bestDistance = d;
best = c;
}
}
map_data_cluster_index[j] = best;
}
__global__
void reduce (int *clusterIndex, float *xPoints, float *yPoints, float *sumX, float *sumY, int *nElemsX, int *nElemsY) {
// Accumulate per-cluster coordinate sums and member counts. Many points
// share a cluster, so the adds must be atomic to avoid lost updates.
const int p = blockIdx.x * blockDim.x + threadIdx.x;
const int cluster = clusterIndex[p];
atomicAdd(&sumX[cluster], xPoints[p]);
atomicAdd(&sumY[cluster], yPoints[p]);
atomicAdd(&nElemsX[cluster], 1);
atomicAdd(&nElemsY[cluster], 1);
}
__global__
void calculateNewCentroids (float *xCentroids, float *yCentroids, float *sumX, float *sumY, int *nElemsX, int * nElemsY) {
// Update step: centroid j becomes the mean of its assigned points.
int j = threadIdx.x + blockIdx.x * blockDim.x;
// BUG FIX: an empty cluster has nElems == 0 and the original divided by
// zero, producing NaN/Inf centroids that poison every later iteration.
// Keep the previous centroid for empty clusters instead.
if (nElemsX[j] > 0) {
xCentroids[j] = sumX[j] / nElemsX[j];
}
if (nElemsY[j] > 0) {
yCentroids[j] = sumY[j] / nElemsY[j];
}
//printf ("Number of points in cluster %d is %d\n",j,nElemsX[j]);
}
int main(int argc, char **argv) {
// K-means on random 2-D points: generate points and centroids on the GPU,
// then iterate map (assign) -> reduce (sum) -> update (mean) until the
// assignments stop changing or maxNumberOfIterations is reached.
// argv: <numberOfPoints> <numberOfClusters> <maxIterations>
cudaSetDevice(1);
cudaFree(0);
if (argc < 4) {
printf ("Missing arguments!Exiting...\n");
return 0;
}
srand(time(NULL));
clock_t tStart = clock();
numberOfPoints = atoi(argv[1]);
// Pick a threads-per-block t that divides numberOfPoints evenly.
int t = 0;
if (numberOfPoints % 10 != 0) {
t = 1024;
} else {
t = 1000;
}
if (numberOfPoints < 1000) {
t = 1;
}
numberOfClusters = atoi(argv[2]);
// Same scheme for the (smaller) centroid launches.
int tc = 0;
if (numberOfClusters % 10 != 0) {
tc = 64;
} else {
tc = 50;
}
if (numberOfClusters < 64) {
tc = 1;
}
int maxNumberOfIterations = atoi(argv[3]);
int n = numberOfPoints / t;
int nc = numberOfClusters / tc;
curandState *devStates, *devStates2;
CUDA_CALL(cudaMalloc((void **)&devStates, n * t * sizeof(curandState)));
CUDA_CALL(cudaMalloc((void **)&devStates2, nc * tc * sizeof(curandState)));
setup_kernel<<<n, t>>>(devStates);
setup_kernel<<<nc, tc>>>(devStates2);
thrust::host_vector<int> clusterIndex(numberOfPoints);
//initialize all the points to belong in sentinel cluster -1
for (int i = 0; i < clusterIndex.size(); i++) {
clusterIndex[i] = -1;
}
//creating and populating device vectors
thrust::device_vector<float> xCentroids(numberOfClusters);
thrust::device_vector<float> yCentroids(numberOfClusters);
thrust::device_vector<int> previousIndex(numberOfPoints);
thrust::device_vector<int> deviceClusterIndex = clusterIndex;
int *clusterIndexPointer = thrust::raw_pointer_cast(&deviceClusterIndex[0]);
float *xPoints;
float *yPoints;
float *xCentroidsPointer = thrust::raw_pointer_cast(&xCentroids[0]);
float *yCentroidsPointer = thrust::raw_pointer_cast(&yCentroids[0]);
CUDA_CALL(cudaMalloc((void **)&xPoints, n * t * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&yPoints, n * t * sizeof(float)));
generate_normal_kernel<<<n, t>>>(devStates, xPoints, yPoints);
generate_normal_kernel<<<nc, tc>>>(devStates2, xCentroidsPointer, yCentroidsPointer);
// BUG FIX: the reduction buffers were cudaMalloc'd inside the loop below
// and never freed, leaking four device buffers every iteration. Allocate
// once here and just re-zero them each pass.
float *sumX, *sumY;
int *nElemsX, *nElemsY;
CUDA_CALL(cudaMalloc((void **)&sumX, nc * tc * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&sumY, nc * tc * sizeof(float)));
CUDA_CALL(cudaMalloc((void **)&nElemsX, nc * tc * sizeof(int)));
CUDA_CALL(cudaMalloc((void **)&nElemsY, nc * tc * sizeof(int)));
bool done = false;
int i = 0;
while(i < maxNumberOfIterations) {
CUDA_CALL(cudaMemset(sumX, 0, nc * tc * sizeof(float)));
CUDA_CALL(cudaMemset(sumY, 0, nc * tc * sizeof(float)));
CUDA_CALL(cudaMemset(nElemsX, 0, nc * tc * sizeof(int)));
CUDA_CALL(cudaMemset(nElemsY, 0, nc * tc * sizeof(int)));
printf("Calling the map function with iteration number %d\n", i);
mapFunction<<<n, t>>>(clusterIndexPointer,xPoints,xCentroidsPointer,yPoints,yCentroidsPointer, numberOfClusters);
// Check if the corresponding cluster for each point changed
done = thrust::equal(deviceClusterIndex.begin(),deviceClusterIndex.end(),previousIndex.begin());
if (done) {
printf("Clusters for each point remained the same! Terminating...\n");
break;
} else {
printf("Some points changed their corresponding cluster! Will do another iteration!\n");
}
// Copy this cluster index to another value to compare the next index to it
thrust::copy(deviceClusterIndex.begin(),deviceClusterIndex.end(),previousIndex.begin());
reduce<<<n, t>>>(clusterIndexPointer, xPoints, yPoints, sumX, sumY, nElemsX, nElemsY);
calculateNewCentroids<<<nc,tc>>>(xCentroidsPointer, yCentroidsPointer, sumX, sumY, nElemsX, nElemsY);
i++;
}
// Release device resources (the original leaked all of these).
cudaFree(sumX);
cudaFree(sumY);
cudaFree(nElemsX);
cudaFree(nElemsY);
cudaFree(xPoints);
cudaFree(yPoints);
cudaFree(devStates);
cudaFree(devStates2);
printf("Time taken mapping and reducing: %.5fs\n", (double)(clock() - tStart)/CLOCKS_PER_SEC);
}
|
13,810 | /**
*Developed by Karan Bhagat
*March 2017
**/
#include <stdio.h>
#include <stdlib.h>
#define BLK_ROWS 2
#define BLK_COLS 2
//size of the share memory tile in the device
#define TILE_SIZE BLK_ROWS
//cuda kernel for multiplying two matrices using tiling
__global__ void matrix_mul_kernel(int* a, int* b, int* c, int a_rows, int a_columns, int b_columns)
{
//declare shared memory matrices for A and B matrices
__shared__ int shared_a_tile[TILE_SIZE][TILE_SIZE];
__shared__ int shared_b_tile[TILE_SIZE][TILE_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int result = 0;
int k;
int phase;
// Tiled multiply: each phase cooperatively stages one TILE_SIZE x TILE_SIZE
// tile of A and of B into shared memory, then accumulates its partial dot
// products.
// BUG FIX: the original wrapped this whole loop in an
// `if (row < a_rows && col < b_columns)` guard, so for dimensions that are
// not multiples of TILE_SIZE the edge blocks executed __syncthreads()
// divergently (undefined behavior) and the tile loads read past the ends
// of a and b. Now every thread runs every phase and barrier; out-of-range
// elements load as 0, which contributes nothing to the dot product.
for (phase = 0; phase <= a_columns/TILE_SIZE; phase++)
{
int a_col = phase * TILE_SIZE + tx;
int b_row = phase * TILE_SIZE + ty;
shared_a_tile[ty][tx] = (row < a_rows && a_col < a_columns) ? a[row * a_columns + a_col] : 0;
shared_b_tile[ty][tx] = (b_row < a_columns && col < b_columns) ? b[b_row * b_columns + col] : 0;
__syncthreads();
for (k = 0; k < TILE_SIZE; k++)
{
result += (shared_a_tile[ty][k] * shared_b_tile[k][tx]);
}
__syncthreads();
}
// Only threads that map to a real element of C write out.
if (row < a_rows && col < b_columns)
{
c[row * b_columns + col] = result;
}
}
void build_matrix(FILE *file, int* mat, int rows, int columns);
int main(int argc, char **argv)
{
// Read matrices A and B from text files, multiply them on the GPU with the
// tiled kernel, and print C = A*B.
//check for filenames and matrices' dimensions
if (argc != 6)
{
printf("Usage : ./matrix_mul_tiling <fileA> <fileB> <A_rows> <A_columns> <B_columns>");
exit(1);
}
char* fileA_name = argv[1];//matrix A filename
char* fileB_name = argv[2];//matrix B filename
// a_columns can also be perceived as b_rows
int a_rows, a_columns, b_columns;
//read matrix A and B's dimensions
sscanf(argv[3], "%d", &a_rows);
sscanf(argv[4], "%d", &a_columns);
sscanf(argv[5], "%d", &b_columns);
FILE *fileA = fopen(fileA_name, "r");
FILE *fileB = fopen(fileB_name, "r");
// Fail fast on missing input files instead of crashing inside fscanf.
if (fileA == NULL || fileB == NULL)
{
printf("Could not open input matrix file(s)\n");
exit(1);
}
//declare host and device matrices pointers
int* mat_a;
int* mat_b;
int* mat_c;
int* d_mat_a;
int* d_mat_b;
int* d_mat_c;
//allocate memory for host matrices
mat_a = (int*)malloc(a_rows * a_columns * sizeof(int));
mat_b = (int*)malloc(a_columns * b_columns * sizeof(int));
mat_c = (int*)malloc(a_rows * b_columns * sizeof(int));
int i, j;
build_matrix(fileA, mat_a, a_rows, a_columns);
build_matrix(fileB, mat_b, a_columns, b_columns);
// Input files are no longer needed (the original never closed them).
fclose(fileA);
fclose(fileB);
//declare dimensions for the grid and block
dim3 dimBlock(BLK_COLS,BLK_ROWS);
// BUG FIX: the original applied ceil() to an already-truncated integer
// division, so dimensions not divisible by the block size lost their last
// partial tile of threads. Use integer ceiling division.
dim3 dimGrid((b_columns + BLK_COLS - 1) / BLK_COLS,
             (a_rows + BLK_ROWS - 1) / BLK_ROWS);
const size_t size_a = a_rows * a_columns * sizeof(int);
const size_t size_b = a_columns * b_columns * sizeof(int);
const size_t size_c = a_rows * b_columns * sizeof(int);
//allocate matrices memory on device
cudaMalloc((void **)&d_mat_a, size_a);
cudaMalloc((void **)&d_mat_b, size_b);
cudaMalloc((void **)&d_mat_c, size_c);
//copy A and B matrices from host to device
cudaMemcpy(d_mat_a, mat_a, size_a, cudaMemcpyHostToDevice);
cudaMemcpy(d_mat_b, mat_b, size_b, cudaMemcpyHostToDevice);
//execute cuda kernel
matrix_mul_kernel<<<dimGrid, dimBlock>>>(d_mat_a, d_mat_b, d_mat_c, a_rows, a_columns, b_columns);
//copy the computed matrix C from device to host (blocking, so it also
//synchronizes with the kernel)
cudaMemcpy(mat_c, d_mat_c, size_c, cudaMemcpyDeviceToHost);
//print the resulting matrix
for (i = 0; i < a_rows; i++)
{
for (j = 0; j < b_columns; j++)
{
printf("%d ", mat_c[i * b_columns + j]);
}
printf("\n");
}
//free cuda memory
cudaFree(d_mat_a);
cudaFree(d_mat_b);
cudaFree(d_mat_c);
// free host memory (the original leaked all three buffers)
free(mat_a);
free(mat_b);
free(mat_c);
}
//build matrix from the file
void build_matrix(FILE *file, int* mat, int rows, int columns)
{
// Fill `mat` (row-major, rows x columns) with whitespace-separated
// integers read sequentially from `file`.
const int total = rows * columns;
for (int idx = 0; idx < total; ++idx)
{
fscanf(file, "%d", &mat[idx]);
}
}
13,811 | #include "includes.h"
__global__ void vecAddGPU(double *a, double *b, double *c, double n){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n){
c[id] = a[id] + b[id];
}
} |
13,812 | /*
* dpdems_cuda.cu
* Copyright (C) 2016 <@A0835-PC>
*
* Distributed under terms of the MIT license.
*/
#include <iostream>
#include <cctype>
#include <ctime>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
const int maxdim = 100;
const int maxThreads = 21504;
const int threadPerBlock = 512;
int blockPerGrid(const int dim, const int threadPerBlock)
{
    // Ceiling division: number of blocks of `threadPerBlock` threads needed
    // to cover `dim` work items.
    const int fullBlocks = dim / threadPerBlock;
    return (dim % threadPerBlock == 0) ? fullBlocks : fullBlocks + 1;
}
inline void checkCudaError(cudaError_t error, const char *file, const int line)
{
    // Abort with a source-located message when a CUDA runtime call failed;
    // silent on success.
    if (error == cudaSuccess)
        return;
    std::cerr << "CUDA CALL FAILED: " << file << "( " << line << " )- " <<
        cudaGetErrorString(error) << std::endl;
    exit(EXIT_FAILURE);
    // else
    //     std::cout << "cuda call success" << std::endl;
}
inline void checkCudaState(const char *msg, const char *file, const int line)
{
    // Poll the sticky CUDA error state (e.g. after a kernel launch) and abort
    // with `msg` plus a source location if anything failed; silent on success.
    const cudaError_t error = cudaGetLastError();
    if (error == cudaSuccess)
        return;
    std::cerr << "---" << msg << " Error--" << std::endl;
    std::cerr << file << "( " << line << " )- " <<
        cudaGetErrorString(error) << std::endl;
    exit(EXIT_FAILURE);
    // else
    //     std::cout << "cuda state Success: " << msg << std::endl;
}
#define CHECK_ERROR(error) checkCudaError(error, __FILE__, __LINE__);
#define CHECK_STATE(msg) checkCudaState(msg, __FILE__, __LINE__);
void print_device(const int id)
{
    // Print name, compute capability, and SM count of CUDA device `id`.
    cudaDeviceProp props;
    CHECK_ERROR(cudaGetDeviceProperties(&props, id));
    std::cout << "---Property of currently device used---" << std::endl
              << "Device " << id << ": " << props.name << std::endl
              << "CUDA Capability: " << props.major << "." << props.minor
              << std::endl
              << "MultiProcessor count: " << props.multiProcessorCount << std::endl;
}
void setCudaDevice(int id)
{
    // Make the CUDA device with the most multiprocessors current (a cheap
    // proxy for the fastest device), falling back to `id` when only one
    // device exists, then print its properties.
    int numDevice = 0;
    CHECK_ERROR(cudaGetDeviceCount(&numDevice));
    std::cout << "Total CUDA device number: " << numDevice << std::endl;
    if (numDevice > 1) {
        cudaDeviceProp props;
        // BUG FIX: the original primed the scan with device `id` and started
        // the loop at device 1, so device 0 was never considered whenever
        // id != 0; its priming cudaGetDeviceProperties call was also
        // unchecked. Scan every device from 0 with checked calls.
        int maxMultiProcessors = -1;
        for (int device = 0; device < numDevice; ++device) {
            CHECK_ERROR(cudaGetDeviceProperties(&props, device));
            if (maxMultiProcessors < props.multiProcessorCount) {
                maxMultiProcessors = props.multiProcessorCount;
                id = device;
            }
        }
    }
    CHECK_ERROR(cudaSetDevice(id));
    print_device(id);
}
__global__ void cudaScale(double *dev_xt, double *dev_yt, double *dev_zt, int *dev_x, int *dev_y, int *dev_z, int readnum, int maxdim)
{
    // Grid-stride loop: convert each of the readnum double coordinates to an
    // integer grid coordinate, scaling and then offsetting by maxdim.
    // NOTE(review): the element at index `readnum` of each input array appears
    // to hold a per-axis scale factor — confirm against the host code that
    // fills dev_xt/dev_yt/dev_zt.
    for (int i = threadIdx.x + blockIdx.x * blockDim.x;
         i < readnum;
         i += blockDim.x * gridDim.x) {
        dev_x[i] = dev_xt[i] * dev_xt[readnum] + maxdim;
        dev_y[i] = dev_yt[i] * dev_yt[readnum] + maxdim;
        dev_z[i] = dev_zt[i] * dev_zt[readnum] + maxdim;
    }
}
// Forward declarations of host-side simulation helpers (defined elsewhere
// in this file, outside the visible chunk).
double scalev(double &, const double &);
void swapv(double *, double *, double *, int, int, double);
bool isInGrid(const int &, const int &, const int &, const int &);
unsigned updatePosition(int *, int *, int *, double *, double *, double *, const int &, const int &, int ***, const unsigned long &);
unsigned long long collision(int *, int *, int *, double *, double *, double *, const int &, const int &, int ***, const unsigned long &, std::ostream &);
// Driver: times a (currently disabled) GPU coordinate-scaling pass, then
// scatters read indices into a dense 3-D occupancy grid on the host.
// NOTE(review): `threadPerBlock` and `maxdim` are declared elsewhere in this
// file (not visible in this chunk).
int main(int argc, char **argv)
{
    std::cout.setf(std::ios::scientific);
    std::cout.precision(19);
    int device_id = 0;
    setCudaDevice(device_id);
    std::size_t readnum = 100;
    clock_t t;
    t = clock();    // CPU clock for the whole run
    cudaEvent_t start, stop;
    CHECK_STATE("cudaEvent1");
    CHECK_ERROR(cudaEventCreate(&start));
    CHECK_STATE("cudaEvent2");
    CHECK_ERROR(cudaEventCreate(&stop));
    CHECK_ERROR(cudaEventRecord(start, 0));
    CHECK_ERROR(cudaEventSynchronize(start));
    // Host-side integer grid coordinates, filled from the GPU below.
    int *x = new int[readnum];
    int *y = new int[readnum];
    int *z = new int[readnum];
    int *dev_x;
    int *dev_y;
    int *dev_z;
    double *dev_xt;
    double *dev_yt;
    double *dev_zt;
    CHECK_ERROR(cudaMalloc((void**)&dev_x, readnum * sizeof(int)));
    CHECK_ERROR(cudaMalloc((void**)&dev_y, readnum * sizeof(int)));
    CHECK_ERROR(cudaMalloc((void**)&dev_z, readnum * sizeof(int)));
    // One extra slot each: cudaScale reads its scale factor at index readnum.
    CHECK_ERROR(cudaMalloc((void**)&dev_xt, (readnum + 1) * sizeof(double)));
    CHECK_ERROR(cudaMalloc((void**)&dev_yt, (readnum + 1) * sizeof(double)));
    CHECK_ERROR(cudaMalloc((void**)&dev_zt, (readnum + 1) * sizeof(double)));
    int threads = threadPerBlock;
    int blocks = blockPerGrid(readnum, threads);
    // NOTE(review): the kernel launch is commented out and dev_x/y/z are
    // never written, so the copies below return uninitialized device memory.
    // cudaScale<<<blocks, threads>>>(dev_xt, dev_yt, dev_zt, dev_x, dev_y, dev_z, readnum, maxdim);
    CHECK_STATE("cudaScale call");
    CHECK_ERROR(cudaMemcpy(x, dev_x, readnum * sizeof(int), cudaMemcpyDeviceToHost));
    CHECK_ERROR(cudaMemcpy(y, dev_y, readnum * sizeof(int), cudaMemcpyDeviceToHost));
    CHECK_ERROR(cudaMemcpy(z, dev_z, readnum * sizeof(int), cudaMemcpyDeviceToHost));
    CHECK_ERROR(cudaFree(dev_x));
    CHECK_ERROR(cudaFree(dev_y));
    CHECK_ERROR(cudaFree(dev_z));
    CHECK_ERROR(cudaFree(dev_xt));
    CHECK_ERROR(cudaFree(dev_yt));
    CHECK_ERROR(cudaFree(dev_zt));
    CHECK_ERROR(cudaEventRecord(stop, 0));
    CHECK_ERROR(cudaEventSynchronize(stop));
    float elapsedTime;
    CHECK_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
    CHECK_ERROR(cudaEventDestroy(start));
    CHECK_ERROR(cudaEventDestroy(stop));
    std::cout << "CUDA elapsed: " << elapsedTime / 1000.0 << std::endl;    // ms -> s
    // Dense maxdim^3 occupancy grid, zero-initialized.
    int ***grid;
    grid = new int **[maxdim];
    for (int i = 0; i < maxdim; ++i) {
        grid[i] = new int *[maxdim];
        for (int j = 0; j < maxdim; ++j) {
            grid[i][j] = new int[maxdim];
            for (int k = 0; k < maxdim; ++k)
                grid[i][j][k] = 0;
        }
    }
    std::cout << *(x + 0) << " " << *(y + 0) << " " << *(z + 0) << std::endl;
    // NOTE(review): x/y/z were copied from uninitialized device memory, so
    // these indices can lie far outside [0, maxdim) -- out-of-bounds write.
    for (int i = 0; i < readnum; ++i) {
        grid[*(x + i)][*(y + i)][*(z + i)] = i;
    }
    std::cout << x[0] << " : " << y[0] << " : " << z[0] << std::endl;
    t = clock() - t;
    double seconds = (double)t / CLOCKS_PER_SEC;
    std::cout << std::endl << "Total time consumed: " << seconds << " seconds" << std::endl;
    delete [] x;
    std::cout << "delete x" << std::endl;
    // NOTE(review): y and z deletes are commented out -- the arrays leak
    // while the log still claims they were freed.
    // delete [] y;
    std::cout << "delete y" << std::endl;
    // delete [] z;
    std::cout << "delete z" << std::endl;
    for (int i = 0; i < maxdim; ++i) {
        for (int j = 0; j < maxdim; ++j) {
            delete[] grid[i][j];
        }
        delete[] grid[i];
    }
    delete[] grid;
    std::cout << "delete grid" << std::endl;
    std::cout << std::endl;
    return 0;
}
|
13,813 | #include <float.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
// Wrapper that stamps the call site into gpuAssert.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a diagnostic for a failed CUDA call; exits with the error code
// unless `abort` is false (then failures become warnings).
void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
// Smallest power of two that is >= val (returns 1 for val <= 1).
int infTwoExp(int val)
{
    int p = 1;
    while (p < val)
        p <<= 1;
    return p;
}
// Choose a (grid, block) decomposition for a dim0 x dim1 x dim2 problem.
// Threads are assigned innermost-first: up to 64 along dim2, then the rest
// of the 1024-thread budget along dim1, then dim0. A thread count is
// clamped to the next power of two >= the dimension when the dimension is
// small; each block count is the matching ceiling division.
void getGPULayout(
    int dim0,int dim1,int dim2,
    int* bdim0,int* bdim1,int* bdim2,
    int* tdim0,int* tdim1,int* tdim2
)
{
    *tdim2 = 64;
    if (dim2 < *tdim2) *tdim2 = infTwoExp(dim2);
    *bdim2 = (dim2 + *tdim2 - 1) / *tdim2;

    *tdim1 = 1024 / *tdim2;
    if (dim1 < *tdim1) *tdim1 = infTwoExp(dim1);
    *bdim1 = (dim1 + *tdim1 - 1) / *tdim1;

    *tdim0 = 1024 / ((*tdim1) * (*tdim2));
    if (dim0 < *tdim0) *tdim0 = infTwoExp(dim0);
    *bdim0 = (dim0 + *tdim0 - 1) / *tdim0;
}
// Brute-force 1-nearest-neighbor in 3-D: for each (batch, query) pair, scan
// all pn1 reference points of that batch and record the index of the
// closest one (squared Euclidean distance -- no sqrt needed for an argmin).
// Launch layout: thread.x -> batch index, thread.y -> query index.
__global__
void findNearestPoint3DIdxKernel(
    float* ref_pts,     // [b,pn1,3]
    float* que_pts,     // [b,pn2,3]
    int* idxs,          // [b,pn2]
    int b,
    int pn1,
    int pn2,
    int exclude_self    // nonzero: skip the reference sharing the query's index
)
{
    const int bi = threadIdx.x + blockIdx.x * blockDim.x;
    const int qi = threadIdx.y + blockIdx.y * blockDim.y;
    if (bi >= b || qi >= pn2) return;   // guard the grid tail

    const float qx = que_pts[bi * pn2 * 3 + qi * 3];
    const float qy = que_pts[bi * pn2 * 3 + qi * 3 + 1];
    const float qz = que_pts[bi * pn2 * 3 + qi * 3 + 2];

    float best = FLT_MAX;
    int best_idx = 0;
    for (int ri = 0; ri < pn1; ri++)
    {
        if (exclude_self && ri == qi) continue;
        const float dx = ref_pts[bi * pn1 * 3 + ri * 3]     - qx;
        const float dy = ref_pts[bi * pn1 * 3 + ri * 3 + 1] - qy;
        const float dz = ref_pts[bi * pn1 * 3 + ri * 3 + 2] - qz;
        const float d2 = dx * dx + dy * dy + dz * dz;
        if (d2 < best)
        {
            best = d2;
            best_idx = ri;
        }
    }
    idxs[bi * pn2 + qi] = best_idx;
}
// Brute-force 1-nearest-neighbor in 2-D: for each query point (bi, p2i),
// scan all pn1 reference points of the same batch and record the index of
// the closest one (squared Euclidean distance; no sqrt needed for argmin).
// Launch layout: thread.x -> batch index, thread.y -> query index.
__global__
void findNearestPoint2DIdxKernel(
    float* ref_pts, // [b,pn1,2]
    float* que_pts, // [b,pn2,2]
    int* idxs,      // [b,pn2]
    int b,
    int pn1,
    int pn2,
    int exclude_self    // nonzero: skip the reference sharing the query's index
)
{
    int bi = threadIdx.x + blockIdx.x*blockDim.x;
    int p2i = threadIdx.y + blockIdx.y*blockDim.y;
    if(p2i>=pn2||bi>=b) return;    // guard the grid tail
    float x2=que_pts[bi*pn2*2+p2i*2];
    float y2=que_pts[bi*pn2*2+p2i*2+1];
    float min_dist=FLT_MAX;
    int min_idx=0;
    for(int p1i=0;p1i<pn1;p1i++)
    {
        if(exclude_self&&p1i==p2i) continue;
        float x1=ref_pts[bi*pn1*2+p1i*2];
        float y1=ref_pts[bi*pn1*2+p1i*2+1];
        float dist=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);
        if(dist<min_dist)
        {
            min_dist=dist;
            min_idx=p1i;
        }
    }
    idxs[bi*pn2+p2i]=min_idx;
}
#ifdef __cplusplus
extern "C" {
#endif
// Host wrapper: upload the point sets, launch the 2-D or 3-D nearest-
// neighbor kernel depending on `dim`, and copy the resulting indices back
// into the host buffer `idxs`. All three pointers are host pointers.
void findNearestPointIdxLauncher(
    float* ref_pts, // [b,pn1,dim]
    float* que_pts, // [b,pn2,dim]
    int* idxs,      // [b,pn2]
    int b,
    int pn1,
    int pn2,
    int dim,        // 3 selects the 3-D kernel; anything else the 2-D kernel
    int exclude_self
)
{
    float* ref_pts_dev,* que_pts_dev;
    int* idxs_dev;
    gpuErrchk(cudaMalloc(&ref_pts_dev,b*pn1*sizeof(float)*dim))
    gpuErrchk(cudaMalloc(&que_pts_dev,b*pn2*sizeof(float)*dim))
    gpuErrchk(cudaMalloc(&idxs_dev,b*pn2*sizeof(int)))
    gpuErrchk(cudaMemcpy(ref_pts_dev,ref_pts,b*pn1*sizeof(float)*dim,cudaMemcpyHostToDevice))
    gpuErrchk(cudaMemcpy(que_pts_dev,que_pts,b*pn2*sizeof(float)*dim,cudaMemcpyHostToDevice))
    // NOTE(review): uploading idxs is unnecessary -- both kernels overwrite
    // every element -- but it is harmless.
    gpuErrchk(cudaMemcpy(idxs_dev,idxs,b*pn2*sizeof(int),cudaMemcpyHostToDevice))
    int bdim0,bdim1,bdim2;
    int tdim0,tdim1,tdim2;
    // Layout over (batch, query, 1): x -> batch, y -> query.
    getGPULayout(b,pn2,1,&bdim0,&bdim1,&bdim2,&tdim0,&tdim1,&tdim2);
    dim3 bdim(bdim0,bdim1,bdim2);
    dim3 tdim(tdim0,tdim1,tdim2);
    if(dim==3)
        findNearestPoint3DIdxKernel<<<bdim,tdim>>>(ref_pts_dev,que_pts_dev,idxs_dev,b,pn1,pn2,exclude_self);
    else
        findNearestPoint2DIdxKernel<<<bdim,tdim>>>(ref_pts_dev,que_pts_dev,idxs_dev,b,pn1,pn2,exclude_self);
    gpuErrchk(cudaGetLastError())
    // Blocking copy doubles as synchronization with the kernel.
    gpuErrchk(cudaMemcpy(idxs,idxs_dev,b*pn2*sizeof(int),cudaMemcpyDeviceToHost))
    gpuErrchk(cudaFree(ref_pts_dev))
    gpuErrchk(cudaFree(que_pts_dev))
    gpuErrchk(cudaFree(idxs_dev))
}
#ifdef __cplusplus
}
#endif
|
13,814 | #include <cuda.h>
#include <stdio.h>
// Report the installed CUDA driver and runtime versions on stdout.
int main(int argc, char** argv) {
    int driver_version = 0;
    int runtime_version = 0;
    cudaDriverGetVersion(&driver_version);
    cudaRuntimeGetVersion(&runtime_version);
    printf("Driver Version: %d\n"
           "Runtime Version: %d\n",
           driver_version, runtime_version);
    return 0;
}
|
13,815 | //=============================================================================================
// Name : thread3D.cu
// Author : Jose Refojo
// Version : 29-06-2012
// Creation date : 18-06-2010
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will initialize a number of arrays,
// then it will grab data from each thread (such as thread position inside the block and block),
// save it, send it back into the main memory, and print it
//=============================================================================================
#include "stdio.h"
// Record each thread's (threadIdx, blockIdx) coordinates into six flat
// N*M*L output arrays, laid out as [idx + idy*N + idz*N*M].
// idz has no blockIdx.z term: the launch in main uses a 2-D grid with
// blockDim.z == L, so the whole z extent must fit in a single block.
__global__ void scanTheadInformationGPU(int *threadXIdsGPU,int *threadYIdsGPU,int *threadZIdsGPU,int *blockXIdsGPU,int *blockYIdsGPU,int *blockZIdsGPU,int N,int M,int L) {
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    int idy=blockIdx.y*blockDim.y+threadIdx.y;
    int idz=threadIdx.z;    // z is covered by one block (see launch config)
    if ( idx < N ) {
        if ( idy < M ) {
            if ( idz < L ) {
                threadXIdsGPU[idx+idy*N+idz*N*M]=threadIdx.x;
                threadYIdsGPU[idx+idy*N+idz*N*M]=threadIdx.y;
                threadZIdsGPU[idx+idy*N+idz*N*M]=threadIdx.z;
                blockXIdsGPU[idx+idy*N+idz*N*M]=blockIdx.x;
                blockYIdsGPU[idx+idy*N+idz*N*M]=blockIdx.y;
                blockZIdsGPU[idx+idy*N+idz*N*M]=blockIdx.z;
            }
        }
    }
}
// Launch a (2,2,L)-block / 2-D-grid configuration over an N x M x L volume,
// record every thread's thread/block coordinates on the GPU, copy them back
// and print them via 3-D pointer views over the flat host arrays.
int main() {
    // pointers to host memory matrices
    int ***threadXIds, ***threadYIds, ***threadZIds;
    int *threadXIds1d = NULL;
    int *threadYIds1d = NULL;
    int *threadZIds1d = NULL;
    int ***blockXIds, ***blockYIds, ***blockZIds;
    int *blockXIds1d = NULL;
    int *blockYIds1d = NULL;
    int *blockZIds1d = NULL;
    // pointers to device memory matrices
    int *threadXIdsGPU, *threadYIdsGPU, *threadZIdsGPU;
    int *blockXIdsGPU, *blockYIdsGPU, *blockZIdsGPU;
    // N and M are the total size that we want, N is number of rows and M is number of columns
    int N=4,M=4,L=4;
    int i,j,k;
    // Allocate arrays threadIds and blockIds on host
    // threadIds
    // threadXIds is the pointer to all the array malloced in one dimension
    threadXIds1d = (int*) malloc( N*M*L*sizeof(int) );
    threadYIds1d = (int*) malloc( N*M*L*sizeof(int) );
    threadZIds1d = (int*) malloc( N*M*L*sizeof(int) );
    // thread*Ids will be just pointers to the one dimension array:
    // [i][j] -> &1d[i*M*L + j*L], so [i][j][k] aliases 1d[i*M*L + j*L + k].
    threadXIds = (int***) malloc(N*sizeof(int**));
    threadYIds = (int***) malloc(N*sizeof(int**));
    threadZIds = (int***) malloc(N*sizeof(int**));
    for (i=0;i<N;i++) {
        int **tmpPointerX = (int**) malloc(M*sizeof(int*));
        int **tmpPointerY = (int**) malloc(M*sizeof(int*));
        int **tmpPointerZ = (int**) malloc(M*sizeof(int*));
        for (j=0;j<M;j++) {
            tmpPointerX[j]=(&(threadXIds1d[i*M*L+j*L]));
            tmpPointerY[j]=(&(threadYIds1d[i*M*L+j*L]));
            tmpPointerZ[j]=(&(threadZIds1d[i*M*L+j*L]));
        }
        threadXIds[i]=tmpPointerX;
        threadYIds[i]=tmpPointerY;
        threadZIds[i]=tmpPointerZ;
    }
    // blockIds
    // blockIds is the pointer to all the array malloced in one dimension
    blockXIds1d = (int*) malloc( N*M*L*sizeof(int) );
    blockYIds1d = (int*) malloc( N*M*L*sizeof(int) );
    blockZIds1d = (int*) malloc( N*M*L*sizeof(int) );
    // block*Ids will be just pointers to the one dimension array
    blockXIds = (int***) malloc(N*sizeof(int**));
    blockYIds = (int***) malloc(N*sizeof(int**));
    blockZIds = (int***) malloc(N*sizeof(int**));
    for (i=0;i<N;i++) {
        int **tmpPointerX = (int**) malloc(M*sizeof(int*));
        int **tmpPointerY = (int**) malloc(M*sizeof(int*));
        int **tmpPointerZ = (int**) malloc(M*sizeof(int*));
        for (j=0;j<M;j++) {
            tmpPointerX[j]=(&(blockXIds1d[i*M*L+j*L]));
            tmpPointerY[j]=(&(blockYIds1d[i*M*L+j*L]));
            tmpPointerZ[j]=(&(blockZIds1d[i*M*L+j*L]));
        }
        blockXIds[i]=tmpPointerX;
        blockYIds[i]=tmpPointerY;
        blockZIds[i]=tmpPointerZ;
    }
    // Allocate arrays threadIdsGPU and blockIdsGPU on device
    // NOTE(review): all CUDA return codes below are unchecked.
    cudaMalloc ((void **) &threadXIdsGPU, sizeof(int)*N*M*L);
    cudaMalloc ((void **) &threadYIdsGPU, sizeof(int)*N*M*L);
    cudaMalloc ((void **) &threadZIdsGPU, sizeof(int)*N*M*L);
    cudaMalloc ((void **) &blockXIdsGPU, sizeof(int)*N*M*L);
    cudaMalloc ((void **) &blockYIdsGPU, sizeof(int)*N*M*L);
    cudaMalloc ((void **) &blockZIdsGPU, sizeof(int)*N*M*L);
    // Copy data from host memory to device memory (not needed)
    cudaMemcpy(threadXIdsGPU, threadXIds1d, sizeof(int)*N*M*L, cudaMemcpyHostToDevice);
    cudaMemcpy(threadYIdsGPU, threadYIds1d, sizeof(int)*N*M*L, cudaMemcpyHostToDevice);
    cudaMemcpy(threadZIdsGPU, threadZIds1d, sizeof(int)*N*M*L, cudaMemcpyHostToDevice);
    cudaMemcpy(blockXIdsGPU, blockXIds1d, sizeof(int)*N*M*L, cudaMemcpyHostToDevice);
    cudaMemcpy(blockYIdsGPU, blockYIds1d, sizeof(int)*N*M*L, cudaMemcpyHostToDevice);
    cudaMemcpy(blockZIdsGPU, blockZIds1d, sizeof(int)*N*M*L, cudaMemcpyHostToDevice);
    // Compute the execution configuration
    int block_size=2;
    // Block size has to be L in Z since CUDA does not allow 3d grids
    dim3 dimBlock(block_size,block_size,L);
    // Which is why we have to use "1" as the third dimension here:
    dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1),(M/dimBlock.y) + (!(M%dimBlock.y)?0:1) ,1);
    // Scan information from the threads
    scanTheadInformationGPU<<<dimGrid,dimBlock>>>(threadXIdsGPU,threadYIdsGPU,threadZIdsGPU,blockXIdsGPU,blockYIdsGPU,blockZIdsGPU,N,M,L);
    // Copy data from device memory to host memory
    // (the blocking copies double as synchronization with the kernel)
    cudaMemcpy(threadXIds1d, threadXIdsGPU, sizeof(int)*N*M*L, cudaMemcpyDeviceToHost);
    cudaMemcpy(threadYIds1d, threadYIdsGPU, sizeof(int)*N*M*L, cudaMemcpyDeviceToHost);
    cudaMemcpy(threadZIds1d, threadZIdsGPU, sizeof(int)*N*M*L, cudaMemcpyDeviceToHost);
    cudaMemcpy(blockXIds1d, blockXIdsGPU, sizeof(int)*N*M*L, cudaMemcpyDeviceToHost);
    cudaMemcpy(blockYIds1d, blockYIdsGPU, sizeof(int)*N*M*L, cudaMemcpyDeviceToHost);
    cudaMemcpy(blockZIds1d, blockZIdsGPU, sizeof(int)*N*M*L, cudaMemcpyDeviceToHost);
    // Print all the data about the threads
    // NOTE(review): the kernel writes element [idx+idy*N+idz*N*M] while the
    // host view maps [i][j][k] to [i*M*L+j*L+k]; for N=M=L these layouts are
    // transposed, so the printed [i][j][k] labels correspond to device
    // coordinates (x=k, y=j, z=i) -- confirm this is intended.
    printf(" dimGrid = %d %d %d\n",dimGrid.x,dimGrid.y,dimGrid.z);
    for (i=0; i<N; i++) {
        for (j=0; j<M; j++) {
            for (k=0; k<L; k++) {
                printf(" threadIds[%d][%d][%d]= %d , %d, %d\n",i,j,k,threadXIds[i][j][k],threadYIds[i][j][k],threadZIds[i][j][k]);
            }
        }
    }
    for (i=0; i<N; i++) {
        for (j=0; j<M; j++) {
            for (k=0; k<L; k++) {
                printf(" blockIds[%d][%d][%d]= %d , %d, %d\n",i,j,k,blockXIds[i][j][k],blockYIds[i][j][k],blockZIds[i][j][k]);
            }
        }
    }
    // Free the memory
    // Free the 1d
    free(threadXIds1d);
    free(threadYIds1d);
    free(threadZIds1d);
    free(blockXIds1d);
    free(blockYIds1d);
    free(blockZIds1d);
    // Free the 2d
    for (i=0;i<N;i++) {
        free(threadXIds[i]);
        free(threadYIds[i]);
        free(threadZIds[i]);
        free(blockXIds[i]);
        free(blockYIds[i]);
        free(blockZIds[i]);
    }
    // Free the 3d
    free(threadXIds);
    free(threadYIds);
    free(threadZIds);
    free(blockXIds);
    free(blockYIds);
    free(blockZIds);
    cudaFree(threadXIdsGPU);
    cudaFree(threadYIdsGPU);
    cudaFree(threadZIdsGPU);
    cudaFree(blockXIdsGPU);
    cudaFree(blockYIdsGPU);
    cudaFree(blockZIdsGPU);
}
|
13,816 | #include "includes.h"
// One Game-of-Life step on an (N+2)x(N+2) grid (1-cell halo), tiled through
// shared memory. Blocks overlap by two rows/cols (hence the blockDim-2
// stride) so that every interior thread sees its 8 neighbors in the tile.
// `allzeros` counts live cells; `change` counts cells that changed state --
// callers can test them for termination.
__global__ void evovle_kernel_shared(int N, char *oldGen, char *newGen, int *allzeros, int *change)
{
    // Global coordinates (overlapping-block indexing, see SideGrid).
    int ix = (blockDim.x - 2) * blockIdx.x + threadIdx.x;
    int iy = (blockDim.y - 2) * blockIdx.y + threadIdx.y;
    int id = ix * (N+2) + iy;
    int i = threadIdx.x;
    int j = threadIdx.y;
    int neighbors;
    // Per-block tile including the halo ring.
    __shared__ char oldGen_shared[BLOCK_SIZE][BLOCK_SIZE];
    if (ix <= N+1 && iy <= N+1)
        oldGen_shared[i][j] = oldGen[id];
    // Barrier between the tile fill and any neighbor reads.
    __syncthreads();
    if (ix <= N && iy <= N) {
        // Interior threads only; the halo ring is read but not updated.
        // NOTE(review): the test mixes i with blockDim.y and j with
        // blockDim.x -- correct only for square blocks; confirm.
        if(i != 0 && i != (blockDim.y-1) && j != 0 && j != (blockDim.x-1)) {
            neighbors = oldGen_shared[i+1][j] + oldGen_shared[i-1][j] //lower upper
            + oldGen_shared[i][j+1] + oldGen_shared[i][j-1] //right left
            + oldGen_shared[i+1][j+1] + oldGen_shared[i-1][j-1] //diagonals
            + oldGen_shared[i-1][j+1] + oldGen_shared[i+1][j-1];
            char cell = oldGen_shared[i][j];
            newGen[id] = neighbors == 3 || (neighbors == 2 && cell); // Conway rule
            // Bug fix: (*allzeros)++ / (*change)++ were plain (non-atomic)
            // read-modify-writes on globals shared by every thread, so
            // concurrent increments were lost. atomicAdd makes the counters
            // reliable for the host-side termination checks.
            if (newGen[id] != 0) atomicAdd(allzeros, 1);          // live-cell count
            if (newGen[id] != oldGen[id]) atomicAdd(change, 1);   // changed-cell count
        }
    }
}
13,817 | #pragma once
// Elementwise square: image_out[x] = image_in[x]^2 over height*width pixels.
// One thread per pixel; extra threads in the last block fall through.
template<typename T>
__global__
void to_square_kernel(T* image_in, T* image_out, int heightImage, int widthImage)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < (unsigned int)(heightImage * widthImage))
    {
        const T v = image_in[idx];
        image_out[idx] = v * v;
    }
}
// Launch to_square_kernel over the whole image on `stream`.
// threadsX = threads per block; the grid size is the ceiling division.
template <typename T>
void to_square_transform(T* image_in, T* image_out, int heightImage, int widthImage, int threadsX, cudaStream_t stream)
{
    int nblocks = (heightImage * widthImage) / threadsX;
    if ((heightImage * widthImage) % threadsX) nblocks++;
    // Bug fix: height/width were passed to the kernel in swapped order.
    // Harmless today because the kernel only uses their product, but a
    // latent bug should the kernel ever use the dimensions individually.
    to_square_kernel<<<nblocks, threadsX, 0, stream>>>(image_in, image_out, heightImage, widthImage);
}
// Elementwise product: image_out[x] = image_in1[x] * image_in2[x]
// over height*width pixels; one thread per pixel with a tail guard.
template<typename T>
__global__
void multiply_kernel(T* image_in1, T* image_in2, T* image_out, int heightImage, int widthImage)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < (unsigned int)(heightImage * widthImage))
    {
        image_out[idx] = image_in1[idx] * image_in2[idx];
    }
}
// Launch multiply_kernel (elementwise product of two images) on `stream`.
// threadsX = threads per block; the grid size is the ceiling division.
template <typename T>
void multiply_transform(T* image_in1, T* image_in2, T* image_out, int heightImage, int widthImage, int threadsX, cudaStream_t stream)
{
    int nblocks = (heightImage * widthImage) / threadsX;
    if ((heightImage * widthImage) % threadsX) nblocks++;
    // Bug fix: height/width were passed swapped; harmless while the kernel
    // only uses their product, but latent if the dims are ever used apart.
    multiply_kernel<<<nblocks, threadsX, 0, stream>>>(image_in1, image_in2, image_out, heightImage, widthImage);
}
// Rescale pixels so that *max_value maps to bound_value:
// image_out[x] = (T_out)(image_in[x] / ((*max_value) / bound_value)).
// max_value is a device pointer (dereferenced by every thread).
// NOTE(review): if T_in is an integer type the divisions truncate.
template<typename T_in,typename T_out>
__global__
void normalize_kernel(T_in* image_in,T_out* image_out,T_in * max_value,T_out bound_value,int heightImage,int widthImage)
{
    unsigned int x=blockIdx.x*blockDim.x+threadIdx.x;
    T_in normalizing_constant=(*max_value)/bound_value;
    if(x<(heightImage*widthImage))
    {
        image_out[x]=(T_out)(image_in[x]/normalizing_constant);
    }
}
// Launch normalize_kernel: image_out = image_in / ((*max_value)/bound_value),
// where max_value is a device pointer read inside the kernel.
template <typename T_in,typename T_out>
void normalize_array(T_in* image_in, T_out* image_out, T_in * max_value, T_out bound_value, int heightImage, int widthImage, const int threadsX, cudaStream_t stream)
{
    int nblocks = (heightImage * widthImage) / threadsX;
    if ((heightImage * widthImage) % threadsX) nblocks++;
    // Bug fix: height/width were passed swapped; harmless while the kernel
    // only uses their product, but latent if the dims are ever used apart.
    normalize_kernel<<<nblocks, threadsX, 0, stream>>>(image_in, image_out, max_value, bound_value, heightImage, widthImage);
}
// Same as normalize_kernel but with max_value passed by value:
// image_out[x] = (T_out)(image_in[x] / (max_value / bound_value)).
template<typename T_in,typename T_out>
__global__
void normalize_kernel2(T_in* image_in, T_out* image_out, T_in max_value, T_out bound_value, int heightImage, int widthImage)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const T_in scale = max_value / bound_value;
    if (idx < (unsigned int)(heightImage * widthImage))
    {
        image_out[idx] = (T_out)(image_in[idx] / scale);
    }
}
// Launch normalize_kernel2 (by-value max_value variant of normalize_array).
template <typename T_in,typename T_out>
void normalize_array2(T_in* image_in, T_out* image_out, T_in max_value, T_out bound_value, int heightImage, int widthImage, const int threadsX, cudaStream_t stream)
{
    int nblocks = (heightImage * widthImage) / threadsX;
    if ((heightImage * widthImage) % threadsX) nblocks++;
    // Bug fix: height/width were passed swapped; harmless while the kernel
    // only uses their product, but latent if the dims are ever used apart.
    normalize_kernel2<<<nblocks, threadsX, 0, stream>>>(image_in, image_out, max_value, bound_value, heightImage, widthImage);
}
13,818 | #include "includes.h"
__constant__ float *c_Kernel;
// Elementwise in-place addition: d_dst[i] += d_src_1[i] for i < len.
__global__ void add(float *d_dst, float *d_src_1, int len) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;                     // guard the grid tail
    d_dst[i] += d_src_1[i];
}
13,819 | #include <stdio.h>
// CUDA to assign a value to each element of the array of integers A[256] using 256 threads.
// Each A[i] is assigned with the value of 2*i, for i = 0 to 255.
#define T 256 // As Threads
// #define n 256
// Double every element of A in place; one thread per element, single block
// (indexes by threadIdx.x only, so it must be launched as <<<1, T>>>).
__global__ void vecMultiply(int *A)
{
    const int i = threadIdx.x;
    A[i] *= 2;
}
// Host driver: initialize a[i] = i + 1, double every element on the GPU,
// and print the host array before and after copying the result back.
int main (int argc, char *argv[])
{
    int i;
    int size = T*sizeof(int);
    int a[T], *devA;
    for (i=0; i< T; i++)
    {
        // NOTE(review): the file header says A[i] = 2*i, but with this
        // initialization the output is 2*(i+1).
        a[i] = i + 1;
    }
    // NOTE(review): CUDA return codes are unchecked throughout.
    cudaMalloc( (void**)&devA,size);
    cudaMemcpy( devA, a, size, cudaMemcpyHostToDevice);
    vecMultiply<<<1, T>>>(devA);
    // Prints the untouched host copy -- the kernel runs asynchronously on
    // the device copy, so these are still the pre-kernel values.
    printf("Before\n");
    for (i=0; i< T; i++)
    {
        printf("%d ", a[i]);
    }
    printf("\n");
    // Blocking copy: synchronizes with the kernel before reading results.
    cudaMemcpy(a, devA, size, cudaMemcpyDeviceToHost);
    cudaFree(devA);
    printf("After\n");
    for (i=0; i < T; i++) {
        printf("%d ",a[i]);
    }
    printf("\n");
}
|
13,820 | #include "includes.h"
//#define DEBUG
//#define HANDLE_ERROR(x) if((x) != 0) cout << "Error!" << endl;
using namespace std;
// One sparse sub-block of a matrix in coordinate form. Field semantics are
// partly inferred from CudaMergeResults -- TODO confirm against the
// (unseen) construction code.
struct SubBlock{
    int * nnz_global_i_idx;   // per-nonzero global input index -- presumably 1-based; verify
    int * nnz_global_o_idx;   // per-nonzero global output index; 1-based (CudaMergeResults subtracts 1)
    int nnz;                  // number of nonzeros in this sub-block
    int * nnz_local_r_idx;    // per-nonzero row index local to the sub-block -- TODO confirm
    int * nnz_local_c_idx;    // per-nonzero column index local to the sub-block -- TODO confirm
    float * nnz_values;       // nonzero values
};
//void printSubBlocksInfo(SubBlock * sbs, int nsbs, int mem_b_size);
// Accumulate the per-block partial results sub_y_arr [nblocks, mem_b_size]
// into the output vector d_y. Runs entirely on a single thread (guarded to
// block 0 / thread 0), so the += updates need no atomics -- at the cost of
// a fully serialized merge. nnz_global_o_idx entries are 1-based (hence the
// "- 1"). NOTE(review): d_x, nrows and ncols are unused here.
__global__ void CudaMergeResults(SubBlock * d_sbs, float * d_x, float * d_y, int nblocks, int mem_b_size, int nrows, int ncols , float * sub_y_arr){
    if(blockIdx.x == 0 && threadIdx.x == 0){
        for(int i = 0; i < nblocks; i++){
            int * outLocs = d_sbs[i].nnz_global_o_idx;
            for(int j = 0; j < mem_b_size; j++){
                d_y[outLocs[j] - 1] += sub_y_arr[i * mem_b_size + j];
            }
        }
    }
}
13,821 | #include<stdio.h>
#define N 160
#define THREADS 16
// Block-wise dot-product partial sums: after the launch, result_d[b] holds
// sum over block b of A[i]*B[i]; the host adds the gridDim.x partials.
// Assumes blockDim.x == THREADS (power of two) and N a multiple of it
// (there is no tail guard, as in the original).
__global__ void sum(float *A, float *B, float *result_d)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    result_d[i] = A[i] * B[i];          // per-element product (scratch)
    __shared__ float sresult[THREADS];
    sresult[threadIdx.x] = result_d[i];
    // Bug fix: the original had no barrier between this shared-memory store
    // and the first reduction read, and placed __syncthreads() inside the
    // divergent `if (threadIdx.x < s)` branch. A barrier must be reached by
    // every thread of the block, so both were undefined behavior / races.
    __syncthreads();
    // Tree reduction in shared memory.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (threadIdx.x < s)
            sresult[threadIdx.x] += sresult[threadIdx.x + s];
        __syncthreads();                // outside the divergent branch
    }
    if (threadIdx.x == 0) {
        result_d[blockIdx.x] = sresult[0];   // publish this block's partial
    }
}
// Dot product of A and B on the GPU: the kernel leaves one partial sum per
// block; the host accumulates the gridDim.x partials and prints the result.
int main()
{
    float A[N], B[N], tresult, *result;
    float *A_d, *B_d, *result_d;
    int i;
    dim3 dimBlock(THREADS);
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x);   // ceil(N / THREADS)
    for(i=0; i<N; i++){
        A[i] = i * 2;
        B[i] = N - i;
    }
    // NOTE(review): CUDA return codes are unchecked throughout.
    cudaMalloc((void **) &A_d, sizeof(float)*N);
    cudaMalloc((void **) &B_d, sizeof(float)*N);
    // Bug fix: result_d was allocated with only dimGrid.x floats, but the
    // kernel first uses it as an N-element scratch buffer
    // (result_d[i] = A[i]*B[i] for every i < N) before compacting the
    // per-block sums into the first dimGrid.x slots -- an out-of-bounds
    // device write. It must hold N floats.
    cudaMalloc((void **) &result_d, sizeof(float)*N);
    cudaMemcpy(A_d, A, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B, sizeof(float)*N, cudaMemcpyHostToDevice);
    sum<<<dimGrid, dimBlock>>>(A_d, B_d, result_d);
    result = (float*)malloc(sizeof(float)*dimGrid.x);
    // Blocking copy synchronizes with the kernel; only the per-block
    // partials are needed on the host.
    cudaMemcpy(result, result_d, sizeof(float)*dimGrid.x, cudaMemcpyDeviceToHost);
    tresult = 0.0;
    for(i=0; i<dimGrid.x; i++){
        tresult += result[i];
    }
    printf("GPU dotprod : %f\n", tresult);
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(result_d);
    free(result);
}
|
13,822 | #include "includes.h"
// Initialize BFS state for `source`: mark it visited at level 0, put it in
// the previous-frontier queue, and set the frontier tail counters
// (previous frontier holds 1 node, current frontier is empty).
// NOTE(review): there is no thread guard -- presumably launched with a
// single thread (<<<1,1>>>); extra threads would redundantly repeat the
// same stores. Confirm at the call site.
__global__ void frontier_init_kernel(int* p_frontier_tail_d, int* c_frontier_tail_d, int* p_frontier_d, int* visited_d, int* label_d, int source) {
    visited_d[source] = 1;
    *c_frontier_tail_d = 0;
    p_frontier_d[0] = source;
    *p_frontier_tail_d = 1;
    label_d[source] = 0;
}
13,823 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <vector>
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
typedef unsigned char uchar;
#define NC_MAX 32 // максимальное количество классов
double3 avg[NC_MAX]; // средние
double cov[3 * 3 * NC_MAX]; // ковариационные матрицы
__constant__ double3 AVG[NC_MAX];
__constant__ double COV[3 * 3 * NC_MAX];
__host__ __device__
double dot(const double *A, const double *a, const double *b, int n)
{
    /*
     * Bilinear form: res = a ^ T * A * b for an n x n row-major matrix A.
     * Used on the device to evaluate the (negated) covariance-weighted
     * distance in `kernel`.
     */
    int i, j;
    double res = 0;
    for (i = 0; i < n; i++)
    {
        for (j = 0; j < n; j++)
        {
            res += A[i * n + j] * a[i] * b[j];
        }
    }
    return res;
}
__host__ __device__ // usable on both host and device
void mat_mul(const double *A, const double *B, double *C, int m, int n, int l, double alpha=1, double beta=0)
{
    /*
     * C = alpha * A * B + beta * C
     *
     * m - number of rows of A
     * n - number of columns of A and number of rows of B
     * l - number of columns of B
     */
    int i, j, k;
    double dot;
    for (i = 0; i < m; i++)
    {
        for (j = 0; j < l; j++)
        {
            dot = 0;
            for (k = 0; k < n; k++)
            {
                dot += A[i * n + k] * B[k * l + j];
            }
            // beta == 0 is special-cased so an uninitialized C is never read.
            C[i * l + j] = alpha * dot + (beta == 0 ? 0 : beta * C[i * l + j]);
        }
    }
}
__host__ __device__
void mat_mul_C(const double *A, double *B, double c, int m, int n)
{
    /*
     * B = c * A : elementwise scaling of an m x n row-major matrix.
     */
    const int total = m * n;
    for (int t = 0; t < total; t++)
    {
        B[t] = c * A[t];
    }
}
__host__ __device__
void mat_sum(const double *A, const double *B, double *C, int m, int n, double alpha=1, double beta=1)
{
    /*
     * C = alpha * A + beta * B, elementwise over an m x n row-major matrix.
     */
    const int total = m * n;
    for (int t = 0; t < total; t++)
    {
        C[t] = alpha * A[t] + beta * B[t];
    }
}
__host__ __device__
void mat_set(double *A, int n, double alpha=0, double beta=0)
{
    /*
     * Fill the n x n matrix A with alpha on the diagonal and beta elsewhere.
     */
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            A[i * n + j] = (i == j) ? alpha : beta;
        }
    }
}
// Swap rows i1 and i2 of the n x n row-major matrix A (host-only helper
// used by the LUP pivoting step).
void mat_swap_rows(double *A, int n, int i1, int i2)
{
    for (int j = 0; j < n; j++)
    {
        const double tmp = A[i1 * n + j];
        A[i1 * n + j] = A[i2 * n + j];
        A[i2 * n + j] = tmp;
    }
}
// A_t = A^T for an n x n row-major matrix. Out-of-place: callers pass
// distinct buffers (in-place use would clobber unread entries).
void mat_transpose(const double *A, double *A_t, int n)
{
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            A_t[i * n + j] = A[j * n + i];
        }
    }
}
// In-place LUP decomposition with partial pivoting (after Cormen et al.):
// on success A packs L (unit diagonal implicit; below-diagonal multipliers)
// and U (diagonal and above), and pi records the row permutation.
// Returns false when the best available pivot is smaller than eps (matrix
// singular to working precision); A is then left partially modified.
bool LUP(double *A, int n, int *pi, double eps=1e-10)
{
    int i, j, k, k_;
    int tmp;
    for (i = 0; i < n; i++)
    {
        pi[i] = i;    // identity permutation to start
    }
    for (k = 0; k < n; k++)
    {
        // Partial pivoting: pick the row with the largest |A[i][k]|.
        k_ = k;
        for (i = k + 1; i < n; i++)
        {
            if (fabs(A[i * n + k]) > fabs(A[k_ * n + k]))
            {
                k_ = i;
            }
        }
        if (fabs(A[k_ * n + k]) < eps)
        {
            return false;    // no usable pivot -> singular
        }
        if (k != k_)
        {
            tmp = pi[k];
            pi[k] = pi[k_];
            pi[k_] = tmp;
            mat_swap_rows(A, n, k, k_);
        }
        // Eliminate below the pivot, storing multipliers in place (L part).
        for (i = k + 1; i < n; i++)
        {
            A[i * n + k] /= A[k * n + k];
            for (j = k + 1; j < n; j++)
            {
                A[i * n + j] -= A[i * n + k] * A[k * n + j];
            }
        }
    }
    return true;
}
// Solve L*U*x = P*b given the packed LUP factors from LUP().
// work - scratch vector of length n.
void LUP_solve(const double *LU, const int *pi, const double *b, int n, double *x, double *work)
{
    double *y = work;
    memset(y, 0, n * sizeof(double));
    // Forward substitution: L*y = P*b (L has an implicit unit diagonal).
    for (int i = 0; i < n; i++)
    {
        double acc = 0;
        for (int j = 0; j < i; j++)
            acc += LU[i * n + j] * y[j];
        y[i] = b[pi[i]] - acc;
    }
    // Back substitution: U*x = y.
    for (int i = n - 1; i >= 0; i--)
    {
        double acc = 0;
        for (int j = i + 1; j < n; j++)
            acc += LU[i * n + j] * x[j];
        x[i] = (y[i] - acc) / LU[i * n + i];
    }
}
void LUP_inv_mat(double *A, double *A_inv, int n, double *work, int *iwork, double eps=1e-10) // matrix inverse via LUP decomposition (after Cormen et al.)
{
    /*
     * Invert the n x n matrix A into A_inv by LUP-decomposing A (destroys
     * A!) and solving A * x = e_i for each unit vector e_i.
     * work  - scratch of length n^2 + 2n; iwork - scratch of length n.
     * NOTE(review): the boolean result of LUP() is ignored -- a
     * (near-)singular A silently yields garbage.
     */
    int i;
    double *X, *e, *space;
    int *pi;
    X = work;             // rows of A_inv^T, one per solve
    e = X + n * n;        // current unit vector
    space = e + n;        // scratch for LUP_solve
    pi = iwork;
    memset(e, 0, n * sizeof(double));
    e[0] = 1;
    LUP(A, n, pi, eps);
    for (i = 0; i < n - 1; i++)
    {
        LUP_solve(A, pi, e, n, X + i * n, space);
        e[i] = 0;         // advance e_i -> e_{i+1}
        e[i + 1] = 1;
    }
    LUP_solve(A, pi, e, n, X + i * n, space);   // last column (i == n-1 here)
    mat_transpose(X, A_inv, n);                 // X holds columns as rows
}
// Per-pixel maximum-likelihood-style classification: for every pixel p,
// pick the class j maximizing -(p - AVG[j])^T * COV[j] * (p - AVG[j]),
// where AVG/COV (inverted covariances) live in constant memory. The
// winning class index is stored in the pixel's alpha channel.
__global__
void kernel(int nc, uchar4 *im, int w, int h)
{
    int i, j, jc, idx;
    int offset;
    double3 to_dot;
    double dist, dist_max;
    idx = blockDim.x * blockIdx.x + threadIdx.x; // absolute thread index
    offset = blockDim.x * gridDim.x; // total number of threads
    for (i = idx; i < w * h; i += offset) // grid-stride loop over all pixels
    {
        dist_max = -INFINITY;
        jc = 0;
        for (j = 0; j < nc; j++) // loop over the classes
        {
            to_dot = make_double3((double)im[i].x - AVG[j].x,
            (double)im[i].y - AVG[j].y,
            (double)im[i].z - AVG[j].z);
            dist = -dot(COV + j * 3 * 3, (double *)&to_dot, (double *)&to_dot, 3); // bilinear form from the classifier formula
            if (dist > dist_max) // keep the class with the largest dist
            {
                jc = j;
                dist_max = dist;
            }
        }
        im[i].w = jc; // store the winning class index in alpha
    }
}
// Read a raw (w, h, uchar4[]) image, estimate each class's mean and inverse
// covariance from user-supplied sample pixels (stdin), classify every pixel
// on the GPU, and write the result in the same raw format.
int main()
{
    int nc, np, i, j, x, y, w, h;
    uchar4 *im = NULL, *im_dev = NULL;
    std::vector<double3> v(0);
    double mat[3 * 3],
    work[3 * 3 + 2 * 3];
    int iwork[3];
    FILE *fp;
    char name_src_im[256], name_dst_im[256];
    dim3 blocks(256), threads(256);
    scanf("%s\n%s\n%d", name_src_im, name_dst_im, &nc);
    fp = fopen(name_src_im, "rb");
    if (fp == NULL)
    {
        fprintf(stderr, "Error: can't open %s\n", name_src_im);
        return 0;
    }
    fread(&w, sizeof(int), 1, fp);
    fread(&h, sizeof(int), 1, fp);
    im = (uchar4 *)malloc(w * h * sizeof(uchar4));
    if (im == NULL)
    {
        fprintf(stderr, "Error: not enough memory in CPU\n");
        goto FREE;
    }
    CSC(cudaMalloc(&im_dev, w * h * sizeof(uchar4)));
    fread(im, sizeof(uchar4), w * h, fp); // read the pixels
    fclose(fp);
    for (j = 0; j < nc; j++) // loop over the classes
    {
        scanf("%d", &np); // number of sample pixels in this class
        if(v.size() < np) v.resize(np);
        avg[j] = make_double3(0, 0, 0);
        for (i = 0; i < np; i++) // accumulate the mean of class j
        {
            scanf("%d %d", &x, &y);
            v[i] = make_double3((double)im[y * w + x].x,
            (double)im[y * w + x].y,
            (double)im[y * w + x].z);
            avg[j].x += v[i].x;
            avg[j].y += v[i].y;
            avg[j].z += v[i].z;
        }
        avg[j].x /= np;
        avg[j].y /= np;
        avg[j].z /= np;
        if (np > 1)
        {
            mat_set(mat, 3, 0, 0); // zero-initialize the accumulator
            for (i = 0; i < np; i++) // accumulate the covariance of class j
            {
                v[i].x = v[i].x - avg[j].x;
                v[i].y = v[i].y - avg[j].y;
                v[i].z = v[i].z - avg[j].z;
                // treat double3 as a plain double[3] vector
                mat_mul((double *)(v.data() + i), (double *)(v.data() + i), mat, 3, 1, 3, 1, 1); // outer product 3x1 * 1x3 accumulated into the 3x3 sum
            }
            mat_mul_C(mat, mat, 1. / (np - 1), 3, 3); // unbiased (1/(np-1)) normalization
            LUP_inv_mat(mat, cov + j * 3 * 3, 3, work, iwork); // store the inverted covariance of class j (work/iwork avoid mallocs inside)
        }
        else
        {
            mat_set(cov + j * 3 * 3, 3, 1, 0); // identity fallback for a single sample
        }
    }
    /* Copy the statistics into constant memory */
    CSC(cudaMemcpyToSymbol(AVG, avg, nc * sizeof(double3), 0, cudaMemcpyHostToDevice));
    CSC(cudaMemcpyToSymbol(COV, cov, nc * 3 * 3 * sizeof(double), 0, cudaMemcpyHostToDevice));
    /* Copy the image */
    cudaMemcpy(im_dev, im, w * h * sizeof(uchar4), cudaMemcpyHostToDevice); // upload the image
    kernel<<<blocks, threads>>>(nc, im_dev, w, h);
    CSC(cudaGetLastError());
    CSC(cudaMemcpy(im, im_dev, w * h * sizeof(uchar4), cudaMemcpyDeviceToHost));
    fp = fopen(name_dst_im, "wb");
    if (fp == NULL)
    {
        fprintf(stderr, "Error: can't open %s\n", name_dst_im);
        goto FREE;
    }
    fwrite(&w, sizeof(int), 1, fp);
    fwrite(&h, sizeof(int), 1, fp);
    fwrite(im, sizeof(uchar4), w * h, fp);
    fclose(fp);
    FREE:
    free(im);
    cudaFree(im_dev);
    return 0;
}
|
13,824 | #include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
#include <time.h>
// #define NUM_SPHERES 2
#define NUM_SPHERES 200
// #define BLOCK_SIZE 1
#define BLOCK_SIZE 256
struct Sphere {
float posX, posY, posZ;
float velX, velY, velZ;
float radius;
unsigned id;
};
float *d_posBuf, *d_velBuf, *d_radBuf, *d_velBuf_temp;
unsigned *d_idBuf;
size_t d_posBuf_size, d_velBuf_size, d_radBuf_size, d_idBuf_size;
// Publish step of the double-buffered velocity update: copy each sphere's
// 3-component velocity from the temp buffer back into the live buffer.
__global__ void swapVelBufKernel(float *d_velBuf, float *d_velBuf_temp) {
    const unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
    if (index >= NUM_SPHERES)
        return;                         // guard the grid tail
    for (unsigned c = 0; c < 3; c++)
        d_velBuf[index * 3 + c] = d_velBuf_temp[index * 3 + c];
}
// One thread per sphere: compare against every other sphere, tile by tile
// through shared memory (one BLOCK_SIZE slice per iteration), and write the
// post-collision velocity for this sphere into d_velBuf_temp
// (double-buffered; swapVelBufKernel publishes it afterwards).
// Collision response: per-axis 1-D elastic collision with mass = radius^3.
// NOTE(review): there is no `index < NUM_SPHERES` guard on the initial
// register loads, and the tile loads of the last partial block index past
// NUM_SPHERES -- out-of-bounds reads unless the d_* buffers are padded to
// a multiple of BLOCK_SIZE. Confirm the (unseen) allocation sizes.
__global__ void fillTempVelKernel(float *d_posBuf, float *d_velBuf,
float *d_radBuf, float *d_velBuf_temp) {
    // Per-block staging tiles for one BLOCK_SIZE slice of spheres.
    __shared__ float posBufLocal[BLOCK_SIZE * 3];
    __shared__ float velBufLocal[BLOCK_SIZE * 3];
    __shared__ float radBufLocal[BLOCK_SIZE];
    unsigned i, j, k, index, otherIndex; // otherIndex = index of other sphere
    float pos[3], vel[3], outVel[3], rad, mass; // outVel = output velocity
    float otherPos[3], otherVel[3], otherRad, otherMass; // data of other sphere
    float delta[3]; // Used to compare distance between sphere centers
    float distanceSquared, radiusSumSquared;
    bool didImpact;
    index = threadIdx.x + blockDim.x * blockIdx.x;
    // Load this thread's sphere into registers.
    for(i = 0; i < 3; i++) {
        pos[i] = d_posBuf[index * 3 + i];
        vel[i] = d_velBuf[index * 3 + i];
        outVel[i] = 0;
    }
    rad = d_radBuf[index];
    mass = rad * rad * rad;   // mass proportional to volume (r^3)
    didImpact = false;
    // Tile loop: i indexes the block-sized slice of spheres being staged.
    // Loop bound (gridDim.x) is uniform across the block, so the barriers
    // below are reached by every thread.
    for(i = 0; i < gridDim.x; i++) {
        // For each block, load posBufLocal and velBufLocal
        posBufLocal[threadIdx.x * 3 + 0] = d_posBuf[blockDim.x * 3 * i +
        threadIdx.x * 3 + 0];
        posBufLocal[threadIdx.x * 3 + 1] = d_posBuf[blockDim.x * 3 * i +
        threadIdx.x * 3 + 1];
        posBufLocal[threadIdx.x * 3 + 2] = d_posBuf[blockDim.x * 3 * i +
        threadIdx.x * 3 + 2];
        velBufLocal[threadIdx.x * 3 + 0] = d_velBuf[blockDim.x * 3 * i +
        threadIdx.x * 3 + 0];
        velBufLocal[threadIdx.x * 3 + 1] = d_velBuf[blockDim.x * 3 * i +
        threadIdx.x * 3 + 1];
        velBufLocal[threadIdx.x * 3 + 2] = d_velBuf[blockDim.x * 3 * i +
        threadIdx.x * 3 + 2];
        radBufLocal[threadIdx.x] = d_radBuf[blockDim.x * i + threadIdx.x];
        __syncthreads();    // tile fully staged before any thread reads it
        // Now all of the local data is filled
        // Check against each sphere in local data
        for(j = 0; j < blockDim.x; j++) {
            otherIndex = j + blockDim.x * i; // j = threadIdx.x, i = blockIdx.x
            // Check if the sphere data is not garbage data or the same data
            if((otherIndex < NUM_SPHERES) && (otherIndex != index)) {
                // Fill data of other sphere
                for(k = 0; k < 3; k++) {
                    otherPos[k] = posBufLocal[j * 3 + k];
                    otherVel[k] = velBufLocal[j * 3 + k];
                }
                otherRad = radBufLocal[j];
                // Check to see if they collide
                // Get difference in x, y, and z (0, 1, 2) for indexes
                for(k = 0; k < 3; k++) {
                    delta[k] = pos[k] - otherPos[k];
                }
                // Calculate distanceSquared and radiusSumSquared
                distanceSquared = 0;
                radiusSumSquared = 0;
                for(k = 0; k < 3; k++) {
                    distanceSquared = distanceSquared + delta[k] * delta[k];
                }
                radiusSumSquared = (rad + otherRad) * (rad + otherRad);
                if(distanceSquared < radiusSumSquared) {
                    // We collide
                    didImpact = true;
                    // Calculate the mass of the other sphere
                    otherMass = otherRad * otherRad * otherRad;
                    // 1-D elastic collision formula applied per axis;
                    // contributions from multiple impacts accumulate.
                    for(k = 0; k < 3; k++) {
                        outVel[k] = outVel[k] +
                        (mass - otherMass) / (mass + otherMass) * vel[k] +
                        (2 * otherMass) / (mass + otherMass) * otherVel[k];
                    }
                }
            }
        }
        __syncthreads();    // all reads done before the next tile overwrites
        // Now that all threads in this block have processed the local data
        // We can now move to the next set of local data
    }
    // Put the new velocity in outVel into the velocity buffer
    if(!didImpact) {
        // If the object did not impact with anything, continue with current
        // velocity
        for(i = 0; i < 3; i++) {
            outVel[i] = vel[i];
        }
    }
    for(i = 0; i < 3; i++) {
        d_velBuf_temp[index * 3 + i] = outVel[i];
    }
}
// Runs one GPU velocity-update pass: compute tentative post-collision
// velocities into d_velBuf_temp, then commit them back into d_velBuf.
void updateVelocity() {
  // Round the grid up so every sphere gets a thread.
  unsigned numBlocks = (NUM_SPHERES + BLOCK_SIZE - 1) / BLOCK_SIZE;
  dim3 dimGrid(numBlocks, 1, 1);
  dim3 dimBlock(BLOCK_SIZE, 1, 1);
  // Stage 1: fill the temporary velocity buffer.
  fillTempVelKernel<<<dimGrid, dimBlock>>>(d_posBuf, d_velBuf, d_radBuf,
      d_velBuf_temp);
  // Stage 2: copy the temporary buffer into the real one.
  swapVelBufKernel<<<dimGrid, dimBlock>>>(d_velBuf, d_velBuf_temp);
}
// Returns the elapsed time from x to y in microseconds (negative when y
// precedes x).
double time_diff(struct timeval x, struct timeval y) {
  double start_us = (double) x.tv_sec * 1000000 + (double) x.tv_usec;
  double end_us = (double) y.tv_sec * 1000000 + (double) y.tv_usec;
  return end_us - start_us;
}
// CPU reference implementation of the velocity update: copies the sphere
// buffers off the GPU, runs the same all-pairs elastic-collision pass
// serially (timed with gettimeofday), and copies the resulting velocities
// back into d_velBuf. Used for validating/benchmarking the kernel path.
// NOTE(review): assumes d_*Buf_size globals match NUM_SPHERES — confirm.
void updateVelocitySerial() {
  // Host-side mirrors of the device buffers (xyz-interleaved layout).
  float posBuf[NUM_SPHERES * 3];
  float velBuf[NUM_SPHERES * 3];
  float temp_velBuf[NUM_SPHERES * 3];
  float radBuf[NUM_SPHERES];
  struct Sphere s1, s2;
  unsigned i, j, k;
  float delta[3];
  float distanceSquared, radiusSumSquared;
  float s1Mass, s2Mass;
  float outVel[3];
  bool didImpact;
  struct timeval before, after;
  for(i = 0; i < 3; i++) {
    outVel[i] = 0;
  }
  // Get the buffers from the GPU
  cudaMemcpy(posBuf, d_posBuf, d_posBuf_size, cudaMemcpyDeviceToHost);
  cudaMemcpy(velBuf, d_velBuf, d_velBuf_size, cudaMemcpyDeviceToHost);
  cudaMemcpy(radBuf, d_radBuf, d_radBuf_size, cudaMemcpyDeviceToHost);
  gettimeofday(&before, NULL);
  for(i = 0; i < NUM_SPHERES; i++) {
    didImpact = false;
    outVel[0] = 0;
    outVel[1] = 0;
    outVel[2] = 0;
    // Sphere under test
    s1.posX = posBuf[i * 3 + 0];
    s1.posY = posBuf[i * 3 + 1];
    s1.posZ = posBuf[i * 3 + 2];
    s1.velX = velBuf[i * 3 + 0];
    s1.velY = velBuf[i * 3 + 1];
    s1.velZ = velBuf[i * 3 + 2];
    s1.radius = radBuf[i];
    // mass ~ r^3 (uniform density; constants cancel in the ratios below)
    s1Mass = s1.radius * s1.radius * s1.radius;
    for(j = 0; j < NUM_SPHERES; j++) {
      if(j == i) {
        continue;
      }
      s2.posX = posBuf[j * 3 + 0];
      s2.posY = posBuf[j * 3 + 1];
      s2.posZ = posBuf[j * 3 + 2];
      s2.velX = velBuf[j * 3 + 0];
      s2.velY = velBuf[j * 3 + 1];
      s2.velZ = velBuf[j * 3 + 2];
      s2.radius = radBuf[j];
      delta[0] = s1.posX - s2.posX;
      delta[1] = s1.posY - s2.posY;
      delta[2] = s1.posZ - s2.posZ;
      distanceSquared = 0;
      radiusSumSquared = 0;
      for(k = 0; k < 3; k++) {
        distanceSquared = distanceSquared + delta[k] * delta[k];
      }
      radiusSumSquared = (s1.radius + s2.radius) * (s1.radius + s2.radius);
      // Overlap test: centers closer than the sum of radii
      if(distanceSquared < radiusSumSquared) {
        // We collide: accumulate the 1-D elastic-collision response per axis
        // (matches the GPU kernel, including summation over multiple impacts)
        didImpact = true;
        s2Mass = s2.radius * s2.radius * s2.radius;
        outVel[0] = outVel[0] +
            (s1Mass - s2Mass) / (s1Mass + s2Mass) * s1.velX +
            (2 * s2Mass) / (s1Mass + s2Mass) * s2.velX;
        outVel[1] = outVel[1] +
            (s1Mass - s2Mass) / (s1Mass + s2Mass) * s1.velY +
            (2 * s2Mass) / (s1Mass + s2Mass) * s2.velY;
        outVel[2] = outVel[2] +
            (s1Mass - s2Mass) / (s1Mass + s2Mass) * s1.velZ +
            (2 * s2Mass) / (s1Mass + s2Mass) * s2.velZ;
      }
    }
    // No impact: keep the current velocity unchanged
    if(!didImpact) {
      for(k = 0; k < 3; k++) {
        outVel[k] = velBuf[i * 3 + k];
      }
    }
    for(k = 0; k < 3; k++) {
      temp_velBuf[i * 3 + k] = outVel[k];
    }
    didImpact = false;
  }
  gettimeofday(&after, NULL);
  printf("Total time elapsed: %.01f us\n", time_diff(before, after));
  // Copy temp_velBuf into GPU
  cudaMemcpy(d_velBuf, temp_velBuf, d_velBuf_size, cudaMemcpyHostToDevice);
}
// Advances each sphere's position by one step of its velocity (implicit
// dt = 1). One thread per sphere; surplus threads in the last block are
// masked off by the bounds check.
__global__ void updatePositionKernel(float *d_posBuf, float *d_velBuf) {
  unsigned sphere = threadIdx.x + blockDim.x * blockIdx.x;
  if(sphere >= NUM_SPHERES)
    return;
  for(unsigned axis = 0; axis < 3; axis++) {
    d_posBuf[sphere * 3 + axis] += d_velBuf[sphere * 3 + axis];
  }
}
// Launches updatePositionKernel over all spheres.
// Fix: the block count was derived from d_posBuf_size, which holds a size in
// BYTES (sizeof(float) * NUM_SPHERES * 3), so the original launched ~4x the
// blocks actually needed (harmless only because the kernel bounds-checks).
// Use the sphere count directly, exactly as updateVelocity() does.
void updatePosition() {
  unsigned numBlocks;
  numBlocks = NUM_SPHERES / BLOCK_SIZE;
  if(NUM_SPHERES % BLOCK_SIZE) numBlocks++; // round up for a partial block
  dim3 dimGrid(numBlocks, 1, 1);
  dim3 dimBlock(BLOCK_SIZE, 1, 1);
  // Kernel
  updatePositionKernel<<<dimGrid, dimBlock>>>(d_posBuf, d_velBuf);
}
// Copies the position/velocity/id buffers back to the host and prints one
// record per sphere (for debugging).
// Fix: the host temporaries were dimensioned with the d_*_size globals,
// which hold sizes in BYTES, so the arrays were 4x larger than needed and
// were runtime-sized VLAs (non-standard C++). Size them by element count;
// the cudaMemcpy byte counts are unchanged.
void printPosition() {
  float temp_posBuf[NUM_SPHERES * 3];
  float temp_velBuf[NUM_SPHERES * 3];
  unsigned temp_idBuf[NUM_SPHERES];
  unsigned i;
  // Copy from GPU to CPU (blocking, so the data is ready when we print)
  cudaMemcpy(temp_posBuf, d_posBuf, d_posBuf_size, cudaMemcpyDeviceToHost);
  cudaMemcpy(temp_velBuf, d_velBuf, d_velBuf_size, cudaMemcpyDeviceToHost);
  cudaMemcpy(temp_idBuf, d_idBuf, d_idBuf_size, cudaMemcpyDeviceToHost);
  for(i = 0; i < NUM_SPHERES; i++) {
    printf("sphere %u\n", temp_idBuf[i]);
    printf(" pos: %f %f %f\n", temp_posBuf[i * 3 + 0], temp_posBuf[i * 3 + 1],
        temp_posBuf[i * 3 + 2]);
    printf(" vel: %f %f %f\n", temp_velBuf[i * 3 + 0], temp_velBuf[i * 3 + 1],
        temp_velBuf[i * 3 + 2]);
  }
}
// Advances the simulation by one tick: integrate positions first, then
// resolve collisions into new velocities. The serial reference path and
// debug printing are available below but disabled.
void loop() {
  printf("--- TICK ---\n");
  updatePosition();
  updateVelocity();
  // updateVelocitySerial();
  // printPosition();
  // sleep(1);
}
// Sets up the sphere test scene, uploads it to the GPU, runs 100 simulation
// ticks, and releases the device buffers.
// Fix: the filler loop was hard-coded to `i < 200`, which writes past the
// end of spheres[] whenever NUM_SPHERES < 200 (and leaves spheres
// uninitialized when NUM_SPHERES > 200). It now runs to NUM_SPHERES.
// NOTE(review): the active test case still assumes NUM_SPHERES >= 2.
int main() {
  // Initialize the object data
  struct Sphere spheres[NUM_SPHERES];
  unsigned i;
  // Uncomment any of the test cases
  // Test case: Different velocities -----
  // UNCOMMENT START
  /* // First sphere
  spheres[0].posX = 0;
  spheres[0].posY = 0;
  spheres[0].posZ = 0;
  spheres[0].velX = 0.2;
  spheres[0].velY = 0;
  spheres[0].velZ = 0;
  spheres[0].radius = 1;
  spheres[0].id = 0;
  // Second sphere
  spheres[1].posX = 3;
  spheres[1].posY = 0;
  spheres[1].posZ = 0;
  spheres[1].velX = 0.1;
  spheres[1].velY = 0;
  spheres[1].velZ = 0;
  spheres[1].radius = 1;
  spheres[1].id = 1; */
  // UNCOMMENT END
  // Test case: Different directions -----
  // UNCOMMENT START
  /* // First sphere
  spheres[0].posX = 0;
  spheres[0].posY = 0;
  spheres[0].posZ = 0;
  spheres[0].velX = .1;
  spheres[0].velY = 0;
  spheres[0].velZ = 0;
  spheres[0].radius = 1;
  spheres[0].id = 0;
  // Second sphere
  spheres[1].posX = 2;
  spheres[1].posY = 4;
  spheres[1].posZ = 0;
  spheres[1].velX = 0;
  spheres[1].velY = -.1;
  spheres[1].velZ = 0;
  spheres[1].radius = 1;
  spheres[1].id = 1; */
  // UNCOMMENT END
  // Test case: Different masses -----
  // UNCOMMENT START
  // First sphere
  spheres[0].posX = 0;
  spheres[0].posY = 0;
  spheres[0].posZ = 0;
  spheres[0].velX = .1;
  spheres[0].velY = 0;
  spheres[0].velZ = 0;
  spheres[0].radius = 1;
  spheres[0].id = 0;
  // Second sphere (larger radius => larger mass)
  spheres[1].posX = 3;
  spheres[1].posY = 0;
  spheres[1].posZ = 0;
  spheres[1].velX = 0;
  spheres[1].velY = 0;
  spheres[1].velZ = 0;
  spheres[1].radius = 1.26;
  spheres[1].id = 1;
  // UNCOMMENT END
  // Fill the remaining spheres with a diagonal stream of unit spheres
  for(i = 2; i < NUM_SPHERES; i++) {
    spheres[i].posX = i * 2;
    spheres[i].posY = i * 2;
    spheres[i].posZ = 10;
    spheres[i].velX = 1;
    spheres[i].velY = 1;
    spheres[i].velZ = 0;
    spheres[i].radius = 1;
    spheres[i].id = i;
  }
  // Flat, xyz-interleaved host buffers mirroring the device layout
  float posBuf[NUM_SPHERES * 3];   // position buffer
  float velBuf[NUM_SPHERES * 3];   // velocity buffer
  float radBuf[NUM_SPHERES];       // radius buffer
  unsigned idBuf[NUM_SPHERES];     // ID buffer
  // Fill position buffer
  for(i = 0; i < NUM_SPHERES; i++) {
    posBuf[i * 3 + 0] = spheres[i].posX;
    posBuf[i * 3 + 1] = spheres[i].posY;
    posBuf[i * 3 + 2] = spheres[i].posZ;
  }
  // Fill velocity buffer
  for(i = 0; i < NUM_SPHERES; i++) {
    velBuf[i * 3 + 0] = spheres[i].velX;
    velBuf[i * 3 + 1] = spheres[i].velY;
    velBuf[i * 3 + 2] = spheres[i].velZ;
  }
  // Fill radius buffer
  for(i = 0; i < NUM_SPHERES; i++) {
    radBuf[i] = spheres[i].radius;
  }
  // Fill id buffer
  for(i = 0; i < NUM_SPHERES; i++) {
    idBuf[i] = spheres[i].id;
  }
  // Allocate space on the GPU for storing these buffers (sizes in bytes)
  d_posBuf_size = sizeof(float) * NUM_SPHERES * 3;
  d_velBuf_size = sizeof(float) * NUM_SPHERES * 3;
  d_radBuf_size = sizeof(float) * NUM_SPHERES;
  d_idBuf_size = sizeof(unsigned) * NUM_SPHERES;
  cudaMalloc((void **) &d_posBuf, d_posBuf_size);
  cudaMalloc((void **) &d_velBuf, d_velBuf_size);
  cudaMalloc((void **) &d_radBuf, d_radBuf_size);
  cudaMalloc((void **) &d_idBuf, d_idBuf_size);
  cudaMalloc((void **) &d_velBuf_temp, d_velBuf_size);
  // Copy into GPU
  cudaMemcpy(d_posBuf, posBuf, d_posBuf_size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_velBuf, velBuf, d_velBuf_size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_radBuf, radBuf, d_radBuf_size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_idBuf, idBuf, d_idBuf_size, cudaMemcpyHostToDevice);
  /* while(true) {
    loop();
  } */
  // Run a fixed number of simulation ticks
  for(i = 0; i < 100; i++) {
    loop();
  }
  // Release device memory
  cudaFree(d_posBuf);
  cudaFree(d_velBuf);
  cudaFree(d_radBuf);
  cudaFree(d_idBuf);
  cudaFree(d_velBuf_temp);
  return 0;
}
|
13,825 | /*
* CUDA C++ code to multiply two square matrices
*
* To compile and link this example, use
*
* nvcc matMul.cu -o matMul.x
*
* To run this code, use
*
* ./matMul.x
*/
#include <iostream>
// parameter describing the size of the matrices
const int rows = 1024;
const int cols = 1024;
// block size for tiled multiplication using shared memory
const int BLOCK_SIZE = 16;
// total number of blocks along X and Y
const int NUM_BLOCKS = rows/BLOCK_SIZE;
// print the matrix
// Prints a rows x cols matrix stored row-major, one matrix row per output
// line, preceded by a blank line.
void displayMatrix(float *a)
{
  std::cout << std::endl;
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++)
      std::cout << a[r*cols + c] << " ";
    std::cout << std::endl;
  }
}
// using global memory
// Computes C = A*B for rows x cols matrices with one thread per output
// element, reading A and B directly from global memory (naive version).
// Expects a 2-D launch where blockIdx*blockDim + threadIdx spans (cols, rows)
// in (x, y); the bounds check handles any grid overshoot.
__global__ void matrixMultiplyNaive(float *_a, // pointer to matrix A on the device
float *_b, // pointer to matrix B on the device
float *_c) // pointer to matrix C = AB on the device
{
  int col = blockIdx.x*blockDim.x + threadIdx.x;
  int row = blockIdx.y*blockDim.y + threadIdx.y;
  if (row < rows && col < cols) {
    float sum = 0.0f;
    // Inner product of row `row` of A with column `col` of B
    for (int k = 0; k < cols; k++)
      sum += _a[row*cols + k] * _b[k*cols + col];
    _c[row*cols + col] = sum;
  }
}
// Computes C = A*B using BLOCK_SIZE x BLOCK_SIZE shared-memory tiles to cut
// global-memory traffic by a factor of BLOCK_SIZE.
// Requires blockDim == (BLOCK_SIZE, BLOCK_SIZE) and rows, cols divisible by
// BLOCK_SIZE (true here: 1024/16), so no edge guards are needed.
__global__ void matrixMultiplyTiled(float *_a, // pointer to matrix A on the device
float *_b, // pointer to matrix B on the device
float *_c) // pointer to matrix C = AB on the device
{
  // One tile of A and one of B, reused by the whole thread block
  __shared__ float aTile[BLOCK_SIZE][BLOCK_SIZE];
  __shared__ float bTile[BLOCK_SIZE][BLOCK_SIZE];
  int col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
  int row = blockIdx.y*BLOCK_SIZE + threadIdx.y;
  float sum = 0.0f;
  // March the tile pair across the shared dimension
  for (int t = 0; t < NUM_BLOCKS; t++) {
    // Each thread stages one element of each tile
    aTile[threadIdx.y][threadIdx.x] = _a[row*cols + t*BLOCK_SIZE + threadIdx.x];
    bTile[threadIdx.y][threadIdx.x] = _b[(t*BLOCK_SIZE + threadIdx.y)*cols + col];
    __syncthreads(); // tiles fully written before anyone reads them
    // Partial inner product over this tile
    for (int k = 0; k < BLOCK_SIZE; k++)
      sum += aTile[threadIdx.y][k] * bTile[k][threadIdx.x];
    __syncthreads(); // reads done before the next iteration overwrites
  }
  _c[row*cols + col] = sum;
}
// the main program starts life on the CPU and calls device kernels as required
// Allocates two input matrices and one output matrix on host and device,
// runs the tiled matrix-multiply kernel, times it with CUDA events, and
// reports GFLOP/s.
// Fixes: arrays allocated with new[] were released with scalar `delete`
// (undefined behavior) — now `delete[]`; CUDA events are destroyed.
int main(int argc, char *argv[])
{
  // allocate space in the host for storing input arrays (a and b) and the output array (c)
  float *a = new float[rows*cols];
  float *b = new float[rows*cols];
  float *c = new float[rows*cols];
  // define device pointers for the same arrays when they'll be copied to the device
  float *_a, *_b, *_c;
  // allocate memory on the device (GPU) and check for errors (if any) during this call
  cudaError_t err;
  // allocate space for matrix A
  err = cudaMalloc((void **) &_a, rows*cols*sizeof(float));
  if (err != cudaSuccess) {
    std::cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl;
    exit(EXIT_FAILURE);
  }
  // allocate space for matrix B
  err = cudaMalloc((void **) &_b, rows*cols*sizeof(float));
  if (err != cudaSuccess) {
    std::cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl;
    exit(EXIT_FAILURE);
  }
  // allocate space for matrix C = AB
  err = cudaMalloc((void **) &_c, rows*cols*sizeof(float));
  if (err != cudaSuccess) {
    std::cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << std::endl;
    exit(EXIT_FAILURE);
  }
  // Fill matrix A with a constant (the column-major-looking index is
  // immaterial here since every element gets the same value)
  for (int row = 0; row < rows; row++) {
    for (int col = 0; col < cols; col++) {
      a[row + col*rows] = 2.0;
    }
  }
  if((rows<33) && (cols<33)) displayMatrix(a);
  // Fill matrix B
  for (int row = 0; row < rows; row++) {
    for (int col = 0; col < cols; col++) {
      b[row + col*rows] = 4.0;
    }
  }
  if((rows<33) && (cols<33)) displayMatrix(b);
  // Copy array contents of A and B from the host (CPU) to the device (GPU).
  // This lands in global memory, visible to all threads in all blocks.
  // WARNING: Global memory is slow (latency of a few 100 cycles)
  cudaMemcpy(_a, a, rows*cols*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(_b, b, rows*cols*sizeof(float), cudaMemcpyHostToDevice);
  dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
  // calculate number of blocks along X and Y in a 2D CUDA "grid"
  dim3 dimGrid( ceil(float(cols)/float(dimBlock.x)), ceil(float(rows)/float(dimBlock.y)), 1 );
  float time; // elapsed time in milliseconds
  // create CUDA events
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  // start the timer
  cudaEventRecord( start, 0);
  // launch the GPU kernel for parallel matrix multiplication of A and B
  // matrixMultiplyNaive<<<dimGrid,dimBlock>>>(_a, _b, _c);
  matrixMultiplyTiled<<<dimGrid,dimBlock>>>(_a, _b, _c);
  // stop the timer
  cudaEventRecord( stop, 0);
  cudaEventSynchronize( stop );
  cudaEventElapsedTime( &time, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  // print out the number of GFLOPs: 2*rows multiply-adds per element,
  // converted from ms to s and scaled to 1e9
  double GFLOPs = (double)(rows*cols) * 2*rows * 1000 / (1000*1000*1000*time);
  std::cout << "Elapsed Time = " << time << " GFLOPs = " << GFLOPs << std::endl;
  // copy the answer back to the host (CPU) from the device (GPU)
  cudaMemcpy(c, _c, rows*cols*sizeof(float), cudaMemcpyDeviceToHost);
  if((rows<33) && (cols<33)) displayMatrix(c);
  // free device memory
  cudaFree(_a);
  cudaFree(_b);
  cudaFree(_c);
  // free host memory (array form required for new[] allocations)
  delete[] a;
  delete[] b;
  delete[] c;
  // successful program termination
  return 0;
}
|
13,826 | /***************************************************************************//**
* \file intermediateVelocity.cu
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief kernels to generate the right hand side for the initial velocity solve
*/
#include "intermediateVelocity.h"
/**
* \namespace kernels
* \brief Contains all the custom-written CUDA kernels.
*/
namespace kernels
{
// Assigns a running 1-based ordinal in `count` to every tagged u-velocity
// node inside the window [startI, startI+width) x [startJ, startJ+height),
// then continues the same ordinal sequence over the tagged v-velocity nodes.
// NOTE(review): the loops are fully serial with one shared counter, so this
// kernel only produces a consistent numbering when launched single-threaded.
__global__
void size_LHS1(int *hybridTagsUV, int *count, int *startI, int *startJ, int width, int height, int nx, int ny)
{
	int counter = 0;
	// Pass 1: u-grid, flattened as iu = j*(nx-1) + i
	for (int j = startJ[0]; j < startJ[0] + height; j++)
	{
		for (int i = startI[0]; i < startI[0] + width; i++)
		{
			int iu = j*(nx-1) + i;
			if (hybridTagsUV[iu] > 0)
				count[iu] = ++counter;
		}
	}
	// Pass 2: v-grid, offset past the (nx-1)*ny u-entries
	for (int j = startJ[0]; j < startJ[0] + height; j++)
	{
		for (int i = startI[0]; i < startI[0] + width; i++)
		{
			int iv = j*nx + i + (nx-1)*ny;
			if (hybridTagsUV[iv] > 0)
				count[iv] = ++counter;
		}
	}
}
// Blends the u-momentum RHS with the interpolated immersed-boundary RHS at
// interior u-nodes: rhs1 = rhs1*ns_rhs + interp_rhs. Boundary nodes are left
// untouched. Launch: 1-D grid covering at least (nx-1)*ny threads.
__global__
void update_rhs1_x(double *rhs1, double *ns_rhs, double *interp_rhs, int nx, int ny)
{
	int iu = threadIdx.x + blockDim.x * blockIdx.x;
	if (iu >= (nx-1)*ny)
		return;
	int I = iu % (nx-1);
	int J = iu / (nx-1);
	// Skip the domain boundary in both directions
	if (I == 0 || I == nx-2 || J == 0 || J == ny-1)
		return;
	rhs1[iu] = rhs1[iu] * ns_rhs[iu] + interp_rhs[iu];
}
// Same blend for the v-momentum RHS at interior v-nodes. The v-block starts
// after the (nx-1)*ny u-entries. Launch: 1-D grid covering nx*(ny-1) threads.
__global__
void update_rhs1_y(double *rhs1, double *ns_rhs, double *interp_rhs, int nx, int ny)
{
	int ip = threadIdx.x + blockDim.x * blockIdx.x;
	if (ip >= nx*(ny-1))
		return;
	int I = ip % nx;
	int J = ip / nx;
	int iv = ip + (nx-1)*ny;
	// Skip the domain boundary in both directions
	if (I == 0 || I == nx-1 || J == 0 || J == ny-2)
		return;
	rhs1[iv] = rhs1[iv] * ns_rhs[iv] + interp_rhs[iv];
}
}
|
13,827 | /* Produced by CVXGEN, 2017-11-20 12:18:49 -0500. */
/* CVXGEN is Copyright (C) 2006-2017 Jacob Mattingley, jem@cvxgen.com. */
/* The code in this file is Copyright (C) 2006-2017 Jacob Mattingley. */
/* CVXGEN, or solvers produced by CVXGEN, cannot be used for commercial */
/* applications without prior written permission from Jacob Mattingley. */
/* Filename: solver.c. */
/* Description: Main solver file. */
#include "solver.cuh"
/* Returns the duality gap s'z over the 40 inequality multiplier pairs. */
CUDA_CALLABLE_MEMBER
double eval_gap(Workspace& work) {
  double gap = 0;
  for (int k = 0; k < 40; k++)
    gap += work.z[k] * work.s[k];
  return gap;
}
/* Loads the default interior-point solver settings. */
CUDA_CALLABLE_MEMBER
void set_defaults(Settings& settings) {
  /* Convergence tolerances. */
  settings.resid_tol = 1e-6;   /* equality/inequality residual tolerance */
  settings.eps = 1e-4;         /* duality-gap tolerance */
  settings.max_iters = 25;
  /* Iterative-refinement passes per KKT solve. */
  settings.refine_steps = 1;
  /* Fallback initial slack/multiplier values for the naive start. */
  settings.s_init = 1;
  settings.z_init = 1;
  /* Output control. */
  settings.debug = 0;
  settings.verbose = 1;
  settings.verbose_refinement = 0;
  /* Use the CVXOPT-style initialization by default. */
  settings.better_start = 1;
  /* KKT regularization. */
  settings.kkt_reg = 1e-7;
}
/* Wires the aliasing pointers into the packed variable vector work.x.
 * Layout (101 doubles total): x = primal vars [0,20), y = equality dual
 * [20,21), s = inequality slacks [21,61), z = inequality duals [61,101).
 * vars.Weights aliases the primal block. */
CUDA_CALLABLE_MEMBER
void setup_pointers(Workspace& work, Vars& vars) {
  work.y = work.x + 20;
  work.s = work.x + 21;
  work.z = work.x + 61;
  vars.Weights = work.x + 0;
}
/* Sets up variable indexing; for this problem only the pointer aliases in
 * setup_pointers() are needed. */
CUDA_CALLABLE_MEMBER
void setup_indexing(Workspace& work, Vars& vars) {
  setup_pointers(work, vars);
}
/* Naive starting point: x = 0, y = 0, z = z_init, and s_i = h_i whenever
 * h_i > 0 (so the slack starts feasible), otherwise s_init. */
CUDA_CALLABLE_MEMBER
void set_start(Workspace& work, Settings& settings) {
  int k;
  for (k = 0; k < 20; k++)
    work.x[k] = 0;
  for (k = 0; k < 1; k++)
    work.y[k] = 0;
  for (k = 0; k < 40; k++)
    work.s[k] = (work.h[k] > 0) ? work.h[k] : settings.s_init;
  for (k = 0; k < 40; k++)
    work.z[k] = settings.z_init;
}
/* Returns the objective value 0.5*x'Px + q'x (the constant term is zero
 * for this problem). work.rhs is borrowed as scratch for P*x. */
CUDA_CALLABLE_MEMBER
double eval_objv(Workspace& work, Params& params) {
  int k;
  double objv = 0;
  multbyP(work.rhs, work.x, params);
  for (k = 0; k < 20; k++)
    objv += work.x[k] * work.rhs[k];
  objv *= 0.5;
  for (k = 0; k < 20; k++)
    objv += work.q[k] * work.x[k];
  return objv;
}
/* Fills work.rhs with the affine-scaling (predictor) right-hand side, i.e.
 * the negated KKT residuals at the current iterate, in the block order
 * (r1, r2, r3, r4) = (-A'y - G'z - Px - q, -z, -Gx - s + h, -Ax + b).
 * work.buffer is used as scratch. */
CUDA_CALLABLE_MEMBER
void fillrhs_aff(Workspace& work, Params& params) {
  int i;
  double *r1, *r2, *r3, *r4;
  r1 = work.rhs;
  r2 = work.rhs + 20;
  r3 = work.rhs + 60;
  r4 = work.rhs + 100;
  /* r1 = -A^Ty - G^Tz - Px - q. */
  multbymAT(r1, work.y);
  multbymGT(work.buffer, work.z);
  for (i = 0; i < 20; i++)
    r1[i] += work.buffer[i];
  multbyP(work.buffer, work.x, params);
  for (i = 0; i < 20; i++)
    r1[i] -= work.buffer[i] + work.q[i];
  /* r2 = -z. */
  for (i = 0; i < 40; i++)
    r2[i] = -work.z[i];
  /* r3 = -Gx - s + h. */
  multbymG(r3, work.x);
  for (i = 0; i < 40; i++)
    r3[i] += -work.s[i] + work.h[i];
  /* r4 = -Ax + b. */
  multbymA(r4, work.x);
  for (i = 0; i < 1; i++)
    r4[i] += work.b[i];
}
/* Fills work.rhs for the Mehrotra centering-plus-corrector step, using the
 * affine directions already stored in work.lhs_aff. Only the slack block r2
 * is nonzero: r2 = S^{-1}(sigma*mu - ds_aff.*dz_aff). */
CUDA_CALLABLE_MEMBER
void fillrhs_cc(Workspace& work) {
  int i;
  double *r2;
  double *ds_aff, *dz_aff;
  double mu;
  double alpha;
  double sigma;
  double smu;
  double minval;
  r2 = work.rhs + 20;
  ds_aff = work.lhs_aff + 20;
  dz_aff = work.lhs_aff + 60;
  /* mu accumulates s'z here; it is averaged (divided by 40) further down. */
  mu = 0;
  for (i = 0; i < 40; i++)
    mu += work.s[i]*work.z[i];
  /* Don't finish calculating mu quite yet. */
  /* Find min(min(ds./s), min(dz./z)). */
  minval = 0;
  for (i = 0; i < 40; i++)
    if (ds_aff[i] < minval*work.s[i])
      minval = ds_aff[i]/work.s[i];
  for (i = 0; i < 40; i++)
    if (dz_aff[i] < minval*work.z[i])
      minval = dz_aff[i]/work.z[i];
  /* Find alpha. */
  if (-1 < minval)
    alpha = 1;
  else
    alpha = -1/minval;
  /* Mehrotra's heuristic: sigma = (gap after affine step / gap)^3. */
  sigma = 0;
  for (i = 0; i < 40; i++)
    sigma += (work.s[i] + alpha*ds_aff[i])*
        (work.z[i] + alpha*dz_aff[i]);
  sigma /= mu;
  sigma = sigma*sigma*sigma;
  /* Finish calculating mu now: 0.025 = 1/40 inequality constraints. */
  mu *= 0.025;
  smu = sigma*mu;
  /* Fill-in the rhs: zero everywhere except the slack block. */
  for (i = 0; i < 20; i++)
    work.rhs[i] = 0;
  for (i = 60; i < 101; i++)
    work.rhs[i] = 0;
  for (i = 0; i < 40; i++)
    r2[i] = work.s_inv[i]*(smu - ds_aff[i]*dz_aff[i]);
}
/* Performs settings.refine_steps rounds of iterative refinement on the KKT
 * solution `var` for right-hand side `target`, reusing the existing LDL
 * factorization. work.buffer/work.buffer2 are used as scratch.
 * NOTE(review): the update applies `var -= new_var` although the comment
 * says "+=" — presumably the sign conventions inside matrix_multiply /
 * ldl_solve make this correct in the CVXGEN-generated code; confirm before
 * changing. Also note the trailing verbose block tests `j == 0` after the
 * loop, where j == refine_steps. */
CUDA_CALLABLE_MEMBER
void refine(double *target, double *var, Workspace& work, Settings& settings) {
  int i, j;
  double *residual = work.buffer;
  double norm2;
  double *new_var = work.buffer2;
  for (j = 0; j < settings.refine_steps; j++) {
    norm2 = 0;
    matrix_multiply(residual, var, work, settings);
    for (i = 0; i < 101; i++) {
      residual[i] = residual[i] - target[i];
      norm2 += residual[i]*residual[i];
    }
#ifndef ZERO_LIBRARY_MODE
    if (settings.verbose_refinement) {
      if (j == 0)
        printf("Initial residual before refinement has norm squared %.6g.\n", norm2);
      else
        printf("After refinement we get squared norm %.6g.\n", norm2);
    }
#endif
    /* Solve to find new_var = KKT \ (target - A*var). */
    ldl_solve(residual, new_var, work, settings);
    /* Update var += new_var, or var += KKT \ (target - A*var). */
    for (i = 0; i < 101; i++) {
      var[i] -= new_var[i];
    }
  }
#ifndef ZERO_LIBRARY_MODE
  if (settings.verbose_refinement) {
    /* Check the residual once more, but only if we're reporting it, since */
    /* it's expensive. */
    norm2 = 0;
    matrix_multiply(residual, var, work, settings);
    for (i = 0; i < 101; i++) {
      residual[i] = residual[i] - target[i];
      norm2 += residual[i]*residual[i];
    }
    if (j == 0)
      printf("Initial residual before refinement has norm squared %.6g.\n", norm2);
    else
      printf("After refinement we get squared norm %.6g.\n", norm2);
  }
#endif
}
/* Returns ||-Gx - s + h||^2, the squared inequality-constraint residual.
 * work.buffer is used as scratch. */
CUDA_CALLABLE_MEMBER
double calc_ineq_resid_squared(Workspace& work) {
  int k;
  double total = 0;
  /* buffer = -G*x */
  multbymG(work.buffer, work.x);
  /* buffer = -Gx - s + h */
  for (k = 0; k < 40; k++)
    work.buffer[k] += -work.s[k] + work.h[k];
  /* Accumulate the squared norm. */
  for (k = 0; k < 40; k++)
    total += work.buffer[k]*work.buffer[k];
  return total;
}
/* Returns ||-Ax + b||^2, the squared equality-constraint residual
 * (a single constraint in this problem). work.buffer is used as scratch. */
CUDA_CALLABLE_MEMBER
double calc_eq_resid_squared(Workspace& work) {
  int k;
  double total = 0;
  /* buffer = -A*x */
  multbymA(work.buffer, work.x);
  /* buffer = -Ax + b */
  for (k = 0; k < 1; k++)
    work.buffer[k] += work.b[k];
  /* Accumulate the squared norm. */
  for (k = 0; k < 1; k++)
    total += work.buffer[k]*work.buffer[k];
  return total;
}
/* CVXOPT-style initialization: solves a hijacked KKT system once (with
 * block_33 = -1 and S^{-1}Z = I) and then shifts s and z so both start
 * strictly positive. Borrows work.lhs_aff for the solution. */
CUDA_CALLABLE_MEMBER
void better_start(Workspace& work, Settings& settings, Params& params) {
  /* Calculates a better starting point, using a similar approach to CVXOPT. */
  /* Not yet speed optimized. */
  int i;
  double *x, *s, *z, *y;
  double alpha;
  work.block_33[0] = -1;
  /* Make sure sinvz is 1 to make hijacked KKT system ok. */
  for (i = 0; i < 40; i++)
    work.s_inv_z[i] = 1;
  fill_KKT(work, params);
  ldl_factor(work, settings);
  fillrhs_start(work);
  /* Borrow work.lhs_aff for the solution. */
  ldl_solve(work.rhs, work.lhs_aff, work, settings);
  /* Don't do any refinement for now. Precision doesn't matter too much. */
  /* Solution block layout matches the packed variable vector. */
  x = work.lhs_aff;
  s = work.lhs_aff + 20;
  z = work.lhs_aff + 60;
  y = work.lhs_aff + 100;
  /* Just set x and y as is. */
  for (i = 0; i < 20; i++)
    work.x[i] = x[i];
  for (i = 0; i < 1; i++)
    work.y[i] = y[i];
  /* Now complete the initialization. Start with s. */
  /* Must have alpha > max(z). */
  alpha = -1e99;
  for (i = 0; i < 40; i++)
    if (alpha < z[i])
      alpha = z[i];
  if (alpha < 0) {
    for (i = 0; i < 40; i++)
      work.s[i] = -z[i];
  } else {
    alpha += 1;
    for (i = 0; i < 40; i++)
      work.s[i] = -z[i] + alpha;
  }
  /* Now initialize z. */
  /* Now must have alpha > max(-z). */
  alpha = -1e99;
  for (i = 0; i < 40; i++)
    if (alpha < -z[i])
      alpha = -z[i];
  if (alpha < 0) {
    for (i = 0; i < 40; i++)
      work.z[i] = z[i];
  } else {
    alpha += 1;
    for (i = 0; i < 40; i++)
      work.z[i] = z[i] + alpha;
  }
}
/* Loads the initialization right-hand side (-q, 0, h, b) into work.rhs,
 * matching the KKT block layout used by better_start(). */
CUDA_CALLABLE_MEMBER
void fillrhs_start(Workspace& work) {
  double *r1 = work.rhs;        /* stationarity block (20) */
  double *r2 = work.rhs + 20;   /* slack block (40)        */
  double *r3 = work.rhs + 60;   /* inequality block (40)   */
  double *r4 = work.rhs + 100;  /* equality block (1)      */
  for (int k = 0; k < 20; k++)
    r1[k] = -work.q[k];
  for (int k = 0; k < 40; k++)
    r2[k] = 0;
  for (int k = 0; k < 40; k++)
    r3[k] = work.h[k];
  for (int k = 0; k < 1; k++)
    r4[k] = work.b[k];
}
/* Main primal-dual interior-point loop (Mehrotra predictor-corrector).
 * Returns the number of iterations used on convergence (iter+1), or
 * settings.max_iters when the tolerances were not met. Sets work.converged
 * and work.optval on success. */
CUDA_CALLABLE_MEMBER
long solve(Workspace& work, Settings& settings, Params& params, Vars& vars) {
  int i;
  int iter;
  double *dx, *ds, *dy, *dz;
  double minval;
  double alpha;
  work.converged = 0;
  setup_pointers(work, vars);
  pre_ops();
#ifndef ZERO_LIBRARY_MODE
  if (settings.verbose)
    printf("iter objv gap |Ax-b| |Gx+s-h| step\n");
#endif
  /* Load the problem data, then pick a starting point. */
  fillq(work, params);
  fillh(work);
  fillb(work);
  if (settings.better_start)
    better_start(work, settings, params);
  else
    set_start(work, settings);
  for (iter = 0; iter < settings.max_iters; iter++) {
    /* Refresh S^{-1} and S^{-1}Z for the KKT system. */
    for (i = 0; i < 40; i++) {
      work.s_inv[i] = 1.0 / work.s[i];
      work.s_inv_z[i] = work.s_inv[i]*work.z[i];
    }
    work.block_33[0] = 0;
    fill_KKT(work, params);
    ldl_factor(work, settings);
    /* Affine scaling directions. */
    fillrhs_aff(work, params);
    ldl_solve(work.rhs, work.lhs_aff, work, settings);
    refine(work.rhs, work.lhs_aff, work, settings);
    /* Centering plus corrector directions. */
    fillrhs_cc(work);
    ldl_solve(work.rhs, work.lhs_cc, work, settings);
    refine(work.rhs, work.lhs_cc, work, settings);
    /* Add the two together and store in aff. */
    for (i = 0; i < 101; i++)
      work.lhs_aff[i] += work.lhs_cc[i];
    /* Rename aff to reflect its new meaning. */
    dx = work.lhs_aff;
    ds = work.lhs_aff + 20;
    dz = work.lhs_aff + 60;
    dy = work.lhs_aff + 100;
    /* Find min(min(ds./s), min(dz./z)). */
    minval = 0;
    for (i = 0; i < 40; i++)
      if (ds[i] < minval*work.s[i])
        minval = ds[i]/work.s[i];
    for (i = 0; i < 40; i++)
      if (dz[i] < minval*work.z[i])
        minval = dz[i]/work.z[i];
    /* Find alpha: step length keeping s and z 1% away from the boundary. */
    if (-0.99 < minval)
      alpha = 1;
    else
      alpha = -0.99/minval;
    /* Update the primal and dual variables. */
    for (i = 0; i < 20; i++)
      work.x[i] += alpha*dx[i];
    for (i = 0; i < 40; i++)
      work.s[i] += alpha*ds[i];
    for (i = 0; i < 40; i++)
      work.z[i] += alpha*dz[i];
    for (i = 0; i < 1; i++)
      work.y[i] += alpha*dy[i];
    work.gap = eval_gap(work);
    work.eq_resid_squared = calc_eq_resid_squared(work);
    work.ineq_resid_squared = calc_ineq_resid_squared(work);
#ifndef ZERO_LIBRARY_MODE
    if (settings.verbose) {
      work.optval = eval_objv(work, params);
      printf("%3d %10.3e %9.2e %9.2e %9.2e % 6.4f\n",
          iter+1, work.optval, work.gap, sqrt(work.eq_resid_squared),
          sqrt(work.ineq_resid_squared), alpha);
    }
#endif
    /* Test termination conditions. Requires optimality, and satisfied */
    /* constraints. */
    if ( (work.gap < settings.eps)
        && (work.eq_resid_squared <= settings.resid_tol*settings.resid_tol)
        && (work.ineq_resid_squared <= settings.resid_tol*settings.resid_tol)
        ) {
      work.converged = 1;
      work.optval = eval_objv(work, params);
      return iter+1;
    }
  }
  return iter;
}
|
13,828 | #include <stdio.h>
/* Checks the sticky CUDA error state and aborts with a diagnostic message
 * on failure. Call right after a kernel launch to catch launch-config
 * errors, and again after a synchronizing call to surface asynchronous
 * execution errors. */
#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)
// Trivial kernel: each launched thread prints "Hello" via device-side
// printf (output is flushed at the next synchronization point).
__global__ void mykernel(){
printf("Hello\n");
}
// Launches the hello-world kernel and verifies it ran.
// Improvement: check for launch-configuration errors immediately after the
// launch, then synchronize and check again so asynchronous execution
// errors are also caught.
int main(){
  mykernel<<<1,1>>>();
  cudaCheckErrors("kernel launch fail");
  cudaDeviceSynchronize();
  cudaCheckErrors("kernel fail");
  return 0;
}
|
13,829 | /*
* a simple serial scan.
*/
#include <stdio.h>
#include <stdlib.h>
void scan(float* out, float* in, int size);
void startClock(char*);
void stopClock(char*);
void printClock(char*);
// Builds an array of ones, runs the timed serial scan, and prints each
// input/output pair.
// Improvements: reject non-positive sizes (atoi returns 0 on garbage input)
// and check the malloc results before use.
int main(int argc, char** argv) {
  if (argc < 2) {
    printf("Usage: %s size-of-array\n",argv[0]);
    exit(1);
  }
  int size = atoi(argv[1]);
  if (size <= 0) {
    printf("size must be a positive integer\n");
    exit(1);
  }
  printf("size = %d\n",size);
  /* host based data */
  float *h_in = (float*) malloc(size*sizeof(float));
  float *h_out = (float*) malloc(size*sizeof(float));
  if (h_in == NULL || h_out == NULL) {
    printf("out of memory\n");
    exit(1);
  }
  for (int i = 0; i < size; i++) {
    h_in[i] = 1.0;
  }
  startClock("compute");
  scan(h_out,h_in,size);
  stopClock("compute");
  for (int i = 0; i < size; i++) {
    printf("%d %f -> %f\n",i,h_in[i],h_out[i]);
  }
  free(h_in);
  free(h_out);
  printClock("compute");
  return 0;
}
/*
 * Exclusive prefix sum: out[0] = 0, out[i] = in[0] + ... + in[i-1].
 * Fix: the original stored out[0] unconditionally, which is an
 * out-of-bounds write whenever size <= 0.
 */
void scan(float* out, float* in, int size) {
  if (size <= 0) return;
  out[0] = 0;
  for (int i = 1; i < size; i++) {
    out[i] = out[i-1] + in[i-1];
  }
}
|
13,830 |
#include <stdio.h>
#include <math.h>
///////////////////////////////// DEVICE FUNCTIONS /////////////////////////////////
__device__ void _bicubicCoeff(float* alpha, float* psi, int pitch, int i, int j);
__device__ void _newtonStep(float* A, float& x, float& y, float& error, int xi, int yi);
// Sign of val, with zero collapsed to -1 so the inside/outside
// classification of the level set is total.
__device__ int sgn(float val) {
	return (val > 0.0) ? 1 : -1;
}
///////////////////////////////// GLOBAL GPU FUNCTIONS /////////////////////////////////
// Determines which voxels are active.
// Gets coefficients for interpolating polynomial for each active voxel.
// Flags voxels crossed by the zero level set of psi and records them.
// voxelList[0] is an atomic counter; voxelList[1+k] stores the flat index
// of the k-th active voxel, and alphaList[16k..16k+15] receives its bicubic
// interpolation coefficients. One thread per voxel (row, col).
__global__ void getVoxels(float* psi, int pitch, int* voxelList, float* alphaList, int Nx, int Ny)
{
	int row = blockDim.x*blockIdx.x + threadIdx.x;
	int col = blockDim.y*blockIdx.y + threadIdx.y;
	if (row >= Nx-1 || col >= Ny-1)
		return;
	int idx = row * pitch + col;
	// The four corner signs sum to +/-4 only when all corners agree; any
	// mixed-sign voxel (|sum| <= 2) contains the interface.
	int signSum = sgn(psi[idx]) + sgn(psi[idx+pitch])
	            + sgn(psi[idx+pitch+1]) + sgn(psi[idx+1]);
	if (signSum > -3 && signSum < 3)
	{
		// Reserve the next output slot atomically.
		int slot = atomicAdd(voxelList, 1);
		voxelList[slot + 1] = idx;
		_bicubicCoeff(alphaList + 16*slot, psi, pitch, row, col);
	}
}
// For every grid node, searches all active voxels for the closest point on
// the interface (zero level set of the bicubic patch) via Newton iteration,
// and writes the signed distance into phi. Sign comes from the original
// field psi; magnitude from the nearest converged foot point.
// NOTE(review): if no Newton iterate converges inside any voxel, the
// sentinel 7770000.0 (times the sign) is written — confirm callers handle it.
__global__ void reinitPhi(float* phi, int pitch, float* psi, int* voxelList, float* alphaList,
						int Nx, int Ny, float dx, float thres)
{
	int row = blockDim.x*blockIdx.x + threadIdx.x;
	int col = blockDim.y*blockIdx.y + threadIdx.y;
	if (row<Nx && col<Ny)
	{
		float* alpha;
		int idx, r ,c;
		float minDist = 7770000.0; //for error checking
		float xO, yO;
		float error;
		// voxelList[0] holds the active-voxel count; entries follow it.
		for(int k = 0; k < voxelList[0]; ++k)
		{
			idx = voxelList[k+1];
			alpha = alphaList + 16*k;
			r = idx/pitch;
			c = idx%pitch;
			// Start Newton at the voxel center (local coordinates in [0,1]^2).
			xO = .5;
			yO = .5;
			bool inVoxel = true;
			do
			{
				_newtonStep(alpha, xO, yO, error, row-r, col-c);
				// Abandon this voxel once the iterate drifts outside (with slack).
				inVoxel = (yO>=-0.1f) && (yO<=1.1f) && (xO>=-0.1f) && (xO<=1.1f);
			} while (error>thres && inVoxel);
			if (inVoxel){
				// Distance from this node to the converged foot point, in
				// physical units (grid spacing dx).
				float xdist = (row-r-xO);
				float ydist = (col-c-yO);
				minDist = min(minDist, dx*sqrt(ydist*ydist + xdist*xdist));
			}
		}
		phi[row*pitch+col] = sgn(psi[row*pitch+col]) * minDist;
	}
}
///////////////////////////////// DEVICE FUNCTION IMPLEMENTATIONS /////////////////////////////////
// Computes the 16 bicubic interpolation coefficients for the unit voxel
// whose lower corner is (i, j), from psi values and centered-difference
// derivatives at the four corners. Layout matches _newtonStep:
//   p(x,y) = sum_{m,n} alpha[4m+n] * x^m * y^n
// where x follows the row (pitch) direction and y the column direction.
// Fix: alpha[6] (the x*y^2 coefficient) used fxy10 where the standard
// bicubic unit-square system requires fxy01 — it must mirror the f/fy
// pattern of alpha[2]/alpha[3], exactly as alpha[7] uses fxy01.
// NOTE: reads psi at rows i-1..i+2 and columns j-1..j+2, so the voxel must
// not touch the outermost ring of the grid.
__device__ void _bicubicCoeff(float* alpha, float* psi, int pitch, int i, int j)
{
	int idx = i*pitch + j;
	float f00, f10, f01, f11;
	float fx00, fx10, fx01, fx11;
	float fy00, fy10, fy01, fy11;
	float fxy00, fxy10, fxy01, fxy11;
	// Corner values (first index: x/row offset, second: y/column offset).
	f00 = psi[idx];
	f01 = psi[idx+1];
	f10 = psi[idx+pitch];
	f11 = psi[idx+pitch+1];
	// Centered differences for d/dy at each corner.
	fy00 = (psi[idx+1]-psi[idx-1])/2.0;
	fy01 = (psi[idx+2]-psi[idx])/2.0;
	fy10 = (psi[idx+pitch+1]-psi[idx+pitch-1])/2.0;
	fy11 = (psi[idx+pitch+2]-psi[idx+pitch])/2.0;
	// Centered differences for d/dx at each corner.
	fx00 = (psi[idx+pitch]-psi[idx-pitch])/2.0;
	fx01 = (psi[idx+pitch+1]-psi[idx-pitch+1])/2.0;
	fx10 = (psi[idx+2*pitch]-psi[idx])/2.0;
	fx11 = (psi[idx+2*pitch+1]-psi[idx+1])/2.0;
	// Centered differences for the mixed derivative at each corner.
	fxy00 = (psi[idx+pitch+1]-psi[idx+1-pitch]-psi[idx-1+pitch]+psi[idx-pitch-1])/4.0;
	fxy01 = (psi[idx+pitch+2]-psi[idx-pitch+2]-psi[idx+pitch]+psi[idx-pitch])/4.0;
	fxy10 = (psi[idx+2*pitch+1]-psi[idx+1]-psi[idx+2*pitch-1]+psi[idx-1])/4.0;
	fxy11 = (psi[idx+2*pitch+2]-psi[idx+2]-psi[idx+2*pitch]+psi[idx])/4.0;
	// Standard bicubic coefficient system on the unit square.
	alpha[0] = f00;
	alpha[1] = fy00;
	alpha[2] = -3*f00 + 3*f01 - 2*fy00 - fy01;
	alpha[3] = 2*f00 - 2*f01 + fy00 + fy01;
	alpha[4] = fx00;
	alpha[5] = fxy00;
	alpha[6] = -3*fx00 + 3*fx01 - 2*fxy00 - fxy01;
	alpha[7] = 2*fx00 - 2*fx01 + fxy00 + fxy01;
	alpha[8] = -3*f00 + 3*f10 - 2*fx00 - fx10;
	alpha[9] = -3*fy00 + 3*fy10 - 2*fxy00 -fxy10;
	alpha[10] = 9*f00 -9*f01 -9*f10 +9*f11 +6*fy00 +3*fy01 -6*fy10 -3*fy11
				+6*fx00 -6*fx01 +3*fx10 -3*fx11 +4*fxy00 +2*fxy01 +2*fxy10 + fxy11;
	alpha[11] = -6*f00 +6*f01 +6*f10 -6*f11 -3*fy00 -3*fy01 +3*fy10 +3*fy11
				-4*fx00 +4*fx01 -2*fx10 +2*fx11 -2*fxy00 -2*fxy01 - fxy10 - fxy11;
	alpha[12] = 2*f00 - 2*f10 + fx00 + fx10;
	alpha[13] = 2*fy00 - 2*fy10 + fxy00 + fxy10;
	alpha[14] = -6*f00 +6*f01 +6*f10 -6*f11 -4*fy00 -2*fy01 +4*fy10 +2*fy11
				-3*fx00 +3*fx01 -3*fx10 +3*fx11 -2*fxy00 -fxy01 -2*fxy10 - fxy11;
	alpha[15] = 4*f00 -4*f01 -4*f10 +4*f11 +2*fy00 +2*fy01 -2*fy10 -2*fy11
				+2*fx00 -2*fx01 +2*fx10 -2*fx11 + fxy00 + fxy01 + fxy10 + fxy11;
}//_bicubicCoeff
// One Newton step for the closest-point (foot-point) problem on the bicubic
// patch p(x,y) = sum A[4i+j] x^i y^j: simultaneously drives p -> 0 and the
// alignment condition d1 = py*(x-xi) - px*(y-yi) -> 0, so that (x,y) is the
// point on the zero contour nearest the query node (xi, yi). Updates x and
// y in place and reports error = p^2 + d1^2 for the caller's convergence
// test.
__device__ void _newtonStep(float* A, float& x, float& y, float& error, int xi, int yi)
{
	float p, px, py, pxx, pyy, pxy;
	float d1, d2, d3, D;
	// Powers of the current iterate, reused across all polynomial evaluations.
	float y2 = y*y;
	float y3 = y2*y;
	float x2 = x*x;
	float x3 = x2*x;
	// Patch value p and its first/second partial derivatives at (x, y).
	p = A[0] + A[1]*y + A[2]*y2 + A[3]*y3
		+ (A[4] + A[5]*y + A[6]*y2 + A[7]*y3)*x
		+ (A[8] + A[9]*y + A[10]*y2 + A[11]*y3)*x2
		+ (A[12] + A[13]*y + A[14]*y2 + A[15]*y3)*x3;
	py = A[1] + 2*A[2]*y + 3*A[3]*y2
		+ (A[5] + 2*A[6]*y + 3*A[7]*y2)*x
		+ (A[9] + 2*A[10]*y + 3*A[11]*y2)*x2
		+ (A[13] + 2*A[14]*y + 3*A[15]*y2)*x3;
	px = A[4] + 2*A[8]*x + 3*A[12]*x2
		+ (A[5] + 2*A[9]*x + 3*A[13]*x2)*y
		+ (A[6] + 2*A[10]*x + 3*A[14]*x2)*y2
		+ (A[7] + 2*A[11]*x + 3*A[15]*x2)*y3;
	pyy = 2*A[2] + 6*A[3]*y + (2*A[6] + 6*A[7]*y)*x
		+ (2*A[10] + 6*A[11]*y)*x2
		+ (2*A[14] + 6*A[15]*y)*x3;
	pxx = 2*A[8] + 6*A[12]*x + (2*A[9] + 6*A[13]*x)*y
		+ (2*A[10] + 6*A[14]*x)*y2
		+ (2*A[11] + 6*A[15]*x)*y3;
	pxy = A[5] + 2*A[6]*y + 3*A[7]*y2 +
		(A[9] + 2*A[10]*y + 3*A[11]*y2)*2*x +
		(A[13] + 2*A[14]*y + 3*A[15]*y2)*3*x2;
	// Residual d1 and the rows of the 2x2 Jacobian for the system (p, d1).
	d1 = py*(x-xi) - px*(y-yi);
	d2 = pyy*(x-xi) - pxy*(y-yi) - px;
	d3 = pxy*(x-xi) - pxx*(y-yi) + py ;
	// Jacobian determinant; NOTE(review): no safeguard against D ~ 0 — the
	// caller's in-voxel check is what bounds a diverging step. Confirm.
	D = py*d3 - px*d2;
	error = p*p + d1*d1;
	// Cramer's-rule Newton update.
	y -= ( p*d3 - px*d1) / D;
	x -= ( py*d1 - p*d2 ) / D;
}//_newtonStep
|
13,831 | #include "includes.h"
// Element-wise 3-D subtraction over a volume of extent c_Size: out = in1 - in2.
// Launched with a 3-D grid; each thread handles one voxel and out-of-range
// threads simply do nothing.
__global__ void subtract_kernel(float * in1, float * in2, float * out)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int z = blockIdx.z * blockDim.z + threadIdx.z;
if (x < c_Size.x && y < c_Size.y && z < c_Size.z) {
// Linearize (x, y, z) into the flat row-major voxel index.
long int voxel = (z * c_Size.y + y) * c_Size.x + x;
out[voxel] = in1[voxel] - in2[voxel];
}
} |
13,832 | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* This script prints out the gencode of the specified GPU id. Defaults to GPU 0.
*/
#include <stdio.h>
#include <stdlib.h>
/* Prints the "gencode" (compute capability as major*10+minor, e.g. 75 for
 * sm_75) of the GPU selected by an optional command-line index (default 0).
 * Returns -1 on any CUDA error or invalid index. */
int main(int argc, char *argv[])
{
    int device_id = 0;
    if (argc > 1) {
        device_id = atoi(argv[1]);
    }
    int device_count;
    cudaError_t status = cudaGetDeviceCount(&device_count);
    if (status != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount() failed: %s\n", cudaGetErrorString(status));
        return -1;
    }
    // Reject out-of-range indices, including negative ones (atoi of a
    // non-numeric argument yields 0, of "-1" yields -1).
    if (device_id < 0 || device_id >= device_count) {
        fprintf(stderr, "Invalid device index %d (Max index: %d)\n", device_id, device_count-1);
        return -1;
    }
    cudaDeviceProp prop;
    // BUGFIX: this call was previously unchecked; on failure `prop` would be
    // read uninitialized below.
    status = cudaGetDeviceProperties(&prop, device_id);
    if (status != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceProperties() failed: %s\n", cudaGetErrorString(status));
        return -1;
    }
    int gencode = prop.major * 10 + prop.minor;
    printf("%d\n", gencode);
    return 0;
}
|
13,833 | #include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include<cuda.h>
#include<cuda_runtime.h>
#define Y 3
#define K 4 //NUMBER OF CLUSTERS
// This kernel is used to find the index of the centroid that is nearest to each pixel.
// Each thread is responsible for a particular pixel.
// For each pixel (one row of `num`, Y components), finds the index of the
// nearest of the K centroids (Euclidean distance) and stores it in idx.
// Grid-stride loop: each thread handles pixels x, x+stride, x+2*stride, ...
__global__
void findclosestcentroids(double* num, double* centroids_c, int* idx, int each_chunk){
int start=blockIdx.x*blockDim.x+threadIdx.x;
int stride=blockDim.x*gridDim.x;
for(int x=start; x<each_chunk; x+=stride){
int best = 0;
double best_dist = 0.0;
for (int j=0; j<K; j++){
// Squared Euclidean distance between pixel x and centroid j.
double acc = 0.0;
for (int l=0; l<Y; l++){
acc = acc + (num[x*Y+l]-centroids_c[j*Y+l])*(num[x*Y+l]-centroids_c[j*Y+l]);
}
double d = sqrt(acc);
// Running minimum; the first (lowest-index) minimum wins, matching a
// strict '<' scan over a distance array.
if (j == 0 || d < best_dist){
best_dist = d;
best = j;
}
}
idx[x] = best;
}
}
// This kernel is launched to update the centroids in each iteration. this is basically a reduction function where the
// mean of all the data points belonging to one cluster is calculated.
// Reduction step of k-means: recomputes each of the K centroids as the mean
// of the pixels currently assigned to it (per idx). A cluster with no
// members keeps its previous centroid. Grid-stride over clusters.
__global__
void computeCentroids(double* num, int* idx, double* centroids_c, int each_chunk){
int index=blockIdx.x*blockDim.x+threadIdx.x;
int stride=blockDim.x*gridDim.x;
int m, j, l, count;
double sum[Y];
// BUGFIX: the original derived the cluster id as index+offset*stride but
// never incremented `offset`, so a thread looping more than once (when
// stride < K) would recompute cluster `index` instead of advancing.
// Using the loop variable directly is both correct and simpler.
for(int i=index; i<K; i+=stride){
count=0;
for(m=0;m<Y;m++) sum[m]=0.0;
// Accumulate all pixels assigned to cluster i.
for(j=0; j<each_chunk; j++){
if(idx[j]==i){
count++;
for (l=0;l<Y;l++){
sum[l]=sum[l]+ *(num+j*Y+l);
}
}
}
if (count==0) continue; // empty cluster: leave its centroid unchanged
for (l=0;l<Y;l++){
*(centroids_c+i*Y+l)=sum[l]/count;
}
}
}
//Kernel that performs the replacement of each pixel in the image by the centroid it is closest to.
//This is basically the step that quantizes the image.
// Quantization step: overwrites each pixel with the value of the centroid it
// was assigned to (idx in [0, K)). Grid-stride loop over pixels.
__global__
void assign_thru_gpu(double* num, double* centroids_c, int* idx, int each_chunk){
int tid=blockIdx.x*blockDim.x+threadIdx.x;
int stride=blockDim.x*gridDim.x;
for(int x=tid; x<each_chunk; x+=stride){
int cluster = idx[x];
// Same effect as scanning all k in [0, K) for idx[x]==k: copy only when
// the assignment is a valid cluster id.
if (cluster >= 0 && cluster < K){
for (int comp=0; comp<Y; comp++){
num[x*Y+comp] = centroids_c[cluster*Y+comp];
}
}
}
}
//Assignment of each CUDA device to a particular rank
// Binds this MPI rank to a CUDA device (round-robin over the available
// devices) and allocates the managed buffers shared between host and device.
// Exits the process on any CUDA failure.
extern "C" void cuda_init(int each_chunk, int myrank, int numranks){
int cudaDeviceCount=-1;
cudaError_t cE;
//Check if cuda device exists and get the number of working cuda devices
if ((cE=cudaGetDeviceCount( &cudaDeviceCount))!=cudaSuccess){
printf("Unable to determine cuda Device count, error is %d count is %d \n", cE, cudaDeviceCount);
exit (-1);
}
//Set cuda device for each MPI rank
if ((cE=cudaSetDevice(myrank%cudaDeviceCount))!=cudaSuccess){
printf("Unable to have rank %d set to cuda device %d, error is %d \n", myrank, (myrank % cudaDeviceCount), cE);
exit (-1);
}
extern double* num;
extern double* centroids_c;
extern double* centroids_cresult;
extern int* idx;
num=NULL;
centroids_c=NULL;
centroids_cresult=NULL;
idx=NULL;
// BUGFIX: the address-of operators below were mangled into the cent-sign
// entity ("&cent;roids_c"), which does not compile; restored &centroids_c
// and &centroids_cresult.
cudaMallocManaged(&num, sizeof(double)*each_chunk*Y);
cudaMallocManaged(&centroids_c, sizeof(double)*K*Y);
cudaMallocManaged(&centroids_cresult, sizeof(double)*K*Y);
cudaMallocManaged(&idx, sizeof(int)*each_chunk);
}
//function that initiates kernel launches from the main function.
// One k-means iteration: launches the assignment kernel, synchronizes and
// prints its error string, then launches the centroid-update kernel and does
// the same. The prints are kept for parity with the original diagnostics.
extern "C" void k_means_kernel_launch(double* num, double* centroids_c, int* idx, int each_chunk, int n_blocks, int no_of_threads){
cudaError_t cE2, cE3;
findclosestcentroids<<< n_blocks, no_of_threads>>>(num, centroids_c, idx, each_chunk);
// (removed a dead cudaGetDeviceCount() call whose result was never used)
cE2=cudaDeviceSynchronize();
const char* x_err=cudaGetErrorString (cE2);
printf("%s \n",x_err);
// Centroid update runs in a single small block: only K clusters to reduce.
computeCentroids<<<1, 32>>>(num, &idx[0], centroids_c,each_chunk);
cE3=cudaDeviceSynchronize();
x_err=cudaGetErrorString (cE3);
printf("%s \n",x_err);
}
//Replacement of each pixel in the image by the centroid it is closest to. This is the step that quantizes the image.
// Host wrapper for the quantization step: launches assign_thru_gpu and
// blocks until it completes, so the managed buffers are safe to read after
// this returns.
extern "C" void assign(double* num, double* centroids_c, int* idx, int each_chunk, int n_blocks, int no_of_threads){
assign_thru_gpu<<<n_blocks, no_of_threads>>>(num, centroids_c, idx, each_chunk);
cudaDeviceSynchronize();
}
//Freeing the dynamic memory
// Releases the four managed buffers allocated in cuda_init.
extern "C" void cuda_free(double* num, double* centroids_c, double* centroids_cresult, int* idx){
cudaFree(num);
cudaFree(centroids_c);
cudaFree(centroids_cresult);
cudaFree(idx);
}
|
13,834 |
// Includes
#include <stdio.h>
// Constants
#define WPT 128
#define THREADS 512
#define BLOCKS 14*2*4
#define N (BLOCKS*THREADS)
// Kernel
// Compute micro-benchmark: each thread loads one element of A, squares it
// WPT times, and writes the result to B. Assumes the launch uses exactly
// THREADS threads per block (the index is derived from that constant).
__global__ void pebench(unsigned *A, unsigned *B) {
    unsigned gid = blockIdx.x * THREADS + threadIdx.x;
    unsigned v = A[gid];
    for (unsigned iter = 0; iter < WPT; ++iter)
        v *= v;
    B[gid] = v;
}
// Timers
cudaEvent_t start;
void timer_start();
void timer_stop();
// Main function
// Host driver: fills A with 0..N-1, runs the pebench kernel once under the
// event timers, copies the result back, and releases all memory.
int main(void) {
    unsigned size = N*sizeof(unsigned);
    // Allocate and initialise the host data
    unsigned *A = (unsigned *)malloc(size);
    unsigned *B = (unsigned *)malloc(size);
    for (unsigned i=0; i<N; i++) {
        A[i] = i;
        B[i] = 0;
    }
    // Allocate CUDA arrays
    unsigned *devA = 0;
    unsigned *devB = 0;
    cudaMalloc(&devA, size);
    cudaMalloc(&devB, size);
    // Copy to the GPU
    cudaMemcpy(devA, A, size, cudaMemcpyHostToDevice);
    // Configure the kernel
    dim3 threads(THREADS);
    dim3 blocks(BLOCKS);
    // Launch the kernel
    timer_start();
    pebench<<<blocks, threads>>>(devA, devB);
    timer_stop();
    // Copy from the GPU
    cudaMemcpy(B, devB, size, cudaMemcpyDeviceToHost);
    // Clean-up and exit
    // BUGFIX: cudaFree() was previously called on the host pointers A and B;
    // device allocations must be released via the device pointers.
    cudaFree(devA);
    cudaFree(devB);
    free(A);
    free(B);
    return 0;
}
// Start the timer
// Drains all pending GPU work, then records a start event (global `start`)
// on the default stream. NOTE(review): the event is created here but never
// destroyed — fine for a one-shot benchmark, leaks if called repeatedly.
void timer_start() {
cudaDeviceSynchronize();
cudaEventCreate(&start);
cudaEventRecord(start);
}
// End the timer
// Records a stop event, waits for it, and prints elapsed time plus an
// approximate effective bandwidth for the interval since timer_start().
void timer_stop() {
cudaDeviceSynchronize();
cudaEvent_t stop;
cudaEventCreate(&stop);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float timer = 0;
// cudaEventElapsedTime reports milliseconds.
cudaEventElapsedTime(&timer,start,stop);
printf("Execution time: %.3lf ms \n", timer);
// N*2 counts one global read plus one global write per element.
// MiB / ms comes out numerically close to the "GB/s" label (1 MiB/ms
// = ~1.05e9 bytes/s), which is presumably the intent here.
float megabytes = (N*2*sizeof(unsigned)) / (1024*1024.0);
printf("Bandwidth: %.3lf GB/s \n", megabytes/timer);
}
|
13,835 | #include "HashFunc.cuh"
// Hashes `len` bytes starting at `key` by feeding each byte through the
// DCHARHASH step macro (defined in HashFunc.cuh) into accumulator h.
__device__
uint32_t
__hash_func1(const void *key, uint32_t len) {
const uint8_t *e, *k;
uint32_t h;
uint8_t c;
k = reinterpret_cast<const uint8_t *>(key);
e = k + len;
for (h = 0; k != e;) {
c = *k++;
// NOTE(review): `k > e` can never hold here — the loop guard stops at
// k == e — so this break is dead code, presumably inherited from a
// NUL-terminated-string variant of this hash; verify before relying on
// NUL-byte handling.
if (!c && k > e)
break;
DCHARHASH(h, c);
}
return (h);
}
|
13,836 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated compiler/fuzz test kernel (see the file header: "Do not
// modify"). The specific constants and branch structure are the test case
// itself; it prints the final value of `comp` with full precision.
// NOTE(review): var_26/var_27 are dereferenced on the device but the caller
// passes malloc'd host pointers — an illegal access if the inner loop runs;
// intentional only in the sense that the generator produced it.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,int var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float* var_26,float* var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33) {
if (comp >= log10f((var_1 - var_2))) {
if (comp < -1.2261E19f + coshf(var_3 * +0.0f * (var_4 * (-1.4165E-25f * var_5)))) {
comp += var_7 - (var_8 + var_9 - (-0.0f * -1.2062E23f + +1.4310E-41f));
comp += var_10 + var_11 + (-1.0789E-35f + var_12 - +1.3540E35f - +1.2578E-36f);
if (comp > -1.3189E36f + +1.2720E36f - (var_13 * (-1.6585E36f / var_14 - var_15))) {
comp += var_16 - (var_17 * (var_18 * acosf((+0.0f / var_19 - -1.2988E-22f / var_20))));
}
if (comp <= (var_21 * -1.9488E-29f * -1.2776E36f)) {
comp = -0.0f * sinf(+1.4719E26f);
float tmp_1 = logf(+1.3238E-44f);
comp = tmp_1 + +1.3141E35f + (var_22 / (var_23 - -0.0f + (var_24 * var_25)));
}
for (int i=0; i < var_6; ++i) {
var_26[i] = +0.0f;
float tmp_2 = +1.1754E-17f;
var_27[i] = (var_28 - log10f(var_29 - (-1.3263E-36f - (var_30 / +1.1926E-42f))));
comp = var_27[i] / tmp_2 * var_26[i] / (var_31 - (+1.1612E-36f / (var_32 * var_33 + (-1.0845E-36f * -1.6945E-44f))));
}
}
}
printf("%.17g\n", comp);
}
/* Allocates a 10-element float array on the host and fills every slot with v.
 * Returns NULL if the allocation fails (previously the unchecked malloc
 * result was dereferenced). Caller owns the memory and must free() it. */
float* initPointer(float v) {
    float *ret = (float*) malloc(sizeof(float)*10);
    if (ret == NULL)
        return NULL;
    for(int i=0; i < 10; ++i)
        ret[i] = v;
    return ret;
}
// Host driver for the generated test kernel: parses 34 positional numeric
// arguments and launches compute<<<1,1>>> with them.
int main(int argc, char** argv) {
    /* Program variables */
    // The kernel needs argv[1]..argv[34]; fail fast with a usage message
    // instead of reading argv out of bounds (previously a segfault).
    if (argc < 35) {
        fprintf(stderr, "Usage: %s <34 numeric arguments>\n", argv[0]);
        return 1;
    }
    float tmp_1 = atof(argv[1]);
    float tmp_2 = atof(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    int tmp_7 = atoi(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    // NOTE(review): these are host malloc'd buffers handed straight to the
    // kernel; dereferencing them on the device is an illegal access. Kept
    // as-is because the file is a generated test ("Do not modify").
    float* tmp_27 = initPointer( atof(argv[27]) );
    float* tmp_28 = initPointer( atof(argv[28]) );
    float tmp_29 = atof(argv[29]);
    float tmp_30 = atof(argv[30]);
    float tmp_31 = atof(argv[31]);
    float tmp_32 = atof(argv[32]);
    float tmp_33 = atof(argv[33]);
    float tmp_34 = atof(argv[34]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34);
    cudaDeviceSynchronize();
    return 0;
}
|
13,837 | #include "includes.h"
// Numerically stable softplus: log(1 + e^x), computed via log1pf, with
// asymptotic shortcuts outside [-threshold, threshold] to avoid overflow.
__device__ float softplus_kernel(float x, float threshold = 20) {
    if (x > threshold) return x;         // e^x dominates: softplus(x) ~ x
    if (x < -threshold) return expf(x);  // log1p(e^x) ~ e^x for very negative x
    return log1pf(expf(x));
}
// Backward pass of the Mish activation: scales delta[i] by d/dx mish(x)
// evaluated at the stored pre-activation value activation_input_gpu[i].
// Uses the softplus/tanh formulation; see the linked TF/PyTorch sources.
__global__ void gradient_array_mish_kernel(int n, float *activation_input_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float MISH_THRESHOLD = 20.0f;
// implementation from TensorFlow: https://github.com/tensorflow/addons/blob/093cdfa85d334cbe19a37624c33198f3140109ed/tensorflow_addons/custom_ops/activations/cc/kernels/mish_op.h#L66-L80
// implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31
// log1p(x) == log(x + 1)
const float inp = activation_input_gpu[i];
const float sp = softplus_kernel(inp, MISH_THRESHOLD);
// d(softplus)/dx = 1 - e^{-sp}, computed via expm1 for accuracy near 0.
const float grad_sp = -expm1f(-sp);
//const float grad_sp = 1 - expf(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
// mish'(x) = x * d(tanh(sp))/dx + tanh(sp)
const float grad = inp * grad_tsp + tsp;
delta[i] *= grad;
//float x = activation_input[i];
//float d = 2 * expf(x) + expf(2 * x) + 2;
//float w = 4 * (x + 1) + 4 * expf(2 * x) + expf(3 * x) + expf(x)*(4 * x + 6);
//float derivative = expf(x) * w / (d * d);
//delta[i] *= derivative;
}
} |
13,838 | /*
* RFC 1321 compliant MD5 implementation
*
* Copyright (C) 2006-2007 Christophe Devine
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License, version 2.1 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
/*
* The MD5 algorithm was designed by Ron Rivest in 1991.
*
* http://www.ietf.org/rfc/rfc1321.txt
*/
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE 1
#endif
#include <string.h>
#include <stdio.h>
#include "md5.cuh"
/*
* 32-bit integer manipulation macros (little endian)
*/
#ifndef GET_UINT32_LE
#define GET_UINT32_LE(n,b,i) \
{ \
(n) = ( (unsigned long) (b)[(i) ] ) \
| ( (unsigned long) (b)[(i) + 1] << 8 ) \
| ( (unsigned long) (b)[(i) + 2] << 16 ) \
| ( (unsigned long) (b)[(i) + 3] << 24 ); \
}
#endif
#ifndef PUT_UINT32_LE
#define PUT_UINT32_LE(n,b,i) \
{ \
(b)[(i) ] = (unsigned char) ( (n) ); \
(b)[(i) + 1] = (unsigned char) ( (n) >> 8 ); \
(b)[(i) + 2] = (unsigned char) ( (n) >> 16 ); \
(b)[(i) + 3] = (unsigned char) ( (n) >> 24 ); \
}
#endif
/*
* MD5 context setup
*/
// Initializes an MD5 context: zeroes the processed-byte counters and loads
// the four RFC 1321 initial chaining values.
__device__
void md5_starts(md5_context *ctx) {
ctx->total[0] = 0;
ctx->total[1] = 0;
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
}
// Core MD5 compression function: absorbs one 64-byte block into the chaining
// state, following RFC 1321 (four rounds of 16 steps each; F is redefined
// per round, P performs one step, S is a 32-bit left rotate).
__device__
static void md5_process(md5_context *ctx, unsigned char data[64]) {
unsigned long X[16], A, B, C, D;
GET_UINT32_LE( X[ 0], data, 0 );
GET_UINT32_LE( X[ 1], data, 4 );
GET_UINT32_LE( X[ 2], data, 8 );
GET_UINT32_LE( X[ 3], data, 12 );
GET_UINT32_LE( X[ 4], data, 16 );
GET_UINT32_LE( X[ 5], data, 20 );
GET_UINT32_LE( X[ 6], data, 24 );
GET_UINT32_LE( X[ 7], data, 28 );
GET_UINT32_LE( X[ 8], data, 32 );
GET_UINT32_LE( X[ 9], data, 36 );
GET_UINT32_LE( X[10], data, 40 );
GET_UINT32_LE( X[11], data, 44 );
GET_UINT32_LE( X[12], data, 48 );
GET_UINT32_LE( X[13], data, 52 );
GET_UINT32_LE( X[14], data, 56 );
GET_UINT32_LE( X[15], data, 60 );
#define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))
#define P(a,b,c,d,k,s,t) \
{ \
a += F(b,c,d) + X[k] + t; a = S(a,s) + b; \
}
A = ctx->state[0];
B = ctx->state[1];
C = ctx->state[2];
D = ctx->state[3];
// Round 1
#define F(x,y,z) (z ^ (x & (y ^ z)))
P( A, B, C, D, 0, 7, 0xD76AA478 );
P( D, A, B, C, 1, 12, 0xE8C7B756 );
P( C, D, A, B, 2, 17, 0x242070DB );
P( B, C, D, A, 3, 22, 0xC1BDCEEE );
P( A, B, C, D, 4, 7, 0xF57C0FAF );
P( D, A, B, C, 5, 12, 0x4787C62A );
P( C, D, A, B, 6, 17, 0xA8304613 );
P( B, C, D, A, 7, 22, 0xFD469501 );
P( A, B, C, D, 8, 7, 0x698098D8 );
P( D, A, B, C, 9, 12, 0x8B44F7AF );
P( C, D, A, B, 10, 17, 0xFFFF5BB1 );
P( B, C, D, A, 11, 22, 0x895CD7BE );
P( A, B, C, D, 12, 7, 0x6B901122 );
P( D, A, B, C, 13, 12, 0xFD987193 );
P( C, D, A, B, 14, 17, 0xA679438E );
P( B, C, D, A, 15, 22, 0x49B40821 );
#undef F
// Round 2
#define F(x,y,z) (y ^ (z & (x ^ y)))
P( A, B, C, D, 1, 5, 0xF61E2562 );
P( D, A, B, C, 6, 9, 0xC040B340 );
P( C, D, A, B, 11, 14, 0x265E5A51 );
P( B, C, D, A, 0, 20, 0xE9B6C7AA );
P( A, B, C, D, 5, 5, 0xD62F105D );
P( D, A, B, C, 10, 9, 0x02441453 );
P( C, D, A, B, 15, 14, 0xD8A1E681 );
P( B, C, D, A, 4, 20, 0xE7D3FBC8 );
P( A, B, C, D, 9, 5, 0x21E1CDE6 );
P( D, A, B, C, 14, 9, 0xC33707D6 );
P( C, D, A, B, 3, 14, 0xF4D50D87 );
P( B, C, D, A, 8, 20, 0x455A14ED );
P( A, B, C, D, 13, 5, 0xA9E3E905 );
P( D, A, B, C, 2, 9, 0xFCEFA3F8 );
P( C, D, A, B, 7, 14, 0x676F02D9 );
P( B, C, D, A, 12, 20, 0x8D2A4C8A );
#undef F
// Round 3
#define F(x,y,z) (x ^ y ^ z)
P( A, B, C, D, 5, 4, 0xFFFA3942 );
P( D, A, B, C, 8, 11, 0x8771F681 );
P( C, D, A, B, 11, 16, 0x6D9D6122 );
P( B, C, D, A, 14, 23, 0xFDE5380C );
P( A, B, C, D, 1, 4, 0xA4BEEA44 );
P( D, A, B, C, 4, 11, 0x4BDECFA9 );
P( C, D, A, B, 7, 16, 0xF6BB4B60 );
P( B, C, D, A, 10, 23, 0xBEBFBC70 );
P( A, B, C, D, 13, 4, 0x289B7EC6 );
P( D, A, B, C, 0, 11, 0xEAA127FA );
P( C, D, A, B, 3, 16, 0xD4EF3085 );
P( B, C, D, A, 6, 23, 0x04881D05 );
P( A, B, C, D, 9, 4, 0xD9D4D039 );
P( D, A, B, C, 12, 11, 0xE6DB99E5 );
P( C, D, A, B, 15, 16, 0x1FA27CF8 );
P( B, C, D, A, 2, 23, 0xC4AC5665 );
#undef F
// Round 4
#define F(x,y,z) (y ^ (x | ~z))
P( A, B, C, D, 0, 6, 0xF4292244 );
P( D, A, B, C, 7, 10, 0x432AFF97 );
P( C, D, A, B, 14, 15, 0xAB9423A7 );
P( B, C, D, A, 5, 21, 0xFC93A039 );
P( A, B, C, D, 12, 6, 0x655B59C3 );
P( D, A, B, C, 3, 10, 0x8F0CCC92 );
P( C, D, A, B, 10, 15, 0xFFEFF47D );
P( B, C, D, A, 1, 21, 0x85845DD1 );
P( A, B, C, D, 8, 6, 0x6FA87E4F );
P( D, A, B, C, 15, 10, 0xFE2CE6E0 );
P( C, D, A, B, 6, 15, 0xA3014314 );
P( B, C, D, A, 13, 21, 0x4E0811A1 );
P( A, B, C, D, 4, 6, 0xF7537E82 );
P( D, A, B, C, 11, 10, 0xBD3AF235 );
P( C, D, A, B, 2, 15, 0x2AD7D2BB );
P( B, C, D, A, 9, 21, 0xEB86D391 );
#undef F
#undef P
// Fold the round output back into the chaining state (Davies-Meyer style).
ctx->state[0] += A;
ctx->state[1] += B;
ctx->state[2] += C;
ctx->state[3] += D;
}
/*
* MD5 process buffer
*/
// Streams `ilen` bytes of `input` into the context: completes any partially
// filled 64-byte buffer, processes full blocks directly from the input, and
// buffers the remaining tail. memcpy is replaced by manual byte loops
// (the originals are kept as comments) for device-code portability.
__device__
void md5_update(md5_context *ctx, unsigned char *input, int ilen) {
int fill;
unsigned long left;
if (ilen <= 0)
return;
// Bytes already sitting in the internal buffer (total mod 64).
left = ctx->total[0] & 0x3F;
fill = 64 - left;
// Update the 64-bit processed-byte count split across total[0]/total[1].
ctx->total[0] += ilen;
ctx->total[0] &= 0xFFFFFFFF;
if (ctx->total[0] < (unsigned long) ilen)
ctx->total[1]++;
// Top up and flush the buffered partial block first, if any.
if (left && ilen >= fill) {
//memcpy((void *) (ctx->buffer + left), (void *) input, fill);
unsigned char* toFill = ctx->buffer + left;
for (int i = 0; i < fill; i++) {
toFill[i] = input[i];
}
md5_process(ctx, ctx->buffer);
input += fill;
ilen -= fill;
left = 0;
}
// Process whole 64-byte blocks straight from the caller's buffer.
while (ilen >= 64) {
md5_process(ctx, input);
input += 64;
ilen -= 64;
}
// Stash any remaining tail bytes for the next update/finish call.
if (ilen > 0) {
//memcpy((void *) (ctx->buffer + left), (void *) input, ilen);
unsigned char* toFill = ctx->buffer + left;
for (int i = 0; i < ilen; i++) {
toFill[i] = input[i];
}
}
}
// RFC 1321 message padding: a single 0x80 marker byte followed by zeros;
// md5_finish appends the needed prefix of this table.
__device__
static const unsigned char md5_padding[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0 };
/*
* MD5 final digest
*/
// Finalizes the digest: appends padding so the message length is congruent
// to 56 mod 64, appends the 64-bit bit-length (little endian), and writes
// the resulting state to `output`. When FEATURE_REDUCED_HASH_SIZE is
// defined only the first 4 digest bytes are emitted.
__device__
void md5_finish(md5_context *ctx, unsigned char *output) {
unsigned long last, padn;
unsigned long high, low;
unsigned char msglen[8];
// Convert the byte count to a 64-bit bit count (shift left by 3).
high = (ctx->total[0] >> 29) | (ctx->total[1] << 3);
low = (ctx->total[0] << 3);
PUT_UINT32_LE( low, msglen, 0 );
PUT_UINT32_LE( high, msglen, 4 );
last = ctx->total[0] & 0x3F;
padn = (last < 56) ? (56 - last) : (120 - last);
md5_update(ctx, (unsigned char *) md5_padding, padn);
md5_update(ctx, msglen, 8);
PUT_UINT32_LE( ctx->state[0], output, 0 );
#ifndef FEATURE_REDUCED_HASH_SIZE
PUT_UINT32_LE( ctx->state[1], output, 4 );
PUT_UINT32_LE( ctx->state[2], output, 8 );
PUT_UINT32_LE( ctx->state[3], output, 12 );
#endif
}
/*
* Output = MD5( input buffer )
*/
// Convenience wrapper: output = MD5(input[0..ilen)). The context lives on
// the stack; the commented-out memset would wipe it after use (a hygiene
// step for sensitive data in the original library — presumably why it was
// there; confirm before re-enabling).
__device__
void md5(unsigned char *input, int ilen, unsigned char *output) {
md5_context ctx;
md5_starts(&ctx);
md5_update(&ctx, input, ilen);
md5_finish(&ctx, output);
//TODO What for?
//memset(&ctx, 0, sizeof(md5_context));
}
|
13,839 | #include "includes.h"
// In-place update: groundTruth[i] *= log(array[i]) for every element of the
// size-element arrays. Threads stride across the data by the total thread
// count, with a bounds check for the final partial stride.
__global__ void compute_array_log(float* array, float* groundTruth, int size)
{
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int total_threads = blockDim.x * gridDim.x;
for(int base = 0; base < size; base += total_threads)
{
int idx = base + gid;
if(idx < size)
groundTruth[idx] = log(array[idx]) * groundTruth[idx];
}
} |
13,840 | #include <stdio.h>
// Element-wise C = A + B over a flattened matrix. Works for both 1-D and
// 2-D launch shapes: the global linear id is reconstructed from the (row,
// column) coordinates of the launch grid.
__global__ void matrixAdd(float *A, float *B, float *C, int numElements)
{
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int gid = row * (blockDim.x * gridDim.x) + col;
if (gid < numElements)
C[gid] = A[gid] + B[gid];
}
// Benchmarks three launch configurations of matrixAdd on a 4096x4096 matrix
// and a CPU baseline, verifying the result after each GPU run.
int main(int argc, char* argv[])
{
int matrixSize = 4096; // square matrix matrixSize * matrixSize
int numElements = matrixSize * matrixSize;
// Allocate host memory
float *h_A = (float *)malloc(numElements * sizeof(float));
float *h_B = (float *)malloc(numElements * sizeof(float));
float *h_C = (float *)malloc(numElements * sizeof(float));
// Initialize the host input matrices with random values in [0, 1]
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device matrices
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, numElements * sizeof(float));
cudaMalloc(&d_B, numElements * sizeof(float));
cudaMalloc(&d_C, numElements * sizeof(float));
// Copy the host input matrices A and B to the device
cudaMemcpy(d_A, h_A, numElements * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, numElements * sizeof(float), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
// Option 1: 2D grid of 2D thread blocks 16x16 (OK)
{
int threadsPerBlockDim = 16;
// Ceiling division so a partial tile covers the matrix edge.
int gridDimSize = (matrixSize + threadsPerBlockDim - 1) / threadsPerBlockDim;
dim3 blockSize(threadsPerBlockDim, threadsPerBlockDim);
dim3 gridSize (gridDimSize, gridDimSize);
printf("CUDA kernel launch with %dx%d blocks of %dx%d threads\n", gridDimSize, gridDimSize, threadsPerBlockDim, threadsPerBlockDim);
cudaEventRecord(start);
matrixAdd<<<gridSize, blockSize>>>(d_A, d_B, d_C, numElements);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("GPU option 1 time to sum the matrixes %f ms\n", milliseconds);
// Copy the device result matrix back to the host
cudaMemcpy(h_C, d_C, numElements * sizeof(float), cudaMemcpyDeviceToHost);
cudaError_t cudaError = cudaGetLastError();
if(cudaError != cudaSuccess)
{
fprintf(stderr, "cudaGetLastError() returned %d: %s\n", cudaError, cudaGetErrorString(cudaError));
exit(EXIT_FAILURE);
}
// Verify that the result matrix is correct
for (int i = 0; i < numElements; i++)
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
printf("Sum of the matrixes was OK\n");
}
// Option 2: 1D grid of 1D thread blocks 1x256 (INEFFICIENT ON PURPOSE), multiple memory transactions!!
{
int threadsPerBlock = 256;
int gridDim = (numElements + threadsPerBlock - 1) / threadsPerBlock; // the dimensionality per grid dimension cannot be larger than 65536 for GPUs using CC 2.0
// Blocks shaped 1x256: consecutive threads differ in y, so global accesses
// are strided — this is the deliberately uncoalesced variant.
dim3 blocksize(1, threadsPerBlock);
printf("CUDA kernel launch with %d blocks of 1x%d threads\n", gridDim, threadsPerBlock);
cudaEventRecord(start);
matrixAdd<<<gridDim, blocksize>>>(d_A, d_B, d_C, numElements);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("GPU option 2 time to sum the matrixes %f ms\n", milliseconds);
// Copy the device result matrix back to the host
cudaMemcpy(h_C, d_C, numElements * sizeof(float), cudaMemcpyDeviceToHost);
cudaError_t cudaError = cudaGetLastError();
if(cudaError != cudaSuccess)
{
fprintf(stderr, "cudaGetLastError() returned %d: %s\n", cudaError, cudaGetErrorString(cudaError));
exit(EXIT_FAILURE);
}
// Verify that the result matrix is correct
for (int i = 0; i < numElements; i++)
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
printf("Sum of the matrixes was OK\n");
}
// Option 3: 1D grid of 1D thread blocks (MOST EFFICIENT), smaller number of larger transactions
{
int threadsPerBlock = 256;
// the dimensionality per grid dimension cannot be larger than 65536 for GPUs using CC 2.0
int gridDim = (numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %dx1 threads\n", gridDim, threadsPerBlock);
cudaEventRecord(start);
matrixAdd<<<gridDim, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("GPU option 3 time to sum the matrixes %f ms\n", milliseconds);
// Copy the device result matrix back to the host
cudaMemcpy(h_C, d_C, numElements * sizeof(float), cudaMemcpyDeviceToHost);
cudaError_t cudaError = cudaGetLastError();
if(cudaError != cudaSuccess)
{
fprintf(stderr, "cudaGetLastError() returned %d: %s\n", cudaError, cudaGetErrorString(cudaError));
exit(EXIT_FAILURE);
}
// Verify that the result matrix is correct
for (int i = 0; i < numElements; i++)
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
printf("Sum of the matrixes was OK\n");
}
// Compute CPU time (timed with the same CUDA events for comparability)
cudaEventRecord(start);
for (int i = 0; i < numElements; i++)
h_C[i] = h_A[i] + h_B[i];
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("CPU time to sum the matrixes %f ms\n", milliseconds);
// Free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// Free host memory
free(h_A);
free(h_B);
free(h_C);
return 0;
}
|
13,841 | #include "includes.h"
extern "C"
// Writes vector `a` into `b` in reverse order: b[n-1-i] = a[i].
__global__ void reverseVec(int n, float *a, float *b)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n)
return;
b[n - 1 - tid] = a[tid];
} |
13,842 | #include "includes.h"
// 2x bilinear upsample: each thread reads a 2x2 neighborhood (clamped at the
// right/bottom edges) from the width x height source and writes a 2x2 output
// quad. SCALEUP_W/SCALEUP_H are tile constants from the includes; `pitch`
// and `newpitch` are row strides in elements for source and destination.
__global__ void ScaleUp(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
// Top-left corner of this thread's 2x2 output quad.
int x = blockIdx.x*SCALEUP_W + 2*tx;
int y = blockIdx.y*SCALEUP_H + 2*ty;
if (x<2*width && y<2*height) {
// Corresponding source sample and its clamped right/down neighbors.
int xl = blockIdx.x*(SCALEUP_W/2) + tx;
int yu = blockIdx.y*(SCALEUP_H/2) + ty;
int xr = min(xl + 1, width - 1);
int yd = min(yu + 1, height - 1);
float vul = d_Data[yu*pitch + xl];
float vur = d_Data[yu*pitch + xr];
float vdl = d_Data[yd*pitch + xl];
float vdr = d_Data[yd*pitch + xr];
// Bilinear weights: corner, horizontal/vertical midpoints, center.
d_Result[(y + 0)*newpitch + x + 0] = vul;
d_Result[(y + 0)*newpitch + x + 1] = 0.50f*(vul + vur);
d_Result[(y + 1)*newpitch + x + 0] = 0.50f*(vul + vdl);
d_Result[(y + 1)*newpitch + x + 1] = 0.25f*(vul + vur + vdl + vdr);
}
} |
13,843 | #include<iostream>
#include<cstdlib>
#include<fstream>
#include<string>
#include<sys/time.h>
typedef unsigned long long int UINT;
const int MAXTHREADSPERBLOCK = 512;
using namespace std;
// Tile-local LCS dynamic-programming kernel: fills one (tileX x lenY) tile of
// the DP table using an anti-diagonal wavefront. Each while-iteration
// processes one diagonal (`curlevel`); thread t handles the t-th cell on it.
// The __syncthreads() is safe: the loop bound `maxlevel` is uniform across
// the block, so all threads execute the barrier the same number of times.
__global__ void GPU(const int tilesize, const int paddsize, const int maxThreads,
int *dev_table, const int rowsize, const int maxlevel, int tileX, int lenY, int *dev_arr1, int *dev_arr2){
//This code has to ensure the n2 size is a multiple of 128. And n2 is no smaller than n1, where n2 is the row array size and n1 is the column array size.
//on K40, tile size is max 48K, which is 128*96; on Pascal and Volta, tile size is max 64K which is 128*128
//For running on K40, we reserve the shared memory space for a 64*64 tile. Because of the dependency, the actual shared memory size is 96 * 96. 96 is picked to ensure memory coalescing.
// __shared__ int table[9216];
int thread = threadIdx.x;
int curjobs = 0;
int curlevel = 1;
int startIdx, startx, starty;
int tableX = tileX + paddsize;
int x,y;
while (curlevel <= maxlevel){
// Diagonal grows by one cell per level until it spans lenY rows...
if (curlevel <= lenY){
curjobs++;
// printf("curlevel: %d, lenY: %d, curjobs: %d, thread: %d\n", curlevel, lenY, curjobs, thread);
}
startx = paddsize + curlevel - 1;
starty = paddsize;
// ...then shrinks once it passes the tile's right edge.
if (curlevel > tileX){
curjobs--;
startx = tableX -1;
starty = paddsize + curlevel - tileX;
// printf("curlevel: %d, curjobs: %d, thread: %d\n", curlevel, curjobs, thread);
}
if (thread < curjobs){
// Walk down-left along the diagonal to this thread's cell.
startx -= thread;
starty += thread;
startIdx = startx + starty * rowsize;
// Standard LCS recurrence: carry the max of left/up, +1 on a match.
dev_table[startIdx] = max(dev_table[startIdx - 1], dev_table[startIdx - rowsize]);
x = startx - paddsize;
y = starty - paddsize;
if (dev_arr1[x] == dev_arr2[y])
dev_table[startIdx] = dev_table[startIdx - rowsize - 1] + 1;
}
curlevel++;
__syncthreads();
}
}
// Aborts the program with a diagnostic if a CUDA API call failed.
// NOTE(review): __FILE__/__LINE__ expand here, inside this helper, so the
// reported location is always this function — not the failing call site.
// A macro wrapper around this function would capture the caller's location.
void checkGPUError(cudaError err){
if (cudaSuccess != err){
printf("CUDA error in file %s, in line %i: %s\n", __FILE__, __LINE__, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// Computes the length of the longest common subsequence of arr1 (length n1)
// and arr2 (length n2) on the GPU. The DP table is processed tile by tile
// along anti-diagonals of tiles; independent tiles on the same diagonal are
// launched on separate streams, with a device-wide sync between diagonals.
int LCS(int n1, int n2, int *arr1, int *arr2){
int lcslength;
int paddsize = 1;
int tileX = 256;
int tileY = 256;
// Table is (n1+1) x (n2+1): one padding row/column of zeros for the DP base case.
int rowsize = paddsize + n2;
int colsize = paddsize + n1;
int *dev_table, *dev_arr1, *dev_arr2;
int *table;
table = new int[colsize * rowsize];
size_t freeMem, totalMem;
cudaMemGetInfo(&freeMem, &totalMem);
int tablesize = colsize * rowsize;
cout << "current GPU memory info FREE: " << freeMem << " Bytes, Total: " << totalMem << " Bytes.";
cout << "colsize: " << colsize << ", rowsize: " << rowsize << ", allocates: " << tablesize * sizeof(int)<< " Bytes." << endl;
cudaError err = cudaMalloc(&dev_table, tablesize * sizeof(int));
checkGPUError(err);
cudaMalloc(&dev_arr1, n1*sizeof(int));
cudaMalloc(&dev_arr2, n2*sizeof(int));
// Zero the table so the padding row/column provide the DP base case.
cudaMemset(dev_table, 0, tablesize * sizeof(int));
cudaMemcpy(dev_arr1, arr1, n1*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_arr2, arr2, n2*sizeof(int), cudaMemcpyHostToDevice);
int maxthreads;
int maxlevel;
int threadPerBlock = tileY + paddsize;
int blockPerGrid = (threadPerBlock + MAXTHREADSPERBLOCK -1)/MAXTHREADSPERBLOCK;
int numStream = 32;
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
// Number of tiles in each dimension (ceiling division).
int xseg = (n1+tileX-1) / tileX;
int yseg = (n2+tileY-1) / tileY;
int maxSegThreads = min(xseg, yseg); //max number of segs at either of the seg levels.
int maxSegLevel = xseg + yseg - 1;
int curSegLevel = 1;
// int curSegJobs = 1;
int startSegX, startSegY;
cudaStream_t stream[numStream];
for (int s=0; s<numStream; s++)
cudaStreamCreate(&stream[s]);
// Walk tile anti-diagonals; tiles on one diagonal are independent.
while(curSegLevel <= maxSegLevel){
int segIdx = 0;
if (curSegLevel <= xseg){
//curSegJobs = curSegLevel;
startSegX = curSegLevel - 1;
startSegY = 0;
}
else{
//startSegIdx = xseg - 1 + xseg * (curSegLevel - xseg);
//curSegJobs = 2 * xseg - curSegLevel;
startSegX = xseg - 1;
startSegY = curSegLevel - xseg;
}
// cout << "curSegLevel: " << curSegLevel << ", maxSegLevel: " << maxSegLevel << endl;
while ( startSegX >= 0 && startSegY <= yseg - 1){
//suppose n2 is the row size and the longer array
//int i = paddsize + startSegX * tileX;
//int j = paddsize + startSegY * tileY;
int i = startSegX * tileX;
int j = startSegY * tileY;
int startSegAdd = j * rowsize + i;
// Round-robin the diagonal's tiles over the stream pool.
int s = segIdx % numStream;
//resY is used to determine the rest size of Y. This is used to check if the rest size of Y is smaller than tileY.
// NOTE(review): resY is derived from n1 here although the Y tiling
// (yseg) is derived from n2 above; for n1 != n2 this looks
// inconsistent — verify which dimension Y tiles are meant to cover.
int resY = n1 - startSegY * tileY;
int lenY = min(resY, tileY);
maxlevel = tileX + lenY - 1;
maxthreads = min(tileX, lenY);
int tilesize = (tileX+paddsize) * (lenY+paddsize);
GPU<<<blockPerGrid, threadPerBlock, 0, stream[s]>>>(tilesize, paddsize, maxthreads, &dev_table[startSegAdd], rowsize,
maxlevel, tileX, lenY, &dev_arr1[i], &dev_arr2[j]);
// cout << "startSegX: " << startSegX << ", startSegY: " << startSegY << ", segIdx: " << segIdx << endl;
startSegX--;
startSegY++;
segIdx++;
}
//this synchronization enforces the diagonal-to-diagonal dependency
cudaDeviceSynchronize();
curSegLevel++;
}
// The LCS length is the bottom-right cell of the DP table.
cudaMemcpy(&lcslength, &dev_table[tablesize-1], sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(table, dev_table, (n1+paddsize)*rowsize*sizeof(int), cudaMemcpyDeviceToHost);
/*
//display table
cout << "full table: " << endl;
for (int i=0; i<n1+paddsize; i++){
for (int j=0; j<n2+paddsize; j++){
cout << table[i * rowsize + j] << " ";
}
cout << endl;
}
*/
for (int s=0; s<numStream; s++)
cudaStreamDestroy(stream[s]);
cudaFree(dev_arr1);
cudaFree(dev_arr2);
cudaFree(dev_table);
delete[] table;
return lcslength;
}
|
13,844 | #include "includes.h"
__global__ void addOffset(int *dev_array, int length)
{
// Add the compile-time constant OFFSET (from includes.h) to every element.
// Flat one-thread-per-element mapping; guard the grid tail.
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= length)
return;
dev_array[gid] += OFFSET;
} |
13,845 | #include "includes.h"
__global__ void compress_s_write_permutation(unsigned short *d_s_write_permutation, unsigned short *d_s_write_permutation_offset, int *d_write_permutation, int c_size, int chunk)
{
// Compress each 32-bit permutation entry into a 16-bit value plus a
// per-chunk 16-bit offset (entry = offset * USHORT_MAX + value).
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int total = c_size * chunk;
if (gid < total) {
const int entry = d_write_permutation[gid];
d_s_write_permutation[gid] = (unsigned short)(entry % USHORT_MAX);
// The first element of every chunk also records the high part.
if (gid % chunk == 0) {
d_s_write_permutation_offset[gid / chunk] = (unsigned short)(entry / USHORT_MAX);
}
}
} |
13,846 | #include<stdio.h>
#include<iostream>
using namespace std;
/* a sum reduction on the array of floats 'in'.
* The reduction result is written to the
* address 'result'. The number of elements to
* be reduced is given by 'size'
*
* The example contains data races because barrier
* synchronisation statements, of the form:
* __syncthreads();
* are missing.
*
* Can you add them to eliminate all data races?
*/
#define N 4/* Same as blockDim */
__global__ void reduce(int *in) {
// Tree reduction over the first N elements of `in`; the total lands in
// in[0]. Intended for a single block of N threads.
const int tid = threadIdx.x;
int active = N / 2;
while (active > 0) {
if (tid < active)
in[tid] = in[tid] + in[tid + active];
// Barrier keeps successive halving passes from overlapping: writers of
// this pass must finish before anyone reads those slots next pass.
__syncthreads();
active >>= 1;
}
/* Master thread writes out result */
}
|
13,847 | /**
* @file packGraph.cu
* @date Spring 2020, revised Spring 2021
* @author Hugo De Moraes
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void packGraph_gpu(int * newSrc, int * oldSrc, int * newDst, int * oldDst, int * newWeight, int * oldWeight, int * edgeMap, int numEdges) {
    // Compact the edge arrays: an edge is written to its output slot when
    // its edgeMap entry differs from the next one (edgeMap presumably holds
    // a non-decreasing destination index -- TODO confirm against caller).
    // Get Thread ID
    const int NUM_THREADS = blockDim.x * gridDim.x;
    const int COL = blockIdx.x * blockDim.x + threadIdx.x;
    const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    const int FIRST_T_ID = COL + ROW * NUM_THREADS;
    for(int curTID = FIRST_T_ID; curTID < numEdges; curTID += NUM_THREADS) {
        // BUG FIX: the original unconditionally read edgeMap[curTID + 1],
        // which is one past the end of the array for the last edge. The
        // final edge is always the last occurrence of its map value, so
        // treat it as kept instead of comparing against out-of-bounds data.
        if(curTID == numEdges - 1 || edgeMap[curTID] != edgeMap[curTID + 1]) {
            newSrc[edgeMap[curTID]] = oldSrc[curTID];
            newDst[edgeMap[curTID]] = oldDst[curTID];
            newWeight[edgeMap[curTID]] = oldWeight[curTID];
        }
    }
}
|
13,848 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define BLOCK_SIZE 512
__global__ void reduction(float *out, float *in, unsigned size)
{
// Each block sums a 2*BLOCK_SIZE segment of `in` and writes its partial
// sum to out[blockIdx.x]; the host reduces the per-block results.
__shared__ float scratch[2*BLOCK_SIZE];
const unsigned t = threadIdx.x;
const unsigned base = 2u * blockIdx.x * blockDim.x + t;
// Stage the segment into shared memory, padding the tail with zeros so
// out-of-range slots contribute nothing.
scratch[t] = (base < size) ? in[base] : 0.0f;
scratch[BLOCK_SIZE + t] = (base + BLOCK_SIZE < size) ? in[base + BLOCK_SIZE] : 0.0f;
__syncthreads();
// Tree reduction: halve the active stride each pass, with a barrier
// between the write of one pass and the reads of the next.
for (unsigned stride = BLOCK_SIZE; stride > 0; stride >>= 1) {
if (t < stride)
scratch[t] += scratch[t + stride];
__syncthreads();
}
if (t == 0)
out[blockIdx.x] = scratch[0];
__syncthreads();
}
|
13,849 | /* Produced by CVXGEN, 2017-11-20 12:18:49 -0500. */
/* CVXGEN is Copyright (C) 2006-2017 Jacob Mattingley, jem@cvxgen.com. */
/* The code in this file is Copyright (C) 2006-2017 Jacob Mattingley. */
/* CVXGEN, or solvers produced by CVXGEN, cannot be used for commercial */
/* applications without prior written permission from Jacob Mattingley. */
/* Filename: matrix_support.c. */
/* Description: Support functions for matrix multiplication and vector filling. */
#include "solver.cuh"
CUDA_CALLABLE_MEMBER
void multbymA(double *lhs, double *rhs) {
  // A is a single row of ones, so mA*rhs is the negated left-to-right sum
  // of rhs[0..19] (same addition order as the generated original).
  double acc = 0.0;
  for (int i = 0; i < 20; ++i)
    acc += rhs[i];
  lhs[0] = -acc;
}
CUDA_CALLABLE_MEMBER
void multbymAT(double *lhs, double *rhs) {
  // A^T broadcasts the single dual variable, negated, to all 20 entries.
  const double v = -rhs[0];
  for (int i = 0; i < 20; ++i)
    lhs[i] = v;
}
CUDA_CALLABLE_MEMBER
void multbymG(double *lhs, double *rhs) {
  // G stacks [-I; I]: the first 20 outputs negate rhs, the next 20 copy it.
  for (int i = 0; i < 20; ++i)
    lhs[i] = -rhs[i];
  for (int i = 0; i < 20; ++i)
    lhs[20 + i] = rhs[i];
}
CUDA_CALLABLE_MEMBER
void multbymGT(double *lhs, double *rhs) {
  // G^T combines the two 20-entry dual blocks: lower block minus upper.
  for (int i = 0; i < 20; ++i)
    lhs[i] = rhs[i + 20] - rhs[i];
}
CUDA_CALLABLE_MEMBER
void multbyP(double *lhs, double *rhs, Params& params) {
  // lhs = (2*Sigma) * rhs, one output entry at a time; Sigma is indexed as
  // Sigma[j*20 + i], matching the generated storage layout.
  for (int i = 0; i < 20; ++i) {
    double acc = 0;
    for (int j = 0; j < 20; ++j)
      acc += rhs[j] * 2 * params.Sigma[j * 20 + i];
    lhs[i] = acc;
  }
}
CUDA_CALLABLE_MEMBER
void fillq(Workspace& work, Params& params) {
  // Linear objective coefficients: q_i = -(lambda_i * Returns_i).
  for (int i = 0; i < 20; ++i)
    work.q[i] = -(params.lambda[i] * params.Returns[i]);
}
CUDA_CALLABLE_MEMBER
void fillh(Workspace& work) {
  // Inequality right-hand side: the first 20 entries are 0.2, the second
  // 20 are -0.01 (matching the [-I; I] structure of G).
  for (int i = 0; i < 20; ++i)
    work.h[i] = 0.2;
  for (int i = 20; i < 40; ++i)
    work.h[i] = -0.01;
}
CUDA_CALLABLE_MEMBER
// Equality-constraint right-hand side: the single constraint's RHS is 1
// (presumably a budget/sum-to-one constraint -- TODO confirm in solver.cuh).
void fillb(Workspace& work) {
  work.b[0] = 1;
}
CUDA_CALLABLE_MEMBER
// Generated pre-solve hook; intentionally empty for this problem but kept
// so the CVXGEN solver's calling sequence stays uniform.
void pre_ops(void) {
}
|
13,850 | #include <stdio.h>
__global__ void helloFromGPU(void)
{
// Demonstration kernel: only one lane of the block prints, showing
// per-thread control flow on the device.
const bool isChosenThread = (threadIdx.x == 5);
if (isChosenThread)
{
printf("Hello World from GPU thread %d!\n", threadIdx.x);
}
}
int main(void)
{
printf("Hello World from CPU!\n");
// One block of 10 threads; only thread 5 produces output.
helloFromGPU<<<1, 10>>>();
// cudaDeviceReset tears down the context; it also waits for the kernel,
// so the device-side printf output is flushed before exit.
cudaDeviceReset();
return 0;
}
|
13,851 | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
// Simulates `noPaths` independent forward Monte Carlo paths of a
// species-population model over `nYears` time steps. One thread owns one
// whole path; all per-path work is sequential. Layout assumptions (from the
// indexing below -- TODO confirm against the host-side packing):
//   pops:      [path][year][species][patch]
//   totalPops: [path][year][species]
//   aars:      [path][year][species][control]
//   mmm/rowIdx/elemsPerCol: sparse per-(species,control) transition matrices,
//                           column-compressed with `maxElems` stride
//   speciesParams: 8 floats per species; uncertParams: 6 per uncertainty.
__global__ void forwardPathKernel(int noPaths, int nYears, int noSpecies, int noPatches, int noControls, int noUncertainties, float timeStep, float* initPops, float* pops, float*mmm, int* rowIdx, int* elemsPerCol, int maxElems, float* speciesParams, float* caps, float* aars, float* uncertParams, int* controls, float* uJumps, float* uBrownian, float* uJumpSizes, float* uJumpsSpecies, float* uBrownianSpecies, float* uJumpSizesSpecies, float* rgr, float* uResults, float* totalPops) {
    // Global thread index
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    // Only perform matrix multiplication sequentially for now. Later, if so
    // desired, we can use dynamic parallelism because the card in the
    // machine has CUDA compute compatability 3.5
    if (idx < noPaths) {
        // Initialise the population data at time t=0
        for (int ii = 0; ii < noSpecies; ii++) {
            float population = 0;
            for (int jj = 0; jj < noPatches; jj++) {
                pops[idx*(nYears+1)*noSpecies*noPatches + ii*noPatches + jj] =
                        initPops[jj];
                population += pops[idx*(nYears+1)*noSpecies*noPatches + ii*
                        noPatches + jj];
            }
            totalPops[idx*(nYears+1)*noSpecies + ii] = population;
            // The aars are computed in the next for loop.
        }
        // Carry over the initial value for all uncertainties
        for (int ii = 0; ii < noUncertainties; ii++) {
            uResults[idx*noUncertainties*(nYears+1) + ii] = uncertParams[ii*6];
        }
        // Per-path scratch buffer for the current global growth-rate mean of
        // each species. NOTE(review): in-kernel malloc return value is not
        // checked; a heap exhaustion would dereference NULL below.
        float* grMean;
        grMean = (float*)malloc(noSpecies*sizeof(float));
        for (int ii = 0; ii < noSpecies; ii++) {
            grMean[ii] = speciesParams[ii*8];
        }
        // All future time periods
        for (int ii = 0; ii < nYears; ii++) {
            // Control to pick
            int control = controls[idx*nYears + ii];
            for (int jj = 0; jj < noSpecies; jj++) {
                totalPops[idx*(nYears+1)*noSpecies + (ii+1)*noSpecies + jj] =
                        0;
                // Adjust the global growth rate mean for this species at this
                // time step for this path. Jump-diffusion with mean reversion:
                // a jump fires when the uniform draw is below the jump
                // probability stored in speciesParams[jj*8 + 5].
                float jump = (uJumpsSpecies[idx*noSpecies*nYears +
                        ii*noSpecies + jj] < speciesParams[jj*8 + 5]) ?
                        1.0f : 0.0f;
                float meanP = speciesParams[jj*8 + 1];
                float reversion = speciesParams[jj*8 + 4];
                float brownian = uBrownianSpecies[idx*noSpecies*nYears +
                        ii*noSpecies + jj]*speciesParams[jj*8 + 2];
                float jumpSize = uJumpSizesSpecies[idx*noSpecies*nYears
                        + ii*noSpecies + jj]*pow(speciesParams[
                        jj*8 + 5],2) - pow(speciesParams[jj*8 + 5],2)/2;
                grMean[jj] = grMean[jj] + reversion*(meanP - grMean[jj])*
                        timeStep + grMean[jj]*brownian + (exp(jumpSize) - 1)*
                        grMean[jj]*jump;
                // Initialise temporary populations
                float initialPopulation = 0.0f;
                for (int kk = 0; kk < noPatches; kk++) {
                    initialPopulation += pops[idx*(nYears+1)*noSpecies*
                            noPatches + ii*noSpecies*noPatches + jj*noPatches
                            + kk];
                }
                // For each patch, update the population for the next time
                // period by using the movement and mortality matrix for the
                // correct species/control combination. We use registers due
                // to their considerably lower latency over global memory.
                for (int kk = 0; kk < noControls; kk++) {
                    // Overall population at this time period
                    float totalPop = 0.0f;
                    int iterator = 0;
                    for (int ll = 0; ll < noPatches; ll++) {
                        // Population for this patch
                        float population = 0.0f;
                        // Transfer animals from each destination patch to
                        // this one for the next period (sparse column walk).
                        for (int mm = 0; mm < elemsPerCol[(jj*noControls + kk)*
                                noPatches + ll]; mm++) {
                            float value = pops[idx*(nYears+1)*noSpecies*
                                    noPatches + ii*noSpecies*noPatches + jj*
                                    noPatches + rowIdx[iterator + (jj*
                                    noControls + kk)*maxElems]]*mmm[iterator +
                                    (jj*noControls + kk)*maxElems];
                            population += value;
                            iterator++;
                        }
                        totalPop += population;
                        // We only update the actual populations if we are in
                        // the control that was selected. Save the total
                        // population for the start of the next time period.
                        if (kk == control && ii < nYears) {
                            // Population growth based on a mean-reverting process
                            rgr[idx*noSpecies*noPatches*nYears + ii*noSpecies*
                                    noPatches + jj*noPatches + ll] = grMean[jj]
                                    + rgr[idx*noSpecies*noPatches*nYears + ii*
                                    noSpecies*noPatches + jj*noPatches + ll]*
                                    speciesParams[jj*8 + 7];
                            float gr = rgr[idx*noSpecies*noPatches*nYears + ii*
                                    noSpecies*noPatches + jj*noPatches + ll];
                            // Logistic-style growth toward the patch capacity
                            // caps[jj*noPatches + ll] (scaled by 100).
                            pops[idx*(nYears+1)*noSpecies*noPatches + (ii+1)*
                                    noSpecies*noPatches + jj*noPatches + ll] =
                                    population*(1.0f + gr*(caps[jj*noPatches +
                                    ll] - population)/caps[jj*noPatches + ll]/
                                    100.0);
                            totalPops[idx*noSpecies*(nYears+1) + (ii+1)*
                                    noSpecies + jj] += pops[idx*(nYears+1)*
                                    noSpecies*noPatches + (ii+1)*noSpecies*
                                    noPatches + jj*noPatches + ll];
                        }
                    }
                    // Save AAR for this control at this time
                    // NOTE(review): divides by initialPopulation with no
                    // zero guard -- confirm populations stay positive.
                    aars[idx*(nYears+1)*noControls*noSpecies + ii*noControls*
                            noSpecies + jj*noControls + kk] = totalPop/
                            initialPopulation;
                }
            }
            // Other uncertainties: same jump-diffusion scheme, parameterised
            // by uncertParams (6 floats per uncertainty).
            for (int jj = 0; jj < noUncertainties; jj++) {
                float jump = (uJumps[idx*noUncertainties*nYears +
                        ii*noUncertainties + jj] < uncertParams[jj*6 + 5]) ?
                        1.0f : 0.0f;
                float curr = uResults[idx*noUncertainties*(nYears+1) +
                        ii*noUncertainties + jj];
                float meanP = uncertParams[jj*6 + 1];
                float reversion = uncertParams[jj*6 + 3];
                float brownian = uBrownian[idx*noUncertainties*nYears +
                        ii*noUncertainties + jj]*uncertParams[jj*6 + 2];
                float jumpSize = uJumpSizes[idx*noUncertainties*nYears +
                        ii*noUncertainties + jj]*pow(uncertParams[jj*6 + 4],2)
                        - pow(uncertParams[jj*6 + 4],2)/2;
                // Save the value of the uncertainty for the next time period
                uResults[idx*noUncertainties*(nYears+1)+(ii+1)*noUncertainties+jj]
                        = curr + reversion*(meanP - curr)*timeStep +
                        curr*brownian + (exp(jumpSize) - 1)*curr*jump;
            }
        }
        free(grMean);
    }
} |
13,852 | #include<stdio.h>
__global__ void parallel_vector_add(int* d_a, int* d_b, int* d_c, int* d_n)
{
// Element handled by this thread; the element count lives in device
// memory, so it is read through d_n.
const int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= *d_n) {
printf("i am thread #%d I am doing nothing\n", idx);
return;
}
printf("i am thread #%d and about to compute c[%d]\n", idx, idx);
d_c[idx] = d_a[idx] + d_b[idx];
}
int main(){
// Reads n from stdin, builds a[i]=i and b[i]=n-i, adds them on the GPU,
// and prints the result plus the measured kernel time.
int n;
// Robustness: refuse malformed or non-positive input instead of creating
// VLAs of indeterminate/negative size.
if (scanf("%d",&n) != 1 || n <= 0)
return 1;
//declare input and output on host
int h_a[n];
int h_b[n];
for(int i = 0; i < n; i++){
h_a[i] = i;
h_b[i] = n-i;
}
int h_c[n];
//part1 copy data from host to device
int *d_a, *d_b, *d_c, *d_n;
cudaMalloc((void **) &d_a, n*sizeof(int));
cudaMalloc((void **) &d_b, n*sizeof(int));
cudaMalloc((void **) &d_c, n*sizeof(int));
cudaMalloc((void **) &d_n, sizeof(int));
cudaMemcpy(d_a, h_a, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
//part2 kernel launch
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// BUG FIX: the grid was sized with (n+15)/16 while launching 512-thread
// blocks, spawning ~32x the needed threads; use a ceil-divide that
// matches the actual block size.
const int threadsPerBlock = 512;
const int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
parallel_vector_add<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c, d_n);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float millisec = 0;
cudaEventElapsedTime(&millisec, start, stop);
//part3 copy data from device back to host and free all data allocate on device
cudaMemcpy(h_c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_n);            // leak fix: d_n was never freed
cudaEventDestroy(start);  // leak fix: events were never destroyed
cudaEventDestroy(stop);
for(int i = 0; i < n; i++)
printf("%d ", h_c[i]);
printf("\n");
printf("Effective time(ms): %f \n",millisec);
return 0;
}
|
13,853 | // Exhibits a data race (RW, WW) in global memory.
// Gklee and Gkleep both detect.
#include <iostream>
#define N 50
#define T 128
#define B 2
__global__ void colonel(int* in)
{
// Global thread id across the whole grid (B blocks of T threads).
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
// INTENTIONAL data race (see file header): with B*T = 256 threads and only
// N = 50 slots, several threads increment the same element non-atomically.
// Kept as-is -- this file exists to exercise race-detection tools.
in[tidx%N]++;
}
int main()
{
// Host mirror of the device buffer, zero-initialized by calloc.
int* hostBuf = (int*) calloc(N, sizeof(int));
int* devBuf;
cudaMalloc((void**)&devBuf, N * sizeof(int));
cudaMemcpy(devBuf, hostBuf, N * sizeof(int), cudaMemcpyHostToDevice);
// Launch the (deliberately racy) increment kernel.
colonel<<<B, T>>>(devBuf);
// The blocking copy on the default stream also waits for the kernel.
cudaMemcpy(hostBuf, devBuf, N * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
std::cout << hostBuf[i] << " ";
std::cout << std::endl;
free(hostBuf);
cudaFree(devBuf);
} |
13,854 | /*
Copyright 2017 the arraydiff authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common.cuh"
#include <cuda_runtime_api.h>
#include <stdint.h>
#include <stdlib.h>
__global__ void symm_unit_clip_fwd_f32_kernel(
    uint32_t dim,
    const float *clip,
    const float *x,
    float *y)
{
  // Forward pass: identity for positive inputs, slope clip[0] for negative
  // inputs. (An earlier variant normalizing clip by max(|clip|,1) was
  // already disabled in the original.)
  uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= dim) {
    return;
  }
  float slope = clip[0];
  float v = x[i];
  float gate = (v > 0.0f) + slope * (v < 0.0f);
  y[i] = v * gate;
}
extern "C" void arraydiff_cuda_kernel_symm_unit_clip_fwd_f32(
    size_t dim,
    const float *clip,
    const float *x,
    float *y,
    cudaStream_t stream)
{
  // One thread per element: 1024-thread blocks, ceil-divided grid, on the
  // caller's stream.
  const size_t kBlock = 1024;
  const size_t kGrid = (dim + kBlock - 1) / kBlock;
  symm_unit_clip_fwd_f32_kernel<<<kGrid, kBlock, 0, stream>>>(dim, clip, x, y);
}
__global__ void symm_unit_clip_param_bwd_f32_atomic_naive_kernel(
    uint32_t dim,
    const float *clip,
    const float *x,
    const float *dy,
    float *grad)
{
  // d(output)/d(clip) is x_i for negative inputs and 0 otherwise; the
  // naive variant has every thread atomically accumulate its term straight
  // into grad[0] (heavy contention -- see the "fast" kernel below).
  uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= dim) {
    return;
  }
  float v = x[i];
  atomicAdd(&grad[0], dy[i] * v * (v < 0.0f));
}
__global__ void symm_unit_clip_param_bwd_f32_atomic_fast_kernel(
    uint32_t dim,
    const float *clip,
    const float *x,
    const float *dy,
    float *grad)
{
  // Per-block staging buffer for partial gradient terms; sized to match the
  // fixed 1024-thread launch used by the host wrapper below.
  __shared__ float cache[1024];
  uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < dim) {
    /*float c = clip[0];
    float u = max(fabs(c), 1.0f);
    float du = 1.0f * (c > 1.0f) - 1.0f * (c < -1.0f);
    float x_i = x[idx];
    cache[threadIdx.x] = (1.0f / u) * (1.0f - du * c / u) * dy[idx] * x_i * (x_i < 0.0f);*/
    // d(output)/d(clip): only negative inputs contribute.
    float x_i = x[idx];
    cache[threadIdx.x] = dy[idx] * x_i * (x_i < 0.0f);
  } else {
    // Tail threads pad with zero so the block-wide sum stays correct.
    cache[threadIdx.x] = 0.0f;
  }
  __syncthreads();
  // Block-level sum into cache[0]; helper defined in common.cuh.
  threadblock1024_reduce_sum_f32(cache);
  if (idx < dim) {
    if (threadIdx.x == 0) {
      // One atomic per block instead of one per element.
      atomicAdd(&grad[0], cache[0]);
    }
  }
}
extern "C" void arraydiff_cuda_kernel_symm_unit_clip_param_bwd_nondeterministic_f32(
    size_t dim,
    const float *clip,
    const float *x,
    const float *dy,
    float *grad,
    cudaStream_t stream)
{
  // "Nondeterministic" because the per-block atomicAdds commit in an
  // unspecified order (float addition is not associative).
  const size_t kBlock = 1024;
  const size_t kGrid = (dim + kBlock - 1) / kBlock;
  symm_unit_clip_param_bwd_f32_atomic_fast_kernel<<<kGrid, kBlock, 0, stream>>>(
      dim, clip, x, dy, grad);
}
__global__ void symm_unit_clip_input_bwd_f32_kernel(
    uint32_t dim,
    const float *clip,
    const float *x,
    const float *dy,
    float *dx)
{
  // Chain rule through the clip nonlinearity. Note dx is ACCUMULATED into,
  // not overwritten, matching the forward kernel's gate.
  uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= dim) {
    return;
  }
  float slope = clip[0];
  float v = x[i];
  dx[i] += dy[i] * ((v > 0.0f) + slope * (v < 0.0f));
}
extern "C" void arraydiff_cuda_kernel_symm_unit_clip_input_bwd_f32(
    size_t dim,
    const float *clip,
    const float *x,
    const float *dy,
    float *dx,
    cudaStream_t stream)
{
  // Same launch shape as the forward pass: 1024 threads per block.
  const size_t kBlock = 1024;
  const size_t kGrid = (dim + kBlock - 1) / kBlock;
  symm_unit_clip_input_bwd_f32_kernel<<<kGrid, kBlock, 0, stream>>>(
      dim, clip, x, dy, dx);
}
|
13,855 | /*
* cudaQueueTemplates.cu
*
* Created on: Jun 17, 2015
* Author: fpantale
*/
#include <cuda_runtime.h>
#include <vector>
#include <iostream>
#include <algorithm>
// CUDAQueue is a single-block queue.
// One may want to use it as a __shared__ struct, and have multiple threads
// pushing data into it.
template< int maxSize, class T>
struct CUDAQueue
{
__inline__ __device__
void push(const T& element) { auto oldvalue = atomicAdd (&size, 1); data[oldvalue] = element; };
T data[maxSize];
int size;
};
__global__ void Find3(int* a, int* results, int* N)
{
__shared__ CUDAQueue<1024, int> queue ;
int index = threadIdx.x;
if (threadIdx.x ==0 )
queue.size= 0;
__syncthreads();
if(a[index] ==3)
queue.push(index);
__syncthreads();
if(threadIdx.x < queue.size)
results[index] = queue.data[index];
if (threadIdx.x ==0 )
*N = queue.size;
}
int main ()
{
int* a;
int* d_a, *d_results;
int* d_numberOfThrees;
int N = 1024;
a= (int*)malloc(N*sizeof(int));
cudaMalloc((void**)&d_a, N*sizeof(int));
cudaMalloc((void**)&d_results, N*sizeof(int));
cudaMalloc((void**)&d_numberOfThrees, sizeof(int));
std::vector<int> idOf3;
for (int i =0; i< N; ++i)
{
a[i] = i % 4;
if(a[i] == 3)
idOf3.push_back(i);
}
cudaMemcpy(d_a, a, sizeof(int)*N, cudaMemcpyHostToDevice);
Find3<<<1, 1024>>> (d_a, d_results, d_numberOfThrees);
int results[1024];
int numberOfThrees;
cudaMemcpy(results, d_results, sizeof(int)*N, cudaMemcpyDeviceToHost);
cudaMemcpy(&numberOfThrees, d_numberOfThrees, sizeof(int), cudaMemcpyDeviceToHost);
std::sort(idOf3.begin(), idOf3.end());
std::vector<int> idOf3_gpu(numberOfThrees);
for(int i =0; i< numberOfThrees; ++i)
{
idOf3_gpu[i] = results[i];
}
std::sort(idOf3_gpu.begin(), idOf3_gpu.end());
for(int i =0; i< numberOfThrees; ++i)
{
std::cout << idOf3_gpu[i] << "\t" << idOf3[i] << std::endl;
}
cudaFree(d_a);
cudaFree(d_results);
cudaFree(d_numberOfThrees);
free(a);
}
|
13,856 | #include "includes.h"
#define tam 1.0
#define dx 0.00001
#define dt 0.000001
#define T 0.01
#define kappa 0.000045
__global__ void Atualiza( double *u, double *u_prev, const int n ) {
// One explicit finite-difference step of the 1D heat equation:
// u = u_prev + kappa*dt/dx^2 * (second spatial difference).
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i == 0 ) {
// Enforce the boundary condition at both ends of the rod.
u[ 0 ] = u[ n ] = 0.;
}
else if ( i < n ) {
u[ i ] = u_prev[ i ] + kappa * dt / ( dx * dx ) * ( u_prev[ i - 1 ] - 2 * u_prev[ i ] + u_prev[ i + 1 ] );
}
} |
13,857 | #include "stdio.h"
#include <cuda_runtime.h>
// Pick the first CUDA device with compute capability >= 1.x and make it
// current. Returns false (with a message) when no usable device exists.
bool InitCuda(void)
{
// How many CUDA-capable devices are visible?
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0)
{
printf("There is no cuda device\n");
return false;
}
// Scan for the first device whose properties we can query and whose
// major compute capability is at least 1.
int dev = 0;
for (; dev < deviceCount; dev++)
{
cudaDeviceProp prop;
if (cudaGetDeviceProperties(&prop, dev) != cudaSuccess)
continue;
if (prop.major >= 1)
break;
}
if (dev == deviceCount)
{
printf("There is no CUDA 1.X device\n");
return false;
}
// Make the chosen device current for this host thread.
cudaSetDevice(dev);
return true;
}
int main(int argv, char* argc[])
{
// NOTE: parameter names argv/argc are swapped from convention; kept so the
// signature is unchanged. Neither is used.
if (InitCuda())
{
printf("CUDA initialized success!\n");
}
else
{
printf("CUDA initialized failed!\n");
return 0;
}
}
|
13,858 | #include<iostream>
#include<math.h>
#include<stdint.h>
#include<stdlib.h>
#define N 16
#define M 16
__device__
void convolve(uint8_t input[N][M],
int* numer,
int* denom,
int* kernel,
int i,
int j)
{
// Each thread of the 5x5 block handles one kernel tap centred on (i, j).
const int kx = threadIdx.x;
const int ky = threadIdx.y;
const int tap = kx + ky * 5;
// Image coordinates of this tap.
const int px = i + kx - 2;
const int py = j + ky - 2;
// Taps falling outside the image are skipped; the denominator shrinks
// accordingly, so border pixels stay correctly normalised.
if (px < 0 || py < 0 || px >= N || py >= M)
return;
atomicAdd(numer, kernel[tap] * int(input[px][py]));
atomicAdd(denom, kernel[tap]);
}
__global__
void gauss(uint8_t input[N][M], uint8_t output[N][M], int* kernel)
{
// One block per output pixel; the block's 5x5 threads each accumulate one
// kernel tap into the shared numerator/denominator via atomicAdd.
int j = blockIdx.y;
int i = blockIdx.x;
__shared__ int numer;
__shared__ int denom;
// Let a single thread initialise the accumulators (the original had every
// thread write 0 -- a benign but ill-formed multi-writer race).
if(threadIdx.x==0 && threadIdx.y==0)
{
numer = 0;
denom = 0;
}
__syncthreads();
convolve(input, &numer, &denom, kernel, i, j);
// BUG FIX: all taps must be accumulated before thread (0,0) reads the
// totals; without this barrier the written pixel could miss contributions
// from threads that had not yet completed their atomicAdds.
__syncthreads();
if(threadIdx.x==0 && threadIdx.y==0)
{
output[i][j] = uint8_t((numer) / (denom));
}
}
// Dump the image to stdout as a comma/tab separated grid of decimal values.
void print(uint8_t image[N][M])
{
for (int r = 0; r < N; r++)
{
for (int c = 0; c < M; c++)
std::cout << int(image[r][c]) << ",\t";
std::cout << "\n";
}
}
int main()
{
// BUG FIX: srand(NULL) passed a null pointer constant where an unsigned
// seed is expected; seed explicitly with 0, which preserves the original
// deterministic random sequence.
srand(0u);
uint8_t *image, *blur;
cudaMallocManaged(&image, N*M*sizeof(uint8_t));
cudaMallocManaged(&blur, N*M*sizeof(uint8_t));
// Fill the source image with random bytes.
for (int i = 0; i<N; i++)
for (int j = 0; j<M; j++)
reinterpret_cast<uint8_t (*)[M]>(image)[i][j] = rand()% 256;
// 5x5 integer Gaussian kernel, copied into managed memory for the device.
int* kernel;
cudaMallocManaged(&kernel, sizeof(int) * 25);
int dummy[25] = { 1, 4, 7, 4, 1,
4,16,26,16, 4,
7,26,41,26, 7,
4,16,26,16, 4,
1, 4, 7, 4, 1 };
for (int i=0; i<25; i++)
kernel[i] = dummy[i];
// One block per pixel, one thread per kernel tap.
dim3 blockSize(5, 5);
dim3 gridSize(N, M);
print(reinterpret_cast<uint8_t (*)[M]>(image));
gauss<<<gridSize, blockSize>>>(reinterpret_cast<uint8_t (*)[M]>(image),
reinterpret_cast<uint8_t (*)[M]>(blur),
kernel);
// Managed memory must not be read by the host until the kernel finishes.
cudaDeviceSynchronize();
std::cout<<"\n";
print(reinterpret_cast<uint8_t (*)[M]>(blur));
cudaFree(image);
cudaFree(blur);
cudaFree(kernel);
return 0;
}
|
13,859 | /*
*See if we can get a GPU to throw cudaErrorMemoryAllocation
*/
#include <iostream>
#include <cstdio>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report any non-success CUDA status with file/line context; optionally
// terminate the process with the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code == cudaSuccess)
return;
fprintf(stderr,"GPUassert: %s %s %d\n",
cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
int main(void) {
// Try to allocate 4e9 bytes on the device; gpuErrchk aborts with a
// diagnostic if the driver reports cudaErrorMemoryAllocation.
float* devBuf = NULL;
size_t bytes = pow(10, 9) * sizeof(float);
gpuErrchk( cudaMalloc((void**)&devBuf, bytes) );
printf("Successfully allocated %zu bytes.\n", bytes);
cudaFree(devBuf);
return 0;
}
|
13,860 | #include <iostream>
#include <math.h>
int main(int argc, char *argv[])
{
// Echo the first two command-line arguments, if present.
// BUG FIX: the original printed argv[2] whenever argc > 1, dereferencing
// the terminating null pointer (or worse) when only one argument was given.
if (argc > 2)
{
std::cout << argv[1] << " " << argv[2];
}
else if (argc > 1)
{
std::cout << argv[1];
}
else
{
std::cout << "Nothing entered.";
}
return 0;
} |
13,861 | //errorcheck_wcheck.cu: The program is designed to produce output
//'data = 7'. However, errors have been intentionally placed into
//the program as an exercise in error checking.
#include <stdio.h>
#include <stdlib.h>
// Single-thread kernel: stores the sentinel value 7 through ptr so the host
// can verify device writes round-trip correctly.
__global__ void setData(int *ptr)
{
*ptr = 7;
}
// Per the file header, the errors below are PLANTED on purpose as an
// error-checking exercise; comments mark them rather than fixing them.
int main(void)
{
int *data_d = 0;
int *data_h = 0;
cudaError_t error;
// PLANTED ERROR: UINT_MAX*sizeof(int) is far beyond device memory, so this
// allocation is expected to fail and leave data_d null.
error = cudaMalloc((void**)&data_d, UINT_MAX*sizeof(int));
if( error != cudaSuccess)
{
printf("cudaMalloc error: %s\n", cudaGetErrorString(error));
}
data_h = (int *)malloc(sizeof(int));
// PLANTED ERROR: a literal 0 (null pointer) is passed instead of data_d,
// so the kernel writes through NULL.
setData<<<1,1>>>(0);
// Deprecated synchronization call kept as-is; surfaces the kernel fault.
cudaThreadSynchronize();
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("setData error: %s\n", cudaGetErrorString(error));
}
// Copy from the (never successfully allocated) device buffer.
error = cudaMemcpy(data_h, data_d, sizeof(int), cudaMemcpyDeviceToHost);
if(error != cudaSuccess)
{
printf("cudaMemcpy error: %s\n", cudaGetErrorString(error));
}
printf("data = %d\n", *data_h);
free(data_h);
cudaFree(data_d);
return 0;
}
|
// One user-submitted report event.
struct UserUpdate
{
// Default constructor leaves all fields uninitialized (device-side use).
__device__ UserUpdate()
{
}
int update_id;    // id of this update event
int user_id;      // id of the reporting user
int section;      // section the update refers to
float tag;        // reported tag value
float timestamp;  // time of the report
// Field-wise initializing constructor.
__device__ UserUpdate(int id, int uid, int sec, float t, float time)
: update_id(id), user_id(uid), section(sec), tag(t), timestamp(time)
{
}
};
// Five tunable parameters evaluated by the genetic-algorithm fitness.
struct FitnessParameter
{
__device__ FitnessParameter()
{
}
float var1;
float var2;
float var3;
float var4;
float var5;
// Value-wise constructor.
__device__ FitnessParameter(float variable1, float variable2, float variable3, float variable4, float variable5)
: var1(variable1), var2(variable2), var3(variable3), var4(variable4), var5(variable5)
{
}
// Unpack the first five entries of a flat array; variablesLen0 is unused
// but kept for the generated calling convention.
__device__ FitnessParameter( float* variables, int variablesLen0)
: var1(variables[0]), var2(variables[1]), var3(variables[2]), var4(variables[3]), var5(variables[4])
{
}
};
// Prediction quality metrics, each paired with a random-baseline variant
// for comparison.
struct PredictionPerformances
{
__device__ PredictionPerformances()
{
}
double occupancyPerformance;
double occupancyPerformanceRandom;
double trustPerformance;
double trustPerformanceRandom;
};
// Simulation tuning knobs passed to the device-side experiment.
struct SimOptions
{
__device__ SimOptions()
{
}
float I;
float lambda_promote;
float lambda_punish;
float certainty_coeff;
float score_coeff;
float decay;
// Value-wise constructor.
__device__ SimOptions(float val_I, float val_lambda_promote, float val_lambda_punish, float val_certainty_coeff, float val_score_coeff, float val_decay)
: I(val_I),
lambda_promote(val_lambda_promote),
lambda_punish(val_lambda_punish),
certainty_coeff(val_certainty_coeff),
score_coeff(val_score_coeff),
decay(val_decay)
{
}
};
// Bundles all inputs to a fitness evaluation: flattened arrays plus their
// dimension counts, following the generated Len0/Len1/... convention.
struct FitnessData
{
__device__ FitnessData()
{
}
float* GroundTruth; int GroundTruthLen0; int GroundTruthLen1;
float* UserTrusts; int UserTrustsLen0;
UserUpdate* Updates; int UpdatesLen0; int UpdatesLen1; int UpdatesLen2;
int nSections;
// BUG FIX: the original constructor accepted the length parameters but
// never stored them, leaving every *Len* member uninitialized.
__device__ FitnessData( float* _groundTruth, int _groundTruthLen0, int _groundTruthLen1, float* _userTrusts, int _userTrustsLen0, UserUpdate* _updates, int _updatesLen0, int _updatesLen1, int _updatesLen2, int _nSections)
{
GroundTruth = _groundTruth;
GroundTruthLen0 = _groundTruthLen0;
GroundTruthLen1 = _groundTruthLen1;
UserTrusts = _userTrusts;
UserTrustsLen0 = _userTrustsLen0;
Updates = _updates;
UpdatesLen0 = _updatesLen0;
UpdatesLen1 = _updatesLen1;
UpdatesLen2 = _updatesLen2;
nSections = _nSections;
}
};
// GeneticAlgorithm.Population
extern "C" __global__ void calculateFitnessOnDevice( float* dev_fitnesses, int dev_fitnessesLen0, float* groundTruth, int groundTruthLen0, int groundTruthLen1, float* userTrusts, int userTrustsLen0, UserUpdate* updates, int updatesLen0, int updatesLen1, int updatesLen2, FitnessParameter* dev_fitnessParams, int dev_fitnessParamsLen0);
// GeneticAlgorithm.Fitness
__device__ float fitness( float* groundTruth, int groundTruthLen0, int groundTruthLen1, float* userTrusts, int userTrustsLen0, UserUpdate* updates, int updatesLen0, int updatesLen1, int updatesLen2, FitnessParameter fitnessParams);
// GeneticAlgorithm.Experiment
__device__ float execute( float* groundTruth, int groundTruthLen0, int groundTruthLen1, float* userTrusts, int userTrustsLen0, UserUpdate* updates, int updatesLen0, int updatesLen1, int updatesLen2, FitnessParameter fitnessParam);
// GeneticAlgorithm.Population
extern "C" __global__ void calculateFitnessOnDevice( float* dev_fitnesses, int dev_fitnessesLen0, float* groundTruth, int groundTruthLen0, int groundTruthLen1, float* userTrusts, int userTrustsLen0, UserUpdate* updates, int updatesLen0, int updatesLen1, int updatesLen2, FitnessParameter* dev_fitnessParams, int dev_fitnessParamsLen0)
{
// One thread evaluates one individual of the population.
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= dev_fitnessesLen0)
return;
dev_fitnesses[idx] = fitness(groundTruth, groundTruthLen0, groundTruthLen1, userTrusts, userTrustsLen0, updates, updatesLen0, updatesLen1, updatesLen2, dev_fitnessParams[idx]);
}
// GeneticAlgorithm.Fitness
// Thin forwarding wrapper kept for the generated call structure: fitness of
// one parameter set is just the result of running the experiment.
__device__ float fitness( float* groundTruth, int groundTruthLen0, int groundTruthLen1, float* userTrusts, int userTrustsLen0, UserUpdate* updates, int updatesLen0, int updatesLen1, int updatesLen2, FitnessParameter fitnessParams)
{
return execute(groundTruth, groundTruthLen0, groundTruthLen1, userTrusts, userTrustsLen0, updates, updatesLen0, updatesLen1, updatesLen2, fitnessParams);
}
// GeneticAlgorithm.Experiment
// Stub implementation: always returns the placeholder value -1.1f. The real
// experiment presumably replaces this body -- TODO confirm upstream.
__device__ float execute( float* groundTruth, int groundTruthLen0, int groundTruthLen1, float* userTrusts, int userTrustsLen0, UserUpdate* updates, int updatesLen0, int updatesLen1, int updatesLen2, FitnessParameter fitnessParam)
{
return -1.1f;
}
|
13,863 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
__global__ void reverseWord(char *a , char *b , int size)
{
// Thread `id` mirrors character id into position size-id; at the call
// site `size` is strlen-1, i.e. the index of the last character.
int id = threadIdx.x;
b[size-id] = a[id];
// BUG FIX: the output buffer was never NUL-terminated, so the host could
// print trailing garbage from uninitialized device memory. Thread 0
// writes the terminator at b[size+1] (in bounds: the host allocates
// strlen+1 bytes).
if (id == 0)
b[size+1] = '\0';
}
int main()
{
// Reads a line from stdin, reverses it on the GPU, prints the result.
int size;
char *a = (char*)malloc(sizeof(char)*(30));
printf("Enter the string \n");
// SECURITY/BUG FIX: the unbounded %[^\n] could overflow the 30-byte
// buffer; bound the field width and bail out on input failure.
if (scanf("%29[^\n]%*c", a) != 1)
{
free(a);
return 1;
}
char *b = (char*)malloc(sizeof(char)*(30));
char *d_a , *d_b;
printf("Input String = %s \n",a);
size = strlen(a);
int size1 = sizeof(char)*(size+1);
int size2 = sizeof(char)*(size+1);
cudaMalloc((void**)&d_a,size1);
cudaMalloc((void**)&d_b,size2);
cudaMemcpy(d_a,a,sizeof(char)*(size+1),cudaMemcpyHostToDevice);
// One thread per character; pass the index of the last character.
reverseWord<<<1,size>>>(d_a,d_b,size-1);
cudaMemcpy(b,d_b,size2,cudaMemcpyDeviceToHost);
// BUG FIX: the device never wrote b[size]; terminate explicitly before
// printing so printf cannot read uninitialized bytes.
b[size] = '\0';
printf("Output string = %s \n",b);
cudaFree(d_a);
cudaFree(d_b);
// BUG FIX: host buffers were leaked.
free(a);
free(b);
return 0;
} |
13,864 | #include "includes.h"
#ifndef __CUDACC__
#define __CUDACC__
#endif
// generate a random square matrix
// Tiled square matrix multiply P = M*N with 20x20 tiles staged in shared
// memory. Preconditions (no tail handling below): `width` must be a
// multiple of 20, blockDim must be (20,20), and the grid must cover the
// full width/20 x width/20 tile space.
__global__ void matMulKernel20(float* P, float* M, float* N, int width) {
__shared__ float Mds20[20][20];
__shared__ float Nds20[20][20];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int row = by*20 + ty; int col = bx*20 + tx;
float pVal = 0;
// Walk the shared 20-wide strip of M's row and N's column, one tile phase
// at a time.
for(int ph = 0; ph < width/20; ++ph) {
Mds20[ty][tx] = M[row*width + ph*20 + tx];
Nds20[ty][tx] = N[(ph*20 + ty)*width + col];
// All loads must land before any thread reads the tiles.
__syncthreads();
for(int k = 0; k < 20; ++k)
pVal += Mds20[ty][k]*Nds20[k][tx];
// Keep the next phase's loads from clobbering tiles still being read.
__syncthreads();
}
P[row*width + col] = pVal;
} |
13,865 | /**
* Demo code of Cuda programming lecture
*
* This programme illustrates the benefit of using shared memory
*
*
*/
#include <cstdio>
#include <cstdlib>
#include <sys/time.h>
//Kernel function that does not use shared memory, each memory read is from global memory
__global__ void compute_no_shared_memory(int *data)
{
const int tid = threadIdx.x;
// Each block works on its own contiguous blockDim.x-sized slice.
int* slice = data + blockIdx.x * blockDim.x;
// Prefix-style sum where every read hits global memory (the point of
// this demo kernel is the contrast with the shared-memory variant).
int acc = 0;
for (int k = 0; k < tid; k++)
acc += slice[k];
//Make sure all threads in a block have completed computation
//__syncthreads();
// NOTE(review): with the barrier commented out, higher-indexed threads
// may read slots already rewritten below -- kept as in the original demo.
slice[tid] = acc + slice[tid];
}
//Kernel function that utilizes shared memory
__global__ void compute_use_shared_memory(int *data)
{
const int tid = threadIdx.x;
int* slice = data + blockIdx.x * blockDim.x;
// Block-local staging copy of the slice, one element per thread.
__shared__ int staged[1024];
staged[tid] = slice[tid];
// Every thread must finish loading before any thread reads a
// neighbour's slot; otherwise we'd compute on uninitialized data.
__syncthreads();
// Same prefix-style sum as the no-shared variant, but all reads come
// from shared memory.
int acc = 0;
for (int k = 0; k < tid; k++)
acc += staged[k];
// Write the result back to global memory.
slice[tid] = acc + staged[tid];
}
int main()
{
// Benchmark driver: times the shared-memory kernel against the
// global-memory-only kernel on the same 2^25-element input.
int * h_data, *d_data;
int N = 33554432;
int data_size = N * sizeof(int);
// 1024-thread blocks; N is an exact multiple, so no tail handling.
int threads_per_block = 1024;
int blocks_per_grid = N / threads_per_block;
timeval start, end;
float elapsed_time_use_shared_m;
float elapsed_time_no_shared_m;
h_data = (int*)malloc(data_size);
cudaMalloc((void**)&d_data, data_size);
for (int i = 0; i < N; i++)
h_data[i] = i;
// --- Timed run 1: shared-memory kernel ---
cudaMemcpy(d_data, h_data, data_size, cudaMemcpyHostToDevice);
gettimeofday(&start, NULL);
compute_use_shared_memory<<<blocks_per_grid, threads_per_block>>>(d_data);
// Kernel launches are asynchronous; wait before stopping the clock.
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
elapsed_time_use_shared_m = 1000*(end.tv_sec-start.tv_sec) + (float)(end.tv_usec - start.tv_usec)/1000;
// --- Timed run 2: global-memory-only kernel, on a fresh copy of input ---
cudaMemcpy(d_data, h_data, data_size, cudaMemcpyHostToDevice);
gettimeofday(&start, NULL);
compute_no_shared_memory<<<blocks_per_grid, threads_per_block>>>(d_data);
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
elapsed_time_no_shared_m = 1000*(end.tv_sec-start.tv_sec) + (float)(end.tv_usec - start.tv_usec)/1000;
printf("elapsed time of kernel funtion that uses shared memory: %.2f ms\n", elapsed_time_use_shared_m);
printf("elapsed time of kernel funtion that does not use shared memory: %.2f ms\n", elapsed_time_no_shared_m);
// Release host and device buffers.
free(h_data);
cudaFree(d_data);
return 0;
}
|
13,866 | #include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Singly linked list of input file paths; a node whose filename is NULL
 * acts as the end-of-list sentinel (see map_input_split's loop). */
typedef struct MapFileList {
char* filename;
struct MapFileList* next;
}MapFileList;
/* Singly linked list of (key, value) records produced by input splitting;
 * key holds the byte offset of the line as text, value holds the line. */
typedef struct MapInputList{
char* key;
char* value;
struct MapInputList* next;
}MapInputList;
/* Supported input parsing modes (only TextInputFormat is implemented in
 * map_input_split). */
typedef enum InputFormat{TextInputFormat,KeyValueInputFormat,SequenceFileInputFormat} input_format;
/* Top-level job description: input files, split records, and map launch
 * configuration. */
typedef struct MapReduceSpec{
MapFileList* map_file_list;
MapInputList* map_input_list;
unsigned map_block_num;
unsigned map_thread_num;
input_format map_input_format;
}MapReduceSpec;
/* Reset a freshly allocated file-list node to the empty/sentinel state. */
void init_map_file_list(MapFileList* list){
list->next=NULL;
list->filename=NULL;
}
/* Release every node of a file list, including the strings they own. */
void free_map_file_list(MapFileList* list){
MapFileList* node = list;
while(node != NULL){
MapFileList* next = node->next;
if(node->filename != NULL)
free(node->filename);
free(node);
node = next;
}
}
/* Reset a freshly allocated record node: no key, no value, no successor. */
void init_map_input_list(MapInputList* list){
list->next=NULL;
list->value=NULL;
list->key=NULL;
}
/* Release every node of a record list, freeing owned key/value strings. */
void free_map_input_list(MapInputList* list){
MapInputList* node = list;
while(node != NULL){
MapInputList* next = node->next;
if(node->key != NULL)
free(node->key);
if(node->value != NULL)
free(node->value);
free(node);
node = next;
}
}
/* Populate a spec with the library defaults: plain-text input, 4 map
 * blocks, 128 threads, and no files or records attached yet. */
void init_mapreduce_spec(MapReduceSpec* spec){
spec->map_input_format=TextInputFormat;
spec->map_thread_num=128;
spec->map_block_num=4;
spec->map_input_list=NULL;
spec->map_file_list=NULL;
}
/* Tear down a spec: both owned lists, then the spec object itself. */
void free_spec(MapReduceSpec* spec){
MapFileList* files = spec->map_file_list;
MapInputList* records = spec->map_input_list;
free_map_file_list(files);
free_map_input_list(records);
free(spec);
}
/*
 * Split every listed input file into (key, value) records, where key is
 * the byte offset at the start of a line (printed as text) and value is
 * the line itself. Only TextInputFormat is handled. Files are accepted
 * whole until the 256 MiB budget is exhausted; larger files are skipped
 * with a message.
 * NOTE(review): the getline() loop body executes once more after EOF
 * (result only becomes -1 after the failed read), so the list ends with
 * an extra node whose key was allocated but whose value may be NULL —
 * consumers must tolerate that trailing node. The 10-byte key buffer also
 * limits offsets to 9 digits.
 */
void map_input_split(MapReduceSpec* spec){
MapFileList* file_list_entry;
size_t buffer_size=(size_t)256*1024*1024;
size_t buffer_used=0;
MapInputList* input_list_entry;
FILE* pFile;
file_list_entry=spec->map_file_list;
input_list_entry=(MapInputList*)malloc(sizeof(MapInputList));
init_map_input_list(input_list_entry);
spec->map_input_list=input_list_entry;
if(spec->map_input_format==TextInputFormat){
size_t file_size;
/* A node with filename == NULL terminates the file list. */
while(file_list_entry->filename!=NULL){
pFile=fopen(file_list_entry->filename,"rb");
if (pFile==NULL) {fputs ("File error\n",stderr); exit (1);}
/* Measure the file so we can check it against the budget. */
fseek (pFile , 0 , SEEK_END);
file_size = ftell (pFile);
rewind (pFile);
if(buffer_used+file_size<=buffer_size){
ssize_t result=0;
size_t len = 0;
while (result!= -1) {
input_list_entry->key=(char*)malloc(10);
sprintf(input_list_entry->key,"%d",(int)ftell(pFile));
printf("%s ",input_list_entry->key);
result=getline(&(input_list_entry->value), &len, pFile);
printf("%s \n",input_list_entry->value);
input_list_entry->next=(MapInputList*)malloc(sizeof(MapInputList));
input_list_entry=input_list_entry->next;
init_map_input_list(input_list_entry);
}
buffer_used=buffer_used+file_size;
}
else
printf("Buffer full!!\n");
file_list_entry=file_list_entry->next;
fclose(pFile);
}
}
}
/*
 * Scan directory `path` for regular files, build spec->map_file_list from
 * them (terminated by a sentinel node with filename == NULL), then split
 * the files into map input records via map_input_split().
 *
 * Fixes over the original: every list node is initialised before use (the
 * original left filename/next of freshly malloc'd nodes indeterminate, so
 * an empty directory — or the final sentinel node — was read
 * uninitialised by map_input_split), opendir() failure is reported
 * instead of dereferencing NULL, and the directory handle is closed.
 *
 * NOTE(review): filenames are built as path + d_name with no '/' inserted,
 * so `path` is expected to end with a slash — confirm with callers.
 */
void add_input(char *path,MapReduceSpec* spec){
MapFileList* plist;
plist=(MapFileList*)malloc(sizeof(MapFileList));
init_map_file_list(plist);
spec->map_file_list=plist;
struct dirent* entry = NULL;
DIR *pDir;
pDir=opendir(path);
if(pDir==NULL){
fputs("Cannot open input directory\n",stderr);
exit(1);
}
while((entry=readdir(pDir))!=NULL){
if(entry->d_type==DT_REG){
plist->filename=(char*)malloc(strlen(path)+strlen(entry->d_name)+1);
strcpy(plist->filename,path);
strcat(plist->filename,entry->d_name);
plist->next=(MapFileList*)malloc(sizeof(MapFileList));
plist=plist->next;
init_map_file_list(plist);
}
}
closedir(pDir);
map_input_split(spec);
}
/*
 * Entry point: takes one argument (the input directory), builds the spec,
 * and runs the input-split stage.
 * Fixes over the original: missing-argument check (argv[1] was
 * dereferenced unconditionally) and the spec is initialised before use
 * (its configuration fields were previously left indeterminate).
 */
int main(int argc, char **argv){
if(argc<2){
fprintf(stderr,"usage: %s <input-directory>\n",argv[0]);
return 1;
}
MapReduceSpec* spec=(MapReduceSpec*)malloc(sizeof(MapReduceSpec));
init_mapreduce_spec(spec);
add_input(argv[1],spec);
free(spec);
return 0;
}
|
13,867 | /*
* Copyright (C) 2010, Florian Kummer, Technische Universitaet Darmstadt, Fachgebiet fuer Stroemungsmechanik
*
* Use, modification and distribution is subject to the Boost Software
* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Authors: Christoph Busold
*
*/
// In-place scalar multiply: vector[i] *= alpha for i in [0, size).
// One element per thread; threads past the end return immediately.
extern "C" __global__ void scale(double* vector, double alpha, unsigned int size) {
	unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x;
	if(gid >= size) {
		return;
	}
	vector[gid] *= alpha;
}
// axpy accumulate: x[i] += alpha * y[i] for the first `size` elements.
extern "C" __global__ void acc(double* x, double* y, double alpha, int size) {
	int gid = threadIdx.x + blockIdx.x * blockDim.x;
	if(gid < size) {
		x[gid] = x[gid] + y[gid] * alpha;
	}
}
// Per-block partial sum of squares for a Euclidean norm: each thread folds
// two elements (idx and idx + blockDim.x) into dynamic shared memory, a
// tree reduction combines them, and thread 0 writes the block's partial to
// result[blockIdx.x]. The CALLER must sum the partials across blocks and
// take the square root to finish the 2-norm. Launch with
// blockDim.x * sizeof(double) bytes of dynamic shared memory.
// NOTE(review): the halving loop assumes blockDim.x is a power of two —
// otherwise some lanes are never folded in; confirm launch configuration.
extern "C" __global__ void dnrm2(double* vector, double* result, int size) {
extern __shared__ double sdata[];
int tid = threadIdx.x;
// Each block covers 2*blockDim.x consecutive elements.
int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
double value;
sdata[tid] = 0.0;
if(idx < size) {
value = vector[idx];
sdata[tid] += value * value;
}
if(idx + blockDim.x < size) {
value = vector[idx + blockDim.x];
sdata[tid] += value * value;
}
__syncthreads();
// Standard shared-memory tree reduction.
for(int s = blockDim.x / 2; s > 0; s >>= 1) {
if(tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if(tid == 0) {
result[blockIdx.x] = sdata[0];
}
}
// Per-block partial dot product: each thread accumulates two products
// (idx and idx + blockDim.x) into dynamic shared memory, a tree reduction
// combines them, and thread 0 writes the block's partial to
// result[blockIdx.x]. The CALLER must sum the partials across blocks.
// Launch with blockDim.x * sizeof(double) bytes of dynamic shared memory.
// NOTE(review): as with dnrm2, the reduction assumes blockDim.x is a
// power of two — confirm launch configuration.
extern "C" __global__ void innerprod(double* x, double* y, double* result, int size) {
extern __shared__ double sdata[];
int tid = threadIdx.x;
// Each block covers 2*blockDim.x consecutive elements.
int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
sdata[tid] = 0.0;
if(idx < size) {
sdata[tid] += x[idx] * y[idx];
}
if(idx + blockDim.x < size) {
sdata[tid] += x[idx + blockDim.x] * y[idx + blockDim.x];
}
__syncthreads();
// Standard shared-memory tree reduction.
for(int s = blockDim.x / 2; s > 0; s >>= 1) {
if(tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if(tid == 0) {
result[blockIdx.x] = sdata[0];
}
}
// Element-wise (Hadamard) multiply in place: x[i] *= y[i].
extern "C" __global__ void mew(double* x, double* y, int size) {
	int gid = threadIdx.x + blockIdx.x * blockDim.x;
	if(gid >= size) {
		return;
	}
	x[gid] = x[gid] * y[gid];
}
// Gather for halo exchange: sendBuffer[i] = data[indices[i]], i in [0, size).
extern "C" __global__ void fillSendBuffer(double* sendBuffer, int* indices, double* data, int size) {
	int gid = threadIdx.x + blockDim.x * blockIdx.x;
	if(gid >= size) {
		return;
	}
	int src = indices[gid];
	sendBuffer[gid] = data[src];
}
|
13,868 | #include <stdio.h>
#define N 10
#define M 10
#define ThreadsPerBlock 10
#define NumBlocks (ThreadsPerBlock + (N*M))/ThreadsPerBlock
// "Valid"-mode 2D convolution (no kernel flip, so strictly a
// cross-correlation): each thread computes one pixel of the
// max_row x max_col output from a k_size x k_size window of d_matrix.
// Threads outside the output bounds do nothing.
// NOTE(review): size_x is used as the input row stride while size_y is
// unused — correct only while the input is indexed row-major with width
// size_x; confirm against the host layout.
__global__ void valid_convolution(float *d_kernel, int k_size, float *d_matrix, int size_x, int size_y, float *d_conv, int max_row, int max_col){
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
if(max_row > row && max_col > col){
d_conv[col + row*max_col] = 0;
// Accumulate the windowed sum of products directly in global memory.
for(int k_row = 0; k_row < k_size; k_row ++){
for(int k_col = 0; k_col < k_size ; k_col ++){
d_conv[col+ row*max_col] +=
d_kernel[k_col +
(k_row*k_size)] *
d_matrix[(col+k_col) + (row+k_row)*size_x];
// printf("row %i col %i d_conv[] = %f \n", row, col, d_conv[col+ row*max_col]);
}
}
}
}
// Print an n x n row-major matrix, one decimal place, tab separated,
// followed by a trailing blank line.
void print_mat(float *mat, int n){
	for (int r = 0; r < n; r++){
		for (int c = 0; c < n; c++)
			printf("%.1f\t", mat[r*n+c]);
		printf("\n");
	}
	printf("\n");
}
// Fill an n x n matrix so that the entry at flat index (i + j*n) holds
// i + j (a symmetric ramp pattern).
void fill_mat(float *mat, int n){
	for (int j = 0; j < n; j++)
		for (int i = 0; i < n; i++)
			mat[j*n + i] = (float)(i + j);
}
/*
 * Host driver: builds a k_size x k_size kernel and an N x M input matrix,
 * runs valid_convolution on the GPU, and prints the result.
 * The blocking cudaMemcpy back to the host also synchronises with the
 * kernel launch.
 * NOTE(review): h_conv is max_row x max_col but is printed as a
 * max_col x max_col square — valid only because N == M here.
 */
int main(){
float *h_kernel, *h_matrix, *h_conv;
float *d_kernel, *d_matrix, *d_conv;
int k_size = 5;
int size_x = N;
int size_y = M;
// "Valid" convolution output shrinks by (k_size/2)*2 in each dimension.
int max_row = size_x - (k_size/2)*2;
int max_col = size_y - (k_size/2)*2;
h_kernel = (float *)malloc(sizeof(float)*k_size*k_size);
h_matrix = (float *)malloc(sizeof(float)*size_x*size_y);
h_conv = (float *)malloc(sizeof(float)*max_row*max_col);
fill_mat(h_kernel, k_size);
fill_mat(h_matrix, size_x);
print_mat(h_kernel, k_size);
print_mat(h_matrix, size_x);
cudaMalloc((void**)&d_kernel,sizeof(float)*k_size*k_size);
cudaMalloc((void**)&d_matrix,sizeof(float)*size_x*size_y);
cudaMalloc((void**)&d_conv,sizeof(float)*max_row*max_col);
cudaMemcpy(d_kernel, h_kernel,sizeof(float)*k_size*k_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_matrix, h_matrix,sizeof(float)*size_x*size_y, cudaMemcpyHostToDevice);
dim3 Blocks(NumBlocks,NumBlocks);
dim3 Threads(ThreadsPerBlock,ThreadsPerBlock);
//printf("Blocks %i \nThreads %i \n", NumBlocks, ThreadsPerBlock);
valid_convolution<<<Blocks, Threads>>>(d_kernel, k_size, d_matrix, size_x, size_y, d_conv, max_row, max_col);
cudaMemcpy(h_conv, d_conv,sizeof(float)*max_row*max_col, cudaMemcpyDeviceToHost);
print_mat(h_conv, max_col);
free(h_kernel);
free(h_conv);
free(h_matrix);
cudaFree(d_kernel);
cudaFree(d_conv);
cudaFree(d_matrix);
}
13,869 | /* 32-bit (int) largest prime finder.
Atomic reduction solution.
*/
#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
// Range of k-numbers for primes search:
#define KMIN 100000000
// Should be smaller than 357,913,941 (because we are using signed int)
#define KMAX 100100000
// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 128
// Number of blocks to run:
#define NBLOCKS (KMAX-KMIN+BLOCK_SIZE)/BLOCK_SIZE
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
// It messes up with y!
/* Compute *result = (*x - *y) in seconds as a double.
 * Returns 1 if the difference is negative, 0 otherwise.
 * WARNING: normalises (mutates) *y while carrying microseconds, as the
 * comment above the declaration warns — pass a copy if y must survive. */
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
__device__ int d_xmax;
// Kernel(s) should go here:
// The kernel:
// Primality test of x = 6*k + j, where k spans [KMIN, KMAX] along the 1D
// grid dimension and j is -1 for blockIdx.y == 0 and +1 for blockIdx.y == 1
// (every prime > 3 has the form 6k +/- 1). Trial division by odd numbers
// up to ceil(sqrt(x)); surviving candidates fold into the global maximum
// d_xmax with atomicMax.
__global__ void MyKernel ()
{
int x, y, ymax;
// Global index is shifted by KMIN:
int k = KMIN + threadIdx.x + blockDim.x * blockIdx.x;
if (k > KMAX)
return;
// Maps blockIdx.y {0,1} onto j = {-1,+1}:
int j = 2*blockIdx.y - 1;
// Prime candidate:
x = 6*k + j;
// We should be dividing by numbers up to sqrt(x):
ymax = (int)ceil(sqrt((double)x));
// Primality test (6k+/-1 is never divisible by 2 or 3, so odd divisors
// from 3 upward suffice):
for (y=3; y<=ymax; y=y+2)
{
// To be a success, the modulus should not be equal to zero:
if (x%y == 0)
return;
}
// We get here only if x is a prime number
// Storing the globally largest prime:
atomicMax (&d_xmax, x);
return;
}
/*
 * Host driver: verifies a CUDA device exists, launches MyKernel over an
 * (NBLOCKS x 2) grid to test every 6k+/-1 candidate in [KMIN, KMAX],
 * copies the largest prime back from the __device__ variable d_xmax, and
 * prints it with the elapsed wall-clock time.
 * NOTE(review): d_xmax is never explicitly initialised — this relies on
 * zero-initialisation of device globals; errors are checked only via the
 * raw int return codes.
 */
int main (int argc,char **argv)
{
struct timeval tdr0, tdr1, tdr;
double restime;
int devid, devcount, error, xmax;
if (BLOCK_SIZE>1024)
{
printf ("Bad BLOCK_SIZE: %d\n", BLOCK_SIZE);
exit (1);
}
/* find number of device in current "context" */
cudaGetDevice(&devid);
/* find how many devices are available */
if (cudaGetDeviceCount(&devcount) || devcount==0)
{
printf ("No CUDA devices!\n");
exit (1);
}
else
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties (&deviceProp, devid);
printf ("Device count, devid: %d %d\n", devcount, devid);
printf ("Device: %s\n", deviceProp.name);
printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
}
//--------------------------------------------------------------------------------
// Drain any pending work/errors before timing starts:
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr0, NULL);
// It is very convenient to create blocks on a 2D grid, with the second dimension
// of size two corresponding to "-1" and "+1" cases:
dim3 Nblocks (NBLOCKS, 2, 1);
// The kernel call:
MyKernel <<<Nblocks, BLOCK_SIZE>>> ();
// Copying the result back to host (blocking, so it also waits for the kernel):
if (error = cudaMemcpyFromSymbol (&xmax, d_xmax, sizeof(int), 0, cudaMemcpyDeviceToHost))
{
printf ("Error %d\n", error);
exit (error);
}
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr1, NULL);
// timeval_subtract mutates its third argument, so pass the copy tdr:
tdr = tdr0;
timeval_subtract (&restime, &tdr1, &tdr);
printf ("%d\n", xmax);
printf ("Time: %e\n", restime);
//--------------------------------------------------------------------------------
return 0;
}
|
13,870 | #include <cuda.h>
#include <stdio.h>
// Dump a rows x columns row-major matrix to stdout, tab separated, with a
// trailing blank line.
void printMatrix(float *matrix, int rows, int columns)
{
	for (int r = 0; r < rows; ++r) {
		for (int c = 0; c < columns; ++c)
			printf("%g\t", matrix[r * columns + c]);
		printf("\n");
	}
	printf("\n");
}
// Print an N x N row-major matrix to stdout in the same tab-separated
// format as printMatrix.
void Output(float* a, int N)
{
	for (int row = 0; row < N; ++row) {
		for (int col = 0; col < N; ++col)
			fprintf(stdout, "%g\t", a[row * N + col]);
		fprintf(stdout, "\n");
	}
	fprintf(stdout, "\n");
}
// Fill matrix[i] = i over a 1D grid; the launch must supply exactly one
// thread per element (no bounds check).
__global__ void initMatrix_1D(float *matrix)
{
	int gid = blockDim.x * blockIdx.x + threadIdx.x;
	matrix[gid] = (float)gid;
}
// Fill a 2D matrix with its own linear index: using the total grid width
// in threads as the row stride, each element receives col + row * stride.
// The launch grid must cover the matrix exactly (no bounds check).
__global__ void initMatrix(float *matrix)
{
	int col = threadIdx.x + blockDim.x * blockIdx.x;
	int row = threadIdx.y + blockDim.y * blockIdx.y;
	int stride = gridDim.x * blockDim.x;
	int linear = col + row * stride;
	matrix[linear] = (float) linear;
}
// Out-of-place transpose of an N x N matrix. The launch grid must cover
// the matrix exactly (no bounds check).
__global__ void transp(float *matrix, float *matrix_t, int N)
{
	int col = threadIdx.x + blockDim.x * blockIdx.x;
	int row = threadIdx.y + blockDim.y * blockIdx.y;
	matrix_t[row * N + col] = matrix[col * N + row];
}
/*
 * Usage: prog [N [block_x]] — fills an N x N device matrix with its linear
 * indices, prints it, transposes it with transp(), and prints the result.
 * NOTE(review): assumes block_x divides N (grid = N/block_x with no
 * remainder handling) and no CUDA call is error-checked.
 */
int main(int argc, char *argv[])
{
int N = (argc > 1) ? atoi(argv[1]) : 4;
int size_matrix = N * N;
int block_x = (argc > 2) ? atoi(argv[2]) : 1;
int block_y = block_x;
float *dmatrix1, *hmatrix1;
float *dmatrix2, *hmatrix2;
cudaMalloc((void**) &dmatrix1, size_matrix * sizeof(float));
cudaMalloc((void**) &dmatrix2, size_matrix * sizeof(float));
hmatrix1 = (float*) calloc(size_matrix, sizeof(float));
hmatrix2 = (float*) calloc(size_matrix, sizeof(float));
dim3 dimGrid = dim3(N / block_x, N / block_y, 1);
dim3 dimBlock = dim3(block_x, block_y, 1);
printf("Size matrix(%dx%d): %d\n", N, N, N * N);
printf("gridDim.x = %d gridDim.y = %d\n", dimGrid.x, dimGrid.y);
printf("blockDim.x = %d blockDim.y = %d\n", dimBlock.x, dimBlock.y);
initMatrix<<<dimGrid, dimBlock>>>(dmatrix1);
cudaDeviceSynchronize();
cudaMemcpy(hmatrix1, dmatrix1, size_matrix * sizeof(float), cudaMemcpyDeviceToHost);
Output(hmatrix1, N);
transp<<<dimGrid, dimBlock>>>(dmatrix1, dmatrix2, N);
cudaDeviceSynchronize();
cudaMemcpy(hmatrix2, dmatrix2, size_matrix * sizeof(float), cudaMemcpyDeviceToHost);
Output(hmatrix2, N);
#if 0
// Disabled CPU reference transpose for manual verification.
float *test_matrix = (float*) calloc(size_matrix, sizeof(float));
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
test_matrix[j * N + i] = hmatrix1[i * N + j];
}
}
Output(test_matrix, N);
free(test_matrix);
#endif
cudaFree(dmatrix1);
cudaFree(dmatrix2);
free(hmatrix1);
free(hmatrix2);
return 0;
}
|
13,871 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// Return the smaller of two ints.
__device__ int small_helper(int x, int y){
	return (x < y) ? x : y;
}//end helper function
/*
 * One hop of a segmented strongest-neighbor scan: for each edge i, combine
 * (oldDst[i], oldWeight[i]) with the entry `distance` positions back when
 * both belong to the same segment (same src), keeping the larger weight
 * and, on ties, the smaller destination. Sets *madeChanges when any
 * destination changed so the host can iterate until a fixed point.
 *
 * Fixes over the original: entries with i < distance previously read
 * src[i-distance] / oldWeight[i-distance] out of bounds — they now simply
 * carry their values forward (a segment head has no partner); the dead
 * `tid >= numEdges` check inside the grid-stride loop was removed (the
 * loop condition already bounds i).
 */
__global__ void strongestNeighborScan_gpu(int * src, int * oldDst, int * newDst, int * oldWeight, int * newWeight, int * madeChanges, int distance, int numEdges) {
	int tid = blockIdx.x * blockDim.x + threadIdx.x; // global thread index
	int total_threads = blockDim.x * gridDim.x;
	// Grid-stride loop: each thread handles edges tid, tid+T, tid+2T, ...
	for (int i = tid; i < numEdges; i += total_threads) {
		if (i >= distance && src[i] == src[i-distance]) { // same segment, partner exists
			if (oldWeight[i] == oldWeight[i-distance]) {
				// Tie on weight: deterministically keep the smaller destination.
				newDst[i] = small_helper(oldDst[i], oldDst[i-distance]);
				newWeight[i] = oldWeight[i];
			} else {
				// Keep the stronger (larger-weight) neighbor.
				newWeight[i] = max(oldWeight[i], oldWeight[i-distance]);
				newDst[i] = (newWeight[i] == oldWeight[i]) ? oldDst[i] : oldDst[i-distance];
			}
		} else {
			// Different segment, or no partner in range: copy through.
			newWeight[i] = oldWeight[i];
			newDst[i] = oldDst[i];
		}
		// Signal the host that another pass is needed.
		if (oldDst[i] != newDst[i]) {
			*madeChanges = 1;
		}
	}
}//end function
13,872 | #include "includes.h"
#define DOUBLE
#ifdef DOUBLE
#define Complex cufftDoubleComplex
#define Real double
#define Transform CUFFT_Z2Z
#define TransformExec cufftExecZ2Z
#else
#define Complex cufftComplex
#define Real float
#define Transform CUFFT_C2C
#define TransformExec cufftExecC2C
#endif
#define TILE_DIM 8
// synchronize blocks
// Copy each source row (spitch values) into the TAIL of a wider
// destination row (dpitch values), zero-filling the first spitch slots.
// One block per row (2D block grid flattened); threadIdx.x is the
// destination column.
__global__ void spread(Real* src, unsigned int spitch, Real* dst, unsigned int dpitch)
{
	unsigned int row = blockIdx.y * gridDim.x + blockIdx.x;
	unsigned int col = threadIdx.x;
	if (col >= dpitch) {
		return;
	}
	Real value = 0.0;
	if (col >= spitch) {
		value = src[row * spitch + (col - spitch)];
	}
	dst[row * dpitch + col] = value;
}
13,873 | #include "includes.h"
// 3x3 sharpen stencil (centre 5, 4-neighbours -1) over an image processed
// in 32x32 tiles (one tile per block), with the result clamped to [0, 255].
//
// Fix: the bottom-row taps kernel[2][1] and kernel[2][2] both read
// X[x+width-1] in the original; they must read the pixel directly below
// (X[x+width]) and below-right (X[x+width+1]) respectively.
//
// NOTE(review): border pixels still read outside the image; the
// commented-out guard below would skip them — confirm the caller pads the
// input or re-enable it.
__global__ void stencil_ld(unsigned *X, unsigned *out, int width, int height){
int kernel[3][3] = { 0, -1, 0,
-1, 5, -1,
0, -1, 0};
int cikti;
int x = blockIdx.y*32*width + blockIdx.x*32 + threadIdx.y*width + threadIdx.x; //current pixel
//if(x/width<1 || x/width>height-1 || x%width == width-1 || x%width == 1) return; // kenar noktalarinda
cikti =(kernel[0][0]*X[x-width-1] +
kernel[0][1]*X[x-width] +
kernel[0][2]*X[x-width+1] +
kernel[1][0]*X[x-1] +
kernel[1][1]*X[x] +
kernel[1][2]*X[x+1] +
kernel[2][0]*X[x+width-1] +
kernel[2][1]*X[x+width] +
kernel[2][2]*X[x+width+1]);
// Clamp to the valid 8-bit pixel range.
if(cikti < 0) out[x] = 0;
else if(cikti > 255) out[x] = 255;
else out[x] = cikti;
}
13,874 | #include <inttypes.h>
#include <cfloat>
#include "gisp_util.cuh"
namespace sp{
// For each pixel (x, y), examine the 3x3 neighbourhood of superpixel cells
// around its home cell in the n_x x n_y cell grid and write the label of
// the cell with the largest density response (read from oR, one lysize
// slab per cell) into hard[y*hard_steps + x]. Writes -1 when no candidate
// cell is in range.
// NOTE(review): dranging() comes from gisp_util.cuh; the assumption that
// it yields the per-cell window origin (xxb, yyb) is inferred from usage —
// confirm against that header.
__global__ void kernel_extract_labels(
int32_t *hard, int hard_steps,
float * oR, int oR_steps, int oR_lysize,
int W, int H, int v_x, int v_y, int n_x, int n_y, int rl, int ru
){
// Neighbourhood radius in cells (3x3 window).
#define t_x 1
#define t_y 1
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
if (x >= W || y >= H){
return;
}
// Home cell of this pixel; clamp the last partial cell into range.
int ilabel_x = (x - rl) / v_x; if (ilabel_x == n_x) ilabel_x = n_x - 1;
int ilabel_y = (y - ru) / v_y; if (ilabel_y == n_y) ilabel_y = n_y - 1;
float max_dense = -FLT_MAX;
int final_label = -1;
for (int dy = -t_y; dy <= t_y; dy++){
for (int dx = -t_x; dx <= t_x; dx++){
const int al_x = ilabel_x + dx;
const int al_y = ilabel_y + dy;
if (al_x < 0 || al_y < 0 || al_x >= n_x || al_y >= n_y){
continue;
}
const int al_k = al_y*n_x + al_x;
int xxb, yyb;
dranging(xxb, yyb, al_x, al_y, v_x, v_y, rl, ru);
// Density of candidate cell al_k at this pixel's offset in its window.
float cur_dense = oR[(y - yyb)*oR_steps + x - xxb + al_k*oR_lysize];
if (max_dense < cur_dense){
max_dense = cur_dense;
final_label = al_k;
}
}
}
hard[y*hard_steps + x] = final_label;
#undef t_x
#undef t_y
}
// Host launcher for kernel_extract_labels: derives the cell-grid size
// (n_x x n_y) from the image size and cell strides, then launches one
// thread per pixel with 16x8 blocks.
// NOTE(review): the launch is not error-checked and not synchronised here;
// callers must synchronise before reading `hard`.
void gpu_extract_labels(
int32_t *hard, int hard_steps,
float * oR, int oR_steps, int oR_lysize,
int W, int H, int v_x, int v_y, int rl, int ru
){
int n_x = W / v_x;
int n_y = H / v_y;
#define _BX 16
#define _BY 8
dim3 blocks(_BX, _BY);
dim3 grids;
// Ceiling division so partial edge tiles are covered.
grids.x = (W + blocks.x - 1) / blocks.x;
grids.y = (H + blocks.y - 1) / blocks.y;
kernel_extract_labels<<<grids, blocks>>>(hard, hard_steps, oR, oR_steps, oR_lysize, W, H, v_x, v_y, n_x, n_y, rl, ru);
#undef _BX
#undef _BY
}
} |
13,875 | #include <stdio.h>
// Clamp `val` into the inclusive range [vMin, vMax].
template <typename T>
inline __device__
T clamp(T val, T vMin, T vMax) {
	T atLeast = max(val, vMin);
	return min(atLeast, vMax);
}
// Clamp each of the first `length` inputs into [100, 130].
// Single-block launch assumed: only threadIdx.x is used for indexing.
__global__
void kernel(int* input, int* output, int length) {
	int gid = threadIdx.x;
	if (gid < length) {
		output[gid] = clamp<int>(input[gid], 100, 130);
	}
}
/*
 * Clamps 10 sample ints into [100, 130] on the GPU and prints the
 * input -> output pairs. The kernel is launched with 256 threads but
 * guards on `length`, so only the first 10 do work; the blocking
 * cudaMemcpy back also synchronises with the launch.
 */
int main(int argc, char const *argv[])
{
int *d_input;
int *d_output;
int input[10] = { 10, 50, 100, 150, 131, 99, 155, 10000, 0, 100 };
int output[10];
cudaMalloc(&d_input, sizeof(int) * 10);
cudaMalloc(&d_output, sizeof(int) * 10);
cudaMemcpy(d_input, input, sizeof(int) * 10, cudaMemcpyHostToDevice);
kernel<<<1, 256>>>(d_input, d_output, 10);
cudaMemcpy(output, d_output, sizeof(int) * 10, cudaMemcpyDeviceToHost);
for (int i=0; i<10; ++i) {
printf("%d: %5d -> %5d\n", i, input[i], output[i]);
}
cudaFree(d_input);
cudaFree(d_output);
return 0;
}
|
13,876 | #include<stdio.h>
//#include"gputimer.h"
#define NUM_THREADS 100000
#define ARRAY_SIZE 10
#define BLOCK_WIDTH 1000
// Deliberately racy increment: many threads map onto the same array slot
// (i % ARRAY_SIZE) and perform an unsynchronised read-modify-write, so
// updates are lost. Kept as the "wrong" baseline against increment_atomic.
__global__ void increment_naive(int *g){
int i = blockIdx.x*blockDim.x + threadIdx.x;
i = i%ARRAY_SIZE;
g[i] = g[i]+1;
}
// Race-free counterpart of increment_naive: the read-modify-write on the
// shared slot is performed atomically.
__global__ void increment_atomic(int *g){
	int gid = blockDim.x * blockIdx.x + threadIdx.x;
	int slot = gid % ARRAY_SIZE;
	atomicAdd(g + slot, 1);
}
/*
 * Launches NUM_THREADS threads that all increment a tiny ARRAY_SIZE-slot
 * array via atomicAdd (the racy variant is left commented out for
 * comparison) and prints the final counts.
 * Fix: cudaMemset takes the device pointer itself; the original cast it
 * to (void**), which happened to work but was misleading.
 */
int main(){
//GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS,NUM_THREADS/BLOCK_WIDTH,ARRAY_SIZE);
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
int *d_array;
cudaMalloc((void **)&d_array,ARRAY_BYTES);
cudaMemset(d_array,0,ARRAY_BYTES);
//timer.start();
//increment_naive<<< NUM_THREADS/BLOCK_WIDTH,BLOCK_WIDTH >>>(d_array);
increment_atomic<<< NUM_THREADS/BLOCK_WIDTH,BLOCK_WIDTH >>>(d_array);
//timer.stop();
// Blocking copy: also synchronises with the kernel launch above.
cudaMemcpy(h_array,d_array,ARRAY_BYTES,cudaMemcpyDeviceToHost);
//printf("Time elapsed = %g ms\n",timer.Elapsed());
for(int i=0;i< ARRAY_SIZE ; i++) printf("%d ", h_array[i]);
cudaFree(d_array);
return 0;
}
|
13,877 | #include <cuda_runtime.h>
#include <getopt.h>
#include <iomanip>
#include <iostream>
#include <vector>
#include <algorithm>
/*
 * Prints the best CUDA "sm_XY" architecture string for this machine: by
 * default the LOWEST compute capability among devices (safe for all of
 * them), or the highest when -l is given. Exits non-zero when no device
 * reaches CC 3.0 or on any CUDA error.
 * NOTE(review): cudaGetDeviceCount's result is held in an int and compared
 * against cudaSuccess implicitly (== 0) — works, but cudaError_t would be
 * clearer.
 */
int main(int argc, char* argv[])
{
float min_cc = 3.0f;
signed char c = '\0';
bool latest_arch = false;
while ((c = getopt(argc, argv, "lh?")) != -1) {
switch (c) {
case 'l': latest_arch = true; break;
case '?':
case 'h':
default:
std::cout << "usage: " << argv[0] << std::endl << " -l {select latest cuda architecture} " << std::endl;
return -1;
}
}
int n_devices = 0;
int rc = cudaGetDeviceCount(&n_devices);
if (rc != cudaSuccess) {
cudaError_t error = cudaGetLastError();
std::cout << "CUDA error: " << cudaGetErrorString(error) << std::endl;
return rc;
}
// Collect (major, minor) compute capability of every device.
std::vector<std::pair<int, int>> arch(n_devices);
for (int cd = 0; cd < n_devices; ++cd) {
cudaDeviceProp dev;
int rc = cudaGetDeviceProperties(&dev, cd);
if (rc != cudaSuccess) {
cudaError_t error = cudaGetLastError();
std::cout << "CUDA error: " << cudaGetErrorString(error) << std::endl;
return rc;
}
else {
arch[cd] = {dev.major, dev.minor};
}
}
// Pick max (newest) or min (most compatible) capability across devices.
std::pair<int, int> best_cc {0, 0};
if (latest_arch) {
best_cc = *std::max_element(begin(arch), end(arch));
}
else {
best_cc = *std::min_element(begin(arch), end(arch));
}
if ((best_cc.first + best_cc.second / 10.f) < min_cc) {
std::cout << "Min Compute Capability of " << std::fixed << std::setprecision(1) << min_cc << " required; "
<< best_cc.first << "." << best_cc.second << " found.";
return 1;
}
else {
std::cout << "sm_" << best_cc.first << best_cc.second;
return 0;
}
}
|
13,878 | #include "includes.h"
#define FLOAT_N 3214212.01
// Subtract the per-column mean (d_mean[j]) from every element of row i of
// d_data. The matrix uses one-based storage: valid indices start at 1 and
// the leading dimension is (M+1).
// NOTE(review): the guard admits i == N+1; with N one-based data rows the
// expected bound would be i <= N — confirm against the host-side layout.
__global__ void calcdata(double* d_data, double* d_mean, int M, int N)
{
int j;
int i = blockDim.x * blockIdx.x + threadIdx.x+1; // rows are 1-based
if (i<=(N+1)) {
for (j = 1; j < (M+1); j++) {
d_data[i*(M+1) + j] -= d_mean[j];
}
}
}
13,879 | #include "includes.h"
// Copy an NCHW tensor `ip` into the interior of a spatially padded output
// `op` (Py rows / Px cols of padding on each side). One thread per input
// element; the flat thread id is decomposed into (n=i, c=j, h=k, w=l).
// NOTE(review): the border elements of `op` are never written here — the
// caller must have zeroed (or otherwise initialised) the output first.
__global__ void padding(int *op,int *ip,int N,int C,int H,int W,int Py,int Px){
unsigned int input_id = (blockIdx.x*gridDim.y + blockIdx.y + blockIdx.z*gridDim.x*gridDim.y)*blockDim.x + threadIdx.x;
int i = input_id/(C*H*W);
input_id = input_id%(C*H*W);
int j = input_id/(H*W);
input_id = input_id%(H*W);
int k = input_id/W;
int l = input_id%W;
*(op + i*C*(H + 2*Py)*(W + 2*Px) + j*(H + 2*Py)*(W + 2*Px) + (k + Py)*(W + 2*Px) + (l + Px)) = *(ip + i*C*H*W + j*H*W + k*W + l);
}
13,880 | #include <iostream>
#include <vector>
#include <cuda.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/device_free.h>
#include <ctime>
using namespace std;
/*
 * Prim's MST over a weighted undirected graph read from stdin
 * ("nodes edges" then one "u v w" triple per edge). Each round the
 * frontier-weight array is copied to the device and thrust::min_element
 * selects the cheapest crossing edge. Prints the MST weight and the
 * elapsed CPU time.
 * Fix over the original: V/E/W/parent/inMST are allocated with new[] and
 * were released with free(), which is undefined behaviour — they are now
 * released with delete[].
 */
int main(){
//freopen("graph.txt", "r", stdin);
// ======================== Input and Adj list formation ====================================
int nodes, edges;
cin>>nodes>>edges;
// create the adjancency list (both directions, since the graph is undirected)
vector<vector<pair<int,int> > > adjacency_list(nodes);
for(int i = 0; i < edges; ++i){
int node1, node2, weight;
cin>>node1>>node2>>weight;
adjacency_list[node1].push_back(make_pair(node2, weight));
adjacency_list[node2].push_back(make_pair(node1, weight));
}
// Compressed (CSR-style) adjacency: V = row offsets, E = neighbours, W = weights.
int * V = new int[nodes];
int * E = new int[2 * edges];
int * W = new int[2 * edges];
int cumulative_sum = 0, limit;
for(int i = 0; i < nodes; ++i){
V[i] = cumulative_sum;
limit = adjacency_list[i].size();
for(int j = 0; j < limit; ++j){
E[cumulative_sum + j] = adjacency_list[i][j].first;
W[cumulative_sum + j] = adjacency_list[i][j].second;
}
cumulative_sum += limit;
}
// ======================== Variables init ====================================
long long int edge_sum = 0;   // total weight of the MST
int current = 0;              // vertex most recently committed to the MST
int count = 0;                // number of committed vertices (minus the root)
int *parent = new int[nodes];
vector<int> weights(nodes);   // cheapest known edge into each frontier vertex
bool *inMST = new bool[nodes];
parent[0] = -1;
for(int i = 0; i < nodes; ++i) {
weights[i] = INT_MAX;
inMST[i] = false;
}
// Device mirror of the frontier weights for the thrust reduction.
thrust::device_vector<int> device_weights(weights.begin(), weights.end());
thrust::device_ptr<int> ptr = device_weights.data();
// ======================== Main code ====================================
clock_t begin = clock();
while(count < nodes-1){
// add current vertex to MST
++count;
inMST[current] = true;
// Relax every edge out of `current` against the frontier weights.
int len = adjacency_list[current].size();
for(int i = 0; i < len; ++i) {
int incoming_vertex = adjacency_list[current][i].first;
if(!inMST[incoming_vertex]) {
if(weights[incoming_vertex] > adjacency_list[current][i].second) {
weights[incoming_vertex] = adjacency_list[current][i].second;
parent[incoming_vertex] = current;
}
}
}
// Refresh the device copy and pick the cheapest crossing edge.
device_weights = weights;
int min_index = thrust::min_element(ptr, ptr + nodes) - ptr;
// NOTE(review): this overwrite discards the parent recorded during
// relaxation and can mis-report the tree structure (edge_sum is
// unaffected) — preserved for output compatibility.
parent[min_index] = current;
edge_sum += weights[min_index];
// Vertex is now inside the MST; remove it from the frontier.
weights[min_index] = INT_MAX;
current = min_index;
}
clock_t end = clock();
// ======================== Results ====================================
cout<<"Sum of Edges in MST: "<<edge_sum<<endl;
double elapsed_time = double(end - begin) / CLOCKS_PER_SEC;
cout<<"Execution time: "<<elapsed_time<<endl;
// ======================== Memory Deallocation ====================================
delete[] V; delete[] E; delete[] W;
delete[] parent; delete[] inMST;
return 0;
}
// Sample Input
/*
9 14
0 1 4
0 7 8
1 7 11
1 2 8
2 8 2
2 3 7
2 5 4
7 8 7
7 6 1
6 8 6
6 5 2
3 5 14
3 4 9
4 5 10
*/ |
13,881 | /*!
\file indirectCallDriver.cu
\author Andrew Kerr <arkerr@gatech.edu>
\brief demonstrates indirect function calling
*/
// Multiply-by-constant device functions used as indirect-call targets;
// __noinline__ keeps them as real functions so the function pointers taken
// in kernelEntry remain meaningful.
extern "C" __device__ __noinline__ int funcDouble(int a) {
	return 2 * a;
}
extern "C" __device__ __noinline__ int funcTriple(int a) {
	return 3 * a;
}
extern "C" __device__ __noinline__ int funcQuadruple(int a) {
	return 4 * a;
}
extern "C" __device__ __noinline__ int funcPentuple(int a) {
	return 5 * a;
}
// Demonstrates indirect device-function calls: each thread selects one of
// the four multiplier functions from (b + i) & 3 and invokes it through a
// function pointer, storing the result in A[i].
extern "C" __global__ void kernelEntry(int *A, int b) {
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	int (*filter)(int) = &funcDouble;   // selector 0
	switch ((b + i) & 3) {
	case 1: filter = &funcTriple;    break;
	case 2: filter = &funcQuadruple; break;
	case 3: filter = &funcPentuple;  break;
	default: break;
	}
	A[i] = filter(i);
}
|
13,882 | #include "includes.h"
// Tiled dense matrix multiply: Pd = Md * Nd for square Width x Width
// matrices. Each TILE_WIDTH x TILE_WIDTH thread block cooperatively stages
// one tile of Md and Nd in shared memory per iteration.
// NOTE(review): Width must be an exact multiple of TILE_WIDTH — there is no
// tail handling and no bounds check.
__global__ void MatrixMulKernelTiled(float* Md, float* Nd, float* Pd, int Width)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the Pd element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the Md and Nd tiles required to compute the Pd element
for (int m = 0; m < Width/TILE_WIDTH; ++m) {
// Collaborative loading of Md and Nd tiles into shared memory
Mds[ty][tx] = Md[Row*Width + (m*TILE_WIDTH + tx)];
Nds[ty][tx] = Nd[Col + (m*TILE_WIDTH + ty)*Width];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += Mds[ty][k] * Nds[k][tx];
// Barrier before the next iteration overwrites the tiles.
__syncthreads();
}
Pd[Row*Width+Col] = Pvalue;
}
13,883 | #include "includes.h"
// For each of *size points, add the free vector (length *length) to the
// point's coordinates in inputX and write the sum to output.
// inputY is accepted for signature compatibility but is not used here.
__global__ void DataPointMap(int *size, const double *inputX, const double *inputY, double *output, const double *inFreeArray, int *length) {
	const long gid = threadIdx.x + blockIdx.x * (long)blockDim.x;
	if (gid >= *size) {
		return;
	}
	const double *src = inputX + gid * *length;
	double *dst = output + gid * *length;
	for (long k = 0; k < *length; k++) {
		dst[k] = src[k] + inFreeArray[k];
	}
}
13,884 | #include <stdio.h>
// Single-block element-wise add: c[t] = a[t] + b[t] for thread t.
__global__ void matrix_add(float *c, float *a, float *b) {
	int t = threadIdx.x;
	c[t] = a[t] + b[t];
}
// CPU reference for matrix_add: element-wise sum of two length-`size`
// arrays into c.
void matrix_add_cpu(float *c, float *a, float *b, int size) {
	int i = 0;
	while (i < size) {
		c[i] = a[i] + b[i];
		++i;
	}
}
// Print each element of a length-`size` array as "m[i]=value", one per line.
void print_matrix(float *m, int size){
	for (int idx = 0; idx < size; ++idx)
		printf("m[%d]=%f\n", idx, m[idx]);
}
// Return true iff the first `size` elements of ref and gpu_rlt compare
// equal as floats.
bool compare (float *ref, float *gpu_rlt, int size){
	for (int i = 0; i < size; ++i)
		if (!(ref[i] == gpu_rlt[i]))
			return false;
	return true;
}
/*
 * Element-wise adds two 96-float arrays on the GPU (one block, one thread
 * per element), prints the result, verifies it against the CPU reference,
 * and reports Correct/Wrong.
 * Fixes over the original: the three device buffers were leaked (no
 * cudaFree) and main had no explicit return.
 */
int main(int argc, char** argv){
const int ARRAY_SIZE = 96;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//generate matrix A and B
float A[ARRAY_SIZE];
float B[ARRAY_SIZE];
for (int i=0; i<ARRAY_SIZE; i++){
A[i] = float(i);
B[i] = float(i);
}
//for results
float C[ARRAY_SIZE];
float ref_results[ARRAY_SIZE];
// alloc memory on gpu
float *d_c, *d_a, *d_b;
cudaMalloc((void**) &d_c, ARRAY_BYTES);
cudaMalloc((void**) &d_a, ARRAY_BYTES);
cudaMalloc((void**) &d_b, ARRAY_BYTES);
// copy A and B to gpu
cudaMemcpy(d_a, A, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, B, ARRAY_BYTES, cudaMemcpyHostToDevice);
// invoke kernel; the blocking cudaMemcpy below also synchronises with it
matrix_add<<<1, ARRAY_SIZE>>>(d_c, d_a, d_b);
cudaMemcpy(C, d_c, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_matrix(C, ARRAY_SIZE);
matrix_add_cpu(ref_results, A, B, ARRAY_SIZE);
if (compare(ref_results, C, ARRAY_SIZE)) {
printf("Correct\n");
} else {
printf ("Wrong\n");
}
// release device buffers (leaked in the original)
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
13,885 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Your job is to implemment a bitonic sort. A description of the bitonic sort
// can be see at:
// http://en.wikipedia.org/wiki/Bitonic_sort
// Compare-and-swap: after the call, data[pos1] <= data[pos2].
__device__
void compare(float *data, int pos1, int pos2){
	float lo = data[pos1];
	float hi = data[pos2];
	if(lo > hi){
		data[pos1] = hi;
		data[pos2] = lo;
	}
}
// Bitonic mergesort of exactly 64 floats. Must be launched as
// <<<1, 64, 64*sizeof(float)>>>: all 64 threads stage/store one element
// each, while threads 0..31 run the compare-swap network on the shared
// copy (each handles two elements per step).
// NOTE(review): the substage loop has no barrier between dependent
// compare-swaps; with 32 active lanes this relies on implicit warp
// synchrony, which is not guaranteed under Volta+ independent thread
// scheduling — a __syncwarp() per substage would make it safe. Verify on
// the target architecture.
__global__ void batcherBitonicMergesort64(float * d_out, const float * d_in)
{
// you are guaranteed this is called with <<<1, 64, 64*4>>>
extern __shared__ float sdata[];
int tid = threadIdx.x;
sdata[tid] = d_in[tid];
__syncthreads();
if(tid < 32)
for (int stage = 1; stage <= 6; stage++)
{
//MERGE: compare mirrored pairs within each 2^stage group
int n = (int) pow((float)2,(float)stage);
int group = (2*tid)/n;
int i = tid%(n/2);
compare(sdata,n*group+i, n*group+n-i-1);
__syncthreads();
// Clean-up substages: half-distance compare-swaps down to distance 1
for (int substage = stage -1; substage > 0; substage--)
{
int n = (int) pow((float)2,(float)substage);
int group = (2*tid)/n;
int i = tid%(n/2);
compare(sdata,n*group+i, n*group+i+n/2);
}
}
__syncthreads();
d_out[tid] = sdata[tid];
}
// qsort comparator for floats: negative/zero/positive for <, ==, >.
int compareFloat (const void * a, const void * b)
{
float lhs = *(const float*)a;
float rhs = *(const float*)b;
if (lhs < rhs) return -1;
if (lhs > rhs) return 1;
return 0;
}
// Print index, input value, and output value side by side for n elements.
void printArray(float* in, float* out, int n){
printf("\n");
for (int idx = 0; idx != n; ++idx)
printf("%d - (%f - %f) \n", idx, in[idx], out[idx]);
}
// Driver: sorts 64 random floats on the GPU with the bitonic kernel and
// checks the result against a qsort'd CPU reference.
// Fix vs. original: h_sorted was computed but never used -- the GPU output
// is now actually verified against it. Also returns 0 explicitly.
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// Generate the input array on the host plus a sorted reference copy.
float h_in[ARRAY_SIZE];
float h_sorted[ARRAY_SIZE];
float h_out[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [0, 1]
h_in[i] = (float)random()/(float)RAND_MAX;
h_sorted[i] = h_in[i];
}
qsort(h_sorted, ARRAY_SIZE, sizeof(float), compareFloat);
// declare and allocate GPU memory
float * d_in, * d_out;
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
// transfer the input array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// Kernel contract: 1 block, 64 threads, 64 floats of dynamic shared memory.
batcherBitonicMergesort64<<<1, ARRAY_SIZE, ARRAY_SIZE * sizeof(float)>>>(d_out, d_in);
// copy back the sorted array from the GPU (blocking, so it also syncs)
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
printArray(h_in, h_out, ARRAY_SIZE);
// Fix: verify the GPU sort against the qsort reference.
bool sortedOk = true;
for (int i = 0; i < ARRAY_SIZE; i++) {
if (h_out[i] != h_sorted[i]) { sortedOk = false; break; }
}
printf(sortedOk ? "Sorted correctly\n" : "Sort FAILED\n");
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
13,886 | /*#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include<opencv2\imgproc.hpp>
#include <iostream>
#define N 1024*1024
#define FullSize 20*N
__global__ void kernel(unsigned int* a, unsigned int* b, unsigned int* c)
{
unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N)
{
c[tid] = a[tid] + b[tid];
}
}
int main()
{
cudaDeviceProp prop;
int whichDevice;
cudaGetDevice(&whichDevice);
cudaGetDeviceProperties(&prop,whichDevice);
if (!prop.deviceOverlap)
printf("Device overlap not supported by nVidia device, speedup from streams not possible\n");
unsigned int *h_a, *h_b, *h_c;
unsigned int *d_a, *d_b, *d_c;
//streamcreated and initialized
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaMalloc((void**)&d_a, sizeof(unsigned int)*N);
cudaMalloc((void**)&d_b, sizeof(unsigned int)*N);
cudaMalloc((void**)&d_c, sizeof(unsigned int)*N);
//un-paged memory is essential for stream access
cudaHostAlloc((void**)&h_a,sizeof(unsigned int)*FullSize, cudaHostAllocDefault);
cudaHostAlloc((void**)&h_b, sizeof(unsigned int)*FullSize,cudaHostAllocDefault);
cudaHostAlloc((void**)&h_c, sizeof(unsigned int)*FullSize, cudaHostAllocDefault);
unsigned int h_count = 0;
for (int i = 0; i < FullSize; i++)
{
h_a[i] = rand()%30;
h_b[i] = rand()%30;
h_count += (h_a[i] + h_b[i]);
}
float elapsedTime = 0.0f;
//timing the GPU
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//sending data in chunks in streams... the need for pinned memor is understandable now.
for (int i = 0; i < FullSize; i += N)
{
cudaMemcpyAsync(d_a, h_a+i, sizeof(unsigned int)*N, cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(d_b, h_b+i, sizeof(unsigned int)*N, cudaMemcpyHostToDevice, stream);
kernel << <N / 256, 256, 0, stream >> >(d_a, d_b, d_c);
cudaMemcpyAsync(h_c+i, d_c, sizeof(unsigned int)*N, cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Total Time: %3.1f\n", elapsedTime);
unsigned int h_newCount = 0;
for (int i = 0; i < FullSize; i++)
{
h_newCount += h_c[i];
}
(h_newCount == h_count) ? printf("True\n") : printf("False\n");
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
*/ |
13,887 | #include "cuda_runtime.h"
#include "vecAdd.cuh"
// Elementwise output = input1 + input2 using a grid-stride loop, so any
// grid/block configuration covers all `size` elements correctly.
__global__ void vecAdd (float *input1, float *input2, float *output, int size) {
const int stride = blockDim.x * gridDim.x;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride) {
output[i] = input1[i] + input2[i];
}
}
// Host wrapper: copies both inputs to the device, runs vecAdd, and copies
// the result back. Synchronous from the caller's perspective.
// Fix vs. original: the launch was <<<1, size>>>, which silently fails for
// size > 1024 (exceeds the per-block thread limit) and is an invalid
// configuration for size == 0. The kernel uses a grid-stride loop, so a
// fixed block size with a ceil-div grid is always correct.
void cudaVecAdd (float *input1, float *input2, float *output, int size) {
if (size <= 0) return; // nothing to do; avoids a 0-thread launch
float* dev_input1;
float* dev_input2;
float* dev_output;
cudaMalloc((void**)&dev_input1, size * sizeof(float));
cudaMalloc((void**)&dev_input2, size * sizeof(float));
cudaMalloc((void**)&dev_output, size * sizeof(float));
cudaMemcpy(dev_input1, input1, size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_input2, input2, size * sizeof(float), cudaMemcpyHostToDevice);
const int threads = 256;
const int blocks = (size + threads - 1) / threads; // ceil-div
vecAdd<<<blocks, threads>>>(dev_input1, dev_input2, dev_output, size);
cudaDeviceSynchronize();
cudaMemcpy(output, dev_output, size * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev_input1);
cudaFree(dev_input2);
cudaFree(dev_output);
}
|
13,888 | __device__ void f() {
// NOTE(review): a[50] is far out of bounds for a[5]. This whole row reads
// like a deliberate out-of-bounds test fixture (e.g. for a bounds checker /
// compute-sanitizer); confirm intent before "fixing".
int a[5];
a[4] = 42;
a[50] = 42; // intentional OOB write
}
__device__ void with_args(int i) {
// Writes a[i] with no bounds check: the caller controls whether this is in
// bounds (i <= 4) or deliberately out of bounds (see test_call_user_functions).
int a[5];
a[i] = 42;
}
__device__ void with_buffer_arg(int *a, int i) {
// Unchecked indexed store into a caller-supplied buffer.
a[i] = 42;
}
__global__ void test_call_user_functions() {
// NOTE(review): each pair below does one in-bounds access followed by one
// access exactly one past the end of a 5-element array. This appears to be
// an intentional out-of-bounds test kernel (sanitizer/bounds-checker
// fixture) -- do not "fix" the OOB accesses without confirming intent.
int a[5];
a[4] = 42;
a[5] = 42; // OOB: direct local-array overrun
f();
with_args(4);
with_args(5); // OOB inside with_args
with_buffer_arg(a, 4);
with_buffer_arg(a, 5); // OOB store through a pointer argument
} |
13,889 | #include "includes.h"
// Atomically adds `seed` to one element of `data` per thread. No bounds
// guard: the launch must cover exactly the length of `data`.
__global__ void child_kernel(int *data, int seed)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
atomicAdd(data + tid, seed);
}
13,890 | #include "includes.h"
__global__ void scan(float * input, float * output, int len) {
// Work-efficient (Brent-Kung style) scan of one 2*blockDim.x-wide segment
// of `input`, staged entirely in shared memory.
// NOTE(review): sh_input holds 2048 floats, so this assumes
// blockDim.x <= 1024 -- confirm against the launch configuration.
//@@ Load a segment of the input vector into shared memory
__shared__ float sh_input[2048];
int tx = threadIdx.x;
int tx2= tx + blockDim.x;
int bdimx = blockDim.x;
int i = 2*blockIdx.x*blockDim.x + tx;
int start = 2*blockIdx.x*blockDim.x;
int Col1 = start + tx;
int Col2 = start + bdimx + tx;
// Each thread loads two elements; out-of-range slots are zero-filled so the
// tree traversals below can run without per-step bounds checks.
if( Col2 < len)
{
// Collaborative loading of A
sh_input[tx] = input[ Col1];
sh_input[tx2] = input[ Col2];
}
else if ( Col1 < len)
{ // Control divergence at the edge
sh_input[tx] = input[ Col1];
sh_input[tx2]= 0.0f;
}
else
{ // Control divergence at the edge
sh_input[tx] = 0.0f;
sh_input[tx2]= 0.0f;
}
__syncthreads();
//output[Col1] = sh_input[tx]; output[Col2] = sh_input[tx2];
unsigned int stride; int index;
// @@ Traverse the reduction tree down
// Up-sweep (reduce) phase. The final iteration (stride == 2*bdimx) makes
// every index fail `index < 2*bdimx`, so it is a harmless no-op.
for (stride = 1;stride <= 2*bdimx ; stride *= 2)
{
index = (tx +1)* stride*2 -1;
if (index < 2*bdimx)
sh_input[index] += sh_input[index-stride];
__syncthreads();
}
//@@ Traverse the reduction tree up
// Down-sweep (distribute) phase; the barrier at the top of each iteration
// separates it from the previous round's writes.
for ( stride = bdimx/2; stride > 0; stride/=2)
{
__syncthreads();
index = (tx +1)* stride*2 -1;
if (index + stride < 2*bdimx)
sh_input[index+stride] += sh_input[index];
}
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
__syncthreads();
// NOTE(review): the first store is unguarded -- it assumes i < len for the
// first blockDim.x elements of the segment; confirm callers round the grid
// so that only the second half can run past `len`.
output[i] = sh_input[tx];
if ( i + bdimx < len)
{
output[i + bdimx] = sh_input[tx2];
}
} |
13,891 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define LO8(x) ((x) & 0x000000FF)
#define MI16(x) (((x) & 0x0000FFFF) >> 8)
#define HI24(x) (((x) & 0x00FFFFFF) >> 16)
#define MUL24(x,y) ((x) * (y))
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
// One multinomial draw per thread: thread `id` reads row `id` of the k-wide
// probability table, one uniform sample random[id], and writes a k-wide
// one-hot row marking the interval [preSum, preSum + p_i) containing the
// sample.
__global__ void kSampleMultinomial(int* output, float* distribution, float* random, int k, int n){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < n){
float* dist = distribution + k * id;
int* out = output + k * id;
const float r = random[id];
float lo = 0, hi = 0;
for(int i = 0; i < k; i++){
hi += dist[i];
out[i] = r >= lo && r < hi;
lo = hi;
}
}
}
// output[i] = exp(input[i]) via the fast-math intrinsic __expf, using a
// grid-stride loop so any launch configuration covers all elements.
__global__ void kExp(float* output, float* input, unsigned int numElements){
const unsigned int stride = blockDim.x * gridDim.x;
for(unsigned int i = threadIdx.x + blockIdx.x * blockDim.x; i < numElements; i += stride)
output[i] = __expf(input[i]);
}
// output[i] = leftInput[i] / rightInput[i] via the fast-math intrinsic
// __fdividef, using a grid-stride loop.
__global__ void kDivide(float* output, float* leftInput, float* rightInput, unsigned int numElements){
const unsigned int stride = blockDim.x * gridDim.x;
for(unsigned int i = threadIdx.x + blockIdx.x * blockDim.x; i < numElements; i += stride)
output[i] = __fdividef(leftInput[i], rightInput[i]);
}
/*
 * 3D convolution, forward pass (single-color, ungrouped variant).
 * blockIdx.x tiles images (32 per block); blockIdx.y encodes
 * (module, 16-filter block); the thread block is 32x4.
 * Each thread accumulates 4 filters x 1 image in registers, staging 4-pixel
 * slices of filters and images through shared memory per iteration.
 * NOTE(review): assumes numFilters is a multiple of 16 (4*4) -- confirm at
 * the call site; `!true || ...` guards compile away (bounds checks disabled).
 */
__global__ void kConvolve_forward(float* targets, float* images, float* filters,
const int numImages, const int numFilters,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesZ, const int numModulesY, const int numModulesX, const int imgStride) {
__shared__ float shFilters[4*1][4 * 4]; // pre-load 4 pixels from 4*4 filters
__shared__ float shImages[4*1][32 * 1]; // pre-load 4 pixels from 32*2 images
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize * filterSize;
const int blocksPerModule = numFilters / (4*4);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = blockIdx.y % blocksPerModule;
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int imgLoadModPosZ = (moduleIdx / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = ((moduleIdx / numModulesX) % numModulesY )* moduleStride;
const int imgLoadModPosX = (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (4 * 4);
const int shFilterLoadX = tidx % (4 * 4);
const int myImgIdx = blockIdx.x * 32 * 1 + threadIdx.x;
// Advance base pointers so the loops below use small constant offsets.
images += myImgIdx;
filters += 4 * 4 * blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
targets += moduleIdx * numImages
+ (blockFilterIdx * 4 * 4 + threadIdx.y) * numImages * numModulesZ * numModulesY * numModulesX
+ myImgIdx;
float prod[4][1];
#pragma unroll
for(int f = 0; f < 4; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] = 0;
}
}
// Main loop: march over filter pixels in slabs of 4.
for (int p = 0; p < filterPixels; p += 4) {
/*
* Load 4 pixels from 4*4 filters
*/
if (shFilterLoadY < 4) {
#pragma unroll
for (int p2 = 0; p2 < 4; p2 += 32/4) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = 0;
}
}
}
}
/*
* Load 4 pixels from 32*1 images
*/
const int pixIdx = p + threadIdx.y;
if (pixIdx < filterPixels) {
// Decompose the linear filter-pixel index into (x, y, z) image coords.
const int x = paddingStart + imgLoadModPosX + pixIdx % filterSize;
const int y = paddingStart + imgLoadModPosY + (pixIdx / filterSize) % filterSize;
const int z = paddingStart + imgLoadModPosZ + pixIdx / (filterSize * filterSize);
if (z >= 0 && z < imgSizeZ && y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || myImgIdx + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.y + c * 4][threadIdx.x + i * 32] = images[imgStride * (c * imgPixels + z * imgSizeX * imgSizeY + y * imgSizeX + x) + i * 32];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.y + c * 4][threadIdx.x + i * 32] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.y + c * 4][threadIdx.x + i * 32] = 0;
}
}
}
}
__syncthreads();
// Multiply-accumulate the staged tiles into per-thread registers.
#pragma unroll
for (int i = 0; i < 4*1; i++) {
#pragma unroll
for(int f = 0; f < 4; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] += shImages[i][g * 32 + threadIdx.x] * shFilters[i][threadIdx.y + f * 4];
}
}
}
__syncthreads();
}
// Write the 4 accumulated filter outputs for this thread's image column.
#pragma unroll
for (int g = 0; g < 1; g++) {
if (!true || myImgIdx + g * 32 < numImages) {
#pragma unroll
for (int f = 0; f < 4; f++) {
targets[g * 32 + f * 4 * numImages * numModulesZ * numModulesY * numModulesX] = prod[f][g];
}
}
}
}
/*
 * 3D convolution, weight-gradient pass (single-color variant): accumulates
 * images x hidden activations into filter-weight gradients, scaled by
 * scaleOutputs. blockIdx.x encodes (output module, 16-filter block);
 * blockIdx.y tiles 8*5 filter pixels; the thread block is 16x8.
 * Each thread owns (1 color x 5 pixel) partial sums for one filter, summing
 * over `partialSum` modules and all images in chunks of 32 cases.
 * NOTE(review): assumes numFilters is a multiple of 16 -- confirm at call
 * site; `!true || ...` guards compile away (image bounds checks disabled).
 */
__global__ void kConvolve_weight(float* targets, float* images, float* hidActs,
const int numImages, const int numFilters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int partialSum, const float scaleOutputs) {
__shared__ float shImages[5 * 8 * 1][32]; // preload 32 cases of 8 * 5 pixels
__shared__ float shHidActs[16][32 + 1]; // preload 32 cases of 16 hidActs
const int tidx = 16 * threadIdx.y + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterBlocksPerModule = numFilters / 16;
const int outputModuleIdx = blockIdx.x / filterBlocksPerModule;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = 16 * (blockIdx.x % filterBlocksPerModule);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesZ * numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * 8 * 5;
// Advance base pointers so the inner loops use small constant offsets.
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += (outputModuleIdx * numFilters) * filterPixels * 1
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shImgLoad = &shImages[loadY][loadX];
float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[1][5];
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int p = 0; p < 5; p++) {
prod[c][p] = 0;
}
}
// Precomputed (z << 16 | y << 8 | x) offsets per filter pixel, so the hot
// loop avoids divisions (unpacked below via HI24/MI16/LO8).
__shared__ int pxDivs[8*5];
if (tidx < 8 * 5) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / (filterSize * filterSize)) << 16) + (((blockPixelOffset + tidx) / filterSize) % filterSize << 8) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosZ = paddingStart + (m / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((m / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += 32) {
if (loadY < 8 * 5) {
/*
* As long as 8 * 16 is divisible by 32 this will loop the right
* number of times.
*
* This will load some imgGrads from filter pixels that don't exit (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < 8 * 5; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((8 * 5) % (16 * 8 / 32) == 0 || y + loadY < 8 * 5) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!true || caseIdx + loadX < numImages)) {
const int pxZ = imgLoadModPosZ + HI24(pxDivs[pxIdx]);
const int pxY = imgLoadModPosY + MI16(pxDivs[pxIdx]); // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO8(pxDivs[pxIdx]);
if (pxZ >= 0 && pxZ < imgSizeZ && pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX) * imgStride;
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = 0;
}
}
}
}
}
if (loadY < 16 && (!true || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < 16; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (16 % (16 * 8 / 32) == 0 || y + loadY < 16) {
shHidActLoad[y * (32 + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
// Accumulate: 5 pixels x 32 cases x 1 color per thread.
#pragma unroll
for (int p = 0; p < 5; p++) {
#pragma unroll
for (int i = 0; i < 32; i++) {
#pragma unroll
for (int c = 0; c < 1; c++) {
prod[c][p] += shImages[threadIdx.y + p * 8 + c * 5 * 8][i] * shHidActs[threadIdx.x][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
// Write scaled weight gradients; skip pixels past the end of the filter.
#pragma unroll
for (int p = 0; p < 5; p++) {
if (blockPixelOffset + p * 8 + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < 1; c++) {
targets[p * 8 * numFilters + c * filterPixels * numFilters] = scaleOutputs * prod[c][p];
}
}
}
}
/*
 * 3D convolution, backward (data-gradient) pass, single-color variant:
 * scatters hidden-activation gradients back through the filters into image
 * gradients. blockIdx.x tiles images (32 = 16x2 per block); blockIdx.y is a
 * 4x4(x1) image-pixel region; the thread block is 16x16 (threadIdx.y picks
 * the pixel within the region, threadIdx.x the image case).
 * NOTE(review): assumes numFilters is a multiple of 16 -- confirm at call
 * site; `!true || ...` guards compile away (image-count checks disabled).
 */
__global__ void kConvolve_backward(float* targets, const float* hidActs, const float* filters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int numImages, const int numFilters, const int filterSize,
const int imgSizeZ, const int imgSizeY, const int imgSizeX,
const int paddingStart, const int moduleStride) {
__shared__ float shFilters[1*16][16 + 1]; // load 16 filter one time. See below.
__shared__ float shHidActs[16][16*2]; // each block deal with 16 * imgPerThread images.
const int blockCaseIdx = blockIdx.x * 16 * 2;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int numRegionsY = DIVUP(imgSizeY, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = (blockRegionIdx / numRegionsX) % numRegionsY;
const int blockRegionIdxZ = blockRegionIdx / (numRegionsX * numRegionsY);
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int blockRegionFront = blockRegionIdxZ;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxZ = blockRegionFront;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX;
const bool isPxInImg = pxZ < imgSizeZ && pxY < imgSizeY && pxX < imgSizeX;
const int numModules = numModulesZ * numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeX * imgSizeY;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32; // load 32 cases one time.
// Advance base pointers so the inner loops use small constant offsets.
hidActs += blockCaseIdx + loadY * numImages * numModules + loadX;
filters += threadIdx.x;
targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[1][2];
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int i = 0; i < 2; i++) {
prod[c][i] = 0;
}
}
// Range of modules whose receptive field overlaps this pixel region.
const int startZ = blockRegionFront - paddingStart < filterSize ? 0
: 1 + (blockRegionFront - paddingStart -filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockRegionFront + 3 - paddingStart) / moduleStride);
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int mz = startZ; mz < endZ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInModuleZ = pxZ - moduleFront;
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleZ >= 0 && pxInModuleZ < filterSize && pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleZ * filterSize * filterSize + pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < 2 * 16; i += 32) { // IMAGES
if (!true || blockCaseIdx + i + loadX < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of 2*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * 2 + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of 2*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * 2 + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = true ? &filters[pxIdxInModule * numFilters + f]
: &filters[(moduleIdx * 1 * filterPixels + pxIdxInModule) * numFilters + f];
#pragma unroll
for (int c = 0; c < 1; c++) {
shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < 2; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
#pragma unroll
for (int i = 0; i < 2; i++) {
if (!true || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < 1; c++) {
targets[c * imgPixels * numImages + i * 16] = prod[c][i];
}
}
}
}
}
/*
 * 3D convolution, forward pass, multi-color/grouped variant: like
 * kConvolve_forward but with numImgColors input channels split across
 * numGroups filter groups, processing colors two at a time.
 * blockIdx.x tiles images (32 per block); blockIdx.y encodes
 * (module, 32-filter block); the thread block is 32x4. Each thread
 * accumulates 8 filters x 1 image in registers.
 * NOTE(review): assumes numFilters is a multiple of 32 (4*8) and
 * numFilterColors is even -- confirm at call site; `!true || ...` guards
 * compile away (image-count checks disabled).
 */
__global__ void kConvolve_forward_c(float* targets, float* images, float* filters,
const int numImages, const int numFilters,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesZ, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups) {
__shared__ float shFilters[4*2][4 * 8]; // pre-load 4 pixels from 4*8 filters
__shared__ float shImages[4*2][32 * 1]; // pre-load 4 pixels from 32*1 images
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (4*8);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = 8 * 4 * (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY * numModulesZ;
const int blockColorIdx = numFilterColors * blockGroupIdx;
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int imgLoadModPosZ = paddingStart + (moduleIdx / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((moduleIdx / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (4 * 8);
const int shFilterLoadX = tidx % (4 * 8);
const int myImgIdx = blockIdx.x * 32 * 1 + threadIdx.x;
// Advance base pointers so the loops below use small constant offsets.
images += blockColorIdx * imgPixels * imgStride + myImgIdx;
filters +=blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y) * numImages * numModules
+ myImgIdx;
float prod[8][1];
#pragma unroll
for(int f = 0; f < 8; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] = 0;
}
}
// __shared__ int imgPos[]
for (int oc = 0; oc < numFilterColors; oc += 2) { // oc stands for outer color (loop)
for (int p = 0; p < filterPixels; p += 4) {
/*
* Load 4 pixels from 4*8 filters
*/
if (shFilterLoadY < 4) {
#pragma unroll
for (int p2 = 0; p2 < 4; p2 += 32/8) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = 0;
}
}
}
}
/*
* Load 4 pixels from 32*1 images
*/
const int pixIdx = p + threadIdx.y;
if (pixIdx < filterPixels) {
// Decompose the linear filter-pixel index into (x, y, z) image coords.
const int x = imgLoadModPosX + pixIdx % filterSize;
const int y = imgLoadModPosY + (pixIdx / filterSize) % filterSize;
const int z = imgLoadModPosZ + pixIdx / (filterSize * filterSize);
if (z >= 0 && z < imgSizeZ && y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
float* m = &images[imgStride * (oc * imgPixels + z * imgSizeX * imgSizeY + y * imgSizeX + x)];
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || myImgIdx + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.y + c * 4][threadIdx.x + i * 32] = m[c * imgStride * imgPixels + i * 32];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.y + c * 4][threadIdx.x + i * 32] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.y + c * 4][threadIdx.x + i * 32] = 0;
}
}
}
}
__syncthreads();
// Multiply-accumulate the staged tiles into per-thread registers.
#pragma unroll
for (int i = 0; i < 4*2; i++) {
#pragma unroll
for(int f = 0; f < 8; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] += shImages[i][g * 32 + threadIdx.x] * shFilters[i][threadIdx.y + f * 4];
}
}
}
__syncthreads();
}
}
// Write the 8 accumulated filter outputs for this thread's image column.
#pragma unroll
for (int g = 0; g < 1; g++) {
if (!true || myImgIdx + g * 32 < numImages) {
#pragma unroll
for (int f = 0; f < 8; f++) {
targets[g * 32 + f * 4 * numImages * numModules] = prod[f][g];
}
}
}
}
/*
 * 3D convolution, weight-gradient pass, multi-color/grouped variant:
 * accumulates images x hidden activations into filter-weight gradients for
 * 8 colors x 2 filter sub-blocks per thread, scaled by scaleOutputs.
 * blockIdx.x encodes (output module, 32-filter block); blockIdx.y encodes
 * (8-pixel tile, 8-color tile); the thread block is 16x8.
 * NOTE(review): assumes numFilters is a multiple of 32 (16*2) and
 * numFilterColors a multiple of 8 -- confirm at call site; `!true || ...`
 * guards compile away (image-count checks disabled).
 */
__global__ void kConvolve_weight_c(float* targets, float* images, float* hidActs,
const int numImages, const int numFilters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleOutputs) {
__shared__ float shImages[8 * 8][32]; // preload 32 cases of 4 * pixelsPerThread pixels
__shared__ float shHidActs[2 * 16][32 + 1]; // preload 32 cases of 32 hidacts
const int tidx = 16 * threadIdx.y + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (16 * 2);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = 2 * 16 * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesZ * numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/8)) * 8;
const int filterColorIdx = (blockIdx.y % (numFilterColors/8)) * 8;
const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
// Advance base pointers so the inner loops use small constant offsets.
images += imgColorIdx * imgPixels * imgStride + loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[8][2];
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int f = 0; f < 2; f++) {
prod[c][f] = 0;
}
}
// This avoids doing a division in an inner loop
// (packed per-pixel offsets: z << 16 | y << 8 | x, unpacked via HI24/MI16/LO8).
__shared__ int pxDivs[8];
if (tidx < 8) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / (filterSize * filterSize)) << 16) + (((blockPixelOffset + tidx) / filterSize) % filterSize << 8) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosZ = paddingStart + (m / (numModulesY * numModulesX)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((m / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += 32) {
if (loadY < 8) {
/*
* As long as 4 * 32 is divisible by 32 this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < 8; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (8 % (16 * 8 / 32) == 0 || y + loadY < 8) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!true || caseIdx + loadX < numImages)) {
const int pxZ = imgLoadModPosZ + HI24(pxDivs[pxIdx]);
const int pxY = imgLoadModPosY + MI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO8(pxDivs[pxIdx]);
if (pxZ >= 0 && pxZ < imgSizeZ && pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = 0;
}
}
}
}
}
if (loadY < 16 * 2 && (!true || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < 16 * 2; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((16 * 2) % (16 * 8 / 32) == 0 || y + loadY < 16 * 2) {
shHidActLoad[y * (32 + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
// Accumulate: 8 colors x 32 cases x 2 filter sub-blocks per thread.
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int i = 0; i < 32; i++) {
#pragma unroll
for (int f = 0; f < 2; f++) {
prod[c][f] += shImages[threadIdx.y + c * 8][i] * shHidActs[threadIdx.x + f * 16][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
// Write scaled weight gradients; skip pixels past the end of the filter.
if (blockPixelOffset + threadIdx.y < filterPixels) {
#pragma unroll
for (int f = 0; f < 2; f++) {
#pragma unroll
for (int c = 0; c < 8; c++) {
targets[c * filterPixels * numFilters + f * 16] = scaleOutputs * prod[c][f];
}
}
}
}
/*
 * kConvolve_backward_c
 *
 * For one image pixel (blockIdx.y) and one block of 4*4 image colors,
 * accumulates the sum over all modules (mz, my, mx) whose filter window
 * covers that pixel of filters[...] * hidActs[...], and writes the result
 * into `targets`. This is the "backward w.r.t. inputs" step of a 3-D
 * (Z, Y, X) convolution.
 *
 * Memory layouts implied by the index arithmetic below:
 *   hidActs: [numFilters][numModules][numImages]
 *   filters: [filterColors][filterPixels][numFilters]   (per group)
 *   targets: [numImgColors][imgPixels][numImages]
 *
 * blockIdx.x packs (color block, image block) via numImgBlocks;
 * blockIdx.y is the flat image-pixel index.
 *
 * NOTE(review): the loader arithmetic (tidx = threadIdx.y * 32 + threadIdx.x,
 * load strides 32*4/32 and 32*4/16, prod[4][1]) assumes a 32x4 thread block —
 * confirm at the launch site.
 * NOTE(review): the literal `true` / `!true` conditions and hard-coded tile
 * sizes suggest this kernel was expanded from a templated cuda-convnet-style
 * source (with checkCaseBounds/conv template flags fixed); verify before
 * editing the constants.
 */
__global__ void kConvolve_backward_c(float* targets, const float* hidActs, const float* filters,
const int numModulesZ, const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups) {
/* Shared tiles: 16 filter-color rows (padded +1 — presumably to avoid
 * shared-memory bank conflicts on column access) and 16 hidAct rows of
 * 32 images each. */
__shared__ float shFilters[4*4][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][32*1];
const int numImgBlocks = DIVUP(numImages,32*1);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 32*1;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * 4*4; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
/* Decompose the flat pixel index into (z, y, x) image coordinates. */
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = (blockPixelIdx / imgSizeX) % imgSizeY;
const int blockPixelIdxZ = blockPixelIdx / (imgSizeX * imgSizeY);
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
/* Flat thread id and the (row, col) each thread uses for cooperative loads. */
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesZ * numModulesY * numModulesX;
/* Advance the base pointers so later indexing is relative to this block's
 * image cases, filter group, and output color/pixel. */
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
/* Per-thread accumulators: 4 colors x 1 image each. */
float prod[4][1];
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] = 0;
}
}
/* Range of modules along each axis whose filter window covers this pixel. */
const int startZ = blockPixelIdxZ - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxZ - paddingStart - filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockPixelIdxZ - paddingStart) / moduleStride);
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
/* Each thread's private write slot inside the shared tiles. */
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for (int mz = startZ; mz < endZ ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInFilterZ = blockPixelIdxZ - moduleFront;
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
/* Position of this image pixel inside the current module's filter. */
const int pxIdxInFilter = pxInFilterZ * filterSize * filterSize + pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
/* Stage the hidAct tile; zero-fill image cases past numImages. */
#pragma unroll
for (int i = 0; i < 1 * 32; i += 32) {
if (!true || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 32*4/32) { // load 16 rows of 1*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 32 * 1 + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 32*4/32) { // load 16 rows of 1*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 32 * 1 + i] = 0;
}
}
}
/* Stage the filter tile (the dead branch is the non-shared-weights case
 * of the original templated kernel). */
const float* fLoad = true ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < 4*4; i+= 32*4/16) {
if ((4*4) % (32*4/16) == 0 || i + filtersLoadY < 4*4) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
/* Barrier: tiles fully written before any thread reads them. */
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] += shFilters[c * 4 + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * 32];
}
}
}
/* Barrier: done reading before the next f-iteration overwrites the tiles. */
__syncthreads();
}
}
}
}
/* Write the 4 accumulated colors for this thread's image case. */
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || blockCaseIdx + threadIdx.x + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 4; c++) {
targets[c * 4 * imgPixels * numImages + i * 32] = prod[c][i];
}
}
}
}
|
13,892 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <chrono>
void checkCUDAError(const char *msg)
{
    // Abort with a diagnostic if the most recent CUDA runtime call (or
    // kernel launch) left an error behind; `msg` identifies the call site.
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr," CUDA Error: %s: %s. \n",msg, cudaGetErrorString(status));
    exit(-1);
}
// Dense matrix multiply C = A * B for row-major N x N matrices.
// One thread computes one element of C.
// Fixes vs. original: (1) restores the bounds guard that was commented out
// (the comment even had a `%%` typo), so grids that over-cover N are safe;
// (2) accumulates in a register instead of repeated read-modify-write on
// global memory, which also avoids exposing a partially-built C element.
__global__ void dgemm(double *A, double *B, double *C, size_t N)
{
    size_t myRow = blockIdx.y*blockDim.y + threadIdx.y;
    size_t myCol = blockIdx.x*blockDim.x + threadIdx.x;
    if (myRow >= N || myCol >= N)   // guard the grid tail
        return;
    double acc = 0.0;
    for (size_t i = 0; i < N; i++)
        acc += A[myRow*N + i] * B[i*N + myCol];
    C[myRow*N + myCol] = acc;
}
// NOTE(review): a credential-looking comment ("pw: ...") was removed from
// this line; if it was a real password, rotate it immediately.
// Driver: builds two deterministic 2048x2048 matrices, multiplies them on
// the GPU, and prints a checksum plus elapsed time.
// Fixes vs. original: frees all six buffers (host and device) and uses
// ceil-division for the grid so N need not be an exact multiple of 32.
int main(int argc, char** argv)
{
    double *A, *B, *C;
    double *dA, *dB, *dC;
    size_t N = 2048;

    A = (double*) malloc (sizeof(double)*N*N);
    B = (double*) malloc (sizeof(double)*N*N);
    C = (double*) malloc (sizeof(double)*N*N);

    // Deterministic, non-trivial test data.
    for (size_t i = 0; i < N; i++)
        for (size_t j = 0; j < N; j++)
        {
            A[i*N + j] = sin(i);
            B[i*N + j] = cos(j);
        }

    cudaSetDevice(0);
    cudaMalloc(&dA, sizeof(double)*N*N);
    checkCUDAError("Error allocating dA \n");
    cudaMalloc(&dB, sizeof(double)*N*N);
    checkCUDAError("Error allocating dB \n");
    cudaMalloc(&dC, sizeof(double)*N*N);
    checkCUDAError("Error allocating dC \n");

    cudaMemcpy(dA,A, sizeof(double)*N*N,cudaMemcpyHostToDevice);checkCUDAError("Error coping A \n");
    cudaMemcpy(dB,B, sizeof(double)*N*N,cudaMemcpyHostToDevice);checkCUDAError("Error coping B \n");

    auto startTime = std::chrono::system_clock::now();

    // One thread per output element; ceil-div so any N is fully covered.
    dim3 threadsPerBlock(32,32);
    dim3 blocksPerGrid((N + 31)/32, (N + 31)/32);
    dgemm<<< blocksPerGrid, threadsPerBlock >>>(dA,dB, dC,N);checkCUDAError("Error executing kernel \n");

    // The blocking device-to-host copy synchronizes with the kernel, so the
    // timing below covers kernel execution plus the copy-back.
    cudaMemcpy(C,dC, sizeof(double)*N*N,cudaMemcpyDeviceToHost);checkCUDAError("Error coping C \n");

    auto endTime = std::chrono::system_clock::now();

    // Fold the GPU result into a single checksum as a quick sanity check.
    // NOTE(review): the "[CPU]" label is historical — the multiply ran on the GPU.
    double checkSum = 0.0;
    for (size_t i = 0; i < N; i++)
        for (size_t j = 0; j < N; j++)
            checkSum += C[i*N + j];
    printf("[CPU] Checksum: %f - Elapsed Time: %fs\n", checkSum, std::chrono::duration<double>(endTime-startTime).count());

    // Release device and host memory (the original leaked all six buffers).
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    free(A); free(B); free(C);
    return 0;
}
|
13,893 | #include "includes.h"
// Bitwise recombination: for each word, take bits of p1 where the mask in
// `off` has a 1 and bits of p0 where it has a 0, writing the result back
// into `off`.
// Fixes vs. original: the global offset now advances by the FULL block size
// (blockDim.x * blockDim.y); the original used blockIdx.x * blockDim.x,
// which makes consecutive blocks overlap whenever blockDim.y > 1 (identical
// behaviour when blockDim.y == 1). The two __syncthreads() calls were
// removed: no shared memory is used and each thread touches only its own
// element, so they were semantically no-ops.
__global__ void recombine( unsigned int * p0 , unsigned int * p1 , unsigned int * off , unsigned int cols ) {
    // Flat thread id within a (possibly 2-D) block.
    unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
    unsigned int boffset = blockIdx.x * (blockDim.x * blockDim.y) + tid;
    if( boffset < cols ) {
        unsigned int p   = p0[ boffset ];
        unsigned int q   = p1[ boffset ];
        unsigned int sel = off[ boffset ];
        // Bit-select: q where sel==1, p where sel==0.
        off[ boffset ] = ( p & ~sel ) | ( q & sel );
    }
} |
13,894 | #include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda_runtime.h>
// Block-wise tree reduction performed directly in GLOBAL memory: each block
// sums its blockDim.x-element slice of d_in and writes the partial sum to
// d_out[blockIdx.x].
// NOTE(review): this kernel DESTROYS its input — d_in is reduced in place,
// so a second call on the same buffer (as the benchmark loop in main does)
// operates on already-reduced data and yields a different result.
// NOTE(review): assumes blockDim.x is a power of two; otherwise elements are
// dropped at odd split points. The atomicAdd is not required for
// correctness here (within a step, readers at ttid+s and writers at ttid
// never alias inside a block — the plain add in the comment would do), but
// it is kept byte-identical below.
__global__ void globalMem_reduce_kernel(float *d_out, float *d_in)
{
int ttid = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// Halve the active range each step; barrier keeps steps in lockstep.
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) { // if threadIdx.x is on the left half
// d_in[ttid] += d_in[ttid + s];
atomicAdd(&d_in[ttid], d_in[ttid+s]);
}
__syncthreads();
}
// Thread 0 publishes this block's partial sum.
if (tid == 0) {
d_out[blockIdx.x] = d_in[ttid];
}
}
__global__ void sharedMem_reduce_kernel(float *d_out, float *d_in)
{
    // Dynamic shared memory, sized by the third <<<>>> launch argument
    // (must be blockDim.x * sizeof(float)).
    extern __shared__ float shared_data[];

    const int local  = threadIdx.x;
    const int global = threadIdx.x + blockDim.x * blockIdx.x;

    // Stage this block's slice of the input into shared memory.
    shared_data[local] = d_in[global];
    __syncthreads();

    // Tree reduction: halve the active range each step.
    // Requires blockDim.x to be a power of two.
    for (unsigned int stride = blockDim.x >> 1; stride != 0; stride >>= 1) {
        if (local < stride)
            shared_data[local] += shared_data[local + stride];
        __syncthreads();
    }

    // Thread 0 publishes the block's partial sum.
    if (local == 0)
        d_out[blockIdx.x] = shared_data[0];
}
void reduce(float *d_out, float *d_intermediate, float *d_in, int size,
            bool useSharedMem)
{
    // Two-pass sum reduction of d_in[0..size) into *d_out, using either the
    // shared-memory or the global-memory kernel.
    // Preconditions (unchecked): size is a multiple of maxThreadsPerBlock,
    // and size <= maxThreadsPerBlock^2 so one second pass suffices.
    const int maxThreadsPerBlock = 1024;

    // Pass 1: one partial sum per block, written to d_intermediate.
    int threads = maxThreadsPerBlock;
    int blocks = size / maxThreadsPerBlock;
    if (useSharedMem)
        sharedMem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>>(d_intermediate, d_in);
    else
        globalMem_reduce_kernel<<<blocks, threads>>>(d_intermediate, d_in);

    // Pass 2: a single block folds the partial sums into the final result.
    threads = blocks;
    blocks = 1;
    if (useSharedMem)
        sharedMem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>>(d_out, d_intermediate);
    else
        globalMem_reduce_kernel<<<blocks, threads>>>(d_out, d_intermediate);
}
// Benchmark driver: sums 2^20 random floats on the GPU 100 times with either
// the global-memory or shared-memory reduction (argv[1] selects 0 or 1) and
// reports the average launch time plus host/device results.
// Fixes vs. original: heap-allocates the 4 MB input (a float[1<<20] stack
// array risks overflowing the default stack limit), compares against
// cudaSuccess instead of the magic value 0, fixes the "Everage" output typo,
// and destroys the CUDA events and frees the host buffer.
int main(int argc, char **argv)
{
    // --- checking whether there is a device --- //
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        std::cerr << "No GPUs found" << std::endl;
        exit(EXIT_FAILURE);
    }
    // --- get properties of device --- //
    int dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp deviceProps;
    if (cudaGetDeviceProperties(&deviceProps, dev) == cudaSuccess) {
        std::cout << "Using device:" << dev << "\n";
        std::cout << deviceProps.name << "\n";
        std::cout << "Global memory: " << deviceProps.totalGlobalMem << "\n";
        std::cout << "Compute v:" << static_cast<int>(deviceProps.major) << "."
                  << static_cast<int>(deviceProps.minor) << std::endl;
        std::cout << "Clock:" << static_cast<int>(deviceProps.clockRate) << std::endl;
    }
    const int ARRAY_SIZE = 1 << 20;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    // Heap allocation: 4 MB is too large to put on the stack safely.
    float *h_in = new float[ARRAY_SIZE];
    float sum = 0.0f;
    for (int i = 0; i < ARRAY_SIZE; i++) {
        // Uniform values in [-1, 1); host-side sum is the reference result.
        h_in[i] = -1.0f + static_cast<float>(random()) / (static_cast<float>(RAND_MAX)/2.0f);
        sum += h_in[i];
    }
    float *d_in, *d_intermediate, *d_out;
    cudaMalloc((void **)&d_in, ARRAY_BYTES);
    cudaMalloc((void **)&d_intermediate, ARRAY_BYTES);
    cudaMalloc((void **)&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    int whichKernel = 0;
    if (argc == 2) {
        whichKernel = atoi(argv[1]);
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // NOTE(review): the global-memory kernel reduces d_in IN PLACE, so
    // iterations 2..100 of the timing loop re-reduce already-reduced data;
    // the timing stays representative but only the first pass computes the
    // true sum. This matches the original behaviour.
    switch (whichKernel) {
    case 0:
        std::cout << "Global memory reduce" << "\n";
        cudaEventRecord(start, 0);
        for (int i = 0; i < 100; i++) {
            reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false);
        }
        cudaEventRecord(stop, 0);
        break;
    case 1:
        std::cout << "Shared memory reduce" << "\n";
        cudaEventRecord(start, 0);
        for (int i = 0; i < 100; i++) {
            reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true);
        }
        cudaEventRecord(stop, 0);
        break;
    default:
        std::cerr << "No kernel run!" << std::endl;
        exit(EXIT_FAILURE);
    }
    // Average the elapsed time over the 100 launches.
    cudaEventSynchronize(stop);
    float elapsed;
    cudaEventElapsedTime(&elapsed, start, stop);
    elapsed /= 100.0f;
    float h_out;
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << "Average time elapsed:" << elapsed << std::endl;
    std::cout << "Host result:" << sum << ", device result:" << h_out << std::endl;
    // Release GPU and host resources (the original leaked the events and h_in).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_in);
    cudaFree(d_intermediate);
    cudaFree(d_out);
    delete[] h_in;
}
|
13,895 | #include "includes.h"
// im2col: unfolds a [channels, height, width] image into the column matrix
// used to express convolution as a matrix multiply. Each of the n work items
// fills one (channel_in, h_out, w_out) column of ksize_h * ksize_w entries,
// honouring padding, stride, and dilation; out-of-bounds taps become 0.
// Fixes vs. original: the loop body divided the grid-stride loop variable
// `index` itself and advanced the data_im/data_col base pointers every
// iteration, corrupting all iterations after the first whenever the grid
// under-covers n. The decomposition now works on a copy, and the pointers
// are recomputed per iteration as locals.
__global__ void im2col_kernel(int n, float* data_im, int height, int width, int ksize_h, int ksize_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int height_col, int width_col, float* data_col) {
    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (n); index += blockDim.x * gridDim.x) {
        // Decompose a COPY of the loop counter into (channel_in, h_out, w_out).
        int idx = index;
        int w_out = idx % width_col;
        idx /= width_col;
        int h_out = idx % height_col;
        int channel_in = idx / height_col;
        int channel_out = channel_in * ksize_h * ksize_w;
        // Top-left corner of the receptive field in the input image.
        int h_in = h_out * stride_h - pad_h;
        int w_in = w_out * stride_w - pad_w;
        // Local base pointers; the function arguments are left untouched.
        float* col_ptr = data_col + (channel_out * height_col + h_out) * width_col + w_out;
        const float* im_ptr = data_im + (channel_in * height + h_in) * width + w_in;
        for (int i = 0; i < ksize_h; ++i) {
            for (int j = 0; j < ksize_w; ++j) {
                int h = h_in + i * dilation_h;
                int w = w_in + j * dilation_w;
                // Zero-fill taps that fall in the padding region.
                *col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
                    im_ptr[i * dilation_h * width + j * dilation_w] : 0;
                col_ptr += height_col * width_col;
            }
        }
    }
} |
13,896 | extern "C"
// In-place evaluation of the Normal(mu, sigma) probability density:
// vals[i] <- pdf(vals[i]) for i < N. Supports 2-D grids and 3-D blocks.
// Fixes vs. original: the body used double-precision exp() and
// sqrt(2 * 3.141592653589793), silently promoting every operation to double
// inside a float kernel; 1/sqrt(2*pi) is now a precomputed float constant
// and expf() keeps the math in single precision.
// (Based on geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pdf indexing.)
__global__ void dnorm_kernel(float *vals, int N, float mu, float sigma)
{
    // Flatten (2-D grid, 3-D block) coordinates into one global index.
    int myblock = blockIdx.x + blockIdx.y * gridDim.x;
    /* how big is each block within a grid */
    int blocksize = blockDim.x * blockDim.y * blockDim.z;
    /* get thread within a block */
    int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
    int idx = myblock * blocksize + subthread;
    if (idx < N) {
        const float INV_SQRT_2PI = 0.3989422804014327f;  // 1/sqrt(2*pi)
        float z = (vals[idx] - mu) / sigma;              // standardized value
        vals[idx] = INV_SQRT_2PI * expf(-0.5f * z * z) / sigma;
    }
}
|
13,897 | #include "includes.h"
// Replicate src_gpu[0..current_size) `groups` times into dst_gpu, laid out
// as `groups` consecutive copies of the source array. One thread handles one
// source element and writes all of its copies.
__global__ void expand_array_kernel(const float *src_gpu, float *dst_gpu, int current_size, int groups)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= current_size)
        return;
    for (int g = 0; g < groups; ++g)
        dst_gpu[g * current_size + idx] = src_gpu[idx];
} |
13,898 | #include "cuda_func.cuh"
// NOTE(review): BROKEN — `arr` is passed by value, so the device pointer
// written by cudaMalloc goes into a local copy and is lost when the function
// returns; the caller's pointer is unchanged and the allocation leaks.
// Fixing it requires an interface change (take `double **arr`, or return the
// pointer), so it is only flagged here; audit the call sites.
void malloc_cuda_1d(int num_x, double *arr)
{
cudaMalloc((void**)&arr, num_x*sizeof(double));
}
// NOTE(review): BROKEN for the same reason as malloc_cuda_1d — `arr` is
// passed by value, so the allocated device pointer is discarded and leaks.
// Needs `int **arr` (or a returned pointer) to be useful; audit call sites.
void malloc_cuda_1i(int num_x, int *arr)
{
cudaMalloc((void**)&arr, num_x*sizeof(int));
}
// Release a device buffer previously allocated with cudaMalloc.
// (cudaFree(nullptr) is a no-op, so a null argument is safe.)
void free_cuda_1d(double *arr)
{
cudaFree(arr);
}
// Release a device int buffer previously allocated with cudaMalloc.
// (cudaFree(nullptr) is a no-op, so a null argument is safe.)
void free_cuda_1i(int *arr)
{
cudaFree(arr);
}
|
13,899 | #include "includes.h"
// Map nwl uniform draws r[i] (expected in [0,1)) onto integer indices
// {0, 1, 2}: the scale factor (3 - 1 + 0.999999) keeps the truncated result
// in [0, 2] even for r[i] just below 1.
__global__ void mapKex ( const int nwl, const float *r, int *kex ) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if ( idx >= nwl )
        return;
    kex[idx] = ( int ) truncf ( r[idx] * ( 3 - 1 + 0.999999 ) );
} |
13,900 | #include "includes.h"
// Copy src[0..length) into dest while dividing every element by `divisor`.
// Multiplies by the reciprocal (computed once per thread) instead of
// dividing per element; the grid-stride loop makes the kernel correct for
// any launch configuration.
__global__ void divide_copy(double *dest, const double *src, int length, const double divisor)
{
    const double factor = 1.0 / divisor;
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < length; i += stride)
        dest[i] = src[i] * factor;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.