serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
5,001 | #include "includes.h"
// Dot product of a and b (length N): each thread accumulates a grid-stride
// partial sum, the block reduces the partials in dynamic shared memory, and
// thread 0 of each block writes the per-block result to c[blockIdx.x].
// The host must still sum c[0..gridDim.x-1] for the final value.
// Launch requirements: dynamic shared memory of blockDim.x * sizeof(double);
// the tree reduction below assumes blockDim.x is a power of two — TODO confirm.
__global__ void cuda_dot(int N, double *a, double *b, double *c)
{
// __shared__ double localDot[threadsPerBlock]; /* Statically defined */
extern __shared__ double localDot[];
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int localIndex = threadIdx.x;
double localSum = 0;
// Grid-stride loop: this thread handles elements ix, ix + gridSize, ...
while (ix < N)
{
localSum += a[ix] * b[ix]; /* accumulate this thread's partial product */
ix += blockDim.x * gridDim.x;
}
/* Store sum computed by this thread */
localDot[localIndex] = localSum;
/* Wait for all threads to get to this point */
__syncthreads();
/* Every block should add up sum computed on
threads in the block */
// Tree reduction: halve the active range each pass. The barrier sits
// outside the divergent branch so every thread in the block reaches it.
int i = blockDim.x/2;
while (i != 0)
{
if (localIndex < i)
{
localDot[localIndex] += localDot[localIndex + i];
}
__syncthreads();
i /= 2;
}
/* Each block stores local dot product */
if (localIndex == 0)
c[blockIdx.x] = localDot[0];
} |
5,002 | __global__ void add2( double * v1, const double * v2 )
{
// In-place element-wise add: v1[i] += v2[i]. Only threadIdx.x is used,
// so this expects a single-block launch with one thread per element.
const int i = threadIdx.x;
v1[i] += v2[i];
} |
5,003 | #include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
// Each thread writes one complete copy of d_buffer (first *d_length chars,
// no terminator) into d_resbuffer at its own offset, so launching T threads
// produces T back-to-back copies of the source string.
__global__ void add(char * d_resbuffer , char * d_buffer, int * d_length)
{
const int len = *d_length;
char *dst = d_resbuffer + threadIdx.x * len;
for (int i = 0; i < len; ++i)
dst[i] = d_buffer[i];
}
// Duplicates the string in `buffer` `duplicate` times on the GPU (one thread
// per copy) and prints the concatenated result.
int main(void)
{
char buffer[100] ="Hello";
char res[100] = "";
int length = strlen(buffer) ;
int duplicate = 4 ; /* number of copies of buffer to produce */
char * d_resbuffer ;
char * d_buffer ;
int * d_length ;
/* device buffers: result holds `duplicate` copies, source holds one */
cudaMalloc((void **)&d_resbuffer,(length*duplicate+1) * sizeof(char));
cudaMalloc((void **)&d_buffer,(length+1)*sizeof(char));
cudaMalloc((void **)&d_length , sizeof(int));
cudaMemcpy(d_buffer,buffer,(length+1)*sizeof(char),cudaMemcpyHostToDevice);
cudaMemcpy(d_length , &length , sizeof(int),cudaMemcpyHostToDevice);
/* one thread per copy */
add<<<1,duplicate>>>(d_resbuffer,d_buffer,d_length);
cudaError_t error = cudaGetLastError();
if(error!= cudaSuccess)
{
printf("%s\n",cudaGetErrorString(error));
}
/* the blocking cudaMemcpy synchronizes with the kernel before copying back */
cudaMemcpy(res,d_resbuffer,(length*duplicate)*sizeof(char),cudaMemcpyDeviceToHost);
int location = length * duplicate ;
res[location]='\0';
printf("Result :%s",res);
/* fix: release the device allocations (they leaked in the original) */
cudaFree(d_resbuffer);
cudaFree(d_buffer);
cudaFree(d_length);
return 0;
} |
5,004 | #include <cuda_runtime.h>
#include <iostream>
// Demo class whose member function is compiled for the device (__device__)
// and is therefore callable from kernel code.
class CUDAdem
{
public:
// Print the supplied index from GPU code via device-side printf.
__device__ void add(int igdx)
{
printf("hello GPU = %d\n",igdx);
}
};
// Launch helper: each thread constructs a CUDAdem instance and reports its
// own thread index through the device method.
__global__ void add()
{
CUDAdem demo;
demo.add(threadIdx.x);
}
// Host driver: launches 4 GPU threads that each print a greeting, then
// resets the device (which also flushes the device-side printf buffer)
// and prints a host-side greeting.
int main()
{
add<<<1,4>>>();
// fix: surface launch-configuration errors instead of silently ignoring them
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("kernel launch failed: %s\n", cudaGetErrorString(err));
cudaDeviceReset();
printf("hello world!\n");
return 0;
}
|
5,005 | #include <iostream>
#include <string>
#include <stdio.h>
#include <cuda.h>
#include <fstream>
using namespace std;
// Square matrix multiply d_P = d_M * d_N (row-major, width x width).
// One thread per output element; expects a 2-D grid of 2-D blocks.
__global__ void MatrixMulKernel(float *d_M, float *d_N, float *d_P,int width){
const int row = blockIdx.y*blockDim.y + threadIdx.y;
const int col = blockIdx.x*blockDim.x + threadIdx.x;
if (row >= width || col >= width)
return; // guard the grid tail
float acc = 0;
for (int k = 0; k < width; ++k)
acc += d_M[row*width + k] * d_N[k*width + col];
d_P[row*width + col] = acc;
}
// CPU reference implementation of h_P = h_M * h_N for square, row-major
// width x width matrices. Returns 0 (kept for interface parity).
// Fix: the accumulator was declared `int`, silently truncating the float
// products; it is now float so CPU and GPU results can agree.
int matrixMulHost(float *h_M, float *h_N, float *h_P, int width){
for(int row = 0; row < width ; ++row){
for(int col = 0; col < width ; ++col){
float Pvalue = 0.0f;
for(int k = 0; k < width ; ++k){
Pvalue += h_M[row*width+k] * h_N[k*width+col];
}
h_P[row*width+col] = Pvalue;
}
}
return 0;
}
// Fill a width x width matrix with the constant 2 (benchmark fixture data).
// Returns 0 for interface parity with the other helpers.
int initValues(float *data, int width){
const int count = width * width;
for(int idx = 0; idx < count; ++idx)
data[idx] = 2;
return 0;
}
// Benchmarks CPU vs GPU square matrix multiplication for the width given in
// argv[1]; runs 20 trials and appends timings to a per-width output file.
int main(int argc, char const *argv[])
{
float *h_M, *h_N, *h_P,*h_P_d;
float *d_M, *d_N,*d_P;
// fix: guard against a missing argument before dereferencing argv[1]
if (argc < 2){
fprintf(stderr, "usage: %s <matrix width>\n", argv[0]);
return 1;
}
std::string num = argv[1];
int width = std::stoi(num);
int size = width * width * sizeof(float);
clock_t start, end, startGPU, endGPU;
double cpu_time_used, gpu_time_used, aceleration;
for (int times = 0; times < 20; times++){
h_M = (float*)malloc(size);
h_N = (float*)malloc(size);
h_P = (float*)malloc(size);
h_P_d = (float*)malloc(size);
initValues(h_M, width);
initValues(h_N, width);
/////////Algoritmo Secuencial////////////////////////////////////////////
start = clock();
matrixMulHost(h_M, h_N, h_P, width);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo algoritmo secuencial: %.10f\n", cpu_time_used);
/////////Algoritmo Secuencial/////////////////////////////////////////////
cudaMalloc((void**)&d_M,size);
cudaMalloc((void**)&d_N,size);
cudaMalloc((void**)&d_P,size);
//////////////////////Algoritmo Paralelo///////////////////////////
// GPU timing includes the host<->device transfers
startGPU = clock();
cudaMemcpy(d_M, h_M, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_N, h_N, size, cudaMemcpyHostToDevice);
int blockSize = 32;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(width/float(blockSize)),ceil(width/float(blockSize)),1);
MatrixMulKernel<<<dimGrid,dimBlock>>>(d_M,d_N,d_P,width);
cudaDeviceSynchronize();
cudaMemcpy(h_P_d,d_P,size,cudaMemcpyDeviceToHost);
endGPU = clock();
gpu_time_used = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
aceleration = cpu_time_used/gpu_time_used;
printf("Tiempo algoritmo paralelo: %.10f\n", gpu_time_used);
printf("La aceleración obtenida es de %.10fX\n",aceleration);
// NOTE(review): the width is appended after ".txt" — possibly intended as
// "TimesMult" + num + ".txt"; left as-is to preserve existing output paths.
std::string name = "TimesMult.txt"+num;
ofstream outfile(name,ios::binary | ios::app);
outfile << gpu_time_used<<" "<< cpu_time_used <<" "<< aceleration << "\n";
outfile.close();
free(h_M);
free(h_N);
free(h_P);
free(h_P_d); // fix: h_P_d leaked on every one of the 20 iterations
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_P);
}
return 0;
}
|
5,006 | #include <stdio.h>
__device__ void decipher(unsigned int, unsigned int*, unsigned int const*);
// Decrypt a sequence of 64-bit blocks in parallel: thread tx deciphers the
// block at word offset (tx+1)*2 and XORs the plaintext with the two
// preceding ciphertext words — presumably CBC-style chaining; confirm
// against the encryption side. Thread 0 additionally handles block 0,
// XORing with the constants 1 and 2 (IV stand-ins — TODO confirm).
// NOTE(review): every thread redundantly deciphers block 0 before the
// tx == 0 check, and there is no bounds check on i — the launch size must
// match the encrypted data length exactly.
__global__ void decrypt_bytes(unsigned int *decrypted, unsigned int *encrypted, unsigned char *key)
{
//Get thread
const int tx = threadIdx.x + (blockIdx.x * blockDim.x);
unsigned int deciphered[2];
deciphered[0] = encrypted[0];
deciphered[1] = encrypted[1];
decipher(32, deciphered, (unsigned int*)key);
if (tx == 0)
{
decrypted[0] = deciphered[0] ^ (unsigned int)1;
decrypted[1] = deciphered[1] ^ (unsigned int)2;
}
//divide work on threads: each thread owns the 64-bit block at word i
int i = (tx + 1) * 2;
deciphered[0] = encrypted[i];
deciphered[1] = encrypted[i+1];
decipher(32, deciphered, (unsigned int*)key);
// XOR with the previous ciphertext block to undo the chaining
decrypted[i] = deciphered[0] ^ encrypted[i-2];
decrypted[i+1] = deciphered[1] ^ encrypted[i-1];
}
// Unshuffle pass: each decrypted 32-bit word carries a payload byte in its
// low 8 bits and the byte's destination slot in the remaining bits
// (taken mod 10000). One thread per word, at most 10000 words.
__global__ void reconstruct_secret(unsigned char *result, unsigned int *decrypted)
{
const int tid = threadIdx.x + (blockIdx.x * blockDim.x);
if (tid >= 10000)
return;
const unsigned int word = decrypted[tid];
result[(word >> 8) % 10000] = word & 0xff;
}
// XTEA block decryption (delta 0x9E3779B9, 64-bit block in v[0..1],
// 128-bit key in key[0..3]); inverse of the XTEA encipher rounds.
__device__ void decipher(unsigned int num_rounds, unsigned int v[2], unsigned int const key[4])
{
/*
num_rounds -- the number of iterations in the algorithm, 32 is recommended
input_data -- the input data to use, 32 bits of the first 2 elements are used
key -- 128-bit key to use
*/
unsigned int i;
// sum starts at delta*num_rounds and is unwound back toward zero,
// reversing the order the encryption rounds applied it
unsigned int v0=v[0], v1=v[1], delta=0x9E3779B9, sum=delta*num_rounds;
for (i=0; i < num_rounds; i++) {
v1 -= (((v0 << 4) ^ (v0 >> 5)) + v0) ^ (sum + key[(sum>>11) & 3]);
sum -= delta;
v0 -= (((v1 << 4) ^ (v1 >> 5)) + v1) ^ (sum + key[sum & 3]);
}
v[0]=v0; v[1]=v1;
}
|
5,007 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#define BLOCK_SIZE 256
#define STR_SIZE 256
#define DEVICE 0
#define HALO 1 // halo width along one direction when advancing to the next iteration
#define BENCH_PRINT
void run(int argc, char** argv);
int rows, cols;
int* data;
int** wall;
int* result;
#define M_SEED 9
int pyramid_height;
//#define BENCH_PRINT
// Parse command-line arguments and build the global inputs:
// a rows x cols grid of random ints in [0, 9] (`data`), a per-row pointer
// view of it (`wall`), and the `result` row buffer. Exits with usage text
// on a bad argument count. Uses a fixed seed (M_SEED) so runs repeat.
void
init(int argc, char** argv)
{
if(argc==4){
cols = atoi(argv[1]);
rows = atoi(argv[2]);
pyramid_height=atoi(argv[3]);
}else{
printf("Usage: dynproc row_len col_len pyramid_height\n");
exit(0);
}
data = new int[rows*cols];
wall = new int*[rows];
// wall[n] aliases row n of the flat data array
for(int n=0; n<rows; n++)
wall[n]=data+cols*n;
result = new int[cols];
int seed = M_SEED;
srand(seed);
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
wall[i][j] = rand() % 10;
}
}
// optional dump of the generated grid for benchmarking/validation
#ifdef BENCH_PRINT
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
printf("%d ",wall[i][j]) ;
}
printf("\n") ;
}
#endif
}
// Report an error message on stderr.
// NOTE(review): despite the name, this does NOT terminate the program —
// callers continue after it returns; confirm that is intended.
void
fatal(char *s)
{
fprintf(stderr, "error: %s\n", s);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
// Pyramid dynamic-programming kernel (pathfinder): advances `iteration`
// rows of the DP in one launch. Each block computes a small_block_cols-wide
// strip of the output row; `border` halo columns on each side are computed
// redundantly so neighboring blocks need not communicate. prev[] holds the
// current row and result[] the row being built, both in shared memory.
__global__ void dynproc_kernel(
int iteration,
int *gpuWall,
int *gpuSrc,
int *gpuResults,
int cols,
int rows,
int startStep,
int border)
{
__shared__ int prev[BLOCK_SIZE];
__shared__ int result[BLOCK_SIZE];
int bx = blockIdx.x;
int tx=threadIdx.x;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_cols = BLOCK_SIZE-iteration*HALO*2;
// calculate the boundary for the block according to
// the boundary of its small block
int blkX = small_block_cols*bx-border;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int xidx = blkX+tx;
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;
// west/east neighbor indices, clamped at the valid edges
int W = tx-1;
int E = tx+1;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool isValid = IN_RANGE(tx, validXmin, validXmax);
if(IN_RANGE(xidx, 0, cols-1)){
prev[tx] = gpuSrc[xidx];
}
__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
// NOTE(review): `computed` is only assigned inside the loop; if iteration
// were ever <= 0 the read after the loop would be uninitialized — confirm
// callers always pass iteration >= 1.
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
// the active window shrinks by one on each side every iteration
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
isValid){
computed = true;
int left = prev[W];
int up = prev[tx];
int right = prev[E];
int shortest = MIN(left, up);
shortest = MIN(shortest, right);
int index = cols*(startStep+i)+xidx;
result[tx] = shortest + gpuWall[index];
}
// barrier is outside the divergent branch so all threads reach it
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
prev[tx]= result[tx];
__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
gpuResults[xidx]=result[tx];
}
}
/*
compute N time steps
*/
// Runs the pyramid DP over all rows, ping-ponging between the two result
// buffers; returns the index (0 or 1) of the buffer holding the final row.
int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \
int pyramid_height, int blockCols, int borderCols)
{
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(blockCols);
int src = 1, dst = 0;
for (int t = 0; t < rows-1; t+=pyramid_height) {
// swap the roles of the two buffers for this launch
int swap = src;
src = dst;
dst = swap;
// clamp the last pyramid so it does not run past the final row
int height = MIN(pyramid_height, rows-t-1);
dynproc_kernel<<<dimGrid, dimBlock>>>(
height,
gpuWall, gpuResult[src], gpuResult[dst],
cols, rows, t, borderCols);
}
return dst;
}
// Entry point: select the configured device when several are present,
// then hand off to run().
int main(int argc, char** argv)
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount > 1)
cudaSetDevice(DEVICE);
run(argc, argv);
return EXIT_SUCCESS;
}
// Builds the input (init), derives the pyramid launch parameters, copies the
// grid to the device, runs calc_path, copies the final row back, and frees
// everything. Prints input/output rows when BENCH_PRINT is defined.
void run(int argc, char** argv)
{
init(argc, argv);
/* --------------- pyramid parameters --------------- */
int borderCols = (pyramid_height)*HALO;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2;
/* fix: a pyramid_height of BLOCK_SIZE/(2*HALO) or more makes the small
block empty, so the division below would divide by zero (or go negative) */
if (smallBlockCol <= 0)
{
fprintf(stderr, "error: pyramid_height %d too large for block size %d\n",
pyramid_height, BLOCK_SIZE);
exit(1);
}
int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1);
printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\
pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol);
int *gpuWall, *gpuResult[2];
int size = rows*cols;
/* two single-row buffers for ping-pong, plus the remaining wall rows */
cudaMalloc((void**)&gpuResult[0], sizeof(int)*cols);
cudaMalloc((void**)&gpuResult[1], sizeof(int)*cols);
cudaMemcpy(gpuResult[0], data, sizeof(int)*cols, cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpuWall, sizeof(int)*(size-cols));
cudaMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), cudaMemcpyHostToDevice);
int final_ret = calc_path(gpuWall, gpuResult, rows, cols, \
pyramid_height, blockCols, borderCols);
/* blocking copy also synchronizes with the last kernel */
cudaMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, cudaMemcpyDeviceToHost);
#ifdef BENCH_PRINT
for (int i = 0; i < cols; i++)
printf("%d ",data[i]) ;
printf("\n") ;
for (int i = 0; i < cols; i++)
printf("%d ",result[i]) ;
printf("\n") ;
#endif
cudaFree(gpuWall);
cudaFree(gpuResult[0]);
cudaFree(gpuResult[1]);
delete [] data;
delete [] wall;
delete [] result;
}
|
5,008 | #include "includes.h"
// Element-wise C = A + B over `size` floats. The flat index supports a 2-D
// grid of 1-D blocks, linearized as (blockIdx.y * gridDim.x + blockIdx.x).
__global__ void Sum(float * A, float *B, float *C, int size) {
const int gid = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
if (gid < size)
C[gid] = A[gid] + B[gid];
} |
5,009 |
// Edge thinning (non-maximum suppression): zero any gradient-magnitude
// pixel that is not a local maximum along its quantized gradient direction
// (dir in 0..3). Operates only on the interior; the 1-pixel border is
// skipped by the +1 offsets and the bounds check below.
// NOTE(review): mag is read (neighbors) and written in place by concurrent
// threads, so a pixel's neighbors may already have been zeroed by another
// thread — results can depend on scheduling. Confirm this approximation is
// acceptable or double-buffer mag.
__global__ void thinEdgesGPU(int *mag, int *dir, int width, int height){
int y = blockIdx.y*blockDim.y + threadIdx.y + 1;
int x = blockIdx.x*blockDim.x + threadIdx.x + 1;
// Check whether thread is within image boundary
if (x > width-2 || y > height-2) return;
// Get gradient direction for current thread
int tDir = dir[y*width + x];
// Transform offsets so we can find adjacent pixels in direction of gradient mag
// 0 <= dir <= 3 3 2 1
// 0 P 0
// 1 2 3
int xOff = 1;
int yOff = 0;
if (tDir > 0){
xOff = tDir - 2;
yOff = 1;
}
// compare against the two neighbors straddling this pixel along tDir
int adjPixel1 = mag[ (y+yOff)*width + x + xOff ];
int curPixel = mag[ y*width + x ];
int adjPixel2 = mag[ (y-yOff)*width + x - xOff ];
if ( adjPixel1 > curPixel || adjPixel2 > curPixel){
mag[y*width + x] = 0;
}
}
|
5,010 | // System includes
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort with file/line context when a CUDA API call returned an error.
// Compiled out unless CUDA_ERROR_CHECK is defined; use via CudaSafeCall().
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( err != cudaSuccess )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
}
// Abort with file/line context if the most recent CUDA call (typically a
// kernel launch) failed. Also synchronizes the device to surface async
// execution errors — costly, so comment that part away in hot paths.
// Compiled out unless CUDA_ERROR_CHECK is defined; use via CudaCheckError().
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
// launch-time errors (bad configuration, etc.)
cudaError err = cudaGetLastError();
if ( err != cudaSuccess )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if ( err != cudaSuccess )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
}
// C = AB
// Batched tiled GEMM: matC[b] = matA[b] * matB[b] for each batch index b
// (mapped to the grid z-dimension). Each block computes a
// BLOCK_SIZE_Y x BLOCK_SIZE_X output tile after staging a full row-band of
// A and column-band of B in dynamic shared memory (the whole common
// dimension is staged at once, not a K-loop of tiles).
// Dynamic shared memory required:
//   (BLOCK_SIZE_Y*dim2A + BLOCK_SIZE_X*dim1B) * sizeof(float).
template <int BLOCK_SIZE_X, int BLOCK_SIZE_Y>
__global__ void kernel_batch_matmul(
const float * __restrict__ matA,
const float * __restrict__ matB,
float * __restrict__ matC,
int dim0,
int dim1A, int dim2A,
int dim1B, int dim2B,
int dim1C, int dim2C){
extern __shared__ float smem[];
const unsigned int len_subA = BLOCK_SIZE_Y * dim2A, len_subB = BLOCK_SIZE_X * dim1B; //len of sub matrices of A and B.
const unsigned long
len_A = dim0*dim1A*dim2A,
len_B = dim0*dim1B*dim2B,
len_C = dim0*dim1C*dim2C;
const unsigned long
len_A_signleBatch = dim1A*dim2A,
len_B_signleBatch = dim1B*dim2B,
len_C_signleBatch = dim1C*dim2C;
const unsigned int BLOCKSIZE_P2 = BLOCK_SIZE_X*BLOCK_SIZE_Y;
// shared-memory layout:
//smemA = smem + 0;
//smemB = smem + len_subA;
// Block index
unsigned int bx = blockIdx.x; // mapped to the sub-matrices of output
unsigned int by = blockIdx.y; // mapped to the sub-matrices of output
unsigned int bz = blockIdx.z; // batch index
// Thread index
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
// global output coordinates of this thread's element
unsigned int c_pos_x, c_pos_y;
c_pos_x = bx*BLOCK_SIZE_X + tx;
c_pos_y = by*BLOCK_SIZE_Y + ty;
unsigned long gidx1,gidx2;
unsigned int _d1,_d2;
//printf("## bx:%u, by:%u, tx:%u, ty:%u, c_pos_x:%u, c_pos_y:%u\n",bx,by,tx,ty,c_pos_x,c_pos_y);
unsigned long offsetA = (by * BLOCK_SIZE_Y) * dim2A;
unsigned long offsetB = (bx * BLOCK_SIZE_X); //first row (d1=0)
// Load sub matrices from global memory into shared memory
unsigned long idxA, idxB;
idxA = ty* BLOCK_SIZE_X + tx;
idxB = ty* BLOCK_SIZE_X + tx;
//printf("*** bx:%u, by:%u, tx:%u, ty:%u ,idxA:%ld, idxB:%ld\n",bx,by,tx,ty,idxA,idxB);
// stage A's row-band; out-of-range entries are zero-filled
while(idxA < len_subA){//Block-stride loop
gidx1 = offsetA + idxA;
if(idxA < len_subA && gidx1 < len_A) {
smem[idxA] = matA[bz * len_A_signleBatch + gidx1];
/*printf("bx:%u, by:%u, tx:%u, ty:%u ,idxA:%ld, gidx1:%ld\n",bx,by,tx,ty,idxA,gidx1);*/
}else{
smem[idxA] = 0;
}
idxA += BLOCKSIZE_P2;
}
///TODO: It might be better to store transposed subMatB in shared memory to avoid shared memory read conflict.
/// But then we might get shared memory write conflict. (?)
// stage B's column-band; out-of-range entries are zero-filled
while(idxB < len_subB ){//Block-stride loop
//gidx2 = offsetB + (bx*BLOCK_SIZE)*dim2B + (idxB % dim2B);
_d2 = idxB%BLOCK_SIZE_X;
_d1 = (idxB/BLOCK_SIZE_X);
gidx2 = offsetB + _d1*dim2B + _d2;
if(idxB < len_subB && _d1<dim1B && _d2<dim2B){
smem[len_subA+idxB] = matB[bz * len_B_signleBatch +gidx2];
/*printf("* bx:%u, by:%u ,tx:%u, ty:%u ,idxB:%ld, _d1:%d, _d2:%d, gidx2:%ld\n",bx,by,tx,ty,idxB,_d1,_d2,gidx2);*/
}else{
smem[len_subA+idxB] = 0;
}
idxB += BLOCKSIZE_P2;
}
// all staging must finish before any thread reads smem
__syncthreads();
// Multiply and add each result to produce output element of current thread in the thread block.
if(c_pos_x<dim2C && c_pos_y<dim1C){
float output_element = 0.0f;
//dim2A=dim1B is common equal dimension of 2 matrices --- block-stride loop
for (int k = 0; k < dim2A; k++) {
output_element += smem[ty*dim2A+k] * smem[len_subA+ k*BLOCK_SIZE_X + tx];
/*printf("###bz:%d, c_pos_x:%d, c_pos_y:%d, smem[%d]=%f, smem[%d]=%f\n",
bz,c_pos_x,c_pos_y,
ty*dim2A+k,smem[ty*dim2A+k],
len_subA+ k*BLOCK_SIZE+tx,smem[len_subA+ k*BLOCK_SIZE+tx]);*/
}
///TODO: Check matC index to not to exceed the len of matC!
matC[bz * len_C_signleBatch + c_pos_y*dim2C + c_pos_x] = output_element;
}
}
// Host wrapper for the batched GEMM kernel: C[b] = A[b] * B[b] for every
// batch b, with row-major (batch, rows, cols) layouts. Validates shapes,
// derives the launch configuration, and dispatches the 4x4-tile kernel.
void batch_matmul(
const float * matA, //row-major device ptr (batch, hA, wA) == (dim0A, dim1A , *dim2A* )
const float * matB, //row-major device ptr (batch, hB, wB) == (dim0B, *dim1B* , dim2B )
float * matC, //row-major device ptr (batch, hB, wB) == (dim0B, dim1A , dim2B )
int dim0A, int dim1A, int dim2A,
int dim0B, int dim1B, int dim2B){
if(dim2A != dim1B){printf("ERR@batched_matmul: BAD SHAPE.\n"); return;}
if(dim0B != dim0A){printf("ERR@batched_matmul: BAD BATCH SIZES.\n"); return;}
const int BLOCK_DIM_X = 4;
const int BLOCK_DIM_Y = 4;
dim3 blocksize(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
// one output tile per block (ceil-div), one batch element per grid z-slice
dim3 gridsize((dim2B + BLOCK_DIM_X-1)/BLOCK_DIM_X,
(dim1A + BLOCK_DIM_Y-1)/BLOCK_DIM_Y,
dim0A);
// shared memory holds one row-band of A plus one column-band of B
unsigned long sharedmemsize = (BLOCK_DIM_Y*dim2A + BLOCK_DIM_X*dim1B)*sizeof(float);
if(BLOCK_DIM_X==4 && BLOCK_DIM_Y==4){
kernel_batch_matmul<4,4> <<<gridsize, blocksize, sharedmemsize>>>(
matA, matB, matC,
dim0A,
dim1A, dim2A, // A: hA x wA
dim1B, dim2B, // B: hB x wB
dim1A, dim2B); // C: hA x wB
CudaCheckError();
}else{
printf("ERR@batched_matmul: UNDEFINED BLOCK_DIM.\n"); return;
}
} |
5,011 | #include "includes.h"
// Sum reduction with 8-way unrolled loads: each block reduces a segment of
// 8 * blockDim.x ints from g_idata into g_odata[blockIdx.x]. g_idata is
// modified in place. Assumptions baked into the code — TODO confirm at
// launch sites: blockDim.x is a power of two and >= 64; segments whose
// 8-way pre-sum would run past n skip that step entirely (the `if` below).
__global__ void reduceUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8: fold 8 strided elements into one before the tree reduction
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// unrolling warp: the final 64 -> 1 steps run inside one warp; `volatile`
// forces every store to memory so lanes observe each other's updates.
// NOTE(review): this warp-synchronous idiom relies on implicit lockstep;
// on Volta+ independent thread scheduling it needs __syncwarp() between
// steps — confirm the target architecture.
if (tid < 32)
{
volatile int *vmem = idata;
vmem[tid] += vmem[tid + 32];
vmem[tid] += vmem[tid + 16];
vmem[tid] += vmem[tid + 8];
vmem[tid] += vmem[tid + 4];
vmem[tid] += vmem[tid + 2];
vmem[tid] += vmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
} |
5,012 | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/* Template project which demonstrates the basics on how to setup a project
* example application.
* Host code.
*/
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
// Sorts 32M random ints on the GPU with Thrust and copies them back.
int main(void)
{
const size_t count = 32 << 20;
// generate 32M random numbers serially on the host
thrust::host_vector<int> h_vec(count);
std::generate(h_vec.begin(), h_vec.end(), rand);
// move to the device, sort there, then bring the sorted data back
thrust::device_vector<int> d_vec = h_vec;
thrust::sort(d_vec.begin(), d_vec.end());
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
return 0;
} |
5,013 | //#include<iostream>
//#include<cstring>
//#include<algorithm>
//#include<string>
//#include<cassert>
//#include<iomanip>
//using namespace std;
//
//#define MAX 100
//#define for(i,a,b) for(i=a;i<b; i++)
//
//string gram[MAX][MAX]; //to store entered grammar
//string dpr[MAX];
//int p, np; //np-> number of productions
//
//inline string concat(string a, string b) //concatenates unique non-terminals
//{
// int i;
// string r = a;
// for (i, 0, b.length())
// if (r.find(b[i]) > r.length())
// r += b[i];
// return (r);
//}
//
//inline void break_gram(string a) //separates right hand side of entered grammar
//{
// int i;
// p = 0;
// while (a.length())
// {
// i = a.find("|");
// if (i>a.length())
// {
// dpr[p++] = a;
// a = "";
// }
// else
// {
// dpr[p++] = a.substr(0, i);
// a = a.substr(i + 1, a.length());
// }
// }
//}
//
//inline int lchomsky(string a) //checks if LHS of entered grammar is in CNF
//{
// if (a.length() == 1 && a[0] >= 'A' && a[0] <= 'Z')
// return 1;
// return 0;
//}
//
//inline int rchomsky(string a) //checks if RHS of grammar is in CNF
//{
// if (a.length() == 1 && a[0] >= 'a' && a[0] <= 'z')
// return 1;
// if (a.length() == 2 && a[0] >= 'A' && a[0] <= 'Z' && a[1] >= 'A' && a[1] <= 'Z')
// return 1;
// return 0;
//}
//
//inline string search_prod(string p) //returns a concatenated string of variables which can produce string p
//{
// int j, k;
// string r = "";
// for (j, 0, np)
// {
// k = 1;
// while (gram[j][k] != "")
// {
// if (gram[j][k] == p)
// {
// r = concat(r, gram[j][0]);
// }
// k++;
// }
// }
// return r;
//}
//
//inline string gen_comb(string a, string b) //creates every combination of variables from a and b . For eg: BA * AB = {BA, BB, AA, BB}
//{
// int i, j;
// string pri = a, re = "";
// for (i, 0, a.length())
// for (j, 0, b.length())
// {
// pri = "";
// pri = pri + a[i] + b[j];
// re = re + search_prod(pri); //searches if the generated productions can be created or not
// }
// return re;
//}
//
//int main()
//{
// int i, pt, j, l, k;
// string a, str, r, pr, start;
// cout << "\nEnter the start Variable ";
// cin >> start;
// cout << "\nNumber of productions ";
// cin >> np;
// for (i, 0, np)
// {
// cin >> a;
// pt = a.find("->");
// gram[i][0] = a.substr(0, pt);
// if (lchomsky(gram[i][0]) == 0)
// {
// cout << "\nGrammar not in Chomsky Form";
// abort();
// }
// a = a.substr(pt + 2, a.length());
// break_gram(a);
// for (j, 0, p)
// {
// gram[i][j + 1] = dpr[j];
// if (rchomsky(dpr[j]) == 0)
// {
// cout << "\nGrammar not in Chomsky Form";
// abort();
// }
// }
// }
// string matrix[MAX][MAX], st;
// cout << "\nEnter string to be checked : ";
// cin >> str;
// for (i, 0, str.length()) //Assigns values to principal diagonal of matrix
// {
// r = "";
// st = "";
// st += str[i];
// for (j, 0, np)
// {
// k = 1;
// while (gram[j][k] != "")
// {
// if (gram[j][k] == st)
// {
// r = concat(r, gram[j][0]);
// }
// k++;
// }
// }
// matrix[i][i] = r;
// }
// int ii, kk;
// for (k, 1, str.length()) //Assigns values to upper half of the matrix
// {
// for (j, k, str.length())
// {
// r = "";
// for (l, j - k, j)
// {
// pr = gen_comb(matrix[j - k][l], matrix[l + 1][j]);
// r = concat(r, pr);
// }
// matrix[j - k][j] = r;
// }
// }
//
// for (i, 0, str.length()) //Prints the matrix
// {
// k = 0;
// l = str.length() - i - 1;
// for (j, l, str.length())
// {
// cout << setw(5) << matrix[k++][j] << " ";
// }
// cout << endl;
// }
//
// int f = 0;
// for (i, 0, start.length())
// if (matrix[0][str.length() - 1].find(start[i]) <= matrix[0][str.length() - 1].length()) //Checks if last element of first row contains a Start variable
// {
// cout << "String can be generated\n";
// getchar();
//
// }
// cout << "String cannot be generated\n";
// getchar();
// return 0;
//} |
5,014 | #include "model.cuh"
// Train on one mini-batch: for each (X, Y) pair, run a forward pass through
// every layer (feedforward_update), accumulate the loss, then propagate the
// loss derivative backward through the layers; after the whole batch, apply
// one gradient update per layer with the given learning rate and momentum.
// Returns the mean loss over the batch.
// NOTE(review): assumes batch_size > 0 and that both iterators have at
// least batch_size elements remaining — confirm at call sites.
float Model::train_batch(std::vector<Matrix>::iterator X_i, std::vector<Matrix>::iterator Y_i, const unsigned int batch_size, const float lr, const float momentum)
{
float loss = 0.0f;
for (unsigned int i = 0; i < batch_size; i++)
{
// create the inputs
Matrix tmp = *X_i;
// feedforward through the neural network
for (auto layer = layers.begin(); layer != layers.end(); ++layer)
{
(*layer)->feedforward_update(tmp);
tmp = (*layer)->neurons;
}
// calculate the loss
loss += loss_function.func(*Y_i, tmp);
// calculate the derivative for the loss function
// reuse temporary variable, as it is no longer needed
tmp = loss_function.deriv(*Y_i, tmp);
// now backpropogate through the layers
for (auto layer = layers.rbegin(); layer != layers.rend(); ++layer)
tmp = (*layer)->backpropogate(tmp);
++X_i; ++Y_i; // advance the iterators
}
// apply the accumulated gradients once per batch
for (auto layer = layers.rbegin(); layer != layers.rend(); ++layer)
(*layer)->gradient_update(lr, momentum);
return loss / (float) batch_size;
}
// Construct a model that scores predictions with the given loss function;
// layers are added afterwards via operator+=.
Model::Model(Loss_Function loss_function)
{
this->loss_function = loss_function;
}
// Append a layer to the network (the model does not take ownership
// semantics visible here — the raw pointer is stored as-is).
// NOTE(review): the commented-out shape check was never enabled, so
// incompatible adjacent layer shapes are not caught here — confirm they
// are validated elsewhere.
void Model::operator+=(Layer* layer)
{
//if (layers.empty() or layers.back()->get_out_shape() == layer.get_in_shape())
layers.push_back(layer);
//else
//yeet "This layer's input shape is not compatible";
}
// Inference pass: chain `feedforward` through every layer in order and
// return the final activation.
// NOTE(review): dereferences layers.begin() unconditionally — undefined
// behavior if no layer has been added; confirm callers guarantee this.
Matrix Model::feed (const Matrix& input) const
{
auto it = layers.begin();
Matrix activation = (*it)->feedforward(input);
for (++it; it != layers.end(); ++it)
activation = (*it)->feedforward(activation);
return activation;
} |
5,015 | /****************************************************************************80
Array element addition using CUDA on GPUs
Note: changes from the C++ / CPU-only file marked intentionally w/ "CUDA"
supposed to be *annoyingly* commented for (self-)educational purposes
"host" is assumed to be the CPU running the C program.
"device" is assumed to be the GPU running the kernel.
MIT License
Copyright (c) 2017 Marcelo Novaes
******************************************************************************/
#include <chrono>
#include <iostream>
#include <math.h>
#include <cuda_runtime.h>
/****************************************************************************80
PRINT_TIME calculates how many seconds since the start of the program
******************************************************************************/
using namespace std::chrono;
const system_clock::time_point start = system_clock::now();
// Print (and return) the seconds elapsed since program start, formatted as
// "@ %06.3fs > " with no trailing newline so the caller's message follows.
float print_time()
{
// elapsed time since the global program-start timestamp
const system_clock::time_point now = system_clock::now();
const duration<double> elapsed_seconds = now - start;
const float elapsed = elapsed_seconds.count();
// fix: dropped the unused std::time_t conversion of the end point
// print
printf("@ %06.3fs > ", elapsed);
return elapsed;
}
/****************************************************************************80
ADD_ARRAYS adds elements of two arrays on GPU
******************************************************************************/
// CUDA: kernel defined by __global__ declaration specifier, accessed by host & device
// see: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#kernels
// z[i] = x[i] + y[i] for i in [0, n): one thread per element, with the
// grid tail guarded so extra threads do nothing.
__global__ void add_arrays(int n, float *x, float *y, float *z)
{
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= n)
return;
z[idx] = x[idx] + y[idx];
}
/****************************************************************************80
MAIN is the main program for array element addition on GPU example
******************************************************************************/
// Array-addition demo: allocate 100M-element arrays on host and device,
// add them on the GPU, verify on the host, and print timings throughout.
int main(void)
{
print_time();
// set up array dimensions and byte size
printf("Setting up array dimension and size...\n");
int N = pow(10, 8); // 100M elements per array
size_t sizeN = N * sizeof(float);
print_time();
// allocate host arrays with malloc (released with free() below)
printf("Allocating memory for arrays...\n");
float *hx = (float *)malloc(sizeN);
float *hy = (float *)malloc(sizeN);
float *hz = (float *)malloc(sizeN);
// allocate device arrays (linear device memory, freed with cudaFree)
float *dx = NULL;
cudaMalloc(&dx, sizeN);
float *dy = NULL;
cudaMalloc(&dy, sizeN);
float *dz = NULL;
cudaMalloc(&dz, sizeN);
print_time();
// initialize x, y, z arrays on the host
printf("Initializing arrays...\n");
for (int i = 0; i < N; i++)
{
hx[i] = 2.0f;
hy[i] = 3.0f;
hz[i] = 0.0f;
}
print_time();
// copy the input arrays host -> device
printf("Copying arrays from host to device...\n");
cudaMemcpy(dx, hx, sizeN, cudaMemcpyHostToDevice);
cudaMemcpy(dy, hy, sizeN, cudaMemcpyHostToDevice);
cudaMemcpy(dz, hz, sizeN, cudaMemcpyHostToDevice);
print_time();
// launch configuration
int nThreads = 256; // threads per block
// fix: round up so a tail of N % nThreads elements is never skipped
// (the kernel's i < n guard makes the extra threads harmless)
int nBlocks = (N + nThreads - 1) / nThreads;
printf("Adding %d elements from both arrays...\n", N);
printf("Kernel using %d threads for each of %d blocks.\n", nThreads, nBlocks);
add_arrays<<<nBlocks, nThreads>>>(N, dx, dy, dz);
// wait for the GPU to finish before reading results
cudaDeviceSynchronize();
print_time();
// copy the result device -> host
printf("Copying arrays from device back to host...\n");
cudaMemcpy(hz, dz, sizeN, cudaMemcpyDeviceToHost);
print_time();
// sum errors to check for problems
printf("Aggregating deltas over all elements to check for errors...\n");
float sumOfErrors = 0.0f;
for (int i = 0; i < N; i++)
{
sumOfErrors += fabs(hx[i] + hy[i] - hz[i]);
}
printf("Sum of errors: %.6f\n", sumOfErrors);
// print first and last z array contents
printf("hz[0] = %.6f\n", hz[0]);
printf("hz[N-1] = %.6f\n", hz[N-1]);
print_time();
// free mem when done
printf("Freeing memory...\n");
// fix: these buffers came from malloc(), so they must be released with
// free() — the original used delete[], which is undefined behavior
free(hx);
free(hy);
free(hz);
cudaFree(dx);
cudaFree(dy);
cudaFree(dz);
// C'est fini
print_time();
printf("Done.\n");
return 0;
}
|
5,016 | #include<iostream>
#include<cstdlib>
#include<cmath>
#include<time.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 10000000
#define MAX_ERR 1e-6
using namespace std;
// Element-wise vector addition: out[i] = a[i] + b[i] for i in [0, n).
// One thread per element; threads past the end fall through the guard.
// Fix: bound-check against the `n` parameter instead of the file-level
// macro N, so the kernel is correct for any length, not just the
// hard-coded problem size (n was previously accepted but ignored).
__global__ void vector_add(float *out, float *a, float *b, int n) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) {
        out[i] = a[i] + b[i];
    }
}
// Driver: adds two N-element vectors on the GPU, times the equivalent CPU
// loop, and verifies the GPU result element-wise.
// Fix: cpu_out was allocated but never freed (memory leak).
int main(){
    float *a, *b, *out, *cpu_out;
    float *d_a, *d_b, *d_out;
    // Allocate host memory
    a = (float*)malloc(sizeof(float) * N);
    b = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);
    cpu_out = (float*)malloc(sizeof(float) * N);
    // Initialize host arrays
    for(int i = 0; i < N; i++){
        a[i] = i*1.0f;
        b[i] = i*1.0f;
    }
    // Allocate device memory
    cudaMalloc((void**)&d_a, sizeof(float) * N);
    cudaMalloc((void**)&d_b, sizeof(float) * N);
    cudaMalloc((void**)&d_out, sizeof(float) * N);
    // Transfer data from host to device memory
    cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
    // Executing kernel: grid rounded up so every element is covered
    int block_size = 256;
    int grid_size = ((N + block_size) / block_size);
    vector_add<<<grid_size,block_size>>>(d_out, d_a, d_b, N);
    // Blocking device-to-host copy; also synchronizes with the kernel
    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
    // Time the equivalent CPU loop for comparison
    clock_t t=clock();
    for(int i=0;i<N;i++){
        cpu_out[i] = a[i]+b[i];
    }
    t=clock()-t;
    cout<<"\nCPU Time Elapsed: "<<((double)t)<<"\n";
    // Verification
    for(int i = 0; i < N; i++){
        assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
    }
    printf("PASSED\n");
    // Deallocate device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    // Deallocate host memory (fix: cpu_out was previously leaked)
    free(a);
    free(b);
    free(out);
    free(cpu_out);
} |
5,017 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<string.h>
#include<cuda.h>
#define INPUT_SIZE 100000000
#define PRIME_RANGE 1000000
#define BLOCK_SIZE 32
typedef unsigned long long int uint64_c;
void initializeInput(char* , int );
int generate_seed_primes(char*, int*, uint64_c);
void copy_seed_primes(uint64_c *,int *,int);
void print_primelist(uint64_c *, uint64_c);
void print_inputlist(uint64_c *input_list,uint64_c range);
void initializing_inputlist(uint64_c *input_list, uint64_c start,uint64_c range);
void calculatePrime(uint64_c* , uint64_c* , uint64_c ,uint64_c);
void appending_prime(uint64_c* input_list, uint64_c* prime_list, uint64_c range, uint64_c prev_number_of_primes, uint64_c number_of_primes);
uint64_c counting_primes(uint64_c*, uint64_c, uint64_c);
//KERNAL CODE GOES HERE!!
// One thread per known prime: thread p zeroes every multiple of
// d_prime_list[p] inside d_input_list[0 .. d_range[0]).
// Fix: the launch uses a fixed 43x32 grid, so without a bounds check
// threads with p >= *d_number_of_primes read d_prime_list out of bounds
// (d_number_of_primes was passed but never used). Out-of-range threads
// now exit immediately.
__global__ void prime_generator(uint64_c* d_input_list, uint64_c* d_prime_list, uint64_c* d_range,uint64_c* d_number_of_primes)
{
    uint64_c p = blockIdx.x * blockDim.x + threadIdx.x;
    if (p >= d_number_of_primes[0])
        return;
    uint64_c prime = d_prime_list[p];
    for(uint64_c i=0;i<d_range[0];i++){
        if(d_input_list[i] % prime ==0)
        {
            d_input_list[i]=0;
        }
    }
}
//KERNAL CODE ENDS HERE!!!
// Iteratively extends a prime list: seeds with a CPU sieve over [0, 10),
// then each GPU pass strikes composites in [n, n^2) and appends the
// survivors, squaring the covered range three times.
// Fixes: the host input_list and all four device buffers were re-allocated
// every iteration without ever being freed; the seed buffers and prime
// list were also leaked at exit.
int main()
{
    cudaSetDevice(1);
    // Seed phase: small CPU sieve to obtain the first primes.
    int input_size=100;
    char *input;
    uint64_c n= 10 ;// upper bound of the seed prime list
    int *seed_primelist;
    input=(char *)malloc(input_size*sizeof(char));
    initializeInput(input, input_size);
    seed_primelist=(int *)malloc(input_size*sizeof(int));
    int num_of_seed = generate_seed_primes(input,seed_primelist,n);
    uint64_c* input_list;
    uint64_c* prime_list;
    uint64_c number_of_primes= num_of_seed;
    prime_list=(uint64_c *)malloc(number_of_primes*sizeof(uint64_c));
    copy_seed_primes(prime_list,seed_primelist,num_of_seed);
    uint64_c* d_input_list;
    uint64_c* d_prime_list;
    uint64_c* d_number_of_primes;
    uint64_c* d_range;
    for(int i=0;i<3;i++){
        uint64_c start=n;
        uint64_c end=pow(n,2);
        printf("CALCULATING PRIMES FROM 0 - %llu\n",end);
        uint64_c range=end-start;
        input_list=(uint64_c *)malloc(range*sizeof(uint64_c));
        initializing_inputlist(input_list,start,range);
        if(cudaMalloc((void **)&d_input_list,range*sizeof(uint64_c))!=cudaSuccess)
        {
            printf("Error: 1\n");
        }
        if(cudaMemcpy(d_input_list,input_list,range*sizeof(uint64_c),cudaMemcpyHostToDevice)!=cudaSuccess)
        {
            printf("copy Error: 1\n");
        }
        if(cudaMalloc((void **)&d_prime_list,number_of_primes*sizeof(uint64_c))!=cudaSuccess)
        {
            printf("Error: 2\n");
        }
        if(cudaMemcpy(d_prime_list,prime_list,number_of_primes*sizeof(uint64_c),cudaMemcpyHostToDevice)!=cudaSuccess)
        {
            printf("copy Error: 2\n");
        }
        if(cudaMalloc((void **)&d_range,sizeof(uint64_c))!=cudaSuccess)
        {
            printf("Error: 3\n");
        }
        if(cudaMemcpy(d_range,&range,sizeof(uint64_c),cudaMemcpyHostToDevice)!=cudaSuccess)
        {
            printf("copy Error: 3\n");
        }
        if(cudaMalloc((void **)&d_number_of_primes,sizeof(uint64_c))!=cudaSuccess)
        {
            printf("Error: 4\n");
        }
        if(cudaMemcpy(d_number_of_primes,&number_of_primes,sizeof(uint64_c),cudaMemcpyHostToDevice)!=cudaSuccess)
        {
            printf("copy Error: 4\n");
        }
        // Strike out composites on the GPU, then pull the survivors back.
        prime_generator<<<43,32>>>(d_input_list,d_prime_list,d_range,d_number_of_primes);
        if(cudaMemcpy(input_list,d_input_list,range*sizeof(uint64_c),cudaMemcpyDeviceToHost)!=cudaSuccess)
        {
            printf("copy Host Error: 1\n");
            exit(0);
        }
        uint64_c previous_number_of_primes= number_of_primes;
        number_of_primes = counting_primes(input_list, range, number_of_primes)+previous_number_of_primes;
        printf("THE NUMBER OF PRIMES ARE: %llu\n",number_of_primes);
        prime_list=(uint64_c *)realloc(prime_list,number_of_primes*sizeof(uint64_c));
        appending_prime(input_list, prime_list, range, previous_number_of_primes, number_of_primes);
        print_primelist(prime_list,number_of_primes);
        // Fix: release per-iteration buffers (previously leaked every pass).
        free(input_list);
        cudaFree(d_input_list);
        cudaFree(d_prime_list);
        cudaFree(d_range);
        cudaFree(d_number_of_primes);
        n=pow(n,2);
        printf("******************************************\n\n");
    }
    // Fix: release remaining host allocations before exit.
    free(input);
    free(seed_primelist);
    free(prime_list);
    return 0;
}
// Sieve of Eratosthenes over input[2..n]: cells hold 'P' (prime) until
// struck out with 'N'. Surviving indices are collected into primelist;
// returns how many primes were found. input must be pre-filled with 'P'
// (see initializeInput) and have at least n+1 cells.
int generate_seed_primes(char *input,int *primelist, uint64_c n)
{
    for (int p=2; p*p<=n; p++)
    {
        if (input[p] != 'P')
            continue;
        // p is prime: cross off all of its multiples.
        for (int multiple=p*2; multiple<=n; multiple += p)
            input[multiple] = 'N';
    }
    // Gather the indices that survived the sieve.
    int count = 0;
    for (int p=2; p<=n; p++)
    {
        if (input[p]=='P')
            primelist[count++] = p;
    }
    return count;
}
// Marks every cell of the sieve buffer as 'P' (assumed prime) so the
// sieve can later cross off composites.
void initializeInput(char *input, int input_size)
{
    int idx = 0;
    while (idx < input_size)
    {
        input[idx++] = 'P';
    }
}
// Fills input_list with the consecutive candidates start, start+1, ...,
// start+range-1 for the next sieving pass.
void initializing_inputlist(uint64_c *input_list, uint64_c start,uint64_c range){
    uint64_c offset = 0;
    while (offset < range)
    {
        input_list[offset] = start + offset;
        offset++;
    }
}
// Debug helper: prints each candidate with its index, one per line.
void print_inputlist(uint64_c *input_list,uint64_c range)
{
    uint64_c idx = 0;
    for (; idx < range; ++idx)
    {
        printf("%llu\t--->\t%llu\n", idx,input_list[idx]);
    }
}
// Prints the collected primes, one per line.
void print_primelist(uint64_c *prime_list,uint64_c number_of_primes)
{
    uint64_c idx = 0;
    while (idx < number_of_primes)
    {
        printf("%llu\n",prime_list[idx]);
        ++idx;
    }
}
// Widens the int seed primes into the uint64_c working prime list.
void copy_seed_primes(uint64_c *prime_list,int * seed_primelist,int num_of_seed)
{
    int idx = 0;
    while (idx < num_of_seed)
    {
        prime_list[idx] = seed_primelist[idx];
        ++idx;
    }
}
// CPU reference for the GPU kernel: zeroes every candidate in input_list
// that is divisible by any prime in prime_list (kept for debugging; the
// GPU path is used in main).
void calculatePrime(uint64_c* input_list, uint64_c* prime_list, uint64_c range,uint64_c number_of_primes)
{
    for (uint64_c candidate = 0; candidate < range; candidate++)
    {
        for (uint64_c p = 0; p < number_of_primes; p++)
        {
            if (input_list[candidate] % prime_list[p] == 0)
                input_list[candidate] = 0;
        }
    }
}
// Appends every non-zero (i.e. surviving) candidate from input_list to
// prime_list, starting at slot prev_number_of_primes. prime_list must
// already be large enough (number_of_primes slots).
void appending_prime(uint64_c* input_list, uint64_c* prime_list, uint64_c range, uint64_c prev_number_of_primes, uint64_c number_of_primes)
{
    uint64_c write_pos = prev_number_of_primes;
    for (uint64_c idx = 0; idx < range; idx++)
    {
        uint64_c candidate = input_list[idx];
        if (candidate > 0)
            prime_list[write_pos++] = candidate;
    }
}
// Counts the surviving (non-zero) candidates in input_list.
// The number_of_primes parameter is unused but kept for interface
// compatibility with existing callers.
// Fix: the accumulator was a plain int while the function returns
// uint64_c, truncating the count for ranges with more than INT_MAX
// survivors; it now accumulates in uint64_c.
uint64_c counting_primes(uint64_c* input_list,uint64_c range,uint64_c number_of_primes)
{
    uint64_c prime = 0;
    for(uint64_c i=0;i<range;i++)
    {
        if(input_list[i]>0)
        {
            prime++;
        }
    }
    return prime;
}
|
5,018 |
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/scan.h>
#include <thrust/device_vector.h>
#include <stdio.h>
// Fills the granularity^3 occupancy grid with an alternating test
// pattern: each voxel is marked solid (1) or empty (0) based on the
// thread's parity within its block. One thread per voxel; expects a 3D
// launch covering the whole grid.
__global__ void voxelOccupancy(int* occupancy, int granularity) {
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	int z = blockIdx.z * blockDim.z + threadIdx.z;
	// Discard threads outside the voxel grid.
	if (x >= granularity || y >= granularity || z >= granularity) {
		return;
	}
	// Row-major flat index of this voxel.
	int voxel = z * granularity * granularity + y * granularity + x;
	// Parity pattern: y-parity when x and z parities match, inverted otherwise.
	int yParity = threadIdx.y % 2;
	occupancy[voxel] = (threadIdx.x % 2 == threadIdx.z % 2) ? yParity : (1 - yParity);
}
// Stream compaction: for each occupied voxel, writes its flat index into
// the compact array at the slot assigned by the exclusive scan of the
// occupancy array (scanned). One thread per voxel; 3D launch.
__global__ void compactVoxels(int* compact, int* occupancy, int* scanned, int granularity) {
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	int z = blockIdx.z * blockDim.z + threadIdx.z;
	// Discard threads outside the voxel grid.
	if (x >= granularity || y >= granularity || z >= granularity) {
		return;
	}
	int voxel = z * granularity * granularity + y * granularity + x;
	// scanned[voxel] is this voxel's position among all occupied voxels.
	if (occupancy[voxel] == 1) {
		compact[scanned[voxel]] = voxel;
	}
}
// Emits a 36-vertex cube (12 triangles, 3 vertices each) with per-vertex
// normals for every occupied voxel listed in dCompact. Thread i handles
// compacted voxel i; vertices/normals are written at offset 36*i into
// pos/norm and finally translated to the voxel's position in roughly
// [-1, 1]^3. Expects a 3D launch covering granularity^3 threads.
// NOTE(review): the guard uses `i > maxVert`; if maxVert is a count
// rather than the last valid index this is off by one -- confirm with the
// callers of launchGenerateVoxels.
__global__ void generateVoxels(int* dCompact, float3* pos, float3* norm, int granularity, int maxVert) {
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	int z = blockIdx.z * blockDim.z + threadIdx.z;
	int i = z * granularity * granularity + y * granularity + x;
	// Detect out of bounds
	if (x >= granularity || y >= granularity || z >= granularity || i > maxVert) {
		return;
	}
	// Recover the (relx, rely, relz) grid coordinates of this compacted
	// voxel from its flat spatial index.
	int spatialIndex = dCompact[i];
	int relz = spatialIndex / (granularity * granularity);
	int rely = (spatialIndex - relz * (granularity * granularity)) / granularity;
	int relx = spatialIndex - relz * (granularity * granularity) - rely * granularity;
	// length = half-extent of one voxel cube; rel = voxel center offset.
	float length = 1.0f / granularity;
	float half = 0.5f * granularity;
	float3 rel = { (relx - (half - 0.5f)) * 2.0f * length,
	               (rely - (half - 0.5f)) * 2.0f * length,
	               (relz - (half - 0.5f)) * 2.0f * length, };
	// Hardcoded cube data (should be replaced using instancing)
	int base = 36 * i;
	pos[base] = { -length, -length, -length };
	pos[base + 1] = { -length, -length, length };
	pos[base + 2] = { -length, length, length };
	pos[base + 3] = { length, length, -length };
	pos[base + 4] = { -length, -length, -length };
	pos[base + 5] = { -length, length, -length };
	pos[base + 6] = { length, -length, length };
	pos[base + 7] = { -length, -length, -length };
	pos[base + 8] = { length, -length, -length };
	pos[base + 9] = { length, length, -length };
	pos[base + 10] = { length, -length, -length };
	pos[base + 11] = { -length, -length, -length };
	pos[base + 12] = { -length, -length, -length };
	pos[base + 13] = { -length, length, length };
	pos[base + 14] = { -length, length, -length };
	pos[base + 15] = { length, -length, length };
	pos[base + 16] = { -length, -length, length };
	pos[base + 17] = { -length, -length, -length };
	pos[base + 18] = { -length, length, length };
	pos[base + 19] = { -length, -length, length };
	pos[base + 20] = { length, -length, length };
	pos[base + 21] = { length, length, length };
	pos[base + 22] = { length, -length, -length };
	pos[base + 23] = { length, length, -length };
	pos[base + 24] = { length, -length, -length };
	pos[base + 25] = { length, length, length };
	pos[base + 26] = { length, -length, length };
	pos[base + 27] = { length, length, length };
	pos[base + 28] = { length, length, -length };
	pos[base + 29] = { -length, length, -length };
	pos[base + 30] = { length, length, length };
	pos[base + 31] = { -length, length, -length };
	pos[base + 32] = { -length, length, length };
	pos[base + 33] = { length, length, length };
	pos[base + 34] = { -length, length, length };
	pos[base + 35] = { length, -length, length };
	// Per-vertex face normals, matching the triangle list above.
	norm[base] = { -1, 0, 0 };
	norm[base + 1] = { -1, 0, 0 };
	norm[base + 2] = { -1, 0, 0 };
	norm[base + 3] = { 0, 0, -1 };
	norm[base + 4] = { 0, 0, -1 };
	norm[base + 5] = { 0, 0, -1 };
	norm[base + 6] = { 0, -1, 0 };
	norm[base + 7] = { 0, -1, 0 };
	norm[base + 8] = { 0, -1, 0 };
	norm[base + 9] = { 0, 0, -1 };
	norm[base + 10] = { 0, 0, -1 };
	norm[base + 11] = { 0, 0, -1 };
	norm[base + 12] = { -1, 0, 0 };
	norm[base + 13] = { -1, 0, 0 };
	norm[base + 14] = { -1, 0, 0 };
	norm[base + 15] = { 0, -1, 0 };
	norm[base + 16] = { 0, -1, 0 };
	norm[base + 17] = { 0, -1, 0 };
	norm[base + 18] = { 0, 0, 1 };
	norm[base + 19] = { 0, 0, 1 };
	norm[base + 20] = { 0, 0, 1 };
	norm[base + 21] = { 1, 0, 0 };
	norm[base + 22] = { 1, 0, 0 };
	norm[base + 23] = { 1, 0, 0 };
	norm[base + 24] = { 1, 0, 0 };
	norm[base + 25] = { 1, 0, 0 };
	norm[base + 26] = { 1, 0, 0 };
	norm[base + 27] = { 0, 1, 0 };
	norm[base + 28] = { 0, 1, 0 };
	norm[base + 29] = { 0, 1, 0 };
	norm[base + 30] = { 0, 1, 0 };
	norm[base + 31] = { 0, 1, 0 };
	norm[base + 32] = { 0, 1, 0 };
	norm[base + 33] = { 0, 0, 1 };
	norm[base + 34] = { 0, 0, 1 };
	norm[base + 35] = { 0, 0, 1 };
	// Translate the unit cube to this voxel's center.
	for (int j = 0; j < 36; j++) {
		pos[base + j].x += rel.x;
		pos[base + j].y += rel.y;
		pos[base + j].z += rel.z;
	}
}
// Host wrapper: launches voxelOccupancy with the caller-supplied launch
// configuration. occupancy must hold granularity^3 ints in device memory.
void launchVoxelOccupancy(dim3 grid, dim3 block, int *occupancy, int granularity) {
	voxelOccupancy<<<grid, block>>>(occupancy, granularity);
}
// Exclusive prefix-scan of the occupancy flags into scanned (both device
// pointers, numVoxels elements) via Thrust, assigning each occupied voxel
// its slot in the compacted array.
void launchOccupancyScan(int numVoxels, int* scanned, int* occupancy) {
	thrust::exclusive_scan(thrust::device_ptr<int>(occupancy),
		thrust::device_ptr<int>(occupancy + numVoxels),
		thrust::device_ptr<int>(scanned));
}
// Host wrapper: launches compactVoxels to gather occupied voxel indices
// (all pointers are device memory; dScanned comes from launchOccupancyScan).
void launchCompactVoxels(dim3 grid, dim3 block, int* dCompact, int* dOccupancy, int* dScanned, int granularity) {
	compactVoxels<<<grid, block>>>(dCompact, dOccupancy, dScanned, granularity);
}
// Host wrapper: launches generateVoxels to emit cube geometry for each
// compacted voxel into the dVoxels/dNormals vertex buffers (device memory).
void launchGenerateVoxels(dim3 grid, dim3 block, int* dCompact, float3* dVoxels, float3* dNormals, int granularity, int maxVert) {
	generateVoxels<<<grid, block>>>(dCompact, dVoxels, dNormals, granularity, maxVert);
} |
5,019 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define BLOCKSIZE 256
// Element-wise sum of two m x n int matrices stored row-major:
// matrix3 = matrix1 + matrix2. One thread per element; excess threads
// return through the guard.
__global__ void MatrixAddI(int *matrix1, int *matrix2, int *matrix3, int m, int n)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= m*n)
		return;
	matrix3[idx] = matrix1[idx] + matrix2[idx];
}
// Element-wise sum of two m x n float matrices stored row-major:
// matrix3 = matrix1 + matrix2. One thread per element; excess threads
// return through the guard.
__global__ void MatrixAddF(float *matrix1, float *matrix2, float *matrix3, int m, int n)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= m*n)
		return;
	matrix3[idx] = matrix1[idx] + matrix2[idx];
}
/* Reads an integer matrix from a text file into a freshly calloc'd
 * row-major array (*values, owned by the caller); *m/*n receive the
 * dimensions. Expected format: first line "rows cols", then rows of
 * comma-separated integers. Exits the process if the file cannot be
 * opened.
 * NOTE(review): fscanf return values are unchecked, so a short or
 * malformed file silently leaves trailing entries at 0. */
void read_imatrix(char *filename, int *m, int *n, int **values)
{
	FILE* name;
	int i, j, k;
	int t1, t2, t3;
	name = fopen(filename, "r+");
	if(name != NULL)
	{
		k = 0;
		/* Header line: matrix dimensions. */
		fscanf(name, "%d %d\n", &t1, &t2);
		*m = t1;
		*n = t2;
		*values = (int *)calloc(t1*t2, sizeof(int));
		for(i = 1; i <= t1; i++)
		{
			for(j = 1; j <= t2; j++)
			{
				/* Within a row values are comma-separated; the last one
				 * is newline-terminated. */
				if(j < t2)
				{
					fscanf(name, "%d,", &t3);
					*(*values+k) = t3;
					k++;
				}
				else
				{
					fscanf(name, "%d\n", &t3);
					*(*values+k) = t3;
					k++;
				}
			}
		}
		fclose(name);
	}
	else
	{
		printf("File read failed\n");
		exit(1);
	}
}
/* Float counterpart of read_imatrix: reads a float matrix from a text
 * file into a freshly calloc'd row-major array (*values, owned by the
 * caller); *m/*n receive the dimensions. Format: "rows cols" header,
 * then comma-separated rows. Exits the process on open failure.
 * NOTE(review): fscanf results are unchecked, as in read_imatrix. */
void read_fmatrix(char *filename, int *m, int *n, float **values)
{
	FILE* name;
	int i, j, k;
	int t1, t2;
	float t3;
	name = fopen(filename, "r+");
	if(name != NULL)
	{
		k = 0;
		/* Header line: matrix dimensions. */
		fscanf(name, "%d %d\n", &t1, &t2);
		*m = t1;
		*n = t2;
		*values = (float *)calloc(t1*t2, sizeof(float));
		for(i = 1; i <= t1; i++)
		{
			for(j = 1; j <= t2; j++)
			{
				/* Comma-separated within a row; last value ends the line. */
				if(j < t2)
				{
					fscanf(name, "%f,", &t3);
					*(*values+k) = t3;
					k++;
				}
				else
				{
					fscanf(name, "%f\n", &t3);
					*(*values+k) = t3;
					k++;
				}
			}
		}
		fclose(name);
	}
	else
	{
		printf("File read failed\n");
		exit(1);
	}
}
/* Writes an integer matrix to a text file in the same format read_imatrix
 * consumes: "rows cols" header, then rows of comma-separated integers.
 * Exits the process if the file cannot be opened for writing. */
void write_imatrix(char *filename, int *m, int *n, int **values)
{
	FILE* name;
	int i, j, k;
	int t1, t2, t3;
	name = fopen(filename, "w+");
	if(name != NULL)
	{
		k = 0;
		t1 = *m;
		t2 = *n;
		/* Header line: matrix dimensions. */
		fprintf(name, "%d %d\n", t1, t2);
		for(i = 1; i <= t1; i++)
		{
			for(j = 1; j <= t2; j++)
			{
				/* Comma between values; newline after the last in a row. */
				if(j < t2)
				{
					t3 = *(*values+k);
					fprintf(name, "%d,", t3);
					k++;
				}
				else
				{
					t3 = *(*values+k);
					fprintf(name, "%d\n", t3);
					k++;
				}
			}
		}
		fclose(name);
	}
	else
	{
		printf("File write failed\n");
		exit(1);
	}
}
/* Float counterpart of write_imatrix: writes a float matrix to a text
 * file ("rows cols" header, then comma-separated rows). Exits the
 * process if the file cannot be opened for writing. */
void write_fmatrix(char *filename, int *m, int *n, float **values)
{
	FILE* name;
	int i, j, k;
	int t1, t2;
	float t3;
	name = fopen(filename, "w+");
	if(name != NULL)
	{
		k = 0;
		t1 = *m;
		t2 = *n;
		/* Header line: matrix dimensions. */
		fprintf(name, "%d %d\n", t1, t2);
		for(i = 1; i <= t1; i++)
		{
			for(j = 1; j <= t2; j++)
			{
				/* Comma between values; newline after the last in a row. */
				if(j < t2)
				{
					t3 = *(*values+k);
					fprintf(name, "%f,", t3);
					k++;
				}
				else
				{
					t3 = *(*values+k);
					fprintf(name, "%f\n", t3);
					k++;
				}
			}
		}
		fclose(name);
	}
	else
	{
		printf("File write failed\n");
		exit(1);
	}
}
/* Verifies that two matrices have identical shapes; exits the process
 * with an error message otherwise.
 * Fix: the old test ((m1-m2)+(n1-n2) != 0) accepted mismatched shapes
 * whose dimension differences cancel out (e.g. 5x6 vs 6x5). Both
 * dimensions must now match exactly. */
void matrix_check(int m1, int n1, int m2, int n2)
{
	if (m1 != m2 || n1 != n2)
	{
		printf("Matrix dimensions must be PxQ and PxQ respectively\n");
		exit(1);
	}
}
/* CLI driver: adds two matrices read from files argv[1] and argv[2] and
 * writes the sum to argv[3]; argv[4] selects "float" or "int" element
 * type. Fix: the float branch allocated calloc(matrix_size, sizeof(float))
 * where matrix_size is already a byte count, over-allocating by 4x; it now
 * allocates m1*n1 elements, consistent with the int branch.
 * NOTE(review): strcmp is used but <string.h> is not included at the top
 * of the file -- it currently compiles only via transitive includes;
 * confirm and add the include. */
int main(int argc, char *argv[])
{
	int m1, n1, m2, n2;
	if (argc != 5)
	{
		printf("Usage: ./matrix-addition matrix1.mat matrix2.mat matrix3.mat float/int \n");
		exit(1);
	}
	if (strcmp(argv[4], "float") == 0)
	{
		float *hostmatrix1, *hostmatrix2, *hostmatrix3;
		float *devicematrix1, *devicematrix2, *devicematrix3;
		int GRIDSIZE;
		read_fmatrix(argv[1], &m1, &n1, &hostmatrix1);
		read_fmatrix(argv[2], &m2, &n2, &hostmatrix2);
		matrix_check(m1, n1, m2, n2);
		size_t matrix_size = m1*n1*sizeof(float);
		/* Fix: allocate element count, not byte count. */
		hostmatrix3 = (float *)calloc(m1*n1, sizeof(float));
		cudaMalloc(&devicematrix1, matrix_size);
		cudaMalloc(&devicematrix2, matrix_size);
		cudaMalloc(&devicematrix3, matrix_size);
		cudaMemcpy(devicematrix1, hostmatrix1, matrix_size, cudaMemcpyHostToDevice);
		cudaMemcpy(devicematrix2, hostmatrix2, matrix_size, cudaMemcpyHostToDevice);
		/* One thread per element, grid rounded up. */
		GRIDSIZE = (int)ceil((float)(m1*n1)/BLOCKSIZE);
		dim3 dimGrid(GRIDSIZE, 1, 1);
		dim3 dimBlock(BLOCKSIZE, 1, 1);
		MatrixAddF <<< dimGrid, dimBlock >>> (devicematrix1, devicematrix2, devicematrix3, m1, n1);
		cudaMemcpy(hostmatrix3, devicematrix3, matrix_size, cudaMemcpyDeviceToHost);
		write_fmatrix(argv[3], &m1, &n1, &hostmatrix3);
		cudaFree(devicematrix1);
		cudaFree(devicematrix2);
		cudaFree(devicematrix3);
		free(hostmatrix1);
		free(hostmatrix2);
		free(hostmatrix3);
	}
	if (strcmp(argv[4], "int") == 0)
	{
		int *hostmatrix1, *hostmatrix2, *hostmatrix3;
		int *devicematrix1, *devicematrix2, *devicematrix3;
		int GRIDSIZE;
		read_imatrix(argv[1], &m1, &n1, &hostmatrix1);
		read_imatrix(argv[2], &m2, &n2, &hostmatrix2);
		matrix_check(m1, n1, m2, n2);
		size_t matrix_size = m1*n1*sizeof(int);
		hostmatrix3 = (int *)calloc(m1*n1, sizeof(int));
		cudaMalloc(&devicematrix1, matrix_size);
		cudaMalloc(&devicematrix2, matrix_size);
		cudaMalloc(&devicematrix3, matrix_size);
		cudaMemcpy(devicematrix1, hostmatrix1, matrix_size, cudaMemcpyHostToDevice);
		cudaMemcpy(devicematrix2, hostmatrix2, matrix_size, cudaMemcpyHostToDevice);
		/* One thread per element, grid rounded up. */
		GRIDSIZE = (int)ceil((float)(m1*n1)/BLOCKSIZE);
		dim3 dimGrid(GRIDSIZE, 1, 1);
		dim3 dimBlock(BLOCKSIZE, 1, 1);
		MatrixAddI <<< dimGrid, dimBlock >>> (devicematrix1, devicematrix2, devicematrix3, m1, n1);
		cudaMemcpy(hostmatrix3, devicematrix3, matrix_size, cudaMemcpyDeviceToHost);
		write_imatrix(argv[3], &m1, &n1, &hostmatrix3);
		cudaFree(devicematrix1);
		cudaFree(devicematrix2);
		cudaFree(devicematrix3);
		free(hostmatrix1);
		free(hostmatrix2);
		free(hostmatrix3);
	}
	return 0;
}
|
5,020 | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Diagnostic kernel: every thread prints its thread index, block index,
// and the grid dimensions. Device printf output is serialized and
// unordered -- for debugging only. (The format string uses backslash
// line continuations, so the literal contains the leading spaces of the
// continued lines.)
__global__ void print_threadIds_blockIds_gridDim()
{
    printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d,\
     blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d,\
     gridDim.x: %d, gridDim.y: %d, gridDim.z: %d \n",
        threadIdx.x, threadIdx.y, threadIdx.z,
        blockIdx.x, blockIdx.y, blockIdx.z,
        gridDim.x, gridDim.y, gridDim.z);
}
// Launches a 4x4x4 thread domain as 2x2x2 blocks so every thread can
// print its coordinates, then waits for the kernel and resets the device.
int main()
{
    const int nx = 4, ny = 4, nz = 4;
    dim3 block(2, 2, 2);
    dim3 grid(nx / block.x, ny / block.y, nz / block.z);
    print_threadIds_blockIds_gridDim<<<grid, block>>>();
    // Block until all device printf output has been produced.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
5,021 | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
// Rank sort: each thread counts how many elements its own element is
// greater than or equal to (its 1-based rank) and writes the element at
// position rank-1 of DataOut. Assumes distinct input values, otherwise
// ranks collide. Launched with 512-thread blocks.
// Fix: rank[tx] was zeroed BEFORE the bounds check -- the grid is rounded
// up to a multiple of 512, so trailing threads wrote past the end of the
// rank array. The initialisation now sits inside the guard.
__global__ void RankSortKernel(float* DataIn, float* DataOut, int* rank, int size)
{
	// Retrieve our coordinates in the block
	int tx = (blockIdx.x * 512) + threadIdx.x;
	if(tx < size)
	{
		rank[tx] = 0;
		for(int i=0;i<size;i++)
		{
			if(DataIn[tx]>=DataIn[i])
			{
				rank[tx]++;
			}
		}
		DataOut[(rank[tx]-1)] = DataIn[tx];
	}
}
// One phase of odd-even transposition sort. Thread tx owns one adjacent
// pair: (2tx, 2tx+1) in the even phase, (2tx+1, 2tx+2) in the odd phase,
// and swaps the pair into ascending order. Launched with 512-thread
// blocks covering size/2 threads.
__global__ void OddEvenSortKernel(float* Array, int size, bool Odd_Phase)
{
	int tx = (blockIdx.x * 512) + threadIdx.x;
	if(tx >= (size/2))
		return;
	// Left index of the pair this thread compares.
	int left = Odd_Phase ? (2*tx + 1) : (2*tx);
	if((left + 1) < size)
	{
		if(Array[left] > Array[left+1])
		{
			float temp = Array[left];
			Array[left] = Array[left+1];
			Array[left+1] = temp;
		}
	}
}
// Sorts InputArray (length size) into SortedArray on the GPU using rank
// sort. Returns false if the kernel launch failed.
// Fixes: the rank scratch buffer was sized with sizeof(float) instead of
// sizeof(int) (same size today, but type-incorrect) and was never freed
// on either path (device memory leak); unused locals removed.
bool RankSortGPU( float* InputArray, float* SortedArray, int size)
{
	// Error return value
	cudaError_t status;
	// Number of bytes for the float payload and the int rank scratch
	int bytes = size * sizeof(float);
	int rankBytes = size * sizeof(int);
	// Pointers to the device arrays
	float *DataIn, *DataOut;
	int *rank;
	// Allocate memory on the device
	cudaMalloc((void**) &DataIn, bytes);
	cudaMalloc((void**) &DataOut, bytes);
	cudaMalloc((void**) &rank, rankBytes);
	// Copy the host input data to the device
	cudaMemcpy(DataIn, InputArray, bytes, cudaMemcpyHostToDevice);
	// One thread per element, 512 threads per block
	dim3 dimBlock(512, 1);
	dim3 dimGrid((int)ceil((float)size/512), 1);
	// Launch the kernel
	RankSortKernel<<<dimGrid, dimBlock>>>(DataIn, DataOut, rank, size);
	// Wait for completion
	cudaThreadSynchronize();
	// Check for errors
	status = cudaGetLastError();
	if (status != cudaSuccess)
	{
		std::cout << "Kernel failed 1: " << cudaGetErrorString(status) <<
		std::endl;
		cudaFree(DataIn);
		cudaFree(DataOut);
		cudaFree(rank);
		return false;
	}
	// Retrieve the result
	cudaMemcpy(SortedArray, DataOut, bytes, cudaMemcpyDeviceToHost);
	// Free device memory (fix: rank was previously leaked)
	cudaFree(DataIn);
	cudaFree(DataOut);
	cudaFree(rank);
	// Success
	return true;
}
// Sorts InputArray (length size) into SortedArray on the GPU using
// odd-even transposition sort: `size` rounds, each consisting of an even
// phase followed by an odd phase, which is sufficient for convergence.
// Returns false if any kernel launch fails.
// Fix: removed the unused blocksize/gridsize locals.
bool OddEvenSortGPU( float* InputArray, float* SortedArray, int size)
{
	// Error return value
	cudaError_t status;
	// Number of bytes
	int bytes = size * sizeof(float);
	// Device copy of the array, sorted in place
	float *Array;
	bool Odd_Phase;
	// Allocate memory on the device
	cudaMalloc((void**) &Array, bytes);
	// Copy the host input data to the device
	cudaMemcpy(Array, InputArray, bytes, cudaMemcpyHostToDevice);
	// One thread per adjacent pair
	int new_size = size/2;
	dim3 dimBlock(512, 1);
	dim3 dimGrid((int)ceil((float)new_size/512), 1);
	for(int i=0;i<size;i++)
	{
		//even phase
		Odd_Phase = false;
		OddEvenSortKernel<<<dimGrid, dimBlock>>>(Array, size, Odd_Phase);
		// Wait for completion
		cudaThreadSynchronize();
		// Check for errors
		status = cudaGetLastError();
		if (status != cudaSuccess)
		{
			std::cout << "Kernel failed 2: " << cudaGetErrorString(status) <<
			std::endl;
			cudaFree(Array);
			return false;
		}
		//odd phase
		Odd_Phase = true;
		OddEvenSortKernel<<<dimGrid, dimBlock>>>(Array, size, Odd_Phase);
		// Wait for completion
		cudaThreadSynchronize();
		// Check for errors
		status = cudaGetLastError();
		if (status != cudaSuccess)
		{
			std::cout << "Kernel failed 3: " << cudaGetErrorString(status) <<
			std::endl;
			cudaFree(Array);
			return false;
		}
	}
	// Retrieve the sorted array
	cudaMemcpy(SortedArray, Array, bytes, cudaMemcpyDeviceToHost);
	// Free device memory
	cudaFree(Array);
	// Success
	return true;
}
|
5,022 |
#include "CNextStateLookupTable.cuh"
#include "CStateLookupTable.cuh"
// Constructs a next-state lookup table for constraint length p_cnK; all
// table storage is managed by the CStateLookupTable base. The table-type
// tag is only part of the base interface in CUDA builds.
CNextStateLookupTable::CNextStateLookupTable(unsigned int const p_cnK) :
	CStateLookupTable(p_cnK
#ifdef _USE_CUDA_
	, LookupTableType_Next // Set table type
#endif
	)
{
}
// Destructor: nothing to release here; cleanup is handled by the base class.
CNextStateLookupTable::~CNextStateLookupTable(void)
{
}
// Computes the successor encoder state for a given input bit: the state
// register shifts right by one and the input bit becomes the new MSB
// (bit m_cnMemory-1, inherited from the base class).
unsigned int CNextStateLookupTable::Shift(unsigned int const p_cnState, unsigned int const p_cnInput) const
{
	// Shift right once and put the input in the MSB
	return (p_cnState >> 1) + ((p_cnInput == 0) ? (0) : (1 << (m_cnMemory - 1)));
}
// Factory method: constructs an instance and populates its state table
// before handing it back. The caller owns the returned pointer.
CNextStateLookupTable* CNextStateLookupTable::Create(unsigned int const p_cnK)
{
	CNextStateLookupTable* l_pcInstance = new CNextStateLookupTable(p_cnK);
	l_pcInstance->InitializeStateLookupTable();
	return l_pcInstance;
}
|
5,023 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// 1D convolution: P[i] = sum_j N[i - Mask_width/2 + j] * M[j], with
// out-of-range input elements treated as zero (ghost cells).
// Fix: guard the output write with i < width -- the grid is rounded up to
// a multiple of the block size, so trailing threads previously wrote past
// the end of P.
__global__ void convolution_1D(float *N , float *M , float *P , int Mask_width,int width)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= width)
        return;
    float pvalue = 0.0;
    // Leftmost input index touched by this output element.
    int N_start_point = i - ((Mask_width)/2);
    for(int j =0;j<Mask_width;j++)
    {
        if(((N_start_point+j)>=0)&& ((N_start_point + j)<width))
        {
            pvalue += N[N_start_point + j] * M[j];
        }
    }
    P[i] = pvalue;
}
// Reads an array and a convolution mask from stdin, convolves them on the
// GPU, and prints the result.
// Fix: the host buffers a, m and t were never freed (memory leaks); an
// explicit return 0 was also added.
int main()
{
    float *a , *m , *t;
    float *d_a, *d_m, *d_t;
    int width , mask_width;
    printf("Enter the size of array \n");
    scanf("%d",&width);
    a = (float*)malloc(sizeof(float)*width);
    t = (float*)malloc(sizeof(float)*width);
    printf("Enter the array \n");
    int i = 0;
    for(i = 0;i<width;i++)
    {
        scanf("%f",&a[i]);
    }
    printf("Enter the size of mask \n");
    scanf("%d",&mask_width);
    m = (float*)malloc(sizeof(float)*mask_width);
    printf("Enter the mask \n");
    for(i = 0;i<mask_width;i++)
    {
        scanf("%f",&m[i]);
    }
    int size1 = sizeof(float)*width;
    int size2 = sizeof(float)*mask_width;
    cudaMalloc((void**)&d_a,size1);
    cudaMalloc((void**)&d_m,size2);
    cudaMalloc((void**)&d_t,size1);
    cudaMemcpy(d_a,a,size1,cudaMemcpyHostToDevice);
    cudaMemcpy(d_m,m,size2,cudaMemcpyHostToDevice);
    // mask_width threads per block, grid rounded up to cover the array.
    dim3 dimGrid((width-1)/mask_width + 1,1,1);
    dim3 dimBlock(mask_width,1,1);
    convolution_1D<<<dimGrid,dimBlock>>>(d_a,d_m,d_t,mask_width,width);
    cudaMemcpy(t,d_t,size1,cudaMemcpyDeviceToHost);
    printf("The result array is \n");
    for(int i = 0;i<width;i++)
    {
        printf("%f ",t[i]);
    }
    printf("\n");
    cudaFree(d_a);
    cudaFree(d_m);
    cudaFree(d_t);
    // Fix: release host allocations (previously leaked).
    free(a);
    free(m);
    free(t);
    return 0;
} |
5,024 | #include <iostream>
#include <fstream>
#include <stdlib.h>
#include <cstring>
#include <limits> // radi definiranja beskonačnosti
#include <ctime> // radi mjerenja vremena izvršavanja
#include <cmath> // radi "strop" funkcije
using namespace std;
/* Definiramo beskonačnost kao najveći mogući integer broj. */
#define infty std::numeric_limits<int>::max()
/* Prints a dim x dim row-major matrix; entries equal to infty are shown
   as the infinity symbol. Output is tab-separated with CRLF line
   endings, preceded by one blank line. */
void printMatrix (int* G, unsigned int dim) {
	cout << "\r\n";
	for (int idx = 0; idx < dim*dim; idx++) {
		if (G[idx] >= infty) {
			cout << "∞" << "\t";
		}
		else {
			cout << G[idx] << "\t";
		}
		/* End the row after every dim-th element. */
		if ((idx+1)%dim == 0) {
			cout << "\r\n";
		}
	}
}
/* Kernel za device koji paralelizira unutarnje dvije for petlje Floyd-Warshall algoritma. */
/* One Floyd-Warshall relaxation step for a fixed intermediate vertex k:
   each thread owns one (i, j) cell of the distance matrix D and relaxes
   it through k, updating the predecessor matrix PI on improvement.
   Expects a 2D launch covering dim x dim threads. */
__global__ void FW_Cuda(int k, int* D, int* PI, unsigned int dim) {
	int row = blockDim.y * blockIdx.y + threadIdx.y;
	int col = blockDim.x * blockIdx.x + threadIdx.x;
	if (row >= dim || col >= dim)
		return;
	int distRowK = D[row*dim+k];
	int distKCol = D[k*dim+col];
	/* Skip unreachable legs; also prevents integer overflow of the sum. */
	if (distRowK < INT_MAX && distKCol < INT_MAX && D[row*dim+col] > distRowK + distKCol) {
		D[row*dim+col] = distRowK + distKCol;
		PI[row*dim+col] = PI[k*dim+col];
	}
}
/* Runs Floyd-Warshall on the GPU: the outer k loop stays on the CPU and
   each iteration launches a dim x dim grid (FW_Cuda) that relaxes every
   (i, j) pair through vertex k. The host matrices D (distances) and PI
   (predecessors) are updated in place.
   NOTE(review): this calls cudaDeviceReset() at the end, which tears down
   the entire CUDA context -- fine for this standalone program, but
   surprising for a library-style function. */
void Floyd_Warshall_Cuda (int* W, int* D, int* PI, unsigned int dim) {
	unsigned int n = dim*dim;
	/* Error variable for handling CUDA errors. */
	cudaError_t err = cudaSuccess;
	/* Allocate device copies of the D and PI matrices. */
	int* d_D = NULL;
	err = cudaMalloc((void**) &d_D, n*sizeof(int));
	if (err != cudaSuccess) {
		fprintf(stderr, "Neuspješno alociranje matrice D (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	int* d_PI = NULL;
	err = cudaMalloc((void**) &d_PI, n*sizeof(int));
	if (err != cudaSuccess) {
		fprintf(stderr, "Neuspješno alociranje matrice PI (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	/* Copy the host matrices to the device. */
	err = cudaMemcpy(d_D, D, n*sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) {
		fprintf(stderr, "Neuspješno kopiranje matrice D iz hosta u device (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	err = cudaMemcpy(d_PI, PI, n*sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) {
		fprintf(stderr, "Neuspješno kopiranje matrice PI iz hosta u device (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	/* Launch configuration for the CUDA kernel. */
	int blockDim = 32; /* 32x32 = 1024 threads per block. */
	int gridDim = ceil((float)dim/(float)blockDim); /* Grid sized to cover dim in each axis. */
	cout << "CUDA kernel se pokreće sa " << gridDim*gridDim << " blokova i " << blockDim*blockDim << " threadova po bloku.\r\n";
	/* The outer Floyd-Warshall loop runs on the CPU; the two inner loops
	   are parallelized on the GPU. */
	for (int k = 0; k < dim; k++) {
		FW_Cuda<<<dim3(gridDim, gridDim, 1), dim3(blockDim, blockDim, 1)>>> (k, d_D, d_PI, dim);
		err = cudaGetLastError();
		if (err != cudaSuccess) {
			fprintf(stderr, "Neuspješno pokrenuta kernel metoda (error code %s)!\n", cudaGetErrorString(err));
			exit(EXIT_FAILURE);
		}
		/* Synchronize so iteration k fully finishes before iteration k+1
		   starts (each k depends on the previous one). */
		cudaThreadSynchronize();
	}
	/* Copy the results back from the device to the host. */
	err = cudaMemcpy(D, d_D, n*sizeof(int), cudaMemcpyDeviceToHost);
	if (err != cudaSuccess) {
		fprintf(stderr, "Neuspješno kopiranje matrice D iz devicea u host (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	err = cudaMemcpy(PI, d_PI, n*sizeof(int), cudaMemcpyDeviceToHost);
	if (err != cudaSuccess) {
		fprintf(stderr, "Neuspješno kopiranje matrice PI iz devicea u host (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	/* Free the device matrices. */
	err = cudaFree(d_D);
	if (err != cudaSuccess) {
		fprintf(stderr, "Neuspješno dealociranje matrice D (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	err = cudaFree(d_PI);
	if (err != cudaSuccess) {
		fprintf(stderr, "Neuspješno dealociranje matrice PI (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	/* Reset the CUDA device to finish the CUDA Floyd-Warshall run. */
	err = cudaDeviceReset();
	if (err != cudaSuccess) {
		fprintf(stderr, "Neuspješno resetiranje devicea (završavanje sa CUDA FW, priprema za sljedeće pokretanje)! error=%s\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
}
/* Metoda koja rekonstruira težinu najkraćeg puta za dani par vrhova koristeći matricu
prethodnika PI i matricu inicijalnih težina W. */
/* Reconstructs the weight of the shortest i -> j path from the
   predecessor matrix PI and the original weight matrix W.
   Returns 0 for i == j and infty when j is unreachable from i. */
int getPath (int* W, int* PI, int i, int j, unsigned int dim) {
	if (i == j) {
		return 0;
	}
	int pred = PI[i*dim+j];
	if (pred == -1) {
		return infty;
	}
	/* Resolve the path up to the predecessor, then add the final edge
	   pred -> j (only when the prefix is actually reachable). */
	int prefix = getPath(W, PI, i, pred, dim);
	return (prefix < infty) ? (prefix + W[pred*dim+j]) : infty;
}
/* Za svaki par vrhova pokreće getPath metodu koja rekonstruira težinu najkraćeg puta
između njih koristeći matricu prethodnika PI. Tu težinu onda uspoređuje sa dobivenom težinom
za isti par vrhova u matrici najkraćih putova D. */
/* Cross-checks the computed distance matrix D: for every vertex pair,
   rebuilds the path weight from the predecessor matrix PI (via getPath)
   and compares it against D. Returns false on the first mismatch. */
bool checkSolutionCorrectness (int* W, int* D, int* PI, unsigned int dim) {
	for (int row = 0; row < dim; row++) {
		for (int col = 0; col < dim; col++) {
			if (D[row*dim+col] != getPath(W, PI, row, col, dim)) {
				return false;
			}
		}
	}
	return true;
}
/* Driver: loads a weighted graph from graphFile.txt, runs the CUDA
   Floyd-Warshall implementation, writes the D and PI matrices to
   output_cuda.txt, reports the elapsed time, and verifies the result
   against a path reconstruction from PI. */
int main() {
	/*
	V - number of vertices
	E - number of edges
	u - first endpoint when reading an edge from the file
	v - second endpoint when reading an edge from the file
	w - weight of the edge (u, v) when reading from the file
	*/
	unsigned int V, E;
	int u, v, w;
	ifstream inputGraphFile; inputGraphFile.open("graphFile.txt");
	ofstream outputFile; outputFile.open("output_cuda.txt");
	inputGraphFile >> V >> E;
	cout << "V = " << V << ", E = " << E << "\r\n";
	unsigned int n = V*V;
	/* Allocate the matrices. */
	int* W = (int*)malloc(n*sizeof(int));
	int* D = (int*)malloc(n*sizeof(int));
	int* PI = (int*)malloc(n*sizeof(int));
	/* Initial values: no edges known (infty weights, -1 predecessors). */
	fill_n(W, n, infty);
	fill_n(PI, n, -1);
	for (int i = 0; i < E; i++) {
		inputGraphFile >> u >> v >> w;
		//cout << u << " <-- " << w << " --> " << v << "\r\n";
		W[u*V+v] = w;
		if (u != v) {
			PI[u*V+v] = u;
		}
	}
	/* Zero-cost self loops on the diagonal. */
	for (int i = 0; i < V; i++) {
		W[i*V+i] = 0;
	}
	/* D(0) = W initially. */
	memcpy (D, W, n*sizeof(int));
	// printMatrix(W, V); printMatrix(D, V); printMatrix(PI, V);
	/* Start timing the Floyd-Warshall run. */
	clock_t begin = clock();
	/* Run the CUDA Floyd-Warshall algorithm on the loaded graph. */
	Floyd_Warshall_Cuda(W, D, PI, V);
	/* Stop timing. */
	clock_t end = clock();
	double elapsedTime = double(end - begin) / CLOCKS_PER_SEC;
	//printMatrix(W, V); printMatrix(D, V); printMatrix(PI, V);
	/* Write the results to the output file. */
	outputFile << "|V| = " << V << ", |E| = " << E << "\r\n\r\n";
	for (int i = 0; i < n; i++) {
		if (i%V==0) outputFile << "\r\n";
		if (D[i] < infty)
			outputFile << D[i] << "\t";
		else
			outputFile << "∞" << "\t";
	}
	outputFile << "\r\n\r\n";
	for (int i = 0; i < n; i++) {
		if (i%V==0) outputFile << "\r\n";
		outputFile << PI[i] << "\t";
	}
	cout << "Vrijeme izvršavanja Floyd-Warshall algoritma: " << elapsedTime << "s.\r\n";
	if (checkSolutionCorrectness(W, D, PI, V) == true)
		cout << "Svi najkraći putevi su točno izračunati!\r\n";
	else
		cout << "Najkraći putevi nisu točno izračunati.\r\n";
	inputGraphFile.close();
	outputFile.close();
	free(W); free(D); free(PI);
	return 0;
} |
5,025 | // compile with -std=c++11 -O3 -lcurand
#include <iostream>
#include <cstdio>
#include <curand.h>
using std::cout;
using std::endl;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Logs a CUDA error with its source location and, unless abort is false,
// terminates the process using the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type.
// Conversion operators cast the single extern shared array to T*, so every
// instantiation aliases the same dynamic shared-memory allocation.
template <class T>
struct SharedMemory {
    __device__ inline operator T*() {
        extern __shared__ int __smem[];
        return (T*) __smem;
    }
    __device__ inline operator const T*() const {
        extern __shared__ int __smem[];
        return (T*) __smem;
    }
};
// specialize for double to avoid unaligned memory
// access compile errors
template <>
struct SharedMemory<double> {
    __device__ inline operator double*() {
        extern __shared__ double __smem_d[];
        return (double*) __smem_d;
    }
    __device__ inline operator const double*() const {
        extern __shared__ double __smem_d[];
        return (double*) __smem_d;
    }
};
// Block-wide tree reduction: sums blockDim.x elements of g_idata into
// sdata[0]; sdata must point to at least blockDim.x elements of shared
// memory. All threads of the block must call this together.
// NOTE(review): the halving loop assumes blockDim.x is a power of two;
// other sizes would silently drop elements -- confirm launch configs.
template <class T>
__device__ void reduce(T* g_idata, T* sdata) {
    // load shared mem
    unsigned int tid = threadIdx.x;
    sdata[tid] = g_idata[tid];
    __syncthreads();
    // do reduction in shared mem
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // result is now in sdata[0]
}
// This kernel requires blockDim * sizeof(T) Bytes of shared memory.
// Each block processes `c * blockDim` elements: chunk i is tree-reduced and
// its mean published by thread 0 to partial_data[blockIdx.x * c + i].
// The buffers, which should be affected by the call to `__threadfence_system`
// must be volatile, as it is described in the CUDA C programming guide.
// With COMMUNICATION_ON, counter[blockIdx.x] is incremented after each
// publish so a consumer (possibly on another device) can poll for progress.
template <class T, bool COMMUNICATION_ON>
__global__ void producer_kernel(T* data, volatile T* partial_data, volatile unsigned* counter,
                const unsigned c) {
    const unsigned global_start = blockIdx.x * blockDim.x * c;
    T* sdata = SharedMemory<T>();
    for (unsigned i = 0; i < c; ++i) {
        const unsigned offset = i * blockDim.x;
        const auto curr_start = data + global_start + offset;
        reduce(curr_start, sdata);
        // now we have the sum of blockDim elements in sdata[0]
        if (threadIdx.x == 0) {
            // save the mean of recently processed elements
            partial_data[blockIdx.x * c + i] = sdata[0] / (T) blockDim.x;
            if (COMMUNICATION_ON) {
                __threadfence_system();
                ++counter[blockIdx.x]; // mark this block as processed
            }
        }
    }
}
// Counterpart of producer_kernel. With COMMUNICATION_ON, thread 0 spins on
// the block's counter, loads the published partial mean into shared `mean`,
// and after the barrier every thread writes it to `data`; otherwise each
// thread writes its own index (pure-write mode, the one used in this file).
// NOTE(review): the spin compares counter[blockIdx.x] against blockDim.x,
// but the producer increments it once per chunk (at most c times); this
// looks like it should involve the chunk index i instead. The branch is
// never taken here (both launches pass COMMUNICATION_ON = false) -- confirm
// before enabling communication.
template<class T, bool COMMUNICATION_ON>
__global__ void consumer_kernel(T* data, const volatile T* partial_data,
                const volatile unsigned* counter,
                const unsigned c) {
    __shared__ T mean;
    const unsigned global_start = blockIdx.x * blockDim.x * c;
    for (unsigned i = 0; i < c; ++i) {
        const unsigned offset = i * blockDim.x;
        if (COMMUNICATION_ON) {
            if (threadIdx.x == 0) {
                while (counter[blockIdx.x] < blockDim.x) {}
                mean = partial_data[blockIdx.x * c + i];
            }
            __syncthreads();
            data[offset + global_start + threadIdx.x] = mean;
        }
        else { // no communication
            data[offset + global_start + threadIdx.x] = threadIdx.x;
        }
    }
}
// Runs one producer/consumer measurement: device 0 reduces curand-filled
// data into per-block means while device 1 writes derived values, and the
// elapsed GPU time in milliseconds is printed (no trailing separator).
// Both kernels are launched with COMMUNICATION_ON = false, so the `counter`
// handshake buffer is allocated but never read and needs no initialization.
// Fix vs. original: out_data was never freed, leaking `size` bytes on
// device 1 on every call of the sweep in main().
int runMeasurement(
    const unsigned num_threads,   // threads per block (reduce() expects a power of two)
    const unsigned num_blocks,    // blocks per grid
    const unsigned c)             // chunks (results) per block
{
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    using DataT = float;
    const size_t sh_mem_size = num_threads * sizeof(DataT);
    const size_t size = c * num_threads * num_blocks * sizeof(DataT); // total size in Bytes
    DataT* in_data;
    DataT* out_data;
    DataT* partial_data;
    unsigned* counter;
    // Producer-side buffers live on device 0.
    cudaSetDevice(0);
    cudaMalloc(&in_data, size);
    cudaMallocManaged(&partial_data, num_blocks * c * sizeof(DataT));
    curandGenerateUniform(gen, (float*) in_data, size / sizeof(float)); // fill with random bits
    cudaDeviceSynchronize();
    // Consumer-side buffers live on device 1; counter prefers host memory.
    cudaSetDevice(1);
    cudaMallocManaged(&counter, num_blocks * sizeof(unsigned));
    cudaMemAdvise(counter, num_blocks * sizeof(unsigned),
                  cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId);
    cudaMalloc(&out_data, size);
    cudaMemset(out_data, 0, size);
    cudaSetDevice(0);
    // Time both kernel launches with events recorded on device 0's default stream.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    producer_kernel<DataT, false><<<num_blocks, num_threads, sh_mem_size>>>(in_data, partial_data, counter, c);
    cudaSetDevice(1);
    consumer_kernel<DataT, false><<<num_blocks, num_threads>>>(out_data, partial_data, counter, c);
    cudaSetDevice(0);
    gpuErrchk(cudaEventRecord(stop, 0));
    gpuErrchk(cudaEventSynchronize(stop));
    float time_in_ms;
    gpuErrchk(cudaEventElapsedTime(&time_in_ms, start, stop));
    gpuErrchk(cudaSetDevice(0));
    gpuErrchk(cudaDeviceSynchronize());
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(in_data);
    cudaFree(partial_data);
    cudaFree(counter);
    cudaFree(out_data);   // was leaked in the original
    curandDestroyGenerator(gen);
    cout << time_in_ms;
    return 0;
}
// Sweeps every (threads, blocks, chunks) configuration, letting each
// runMeasurement call print its timing, and formats the whole sweep as a
// nested Python tuple literal wrapped in np.array(...).
int main()
{ cout << "np.array(";
    const int threadOpts[] = {32,256,512,1024};
    const int nT = 4;
    const int blockOpts[] = {1,2,4,8};
    const int nB = 4;
    const int chunkOpts[] = {256, 1024, 4096, 16384, 65536};
    const int nC = 5;
    cout << "(";
    for (int ti = 0; ti < nT; ti++)
    {
        cout << "(";
        for (int bi = 0; bi < nB; bi++)
        {
            cout << "(";
            for (int ci = 0; ci < nC; ci++)
            {
                runMeasurement(threadOpts[ti], blockOpts[bi], chunkOpts[ci]);
                if (ci + 1 != nC)
                    cout << ",";
            }
            cout << ")";
            if (bi + 1 != nB)
                cout << ",";
        }
        cout << ")";
        if (ti + 1 != nT)
            cout << ",";
    }
    cout << ")";
    cout << ")" << std::endl;
}
|
5,026 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_real_distribution.h>
#include <iostream>
// nvcc -std=c++14 -O3 tarefa2.cu -o t2 && ./t2
// Functor for thrust::transform: maps an index value to a pseudo-random
// double drawn from `dist`. The engine is re-seeded from the input on every
// call (seed = i*100000), so each element's value is deterministic and
// independent of evaluation order across host/device.
struct fillRand
{
    thrust::uniform_real_distribution<double> dist;
    thrust::minstd_rand rng;
    fillRand(thrust::uniform_real_distribution<double> dist, thrust::minstd_rand rng) : dist(dist), rng(rng) {}
    __host__ __device__
    double operator()(const double &i){
        rng.seed(i*100000);
        return dist(rng);
    }
};
// Fills a 10-element device vector with deterministic pseudo-random doubles
// in [25, 40) via fillRand over a counting iterator, copies the result back
// to the host and prints the values space-separated.
int main(){
    thrust::minstd_rand rng;
    thrust::uniform_real_distribution<double> dist(25, 40);
    thrust::device_vector<double> vetor(10, 0);
    thrust::counting_iterator<int> iter(0);
    thrust::transform(iter, iter + vetor.size(), vetor.begin(), fillRand(dist, rng));
    // Device-to-host transfer happens in this constructor.
    thrust::host_vector<double> host(vetor);
    for (auto i = host.begin(); i != host.end(); i++)
        std::cout << *i << " ";
    printf("\n");
} |
5,027 | #include <iostream>
#include <string.h>
#include <stdio.h>
#include <math.h>
using namespace std;
namespace myNamespace_00_01{
    // Buffers and launch configuration shared by initialize/run/finalize.
    static double* hmem_i;            // host input buffer (nword doubles)
    static double* hmem_o;            // host output buffer (one double per thread)
    static double* dmem_i;            // device input buffer
    static double* dmem_o;            // device output buffer
    static cudaStream_t stream;       // stream all copies and launches use
    static int nb = 1;                // blocks, max 1024*1024*64*2
    static int nthre = 1;             // threads per block, max 65535
    static int nthre_total = nb*nthre;
    static int nword = 1024*1024;     // elements each thread sums
    static int mem_size = sizeof(double) * nword;
    static int mem_size_o = nthre_total*sizeof(double);
    // Sums the first nword doubles of `in`.
    __device__ double myDeviceFunc(double* in, int nword)
    {
        double z=0.0;
        for(int i=0; i<nword; i++)
            z += in[i];
        return (z);
    }
    // Every thread computes the same full-array sum and writes it to its
    // own output slot.
    __global__ void kernel(double* in, double* out, int nword)
    {
        int tid = threadIdx.x;
        int bid = blockIdx.x;
        int index = blockDim.x*bid + tid;
        double z = myDeviceFunc(in, nword);
        out[index] = z;
    }
    // One-time setup: creates the stream, allocates host/device buffers and
    // enqueues the input upload. Safe to call repeatedly.
    void initialize()
    {
        static bool is_first = true;
        if(false == is_first) return;
        // setup stream
        cudaStreamCreate(&stream);
        // input buffer (Host)
        hmem_i = (double*) malloc(mem_size);
        for(int i=0; i<nword; i++) hmem_i[i] = (double)i;
        // input buffer (GPU)
        cudaMalloc( (void**) &dmem_i, mem_size);
        cudaMemcpyAsync(dmem_i, hmem_i, mem_size, cudaMemcpyHostToDevice, stream);
        // output buffer (Host/GPU)
        cudaMalloc( (void**) &dmem_o, mem_size_o);
        hmem_o = (double*) malloc(mem_size_o);
        // FIX: cudaStream_t is an opaque pointer; printing it with %d is
        // undefined behavior -- print it as a pointer instead.
        printf("stream #: %p\n", (void*)stream);
        printf("# threads: %d \n", nthre_total);
        printf("mem_size: %d MB\n", mem_size >> 20);
        printf("mem_size_o: %d kB\n", mem_size_o >> 10);
        is_first = false;
    }
    // Launches the kernel, downloads the results and prints the last
    // thread's value.
    void run(int n_run)
    {
        kernel<<< nb, nthre, 0, stream >>>(dmem_i, dmem_o, nword);
        cudaMemcpyAsync(hmem_o, dmem_o, mem_size_o, cudaMemcpyDeviceToHost, stream);
        // FIX: the copy above is asynchronous; without this sync hmem_o may
        // still hold stale data when it is read below.
        cudaStreamSynchronize(stream);
        printf("%p: %d, %f\n", (void*)stream, nthre_total-1, hmem_o[nthre_total-1]);
        return;
    }
    // Releases all buffers and destroys the stream.
    void finalize(){
        free(hmem_i);
        free(hmem_o);
        cudaFree(dmem_i);
        cudaFree(dmem_o);
        cudaStreamSynchronize(stream);
        cudaStreamDestroy(stream);
    }
}
|
5,028 | #include "includes.h"
using namespace std;
//Check for edges valid to be part of augmented path
//Update frontier
// Compacts the vertices flagged in new_frontier into frontier: the count is
// stored at frontier[0] and the vertex indices at frontier[1..count], while
// flags are cleared and the vertices marked visited.
// NOTE(review): this scan is sequential with no thread guard -- it appears
// intended for a single-thread launch; concurrent threads would race.
__global__ void k2(const int N, bool* visited, int* frontier, bool* new_frontier) {
    int found = 0;
    for (int v = 0; v < N; v++) {
        if (!new_frontier[v])
            continue;
        new_frontier[v] = false;
        visited[v] = true;
        frontier[++found] = v;
    }
    frontier[0] = found;
} |
5,029 | #include "csv_data.cuh"
// Opens the result CSV for writing, emits the header row, and records
// whether rows should also be echoed to stdout.
CSV_Data::CSV_Data(string fileName, bool printInfo) {
    this->printInfo = printInfo;
    (this->resultFile).open(fileName, ios::out);
    (this->resultFile) << "Target,#Threads,#ThreadBlks,ExecTime\n";
}
// Flushes and closes the result CSV file.
CSV_Data::~CSV_Data() {
    resultFile.close();
}
// Appends one measurement row to the CSV file; also echoes it to stdout
// when printInfo was enabled at construction.
void CSV_Data::AddData(string Target, int numThreads, int numThreadBlks, float ExecTime) {
    if (printInfo) {
        printf("[INFO] Target: %10s, numThreads: %5d, numThreadBlks: %5d, ExecTime: %.5f\n",
                Target.c_str(), numThreads, numThreadBlks, ExecTime);
    }
    (this->resultFile) << Target << "," << numThreads << "," << numThreadBlks << "," << ExecTime << "\n";
}
// Compares a device-produced result buffer against the host reference
// stored by AddHostData. Returns true only when both the byte size and the
// contents match; otherwise logs a warning and returns false.
bool CSV_Data::CompareDeviceData(void *device_result, size_t device_result_size) {
    if (this->host_result == NULL) {
        printf("[WARN] Host result data is empty.\n");
        return false;
    }
    if (this->host_result_size == device_result_size) {
        if (memcmp(device_result, this->host_result, device_result_size) == 0) {
            return true;
        } else {
            printf("[WARN] Data is not matching with host's and device's.\n");
            return false;
        }
    } else {
        printf("[WARN] Data size is not matching with host's and device's.\n"
               " Device: %lu, Host: %lu\n", device_result_size, this->host_result_size);
        return false;
    }
}
// Stores a host-side reference measurement: keeps a private copy of the
// result buffer (replacing any previous one) for later device comparisons
// and logs the run as Target "host".
void CSV_Data::AddHostData(float ExecTime, void *host_result, size_t host_result_size) {
    this->host_exec_time = ExecTime;
    if (this->host_result != NULL) {
        free(this->host_result);
    }
    this->host_result_size = host_result_size;
    this->host_result = malloc(host_result_size);
    memcpy(this->host_result, host_result, host_result_size);
    this->AddData("host", 1, 1, ExecTime);
}
// Records a device measurement: verifies the device buffer against the
// stored host reference (warnings only -- the row is appended regardless)
// and logs the run as Target "device".
void CSV_Data::AddDeviceData(int numThreads, int numThreadBlks, float ExecTime, void *device_result, size_t device_result_size) {
    this->CompareDeviceData(device_result, device_result_size);
    this->AddData("device", numThreads, numThreadBlks, ExecTime);
}
|
5,030 | #include <stdio.h>
#include <iostream>
#include <vector>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
const int block_num = 512;
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
const int threadsPerBlock = sizeof(unsigned long long) * 8;
// Per-row inclusive prefix sum over a [b, n] array: one block per row,
// processed in chunks of BlockSize*4 elements. Each thread pre-sums groups
// of 4 into buffer4, then an up-sweep/down-sweep shared-memory scan (with
// bank-conflict padding controlled by paddingLevel) combines the group
// totals in `buffer`. runningsum/runningsum2 carry the total across chunks
// with a compensated (error-corrected) float addition.
// NOTE(review): the scan structure assumes blockDim.x is a power of two --
// launched with 512 threads by the launchers below.
__global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){
  const int BlockSize=2048;
  const int paddingLevel=5;
  __shared__ float buffer4[BlockSize*4];
  __shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)];
  for (int i=blockIdx.x;i<b;i+=gridDim.x){
    float runningsum=0,runningsum2=0;
    for (int j=0;j<n;j+=BlockSize*4){
      int n24_i=min(n-j,BlockSize*4);
      int n24=(n24_i+3)&~3;
      int n2=n24>>2;
      // Phase 1: each thread loads 4 elements, builds their local prefix
      // sums in buffer4 and stores the 4-group total into buffer.
      for (int k=threadIdx.x*4;k<n24_i;k+=blockDim.x*4){
        if (k+3<n24_i){
          float v1=inp[i*n+j+k];
          float v2=inp[i*n+j+k+1];
          v2+=v1;
          float v3=inp[i*n+j+k+2];
          float v4=inp[i*n+j+k+3];
          v4+=v3;
          v3+=v2;
          v4+=v2;
          buffer4[k]=v1;
          buffer4[k+1]=v2;
          buffer4[k+2]=v3;
          buffer4[k+3]=v4;
          buffer[(k>>2)+(k>>(2+paddingLevel))]=v4;
        }else{
          // Ragged tail of the chunk: accumulate serially and pad.
          float v=0;
          for (int k2=k;k2<n24_i;k2++){
            v+=inp[i*n+j+k2];
            buffer4[k2]=v;
          }
          for (int k2=n24_i;k2<n24;k2++){
            buffer4[k2]=v;
          }
          buffer[(k>>2)+(k>>(2+paddingLevel))]=v;
        }
      }
      // Phase 2: up-sweep over the group totals.
      int u=0;
      for (;(2<<u)<=n2;u++){
        __syncthreads();
        for (int k=threadIdx.x;k<int(n2>>(u+1));k+=blockDim.x){
          int i1=(((k<<1)+2)<<u)-1;
          int i2=(((k<<1)+1)<<u)-1;
          i1+=i1>>paddingLevel;
          i2+=i2>>paddingLevel;
          buffer[i1]+=buffer[i2];
        }
      }
      u--;
      // Phase 3: down-sweep to finish the scan of group totals.
      for (;u>=0;u--){
        __syncthreads();
        for (int k=threadIdx.x;k<int((n2-(1<<u))>>(u+1));k+=blockDim.x){
          int i1=(((k<<1)+3)<<u)-1;
          int i2=(((k<<1)+2)<<u)-1;
          i1+=i1>>paddingLevel;
          i2+=i2>>paddingLevel;
          buffer[i1]+=buffer[i2];
        }
      }
      __syncthreads();
      // Phase 4: add the preceding group totals back into buffer4.
      for (int k=threadIdx.x*4;k<n24;k+=blockDim.x*4){
        if (k!=0){
          int k2=((k>>2)-1)+(((k>>2)-1)>>paddingLevel);
          buffer4[k]+=buffer[k2];
          buffer4[k+1]+=buffer[k2];
          buffer4[k+2]+=buffer[k2];
          buffer4[k+3]+=buffer[k2];
        }
      }
      __syncthreads();
      for (int k=threadIdx.x;k<n24_i;k+=blockDim.x){
        out[i*n+j+k]=buffer4[k]+runningsum;
      }
      // Compensated accumulation of this chunk's total into the carry.
      float t=buffer[(n2-1)+((n2-1)>>paddingLevel)]+runningsum2;
      float r2=runningsum+t;
      runningsum2=t-(r2-runningsum);
      runningsum=r2;
      __syncthreads();
    }
  }
}
// For each of the m queries per batch row, scales the query by the row's
// last dataset value and finds via power-of-two descent the smallest index
// r with dataset[i*n + r] >= q, writing r to result.
// NOTE(review): assumes each dataset row is non-decreasing (e.g. the output
// of cumsumKernel, as wired up in probsampleLauncher) -- confirm callers.
__global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){
  // Smallest power of two >= n, used as the initial search step.
  int base=1;
  while (base<n)
    base<<=1;
  for (int i=blockIdx.x;i<b;i+=gridDim.x){
    for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
      float q=query[i*m+j]*dataset[i*n+n-1];
      int r=n-1;
      for (int k=base;k>=1;k>>=1)
        if (r>=k && dataset[i*n+r-k]>=q)
          r-=k;
      result[i*m+j]=r;
    }
  }
}
// Farthest point sampling: per batch element, iteratively selects m of the
// n points (c channels each), always seeding with point 0. temp (one row of
// n floats per block) holds each point's squared distance to the selected
// set; every round the block max-reduces (dists/dists_i) to pick the
// farthest point. One block per batch element; requires blockDim.x <=
// BlockSize, and the tree reduction assumes blockDim.x is a power of two.
template <unsigned int BlockSize>
__global__ void farthestpointsamplingKernel(int b,int n,int c,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
  if (m<=0)
    return;
  // const int BlockSize=512;
  __shared__ float dists[BlockSize];
  __shared__ int dists_i[BlockSize];
  for (int i=blockIdx.x;i<b;i+=gridDim.x){
    int old=0;
    if (threadIdx.x==0)
      idxs[i*m+0]=old;
    //initialize temp
    for (int j=threadIdx.x;j<n;j+=blockDim.x){
      temp[blockIdx.x*n+j]=1e38;
    }
    __syncthreads();
    for (int j=1;j<m;j++){
      int besti=0;
      float best=-1;
      // Update each point's distance to the set and track the local max.
      for (int k=threadIdx.x;k<n;k+=blockDim.x){
        float td=temp[blockIdx.x*n+k];
        float d = 0;
        float p1, p2;
        for (int l=0;l<c;l++){
          p1 = dataset[i*n*c+old*c+l];
          p2 = dataset[i*n*c+k*c+l];
          d += (p2-p1) * (p2-p1);
        }
        float d2=min(d,td);
        if (d2!=td)
          temp[blockIdx.x*n+k]=d2;
        if (d2>best){
          best=d2;
          besti=k;
        }
      }
      dists[threadIdx.x]=best;
      dists_i[threadIdx.x]=besti;
      // Block-wide argmax reduction over the per-thread candidates.
      for (int u=0;(1<<u)<blockDim.x;u++){
        __syncthreads();
        if (threadIdx.x<(blockDim.x>>(u+1))){
          int i1=(threadIdx.x*2)<<u;
          int i2=(threadIdx.x*2+1)<<u;
          if (dists[i1]<dists[i2]){
            dists[i1]=dists[i2];
            dists_i[i1]=dists_i[i2];
          }
        }
      }
      __syncthreads();
      old=dists_i[0];
      if (threadIdx.x==0)
        idxs[i*m+j]=old;
    }
  }
}
// Farthest point sampling variant that reads precomputed pairwise
// distances: dataset is a [b, n, n] matrix and d = dataset[i, old, k]
// replaces the squared-distance computation. Otherwise identical to
// farthestpointsamplingKernel (same launch expectations).
template <unsigned int BlockSize>
__global__ void farthestpointsamplingwithdistKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
  if (m<=0)
    return;
  // const int BlockSize=512;
  __shared__ float dists[BlockSize];
  __shared__ int dists_i[BlockSize];
  for (int i=blockIdx.x;i<b;i+=gridDim.x){
    int old=0;
    if (threadIdx.x==0)
      idxs[i*m+0]=old;
    //initialize temp
    for (int j=threadIdx.x;j<n;j+=blockDim.x){
      temp[blockIdx.x*n+j]=1e38;
    }
    __syncthreads();
    for (int j=1;j<m;j++){
      int besti=0;
      float best=-1;
      for (int k=threadIdx.x;k<n;k+=blockDim.x){
        float td=temp[blockIdx.x*n+k];
        float d = 0;
        d = dataset[i * n * n + old * n + k];
        float d2=min(d,td);
        if (d2!=td)
          temp[blockIdx.x*n+k]=d2;
        if (d2>best){
          best=d2;
          besti=k;
        }
      }
      dists[threadIdx.x]=best;
      dists_i[threadIdx.x]=besti;
      // Block-wide argmax reduction over the per-thread candidates.
      for (int u=0;(1<<u)<blockDim.x;u++){
        __syncthreads();
        if (threadIdx.x<(blockDim.x>>(u+1))){
          int i1=(threadIdx.x*2)<<u;
          int i2=(threadIdx.x*2+1)<<u;
          if (dists[i1]<dists[i2]){
            dists[i1]=dists[i2];
            dists_i[i1]=dists_i[i2];
          }
        }
      }
      __syncthreads();
      old=dists_i[0];
      if (threadIdx.x==0)
        idxs[i*m+j]=old;
    }
  }
}
// Farthest point sampling seeded by an existing index set: temp is first
// initialized with each point's squared distance to the nearest of the m1
// preselected points (preidx), the farthest point from that set becomes the
// first sample, and sampling then proceeds as in
// farthestpointsamplingKernel. Same launch expectations.
template <unsigned int BlockSize>
__global__ void farthestpointsamplingwithpreidxKernel(int b,int n,int c,int m,int m1,const float * __restrict__ dataset,const int * __restrict__ preidx,float * __restrict__ temp,int * __restrict__ idxs){
  // b: batch_size, n: ndataset, c: channel_num, m: points_num after fps, m1: preidx number
  // dataset: [b, n, c] preidx: [b, m1], temp: [b, n], idxs: [b, m]
  if (m<=0)
    return;
  // const int BlockSize=512;
  __shared__ float dists[BlockSize];
  __shared__ int dists_i[BlockSize];
  for (int i=blockIdx.x;i<b;i+=gridDim.x){
    for (int j=threadIdx.x;j<n;j+=blockDim.x){
      temp[blockIdx.x*n+j]=1e38;
    }
    int pre_idx;
    for (int j=threadIdx.x;j<n;j+=blockDim.x){
      // update temp metrics: distance to the nearest preselected point
      float pre_best = 1e38;
      float pre_p1, pre_p2;
      for (int k=0; k<m1; k++){
        pre_idx = preidx[i * m1 + k];
        float pre_d = 0;
        for (int l=0; l < c; l++){
          pre_p1 = dataset[i * n * c + pre_idx * c + l];
          pre_p2 = dataset[i * n * c + j * c + l];
          pre_d += (pre_p2 - pre_p1) * (pre_p2 - pre_p1);
        }
        pre_best = min(pre_best, pre_d);
      }
      temp[blockIdx.x*n+j] = pre_best;
    }
    // then find current largest distance as current old
    // (serial scan over n, executed redundantly by every thread)
    __syncthreads();
    int old=0;
    float pre_best = -1;
    for (int j=0; j<n; j++){
      if (pre_best < temp[blockIdx.x*n+j]){
        pre_best = temp[blockIdx.x*n+j];
        old = j;
      }
    }
    if (threadIdx.x==0)
      idxs[i*m+0]=old;
    //initialize temp
    __syncthreads();
    for (int j=1;j<m;j++){
      int besti=0;
      float best=-1;
      for (int k=threadIdx.x;k<n;k+=blockDim.x){
        float td=temp[blockIdx.x*n+k];
        float d = 0;
        float p1, p2;
        for (int l=0;l<c;l++){
          p1 = dataset[i*n*c+old*c+l];
          p2 = dataset[i*n*c+k*c+l];
          d += (p2-p1) * (p2-p1);
        }
        float d2=min(d,td);
        if (d2!=td)
          temp[blockIdx.x*n+k]=d2;
        if (d2>best){
          best=d2;
          besti=k;
        }
      }
      dists[threadIdx.x]=best;
      dists_i[threadIdx.x]=besti;
      // Block-wide argmax reduction over the per-thread candidates.
      for (int u=0;(1<<u)<blockDim.x;u++){
        __syncthreads();
        if (threadIdx.x<(blockDim.x>>(u+1))){
          int i1=(threadIdx.x*2)<<u;
          int i2=(threadIdx.x*2+1)<<u;
          if (dists[i1]<dists[i2]){
            dists[i1]=dists[i2];
            dists_i[i1]=dists_i[i2];
          }
        }
      }
      __syncthreads();
      old=dists_i[0];
      if (threadIdx.x==0)
        idxs[i*m+j]=old;
    }
  }
}
// inp: [b, n, c] idx: [b, m]
// out: [b, m, c]
// Gather: out[bi, mi, ci] = inp[bi, idx[bi, mi], ci], flattened over a
// grid-stride loop covering all b*m*c output elements.
__global__ void gatherpointKernel(int b,int n,int m,int c,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){
  int loop_time = b * m * c;
  CUDA_1D_KERNEL_LOOP(index, loop_time){
    int cur_batch_size = index / (m * c);   // batch index bi
    int cur_point_idx = index / c;          // flat [bi, mi] index into idx
    int cur_channel = index % c;            // channel ci
    int a=idx[cur_point_idx];
    int current_idx = cur_batch_size * (n * c) + a * c + cur_channel;
    out[index] = inp[current_idx];
  }
}
// out_g: [b, m, c] idx: [b, m]
// inp_g: [b, n, c]
// Backward of gatherpointKernel: scatter-adds each output gradient into the
// input gradient slot it was gathered from. atomicAdd handles duplicate
// indices in idx.
__global__ void scatteraddpointKernel(int b,int n,int m,int c,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
  int loop_time = b * m * c;
  CUDA_1D_KERNEL_LOOP(index, loop_time){
    int cur_batch_size = index / (m * c);
    int cur_point_idx = index / c;
    int cur_channel = index % c;
    int a = idx[cur_point_idx];
    int current_idx = cur_batch_size * n * c + a * c + cur_channel;
    atomicAdd(&inp_g[current_idx],out_g[index]);
  }
}
// inp: [b, n, c] mask: [b, n]
// out: [b, proposal_num, c]
// Compacts rows of inp with non-zero mask into out, up to proposal_num rows
// per batch. The first valid row is broadcast into ALL proposal slots, so
// trailing slots stay filled when fewer than proposal_num rows are valid.
// One block per batch element. The per-point branch conditions are uniform
// across the block (every thread reads the same cur_mask value), so the
// __syncthreads() calls inside them are reached by all threads.
__global__ void GatherByMaskKernel(int b,int n,int c,int proposal_num,const float *inp,const float *mask,float *out){
  for (int cur_batch=blockIdx.x; cur_batch<b; cur_batch+=gridDim.x){
    const float *cur_inp = inp + cur_batch * n * c;
    const float *cur_mask = mask + cur_batch * n;
    float* cur_out = out + cur_batch * proposal_num * c;
    int proposal_cnt = 0;
    int loop_time, tmp_channel_idx;
    for (int cur_pts=0; cur_pts<n; cur_pts++){
      if(int(cur_mask[cur_pts]) == 0) continue;
      if(proposal_cnt == proposal_num) break;
      // a valid proposal
      if (proposal_cnt == 0){
        // First hit: replicate this row into every proposal slot.
        loop_time = proposal_num * c;
        for (int i=threadIdx.x; i<loop_time; i+=blockDim.x){
          tmp_channel_idx = i % c;
          cur_out[i] = cur_inp[cur_pts * c + tmp_channel_idx];
        }
        __syncthreads();
      }
      else {
        // Subsequent hits overwrite their own slot only.
        loop_time = c;
        for (int i=threadIdx.x; i<loop_time; i+=blockDim.x){
          cur_out[proposal_cnt * c + i] = cur_inp[cur_pts * c + i];
        }
        __syncthreads();
      }
      proposal_cnt += 1;
    }
  }
}
// Host-side launchers with fixed launch configurations. Note that
// threadsPerBlock (defined above) is sizeof(unsigned long long)*8 == 64.
void cumsumLauncher(int b,int n,const float * inp,float * out){
  cumsumKernel<<<32,512>>>(b,n,inp,out);
}
//require b*n working space
// Samples indices proportionally to inp_p: cumsum into temp, then binary
// search of the (pre-scaled) random values inp_r.
void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){
  cumsumKernel<<<32,512>>>(b,n,inp_p,temp);
  binarysearchKernel<<<dim3(32,8,1),512>>>(b,n,m,temp,inp_r,out);
}
//require 32*n working space
void farthestpointsamplingLauncher(int b,int n,int c,int m,const float * inp,float * temp,int * out){
  farthestpointsamplingKernel<1024><<<b,1024>>>(b,n,c,m,inp,temp,out);
}
//require 32*n working space
void farthestpointsamplingwithdistLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
  farthestpointsamplingwithdistKernel<1024><<<b,1024>>>(b,n,m,inp,temp,out);
}
//require 32*n working space
void farthestpointsamplingwithpreidxLauncher(int b,int n,int c,int m,int m1,const float * inp, const int* preidx,float * temp,int * out){
  farthestpointsamplingwithpreidxKernel<1024><<<b,1024>>>(b,n,c,m,m1,inp,preidx,temp,out);
}
void gatherpointLauncher(int b,int n,int m,int c,const float * inp,const int * idx,float * out){
  gatherpointKernel<<<block_num,threadsPerBlock>>>(b,n,m,c,inp,idx,out);
  //int thread_num = 512 / b;
  // gatherpointKernel<<<dim3(256,8,1),512>>>(b,n,m,inp,idx,out);
}
void scatteraddpointLauncher(int b,int n,int m,int c,const float * out_g,const int * idx,float * inp_g){
  scatteraddpointKernel<<<block_num,threadsPerBlock>>>(b,n,m,c,out_g,idx,inp_g);
}
void GatherByMaskLauncher(int b,int n,int c,int proposal_num,const float *inp,const float *mask,float *out){
  GatherByMaskKernel<<<block_num,threadsPerBlock>>>(b,n,c,proposal_num,inp,mask,out);
}
|
5,031 | #include <stdio.h>
#include "time.h"
#include <stdlib.h>
#include <limits.h>
/* The old-fashioned CPU-only way to add two vectors */
/* CPU reference implementation: element-wise sum of two length-n int
 * vectors into result. A no-op for n <= 0. */
void add_vectors_host(int *result, int *a, int *b, int n) {
  int i = 0;
  while (i < n) {
    result[i] = a[i] + b[i];
    i++;
  }
}
/* The kernel that will execute on the GPU */
/* The kernel that will execute on the GPU: one thread per element,
 * result[i] = a[i] + b[i]. */
__global__ void add_vectors_kernel(int *result, int *a, int *b, int n) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  // If we have more threads than the magnitude of our vector, we need to
  // make sure that the excess threads don't try to save results into
  // unallocated memory.
  if (idx < n)
    result[idx] = a[idx] + b[idx];
}
/* This function encapsulates the process of creating and tearing down the
* environment used to execute our vector addition kernel. The steps of the
* process are:
* 1. Allocate memory on the device to hold our vectors
* 2. Copy the vectors to device memory
* 3. Execute the kernel
* 4. Retrieve the result vector from the device by copying it to the host
* 5. Free memory on the device
*/
/* Adds two host int vectors on the GPU: allocates device buffers, copies
 * the inputs up, launches the kernel, copies the sum back into result, and
 * frees the device memory.
 * NOTE(review): none of the CUDA return codes are checked -- a failed
 * malloc/copy/launch would be silent here. */
void add_vectors_dev(int *result, int *a, int *b, int n) {
  // Step 1: Allocate memory
  int *a_dev, *b_dev, *result_dev;
  // Since cudaMalloc does not return a pointer like C's traditional malloc
  // (it returns a success status instead), we provide as it's first argument
  // the address of our device pointer variable so that it can change the
  // value of our pointer to the correct device address.
  cudaMalloc((void **) &a_dev, sizeof(int) * n);
  cudaMalloc((void **) &b_dev, sizeof(int) * n);
  cudaMalloc((void **) &result_dev, sizeof(int) * n);
  // Step 2: Copy the input vectors to the device
  cudaMemcpy(a_dev, a, sizeof(int) * n, cudaMemcpyHostToDevice);
  cudaMemcpy(b_dev, b, sizeof(int) * n, cudaMemcpyHostToDevice);
  // Step 3: Invoke the kernel
  // We allocate enough blocks (each 512 threads long) in the grid to
  // accommodate all `n` elements in the vectors. The 512 long block size
  // is somewhat arbitrary, but with the constraint that we know the
  // hardware will support blocks of that size.
  dim3 dimGrid((n + 512 - 1) / 512, 1, 1);
  dim3 dimBlock(512, 1, 1);
  add_vectors_kernel<<<dimGrid, dimBlock>>>(result_dev, a_dev, b_dev, n);
  // Step 4: Retrieve the results (blocking copy doubles as synchronization)
  cudaMemcpy(result, result_dev, sizeof(int) * n, cudaMemcpyDeviceToHost);
  // Step 5: Free device memory
  cudaFree(a_dev);
  cudaFree(b_dev);
  cudaFree(result_dev);
}
/* Prints n ints separated by trailing spaces, followed by a newline. */
void print_vector(int *array, int n) {
  for (int i = 0; i < n; i++)
    printf("%d ", array[i]);
  printf("\n");
}
/* Benchmarks CPU vs GPU addition of two all-ones vectors of length
 * CONST_VEC, timing initialization and both additions with clock() and
 * printing both result vectors.
 * Fix vs. original: the four malloc'd host buffers are now freed before
 * returning (previously leaked). */
int main(void) {
  const int CONST_VEC = 4;
  // It looks like the crossover is just above this. At 2.6e8, GPU calculation time cuts in half (roughly).
  int* a = (int *)malloc(CONST_VEC * sizeof(int));
  int* b = (int *)malloc(CONST_VEC * sizeof(int));
  int* host_result = (int *)malloc(CONST_VEC * sizeof(int));
  int* device_result = (int *)malloc(CONST_VEC * sizeof(int));
  /* Time the input initialization. */
  clock_t ar = clock();
  srand((unsigned)time(0));
  for(int i=0; i<CONST_VEC; i++){
    a[i] = 1;
    b[i] = 1;
  }
  ar = clock() - ar;
  printf ("It took me %ld clicks (%f seconds).\n",ar,((float)ar)/CLOCKS_PER_SEC);
  printf("The CPU's answer: ");
  clock_t t = clock();
  add_vectors_host(host_result, a, b, CONST_VEC);
  t = clock() - t;
  //print_vector(host_result, CONST_VEC);
  printf ("It took me %ld clicks (%f seconds).\n",t,((float)t)/CLOCKS_PER_SEC);
  printf("The GPU's answer: ");
  clock_t t2 = clock();
  add_vectors_dev(device_result, a, b, CONST_VEC);
  t2 = clock() - t2;
  //print_vector(device_result, CONST_VEC);
  printf ("It took me %ld clicks (%f seconds).\n",t2,((float)t2)/CLOCKS_PER_SEC);
  for(int i=0; i<CONST_VEC; i++) {
    printf("%i", device_result[i]);
    printf(" ");
  }
  printf("\n\n");
  for(int i=0; i<CONST_VEC; i++) {
    printf("%i",host_result[i]);
    printf(" ");
  }
  /* Release host buffers (previously leaked). */
  free(a);
  free(b);
  free(host_result);
  free(device_result);
  return 0;
}
|
5,032 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#define PI 3.1415
void gauss (int sigma, int gauss_matrix[][5]);
void gpuComputing(int gauss_matrix[][5], int** image_matrix, int** final_matrix, int height, int width);
/* Applies the 5x5 Gauss filter: one image row per block, each thread
 * sweeping the row in strides of 512 columns. Matrices are addressed via
 * their cudaMallocPitch pitches.
 * NOTE(review): image_row is offset with `pitch` (the Gauss matrix pitch)
 * instead of `pitch_i` -- this looks like a bug; confirm against the
 * cudaMallocPitch calls in gpuComputing.
 * NOTE(review): `result` is a float, but each term is the integer division
 * (gauss*image)/273, so every term is truncated -- confirm intended. */
__global__ void kernel(int* image, int* final, int* gauss, int pitch, int pitch_i, int pitch_f, int height, int width)
{
  int abs_Pos, c, r;
  int x, aux, aux2;
  int gauss_element;
  int image_element;
  int tid = threadIdx.x;
  float result;
  /** All threads of a block share the same row; every thread writes the
   ** same value, so the unsynchronized shared write is benign. **/
  __shared__ int fila;
  fila = blockIdx.x + 2;
  /** Threads sweep the row in steps of 512 cells **/
  while(tid < width - 4)
  {
    result = 0;
    /** Absolute position of the thread within the image array **/
    abs_Pos = fila * width + (tid +2);
    /** x position within the matrix **/
    x = abs_Pos % width;
    /** Walks the Gauss matrix and simultaneously reads the values needed
     ** from the image matrix to perform the computation **/
    for (r = 0; r < 5; ++r) {
      aux = r - 2;
      int* gauss_row = (int*)((char*)gauss + r * pitch);
      int* image_row = (int*)((char*)image + ((fila + aux) * pitch));
      for (c = 0; c < 5; ++c)
      {
        aux2 = c - 2;
        gauss_element = gauss_row[c];
        image_element = image_row[x + aux2];
        result += (gauss_element * image_element)/273;
      }
    }
    /** Stores the value into the corresponding cell of the final image **/
    int* final_row = (int*)((char*)final + blockIdx.x * pitch_f);
    final_row[tid] = result;
    /** Stride so a single block processes the whole row **/
    tid += 512;
  }
  return;
}
/* Reads a width x height image matrix from the file given in argv[1],
 * builds a 5x5 Gauss kernel for sigma = argv[2], pads the image with 2
 * reflected rows/columns on each side, filters it on the GPU, and writes
 * the filtered matrix to the file "salida".
 * NOTE(review): the padded/source matrices are indexed [width][height]
 * while the output loop runs [height][width]; the code relies on this
 * apparent transposition -- confirm with sample inputs. */
int main(int argc, char *argv[])
{
  int width, height, i, j;
  int **image_matrix, **final_matrix, **auxiliar_matrix;
  int gauss_matrix[5][5];
  int sigma = (int)strtod(argv[2], NULL);
  int *temp, *temp2, *temp3;
  /** Reads the file containing the data **/
  FILE *in_f = fopen(argv[1], "r");
  /** Reads the matrix width **/
  fscanf(in_f, "%d", &width);
  /** Reads the matrix height **/
  fscanf(in_f, "%d", &height);
  /** Dynamic 2D array to store the matrix (row pointers into one slab) **/
  image_matrix = (int**)malloc(width * sizeof(int*));
  temp = (int*)malloc(width * height * sizeof(int));
  for(i = 0; i < width; i++)
    image_matrix[i] = temp + (i * height);
  /** Dynamic 2D array with 4 extra rows and columns (padding) **/
  auxiliar_matrix = (int**)malloc((width + 4) * sizeof(int*));
  temp2 = (int*)malloc((width + 4) * (height + 4) * sizeof(int));
  for(i = 0; i < (width + 4); i++)
    auxiliar_matrix[i] = temp2 + (i * (height + 4));
  /** Dynamic 2D array to store the final result **/
  final_matrix = (int**)malloc(width * sizeof(int*));
  temp3 = (int*)malloc(width * height * sizeof(int));
  for(i = 0; i < width; i++)
    final_matrix[i] = temp3 + (i * height );
  /** Reads the values from the file and stores them in image_matrix **/
  for (i = 0; i < width; i++)
  {
    for(j = 0; j < height; j++)
      fscanf(in_f, "%d", &image_matrix[i][j]);
  }
  fclose(in_f);
  /** Computes the matrix of filter values **/
  gauss(sigma, gauss_matrix);
  /** Shifts the matrix 2 rows down and 2 columns right into the padded copy **/
  for(i = 2; i < width + 2; i++)
  {
    for(j = 2; j < height + 2; j++){
      auxiliar_matrix[i][j] = image_matrix[i-2][j-2];
    }
  }
  /** Row reflection (mirror padding) **/
  for(i = 2; i < width + 2; i++)
  {
    auxiliar_matrix[i][0] = auxiliar_matrix[i][4];
    auxiliar_matrix[i][1] = auxiliar_matrix[i][3];
    auxiliar_matrix[i][height + 2] = auxiliar_matrix[i][height];
    auxiliar_matrix[i][height + 3] = auxiliar_matrix[i][height-1];
  }
  /** Column reflection (mirror padding) **/
  for(i = 0; i < height + 4; i++)
  {
    auxiliar_matrix[0][i] = auxiliar_matrix[4][i];
    auxiliar_matrix[1][i] = auxiliar_matrix[3][i];
    auxiliar_matrix[width + 2][i] = auxiliar_matrix[width][i];
    auxiliar_matrix[width + 3][i] = auxiliar_matrix[width-1][i];
  }
  gpuComputing(gauss_matrix, auxiliar_matrix, final_matrix, height, width);
  /** Opens the file that will hold the final values **/
  FILE *out = fopen("salida", "w");
  for (i = 0; i < height; i++)
  {
    for (j = 0; j < width; j++)
    {
      if ( j != width - 1)
        fprintf(out, "%d\t", final_matrix[i][j]);
      else if (i != height-1)
        fprintf(out, "%d\n", final_matrix[i][j]);
      else
        fprintf(out, "%d", final_matrix[i][j]);
    }
  }
  fclose(out);
  /** Frees the memory backing the 3 matrices (the data slabs; the row
   ** pointer arrays are intentionally left to process exit) **/
  free(temp);
  free(temp2);
  free(temp3);
  return 0;
}
/* Fills gauss_matrix with a 5x5 integer Gaussian kernel for the given
 * sigma, scaled by 273 and rounded up, evaluated on the lattice
 * x in [-2, 2] (columns, left to right) and y in [2, -2] (rows, top down). */
void gauss(int sigma, int gauss_matrix[][5])
{
  double varianza = pow(sigma, 2);
  for(int i = 0; i < 5; i++)
  {
    int y = 2 - i;
    for(int j = 0; j < 5; j++)
    {
      int x = j - 2;
      double u = pow(x, 2);
      double v = pow(y, 2);
      double operation = 273.0*exp((-u-v)/(2.0*varianza))/(2.0*PI*varianza);
      gauss_matrix[i][j] = (int)ceil(operation);
    }
  }
  return;
}
/* Copies the Gauss kernel and padded image to the device with pitched
 * allocations, launches the filter kernel (one block per output row, 512
 * threads) and copies the filtered matrix back into final_matrix.
 * NOTE(review): the size_t pitches are narrowed to the kernel's int
 * parameters -- fine for these sizes, but worth confirming.
 * NOTE(review): d_final is never initialized and the kernel only writes
 * columns [0, width-4); the last columns copied back may be garbage. */
void gpuComputing(int gauss_matrix[][5], int** image_matrix, int** final_matrix, int height, int width)
{
  int *d_image, *d_final, *d_gauss;
  int r_height = height + 4;  // padded dimensions
  int r_width = width + 4;
  size_t pitch, pitch_i, pitch_f;
  /** Pitched allocations; each call returns the device row pitch **/
  cudaMallocPitch(&d_gauss, &pitch, 5 * sizeof(int), 5);
  cudaMallocPitch(&d_image, &pitch_i, r_width * sizeof(int), r_height);
  cudaMallocPitch(&d_final, &pitch_f, width * sizeof(int), height);
  /** Transfers the data from the host to the device **/
  cudaMemcpy2D(d_gauss, pitch, *gauss_matrix, 5 * sizeof(int), 5 * sizeof(int), 5, cudaMemcpyHostToDevice);
  cudaMemcpy2D(d_image, pitch_i, *image_matrix, r_width * sizeof(int), r_width * sizeof(int), r_height, cudaMemcpyHostToDevice);
  /** Kernel launch; even if its implementation could be more efficient, the
   ** run times stay low **/
  kernel<<<height, 512>>>(d_image, d_final, d_gauss, pitch, pitch_i, pitch_f, r_height, r_width);
  /** Transfers the final result back (blocking copy also synchronizes) **/
  cudaMemcpy2D(*final_matrix, width*sizeof(int), d_final, pitch_f, width*sizeof(int), height, cudaMemcpyDeviceToHost);
  cudaFree(d_final);
  cudaFree(d_image);
  cudaFree(d_gauss);
  return;
}
|
5,033 | extern "C" __global__ void kernelFunction(int *input)
{
// Each thread writes a descending value: input[t] = 32 - t (expects a 1-D block).
input[threadIdx.x] = 32 - threadIdx.x;
} |
5,034 | #include "lsystem.cuh"
|
5,035 | #include "includes.h"
#define ITER 10000000000 // Number of bins
#define NUMBLOCKS 13 // Number of thread blocks
#define NUMTHREADS 192 // Number of threads per block
int tid;
float pi;
// Kernel
// Main
// Midpoint-rule partial sums for the integral of 4/(1+x^2) (the pi estimate).
// Each thread strides over the bins and accumulates into sum[idx];
// assumes sum[] was zero-initialized on the host -- TODO confirm.
__global__ void pic(float *sum, int nbin, float step, int nthreads, int nblocks) {
int i;
float x;
int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks
for (i=idx; i< nbin; i+=nthreads*nblocks) {
// NOTE(review): 0.5 / 4.0 / 1.0 are double literals, so this arithmetic is
// performed in double precision before being stored into the float array.
x = (i+0.5)*step;
sum[idx] += 4.0/(1.0+x*x);
}
} |
5,036 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <sys/time.h>
#define THREADS 512
#ifdef __cplusplus
extern "C"
{
#endif
/* Placeholder for a GPU sort: performs no work on `a` and always
 * reports success (returns 0). */
int cuda_sort(int number_of_elements, float *a)
{
    (void)number_of_elements;  /* unused until the sort is implemented */
    (void)a;                   /* unused */
    return 0;
}
#ifdef __cplusplus
}
#endif
|
5,037 | //
// Created by kindr on 2021/5/8.
//
#include "multiKernelConcurrent.cuh"
#include "../../common/utils.cuh"
#include <cstdio>
const int N = 1 << 25;
__global__
void math_kernel1(int n) {
// Dummy compute loop used as a stand-in workload for the stream-concurrency demo.
double sum = 0;
for (int i = 0; i < n; i++) sum += tan(0.1) * tan(0.1);
printf("sum=%g\n", sum);
}
__global__
void math_kernel2(int n) {
// Identical dummy workload to math_kernel1; a second symbol so the demo can
// interleave two distinct kernels per stream.
double sum = 0;
for (int i = 0; i < n; i++) sum += tan(0.1) * tan(0.1);
printf("sum=%g\n", sum);
}
// Launches a pair of dummy kernels on each of 4 user-created streams (eligible
// to run concurrently), then launches the same pairs without a stream argument,
// i.e. on the legacy default stream, which synchronizes with the other streams.
void multiKernelConcurrent() {
int n_stream = 4;
size_t nStreamBytes = n_stream * sizeof(cudaStream_t);
auto *stream = static_cast<cudaStream_t *>(malloc(nStreamBytes));
for (int i = 0; i < n_stream; i++) {
cudaStreamCreate(&stream[i]);
}
CHECK(cudaGetLastError());
// Per-stream launches: these can overlap with each other.
for (int i = 0; i < n_stream; i++) {
math_kernel1<<<1, 1, 0, stream[i]>>>(N);
math_kernel2<<<1, 1, 0, stream[i]>>>(N);
}
// Default-stream launches: serialized relative to everything else.
for (int i = 0; i < n_stream; i++) {
math_kernel1<<<1, 1>>>(N);
math_kernel2<<<1, 1>>>(N);
}
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
for (int i = 0; i < n_stream; i++) {
cudaStreamDestroy(stream[i]);
}
free(stream);
}
|
5,038 | /* CUDA finite difference wave equation solver, written by
* Jeff Amelang, 2012
*
* Modified by Kevin Yuh, 2013-14 */
#include <cstdio>
#include <cuda_runtime.h>
#include "Cuda1DFDWave_cuda.cuh"
/* kernel to calculate new displacements */
/* Advances the 1-D FD wave solution one time step:
 *   y(x,t+1) = 2*y(x,t) - y(x,t-1) + c^2 * (y(x+1,t) - 2*y(x,t) + y(x-1,t))
 * The three time levels live in one buffer at offsets oldStart/currentStart/
 * newStart. Boundary nodes (0 and numberOfNodes-1) are skipped; they are set
 * elsewhere. Grid-stride loop, so any launch configuration is valid. */
__global__ void GenerateDisplacements(float* dev_Data, int oldStart,
                                      int currentStart, int newStart,
                                      float courantSquared, int numberOfNodes) {
  const unsigned int stride = blockDim.x * gridDim.x;
  for (unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
       idx < numberOfNodes - 1; idx += stride) {
    if (idx == 0) continue;  // left boundary: already set on the GPU
    const float yCur = dev_Data[currentStart + idx];
    const float yOld = dev_Data[oldStart + idx];
    const float lap  = dev_Data[currentStart + idx + 1] - 2 * yCur +
                       dev_Data[currentStart + idx - 1];
    dev_Data[newStart + idx] = 2 * yCur - yOld + courantSquared * lap;
  }
}
/* helper function to call the kernel */
/* Host-side wrapper: launches GenerateDisplacements with the caller-chosen
 * grid/block configuration. Arguments are forwarded unchanged. */
void kernelCall(float* dev_Data, int oldStart, int currentStart, int newStart,
float courantSquared, int numberOfNodes,
unsigned int blocks, unsigned int threadsPerBlock) {
GenerateDisplacements <<<blocks, threadsPerBlock>>> (dev_Data, oldStart,
currentStart, newStart, courantSquared, numberOfNodes);
}
|
5,039 | // CUDA kernel in C
// Elementwise xy = sin(x) + cos(y) over an nx x ny x nz volume.
// One thread per voxel; threads outside the volume do nothing.
extern "C" __global__ void sincos_kernel(int nx, int ny, int nz, float* x, float* y, float* xy)
{
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    const int j = threadIdx.y + blockIdx.y * blockDim.y;
    const int k = threadIdx.z + blockIdx.z * blockDim.z;
    if (i < nx && j < ny && k < nz) {
        const int index = (k * ny + j) * nx + i;  // linearized voxel index
        xy[index] = sin(x[index]) + cos(y[index]);
    }
}
|
5,040 | #ifndef uint32_t
#define uint32_t unsigned int
#endif
#define H0 0x6a09e667
#define H1 0xbb67ae85
#define H2 0x3c6ef372
#define H3 0xa54ff53a
#define H4 0x510e527f
#define H5 0x9b05688c
#define H6 0x1f83d9ab
#define H7 0x5be0cd19
// --- SHA-256 primitive functions (FIPS 180-4 notation) ---
// Rotate right by n bits; for n >= 32 the value is returned unchanged.
__device__
uint rotr(uint x, int n) {
if (n < 32) return (x >> n) | (x << (32 - n));
return x;
}
// "Choose": bits of y where x is set, bits of z where x is clear.
__device__
uint ch(uint x, uint y, uint z) {
return (x & y) ^ (~x & z);
}
// "Majority" of the three inputs, bitwise.
__device__
uint maj(uint x, uint y, uint z) {
return (x & y) ^ (x & z) ^ (y & z);
}
// Big Sigma-0 (used in the compression function).
__device__
uint sigma0(uint x) {
return rotr(x, 2) ^ rotr(x, 13) ^ rotr(x, 22);
}
// Big Sigma-1 (used in the compression function).
__device__
uint sigma1(uint x) {
return rotr(x, 6) ^ rotr(x, 11) ^ rotr(x, 25);
}
// Small sigma-0 (used in the message schedule expansion).
__device__
uint gamma0(uint x) {
return rotr(x, 7) ^ rotr(x, 18) ^ (x >> 3);
}
// Small sigma-1 (used in the message schedule expansion).
__device__
uint gamma1(uint x) {
return rotr(x, 17) ^ rotr(x, 19) ^ (x >> 10);
}
__constant__ uint K[64]={
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
// Flat global thread index for a 3-D grid of 1-D thread blocks.
__device__
uint get_global_id() {
    const uint block =
        (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x;
    return block * blockDim.x + threadIdx.x;
}
// Brute-force SHA-256 search. Per the comment below, the candidate message is
// built as: prefix | thread id as 4 hex chars | rnd as 4 hex chars | start as
// 8 hex chars, all packed into one 512-bit block Ws[16]. Each thread fixes its
// id/start words once, then iterates rnd over 0..0xFFFF, hashing one block per
// round; a digest whose first word is zero under `mask` records (id, rnd).
// NOTE(review): later hits overwrite earlier ones in match[] -- only the last
// match of a launch survives; confirm that is acceptable to the host code.
__global__ void crypt_kernel(ulong start, uint *prefix, ulong plen, uint mask, uint *match){
int t;
// NOTE(review): W has 80 entries but SHA-256 only uses 64 (80 is SHA-1's size).
uint W[80], rnd, id, A,B,C,D,E,F,G,H,T1,T2;
uint Ws[16];
id = get_global_id();
//if (id == 0) {
// printf("%08x\n", start);
//}
// brutforce is build up as: prefix | thr_id:04x | <rnd>:04x | start:08x
for (t = 0; t < plen; ++t) {
Ws[t] = prefix[t];
// printf("%04x", prefix[t]);
}
// printf("%04x\n", id);
// Pack the 16-bit id as 4 ASCII hex digits into one word (0x30 = '0';
// the T2 expression adds 0x27 to digits >= 10 to reach 'a'..'f').
T1 = (id & 0xf) | (((id >> 4) & 0xf) << 8) | (((id >> 8) & 0xf) << 16) | (((id >> 12) & 0xf) << 24);
T2 = (T1 & 0xe0e0e0e);
T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
Ws[plen] = T1 + 0x30303030 + T2 * 0x27;
// High 16 bits of `start` as 4 ASCII hex digits.
T1 = (uint)(start >> 32);
T1 = (T1 & 0xf) | (((T1 >> 4) & 0xf) << 8) | (((T1 >> 8) & 0xf) << 16) | (((T1 >> 12) & 0xf) << 24);
T2 = (T1 & 0xe0e0e0e);
T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
Ws[plen + 2] = T1 + 0x30303030 + T2 * 0x27;
// Low 16 bits of `start` as 4 ASCII hex digits.
T1 = (uint)start;
T1 = (T1 & 0xf) | (((T1 >> 4) & 0xf) << 8) | (((T1 >> 8) & 0xf) << 16) | (((T1 >> 12) & 0xf) << 24);
T2 = (T1 & 0xe0e0e0e);
T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
Ws[plen + 3] = T1 + 0x30303030 + T2 * 0x27;
// SHA-256 padding: 0x80 terminator, zero fill, bit length in the last word.
Ws[plen + 4] = 0x80000000;
for (t = plen + 5; t < 15; ++t) {
Ws[t] = 0;
}
Ws[15] = 128 + 32 * plen;
// preparing buffer done
/*
if (id == 0) {
printf("%016x: ", start);
for (t = 0; t < 16; ++t) {
printf("%08x", Ws[t]);
}
printf(" - %u\n", Ws[15]);
}
*/
for (rnd = 0; rnd < 0x10000; ++rnd) {
// NOTE(review): digest[] is initialized here and then re-assigned below.
uint digest[8] = {H0, H1, H2, H3, H4, H5, H6, H7};
#pragma unroll
for (t = 0; t < 16; ++t) {
W[t] = Ws[t];
}
// Only the `rnd` word changes between rounds: re-encode it as hex digits.
T1 = (rnd & 0xf) | (((rnd >> 4) & 0xf) << 8) | (((rnd >> 8) & 0xf) << 16) | (((rnd >> 12) & 0xf) << 24);
T2 = (T1 & 0xe0e0e0e);
T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
W[plen + 1] = T1 + 0x30303030 + T2 * 0x27;
A = digest[0] = H0;
B = digest[1] = H1;
C = digest[2] = H2;
D = digest[3] = H3;
E = digest[4] = H4;
F = digest[5] = H5;
G = digest[6] = H6;
H = digest[7] = H7;
// Message schedule expansion.
for (t = 16; t < 64; t++) {
W[t] = gamma1(W[t - 2]) + W[t - 7] + gamma0(W[t - 15]) + W[t - 16];
}
// 64 rounds of the compression function.
for (t = 0; t < 64; t++) {
T1 = H + sigma1(E) + ch(E, F, G) + K[t] + W[t];
T2 = sigma0(A) + maj(A, B, C);
H = G; G = F; F = E; E = D + T1; D = C; C = B; B = A; A = T1 + T2;
}
digest[0] += A;
// Hit test on the first digest word only.
if ((digest[0] & mask) == 0) {
/*
for (t = 0; t < 16; ++t) {
printf("%08x", Ws[t]);
}
printf(" - %u\n", Ws[15]);
*/
match[0] = 1;
match[1] = id;
match[2] = rnd;
}
}
} |
5,041 | extern "C" {
// Sobel-style weighted difference: (a1 + 2*a2 + a3) - (a4 + 2*a5 + a6).
__device__ int KerSobel(int a1, int a2, int a3, int a4, int a5, int a6)
{
    const int plusSide  = a1 + 2 * a2 + a3;
    const int minusSide = a4 + 2 * a5 + a6;
    return plusSide - minusSide;
}
// Per-pixel 3x3 convolution on a packed 0xRRGGBB image: each channel is
// convolved with the 9 weights in gc_weight, the response is squared and
// scaled by amplitude/1024, clamped to 255, and repacked. Border pixels
// (outermost row/column) are left untouched. One thread per pixel.
__global__ void laplacian_filter(unsigned int *lpSrc,unsigned int *lpDst, int width, int height,int* gc_weight, int amplitude)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int idx = width * y + x;
int xy[9];
int dr, dg, db;
int powR, powG, powB;
if(x >= 1 && x < width - 1 && y >= 1 && y < height - 1)
{
/*lpDst[idx] = 8 * lpSrc[idx] -
lpSrc[idx - 1] - lpSrc[idx + 1] -
lpSrc[idx - width] - lpSrc[idx + width] -
lpSrc[idx - width - 1] - lpSrc[idx + width + 1] -
lpSrc[idx - width + 1] - lpSrc[idx + width - 1];
*/
// Gather the 3x3 neighborhood (row-major, top-left to bottom-right).
xy[0]= lpSrc[idx - width - 1];
xy[1]= lpSrc[idx - width];
xy[2]= lpSrc[idx - width + 1];
xy[3]= lpSrc[idx - 1];
xy[4]= lpSrc[idx];
xy[5]= lpSrc[idx + 1];
xy[6]= lpSrc[idx + width - 1];
xy[7]= lpSrc[idx + width];
xy[8]= lpSrc[idx + width + 1];
dr = dg = db = 0;
// Weighted sum per channel (R in bits 16-23, G in 8-15, B in 0-7).
for(int i = 0; i < 9; i ++)
{
dr += gc_weight[i] * (0xFF & (xy[i] >> 16));
dg += gc_weight[i] * (0xFF & (xy[i] >> 8));
db += gc_weight[i] * (0xFF & (xy[i]));
}
/* Calculate power */
powR = amplitude * dr * dr >> 10; // * amplitude / 1024
powG = amplitude * dg * dg >> 10;
powB = amplitude * db * db >> 10;
// Clamp each channel to the 8-bit range before repacking.
if(powR > 255) { powR = 255; }
if(powG > 255) { powG = 255; }
if(powB > 255) { powB = 255; }
lpDst[y * width + x] = (powR << 16) | (powG << 8) | (powB);
}
}
} |
5,042 | #include "includes.h"
// Forward differences on the interior of a field stored with `rows` as the
// leading dimension (index = idx + rows * idy): dx steps by 1 (adjacent in the
// leading dimension), dy steps by `rows`. Border cells are never written.
// Grid-stride in both dimensions, so any 2-D launch configuration works.
__global__ void forwardDifference2DKernel(const int cols, const int rows, const float* data, float* dx, float* dy) {
for (auto idy = blockIdx.y * blockDim.y + threadIdx.y + 1; idy < cols - 1;
idy += blockDim.y * gridDim.y) {
for (auto idx = blockIdx.x * blockDim.x + threadIdx.x + 1;
idx < rows - 1; idx += blockDim.x * gridDim.x) {
const auto index = idx + rows * idy;
dx[index] = data[index + 1] - data[index];
dy[index] = data[index + rows] - data[index];
}
}
} |
5,043 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define _USE_MATH_DEFINES
#include <math.h>
// Fills a 4-bytes-per-pixel image: channel 1 gets a 2-D sinusoidal pattern
// (period 128 px) mirrored within each tile; channels 0 and 2 are zeroed and
// channel 3 (alpha) is 255. Requires 16x16 thread blocks: the shared tile is
// 16x16 and the mirrored read uses the fixed indices 15 - threadIdx.
__global__ void kernel(unsigned char* src) {
__shared__ float temp[16][16];
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
const float period = 128.0f;
temp[threadIdx.x][threadIdx.y] = 255 *
(sinf(x * 2.0f * M_PI/period) + 1.0f) *
(sinf(y * 2.0f * M_PI/period) + 1.0f) / 4.0f;
// Barrier: each thread reads the tile entry written by the mirrored thread.
__syncthreads();
src[offset * 4] = 0;
src[offset * 4 + 1] = temp[15 - threadIdx.x][15 - threadIdx.y];
src[offset * 4 + 2] = 0;
src[offset * 4 + 3] = 255;
}
// Renders the sinusoidal pattern into a device buffer and copies it to the
// host buffer `dst` (dimension x dimension x 4 bytes). `dimension` must be a
// multiple of 16 (grid is dimension/16 blocks of 16x16 threads).
extern "C" void GenerateBitmap(unsigned char* dst, int dimension) {
int size = dimension * dimension * 4;
cudaError_t status;
unsigned char* src;
// NOTE(review): `status` is never inspected, so an allocation failure here
// goes unnoticed and the launch/copy below would fail silently.
status = cudaMalloc(&src, size);
dim3 blocks(dimension / 16, dimension / 16);
dim3 threads(16, 16);
kernel<<<blocks, threads>>>(src);
cudaMemcpy(dst, src, size, cudaMemcpyDeviceToHost);
cudaFree(src);
} |
5,044 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
/*
struct CDP {
char name[256];
size_t totalGlobalMem;
size_t sharedMemPerBlock;
int regsPerBlock;
int warpSize;
size_t memPitch;
int maxThreadsPerBlock;
int maxThreadsDim[3];
int maxGridSize[3];
size_t totalConstMem;
int major;
int minor;
int clockRate;
size_t textureAlignment;
int deviceOverlap;
int multiProcessorCount;
int kernelExecTimeoutEnabled;
int integrated;
int canMapHostMemory;
int computeMode;
int maxTexture1D;
int maxTexture2D[2];
int maxTexture3D[3];
int maxTexture2DArray[3];
int concurrentKernels;
};
*/
/* Enumerates all CUDA devices and prints their general, memory, and
 * multiprocessor properties to stdout.
 * Fix: the compute-capability line ended with a literal "/n" instead of
 * the newline escape "\n", corrupting the output formatting. */
int main( void ) {
cudaDeviceProp prop;
int i;
int count;
cudaGetDeviceCount (&count);
for(i =0; i<count; ++i) {
cudaGetDeviceProperties( &prop, i );
printf(" ---- General Information for Device %d ---\n", i);
printf("Name : %s\n", prop.name);
printf("Compute Capability : %d.%d\n", prop.major, prop.minor); /* was "/n" */
printf("Clock Rate : %d\n", prop.clockRate);
printf("Device Copy Overlap: " );
if(prop.deviceOverlap)
printf( " Enabled\n");
else
printf( " Disabled\n");
printf("Kernel Execution Timeout : ");
if(prop.kernelExecTimeoutEnabled)
printf( " Enabled\n");
else
printf( " Disabled\n");
printf("\n ---- Memory Information for Device %d ---\n", i);
printf("Total Global Memory: %ld\n", prop.totalGlobalMem);
printf("Total Constant Memory: %ld\n", prop.totalConstMem);
printf("Max mem pitch: %ld\n", prop.memPitch);
printf("Texture Alignment: %ld\n", prop.textureAlignment);
printf("\n --- MP Information for Device %d ---\n", i);
printf("Multiprocessor Count: %d\n", prop.multiProcessorCount);
printf("Shared mem per mp: %ld\n", prop.sharedMemPerBlock);
printf("Registers per mp : %d\n", prop.regsPerBlock);
printf("Threads in warp: %d\n", prop.warpSize);
printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Max thread dimensions: (%d %d %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Max Grid Dimensions: (%d %d %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("\n");
}
return 0;
}
|
5,045 |
/* Column addition over Z/2: replaces column j1 by the symmetric difference
 * of supp(j0) and supp(j1) (per the original comment), merging the two sorted
 * -1-terminated lists into the scratch column d_aux_mp[j1*p ..) and copying
 * the result back. Each column has capacity p entries. d_low[j1] is updated
 * with the largest remaining row index, or -1 if the column became empty.
 *
 * Fix: every "has more entries" test now checks the bounds BEFORE reading
 * d_rows_mp / d_aux_mp, so a completely full column (no -1 terminator within
 * its capacity) no longer reads one element past its capacity.
 * NOTE(review): writes into d_aux_mp remain unguarded, as in the original;
 * if the symmetric difference exceeds capacity p, behavior is unchanged. */
__device__ void left_to_right(int j0, int j1, int *d_rows_mp, int *d_aux_mp, int *d_low, int m, int p){
    int idx0 = j0*p;
    int idx1 = j1*p;
    int idx0_MAX = (j0+1)*p;
    int idx1_MAX = (j1+1)*p;
    int idx = idx1;
    bool idx0_ok = idx0 < idx0_MAX && d_rows_mp[idx0] != -1;
    bool idx1_ok = idx1 < idx1_MAX && d_rows_mp[idx1] != -1;
    while (idx0_ok || idx1_ok){
        if (idx0_ok && idx1_ok){
            if (d_rows_mp[idx0] < d_rows_mp[idx1]){
                d_aux_mp[idx++] = d_rows_mp[idx0++];
            }else if (d_rows_mp[idx1] < d_rows_mp[idx0]){
                d_aux_mp[idx++] = d_rows_mp[idx1++];
            }else{
                /* Equal entries cancel (Z/2 addition). */
                idx0++;
                idx1++;
                if (idx0 == idx0_MAX || idx1 == idx1_MAX){
                    //printf("WARNING: Reached memalloc limit\n"); // needs thurst import
                    //asm("trap;");
                }
            }
        }else{
            if (idx0_ok){
                d_aux_mp[idx++] = d_rows_mp[idx0++];
            }
            if (idx1_ok){
                d_aux_mp[idx++] = d_rows_mp[idx1++];
            }
        }
        idx0_ok = idx0 < idx0_MAX && d_rows_mp[idx0] != -1;
        idx1_ok = idx1 < idx1_MAX && d_rows_mp[idx1] != -1;
    }
    int low_j1 = -1;
    /* Copy the merged column back, clearing the scratch column as we go and
     * remembering the last (largest) row index seen. */
    idx1 = j1*p;
    while(idx1 < idx1_MAX && d_aux_mp[idx1] != -1){
        d_rows_mp[idx1] = d_aux_mp[idx1];
        d_aux_mp[idx1] = -1;
        if (d_rows_mp[idx1] > -1)
            low_j1 = d_rows_mp[idx1];
        idx1++;
    }
    /* Pad the remainder of the column that previously held entries. */
    while(idx1 < idx1_MAX && d_rows_mp[idx1] != -1){
        d_rows_mp[idx1] = -1;
        idx1++;
    }
    d_low[j1] = low_j1;
}
// Reset column j (capacity p) to the empty state by overwriting its leading
// run of valid entries with the -1 sentinel; stops at the first existing -1.
__device__ void clear_column(int j, int *d_rows_mp, int p){
    const int end = (j + 1) * p;
    for (int idx = j * p; idx < end && d_rows_mp[idx] != -1; ++idx){
        d_rows_mp[idx] = -1;
    }
}
// One reduction step: only the thread with tid == j acts. While column j's
// low index collides with a column already registered in d_arglow, add that
// column into j (left_to_right) and recompute the low; once unique, register
// column j as the owner of its low index. Naming (low/arglow) suggests the
// standard boundary-matrix reduction from persistent homology -- based on
// structure; confirm against the host driver.
__global__ void reduce_col(int j, int *d_rows_mp, int *d_aux_mp, int *d_low, int *d_arglow, int m, int p){
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if(j == tid && j < m){
int j0 = -1;
int low_j = d_low[j]; // low_j = -1, 0, 1, ..., m-1
while (low_j > -1 && d_arglow[low_j] != -1){
j0 = d_arglow[low_j];
left_to_right(j0, j, d_rows_mp, d_aux_mp, d_low, m, p);
low_j = d_low[j];
}
low_j = d_low[j];
if (low_j > -1){
d_arglow[low_j] = j;
}
}
}
// Final classification pass, one thread per column:
// reduced columns (low > -1) become class -1 and mark their partner as +1;
// otherwise columns flagged essential get class 2.
__global__ void update_classes_final(int *d_classes, int *d_low, int *d_ess, int m){
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    if (col >= m) return;
    const int low = d_low[col];
    if (low > -1){
        d_classes[col] = -1;
        d_classes[low] = 1;
    } else if (d_ess[col] == 1){
        d_classes[col] = 2;
    }
}
// Incremental classification, one thread per row index: if some column has
// claimed this row as its pivot (arglow > -1), mark that column -1 and this
// row's class +1.
__global__ void update_classes(int *d_classes, int *d_low, int *d_arglow, int m){
    const int row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= m) return;
    const int pivotCol = d_arglow[row];
    if (pivotCol > -1){
        d_classes[pivotCol] = -1;
        d_classes[row] = 1;
    }
}
// Final essential-estimate pass: any column with a pivot clears the essential
// flag of both itself and its pivot row. (d_arglow is unused here; kept for
// signature parity with ess_hat.)
__global__ void ess_hat_final(int *d_essential_hat, int *d_low, int *d_arglow, int m){
    const int j = threadIdx.x + blockDim.x * blockIdx.x;
    if (j >= m) return;
    const int low_j = d_low[j];
    if (low_j > -1){
        d_essential_hat[j] = 0;
        d_essential_hat[low_j] = 0;
    }
}
// Incremental essential-estimate pass: clear the flag for pivot rows of
// columns with a low, and for both endpoints of any registered pivot pair.
__global__ void ess_hat(int *d_essential_hat, int *d_low, int *d_arglow, int m){
    const int j = threadIdx.x + blockDim.x * blockIdx.x;
    if (j >= m) return;
    if (d_low[j] > -1)
        d_essential_hat[d_low[j]] = 0;
    const int owner = d_arglow[j];
    if (owner > -1){
        d_essential_hat[owner] = 0;
        d_essential_hat[j] = 0;
    }
}
|
5,046 | #include <stdio.h>
// Exmple doesn't work
// One designated thread (threadIdx.x == 1) per block prints a greeting.
__global__ void print_kernel() {
    if (threadIdx.x != 1) return;
    printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
}
int main() {
// NOTE(review): with cudaDeviceSynchronize() commented out, the process may
// exit before the kernel runs and flushes its printf buffer, so no output
// appears -- consistent with the "Exmple doesn't work" comment above.
print_kernel<<<100, 10>>>();
//cudaDeviceSynchronize();
} |
5,047 | #include "includes.h"
// Elementwise y[i] *= a*x[i] + b for i < len; one thread per element.
// NOTE(review): this multiplies into y (`*=`); the name "axpb_y" could also
// suggest plain assignment (`=`) -- confirm the accumulate semantics are intended.
__global__ void axpb_y_i32 (int a, int* x, int b, int* y, int len) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
y[idx] *= a * x[idx] + b;
}
} |
5,048 | #include "includes.h"
// d_Result = A * d_Data + B elementwise over a width x height image, where A
// and B are the first elements of d_ConstantA/d_ConstantB (symbols declared
// elsewhere, presumably in __constant__ memory -- TODO confirm in includes.h).
// Launch expects 16x16 thread blocks (coordinates are built from blockIdx*16).
__global__ void MultiplyAdd(float *d_Result, float *d_Data, int width, int height)
{
const int x = __mul24(blockIdx.x, 16) + threadIdx.x;
const int y = __mul24(blockIdx.y, 16) + threadIdx.y;
int p = __mul24(y, width) + x;
if (x<width && y<height)
d_Result[p] = d_ConstantA[0]*d_Data[p] + d_ConstantB[0];
// NOTE(review): no shared memory is used in this kernel, so this trailing
// barrier appears to serve no purpose.
__syncthreads();
} |
5,049 | __global__ void kernel_add(float *proj1, float *proj, int iv, int na, int nb, float weight){
// Accumulates view iv of the projection stack into proj1 with the given weight:
// proj1[ia,ib] += weight * proj[ia,ib,iv]. Launch expects 16x16 thread blocks.
int ia = 16 * blockIdx.x + threadIdx.x;
int ib = 16 * blockIdx.y + threadIdx.y;
if (ia >= na || ib >= nb)
return;
proj1[ia + ib * na] += proj[ia + ib * na + iv * na * nb] * weight;
}
// __global__ void kernel_add(cudaArray *proj1, cudaArray *proj, int iv, int na, int nb, float weight){
// int ia = 16 * blockIdx.x + threadIdx.x;
// int ib = 16 * blockIdx.y + threadIdx.y;
// if (ia >= na || ib >= nb)
// return;
// proj1[ia + ib * na] += proj[ia + ib * na + iv * na * nb] * weight;
// }
|
5,050 |
/*
* Device code
*/
// Parallel forward elimination for Gaussian elimination on an augmented
// matrix stored column-wise in d_Aug; one thread per matrix element, with
// d_Piv holding each element's pivot multiplier for the current step k.
// NOTE(review): __syncthreads() only synchronizes within a block, so the
// read-after-write ordering between d_Piv/d_Aug is only guaranteed for a
// single-block launch -- confirm the host launches one block.
// NOTE(review): there is no guard on i, so threads beyond the matrix size
// would index out of bounds -- confirm the launch exactly covers the matrix.
__global__
void GaussSolve(
int const Nsize,
double* d_Aug,
double* d_Piv)
{
// Assign matrix elements to blocks and threads
int i = blockDim.x*blockIdx.x + threadIdx.x;
// Parallel forward elimination
for (int k = 0; k < Nsize-1; k++)
{
d_Piv[i] = d_Aug[i%Nsize+k*Nsize]/d_Aug[k*(Nsize+1)];
__syncthreads();
if (((i%Nsize)>k) && ((i/Nsize/*+1*/)>=k) && ((i/Nsize/*+1*/)<=Nsize))
d_Aug[i] -= d_Piv[i]*d_Aug[i-(i%Nsize)+k];
__syncthreads();
}
}
|
5,051 | #include<stdlib.h>
#include<math.h>
#include<iostream>
#include<time.h>
#define omega 1.5
using namespace std;
// One red-black SOR sweep (relaxation factor `omega`) over an N x N grid:
// updates only cells whose parity (i+j)%2 matches rb, using a shared halo
// tile. The 10x10 tile implies 8x8 thread blocks (indices tx,ty in 1..8).
// pu holds the reference solution; e[0] tracks the max |pu - u| error.
// NOTE(review): e[0]=0 is written by every thread and the max update below is
// a non-atomic read-modify-write -- this is a data race across blocks, so the
// reported error is only approximate.
__global__ void calculateU(double* u, double* f, double* pu, int N, double h2, int rb, double * e)
{
__shared__ double s_u[10][10];
e[0]=0;
int i = blockIdx.x*blockDim.x + threadIdx.x; // global column index
int j = blockIdx.y*blockDim.y + threadIdx.y; // global row index
int tx = threadIdx.x+1;
int ty = threadIdx.y+1;
int k = j*N + i; // flat grid index
if(i<N && j<N)
{
// Load this cell plus halo neighbors; edge threads (tx/ty == 1 or 8)
// fetch the adjacent row/column, and i==0 / i==N-1 wrap periodically.
s_u[ty][tx] = u[k];
if(ty==1 && j>0)
s_u[ty-1][tx] = u[k-N];
if(ty==8 && j<N-1)
s_u[ty+1][tx] = u[k+N];
if(tx==1 && i>0)
s_u[ty][tx-1] = u[k-1];
if(tx==8 && i<N-1)
s_u[ty][tx+1] = u[k+1];
if(i==0)
s_u[ty][tx-1] = u[k+N-2];
if(i==N-1)
s_u[ty][tx+1] = u[k-N+2];
}
__syncthreads();
if((i>0)&&(i<N-1)&&(j>0)&&(j<N-1)&&(k<N*N)&&(i+j)%2==rb){
u[k] = omega*0.25*(h2*f[k] +s_u[ty-1][tx] + s_u[ty+1][tx] + s_u[ty][tx-1] + s_u[ty][tx+1] - 4*s_u[ty][tx])+ s_u[ty][tx];
if(e[0]<abs(pu[k]-u[k]))
e[0] = abs(pu[k]-u[k]);
}
}
// Host driver: sets up a 1024x1024 Poisson-style problem, iterates red-black
// SOR sweeps on the GPU until the tracked error drops below 5e-4 (or 50000
// iterations), then prints the grid values.
int main( int argc, char * argv [] )
{
double h =(double)1/(double)1024;
double h2=h*h;
int numBytes = sizeof(double)*1024*1024;
double* U;
double* f;
double* pU;
double* eps;
U = (double*)malloc(numBytes);
f = (double*)malloc(numBytes);
pU = (double*)malloc(numBytes);
eps = (double*)malloc(sizeof(double));
eps[0]=1;
// Initialize RHS, solution, and reference solution on the interior.
for(int i=0; i<1024; i++)
{
double x = i*h;
for(int j=0; j<1024; j++)
{
double y = j*h;
f[i*1024+j] = 4;
U[i*1024+j] = 0;
pU[i*1024+j] = -x*x -y*y +x +y;
}
}
// Set boundary values on all four edges (U and the reference pU).
for(int i=0; i<1024 ; i++)
{
double x = i*h;
U[i*1024] = - x*x + x;
U[i] = - x*x + x;
U[i*1024+1023] = - x*x + x;
U[1023*1024+i] = - x*x + x;
pU[i*1024] = - x*x + x;
pU[i] = - x*x + x;
pU[i*1024+1023] = - x*x + x;
pU[1024*1023+i] = - x*x + x;
}
// allocate device memory
double * devU = NULL;
double * devpU = NULL;
double * devF = NULL;
double * devE = NULL;
cudaMalloc ( (void**)&devU, numBytes );
cudaMalloc ( (void**)&devpU, numBytes );
cudaMalloc ( (void**)&devF, numBytes );
cudaMalloc ( (void**)&devE, sizeof(double));
// set kernel launch configuration
// NOTE(review): the names are swapped relative to their use -- `threads`
// (128x128) is passed as the GRID dimension and `blocks` (8x8) as the BLOCK
// dimension in the launches below, which matches the kernel's 8x8 tile.
dim3 threads = dim3(128, 128,1);
dim3 blocks = dim3(8, 8,1);
cudaMemcpy(devU,U,numBytes,cudaMemcpyHostToDevice);
cudaMemcpy(devpU,pU,numBytes,cudaMemcpyHostToDevice);
cudaMemcpy(devF,f,numBytes,cudaMemcpyHostToDevice);
cudaMemcpy(devE,eps,sizeof(double),cudaMemcpyHostToDevice);
// Iterate: one red sweep + one black sweep, then copy the error estimate back.
int count = 0;
clock();
for(int i = 0; (i<50000)&&(eps[0]>0.0005); i++){
//eps[0]=0;
calculateU<<<threads,blocks,0>>>(devU,devF,devpU,1024,h2,0,devE);
calculateU<<<threads,blocks,0>>>(devU,devF,devpU,1024,h2,1,devE);
cudaMemcpy(eps,devE,sizeof(double),cudaMemcpyDeviceToHost);
count = i;
}
// Copy results back and report timing/error (message text is Russian).
cudaMemcpy(U,devU,numBytes,cudaMemcpyDeviceToHost);
cudaMemcpy(pU,devpU,numBytes,cudaMemcpyDeviceToHost);
cudaMemcpy(f,devF,numBytes,cudaMemcpyDeviceToHost);
clock_t c2 = clock();
cerr << "Время: " << ((double)c2)/CLOCKS_PER_SEC << "мс" << endl << "Погрешность: " << eps[0] << ", кол-во итераций:" <<count << endl;;
for(int i=0; i<1024 ; i++)
{
for(int j=0; j<1024 ; j++){
cout << i*h << " " << j*h << " " << U[i*1024+j]<< "\n";
}
}
return 0;
}
|
5,052 | #include "shared.cuh"
// Lightweight per-particle handle. Its Point members are forwarded to saxpy
// as double* below, so Point presumably stores pointers into the particle
// storage rather than values -- TODO confirm against shared.cuh.
struct ParticleRef {
Point pos;
Point dir;
double nextdist;
};
// Build a ParticleRef for particle i from the view's accessors.
inline __device__ ParticleRef make_ref(const ParticleView &view, int i) {
return {view.get_pos(i), view.get_dir(i), view.get_nextdist(i)};
}
// In-place update: (x,y,z) += (u,v,w) * distance.
__device__ inline void saxpy(double *__restrict__ x, double *__restrict__ y,
double *__restrict__ z,
const double *__restrict__ u,
const double *__restrict__ v,
const double *__restrict__ w, double distance) {
*x += *u * distance;
*y += *v * distance;
*z += *w * distance;
}
// Advance one particle along its direction by its next flight distance.
__device__ inline void move_impl(const ParticleRef ref) {
saxpy(ref.pos.x, ref.pos.y, ref.pos.z, ref.dir.x, ref.dir.y, ref.dir.z,
ref.nextdist);
}
// One thread per particle; excess threads exit.
__global__ void move(ParticleView view) {
int i = thread_id();
if (i >= view.size) return;
move_impl(make_ref(view, i));
}
|
5,053 | #include "includes.h"
// Compares each of this block's 64 histograms (128 bins each, from data1)
// against every histogram in data2, recording (id1, id2, similarity) triples
// whose similarity exceeds the larger of the two confidence thresholds.
// The fixed shared-memory shapes imply blockDim.x == 64 and each block owning
// 64 consecutive data1 rows -- confirm the host launch configuration.
__global__ void histDupeKernel(const float* data1, const float* data2, const float* confidence1, const float* confidence2, int* ids1, int* ids2, int* results_id1, int* results_id2, float* results_similarity, int* result_count, const int N1, const int N2, const int max_results) {
const unsigned int thread = threadIdx.x; // Thread index within block
const unsigned int block = blockIdx.x; // Block index
const unsigned int block_size = blockDim.x; // Size of each block
const unsigned int block_start = block_size * block; // Index of the start of the block
const unsigned int index = block_start + thread; // Index of this thread
//__shared__ float conf[64]; // Shared array of confidence values for all histograms owned by this block
//conf[thread] = confidence1[index]; // Coalesced read of confidence values
float conf = confidence1[index];
int id = ids1[index];
__shared__ float hists[128 * 64]; // Shared array of all histograms owned by this block
for (unsigned int i = 0; i < 64; i++) {
hists[i * 128 + thread] = data1[(block_start + i) * 128 + thread]; // Coalesced read of first half of histogram
hists[i * 128 + thread + 64] = data1[(block_start + i) * 128 + 64 + thread]; // Coalesced read of second half of histogram
}
__shared__ float other[128]; // Histogram to compare all owned histograms against parallely
// NOTE(review): *result_count is read non-atomically in this loop condition;
// the race is benign for correctness because stores below re-check the index,
// but blocks may do extra work after the result buffer fills.
for (unsigned int i = 0; i < N2 && *result_count < max_results; i++) {
other[thread] = data2[i * 128 + thread]; // Coalesced read of first half of other histogram
other[thread + 64] = data2[i * 128 + thread + 64]; // Second half
__syncthreads(); // Ensure all values read
if (index < N1) {
float d = 0;
for (unsigned int k = 0; k < 128; k++) { // Compute sum of distances between thread-owned histogram and shared histogram
d += fabsf(hists[thread * 128 + k] - other[k]);
}
d = 1 - (d / 8); // Massage the difference into a nice % similarity number, between 0 and 1
int other_id = ids2[i];
if (other_id != id && d > fmaxf(conf, confidence2[i])) { // Don't compare against self, only compare using highest confidence
int result_index = atomicAdd(result_count, 1); // Increment result count by one atomically (returns value before increment)
if (result_index < max_results) {
// Store resulting pair
results_similarity[result_index] = d;
results_id1[result_index] = id;
results_id2[result_index] = other_id;
}
}
}
__syncthreads(); // Ensure all threads have finished before looping and reading new shared histogram
}
} |
5,054 | /*
* GPUKernels.cu
*
* Created on: Oct 19, 2010
* Author: yiding
*/
#include <cuda_runtime.h>
#include <math.h>
#include <cuda.h>
#define NOID 0xFFFFFFFF
#define CUDA_MAJOR_VER 1
#define CUDA_MINOR_VER 3
typedef unsigned int CoordType;
typedef unsigned int IdType;
typedef unsigned short CapType;
typedef unsigned int SizeType;
typedef float CostType;
//Define CUDA Kernels in this file
|
5,055 | #include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifndef D
#define D 10000
#endif
#ifndef N_FILES
#define N_FILES 21000
#endif
#define ARG_COUNT 4
#define MAX_FILE_NAME 100
#define GMEM_GRANULARITY 128
#define INV_DICT_WIDTH ((unsigned int)(ceil(N_FILES / (float)(sizeof(int)*8))))
#define INV_DICT_WIDTH_PAD ((INV_DICT_WIDTH/GMEM_GRANULARITY+1)*GMEM_GRANULARITY)
#define INV_DICT_SIZE (D*INV_DICT_WIDTH_PAD)
#define BLOCK_SIZE 64
#define N_BLOCKS ((unsigned int)(ceil(N_FILES/ (float)(sizeof(int)*8*BLOCK_SIZE))))
#define N_THREADS (BLOCK_SIZE*N_BLOCKS)
unsigned int *inv_dictionary;
char files[N_FILES][MAX_FILE_NAME];
char query[D];
unsigned int query_ones[D];
unsigned int matches[N_FILES];
// inv_dict_width = width of inv dictionary in number of elements
// int_size = size of an integer in bytes
// AND together the inverted-dictionary bitmap rows selected by query_ones;
// each thread owns one 32-bit word (one column of the padded dictionary) and
// then scans its surviving bits, writing 1-based match positions into its
// 32-entry slice of `matches` (slots left at 0 mean "no match").
// inv_dict_width = width of inv dictionary in number of elements
// int_size = size of an integer in bytes
__global__ void queryKernel(unsigned int * __restrict__ inv_dictionary, unsigned int inv_dict_width, unsigned int inv_dict_width_pad, unsigned int * __restrict__ query_ones, unsigned int ones_cnt, unsigned int * matches, unsigned int int_size, unsigned int n_threads) {
unsigned short curr_bit = 0;
unsigned int match_pos;
unsigned int match_cnt = 0;
// unsigned int match_idx;
__shared__ unsigned int match_idx_s[BLOCK_SIZE];
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < inv_dict_width){ //inv_dict_width = 657
// Start from all-ones and intersect the bitmap row for each query '1'.
match_idx_s[threadIdx.x] = 0xFFFFFFFF;
for(int i = 0; i < ones_cnt; i++) {
match_idx_s[threadIdx.x] = match_idx_s[threadIdx.x] & *(inv_dictionary + query_ones[i]*inv_dict_width_pad + index);
}
match_cnt = 0;
// Peel off bits LSB-first until the word is exhausted.
for(int j=1; j<=(int_size*8) && match_idx_s[threadIdx.x]>0; j++) {
curr_bit = match_idx_s[threadIdx.x] & 1;
match_idx_s[threadIdx.x] = match_idx_s[threadIdx.x] >> 1;
if(curr_bit==1) {
match_pos = (index + 1)*int_size*8 - j + 1; // starts indexing at 1 not 0
matches[index*int_size*8+match_cnt] = match_pos;
match_cnt++;
}
}
}
}
// Read the D x INV_DICT_WIDTH inverted dictionary from `file` into the global
// padded buffer (row stride INV_DICT_WIDTH_PAD); the final word of each row
// is parsed with a trailing-newline format.
// NOTE(review): on fopen failure this only prints a message and then keeps
// using the NULL FILE* -- the caller is not informed of the failure.
__host__ void load_inv_dictionary(char file[]){
unsigned int num_read;
FILE *fp = NULL;
fp = fopen(file, "r");
if (fp == NULL){
printf("Error while opening %s\n", file);
}
int i;
for(int j = 0; j<D; j++) {
for(i=0; i<INV_DICT_WIDTH-1; i++){
// printf("%d:\t%d:\t", j, i);
num_read = fscanf(fp, "%u ", inv_dictionary + j*INV_DICT_WIDTH_PAD + i);
// printf("%u\n", *(inv_dictionary + j*INV_DICT_WIDTH_PAD + i));
if (num_read != 1)
printf("ERROR!\t fscanf did not fill all arguments\n");
}
// printf("%d:\t%d:\t", j, i);
num_read = fscanf(fp, "%u\n", inv_dictionary + j*INV_DICT_WIDTH_PAD + i);
// printf("%u\n", *(inv_dictionary + j*INV_DICT_WIDTH_PAD + i));
if (num_read != 1)
printf("ERROR!\t fscanf did not fill all arguments\n");
}
fclose(fp);
}
// Read D characters from `file` into the global query buffer, one char per
// dictionary dimension (expected to be '0'/'1' -- see findQueryOnes).
// NOTE(review): like load_inv_dictionary, a failed fopen is only printed,
// not propagated.
__host__ void load_query(char file[]){
unsigned int num_read;
FILE *fp = NULL;
fp = fopen(file, "r");
if (fp == NULL){
printf("Error while opening %s\n", file);
}
for (int i=0; i<D; i++) {
num_read = fscanf(fp, "%c", &query[i]);
if (num_read != 1)
printf("ERROR!\t fscanf did not fill all arguments\n");
}
// printf("Loaded query: ");
// for (int i=0; i<D; i++) {
// printf("%c", query[i]);
// }
// printf("\n");
fclose(fp);
}
/* Dump every entry of the global matches array to stdout
 * (entries hold 1-based file ids; 0 means "no match"). */
__host__ void printMatches(){
    for (int idx = 0; idx < N_FILES; ++idx)
        printf("Match %d:\t%u\n", idx, matches[idx]);
}
// Read N_FILES whitespace-delimited file names from `file` into the global
// `files` table (one name per row, up to MAX_FILE_NAME chars each).
__host__ void load_files(char file[]) {
FILE *fp = NULL;
fp = fopen(file, "r");
int num_read = 0;
if (fp == NULL){
printf("Error while opening %s\n", file);
}
for(int i = 0; i<N_FILES; i++) {
num_read = fscanf(fp, "%s", files[i]);
if (num_read != 1)
printf("Error reading file: %s\n", file);
// printf("%d: %s\n", i, files[i]);
}
fclose(fp);
}
// Scan the D-character query string and record the positions of every '1'
// into the global query_ones array; returns the count of ones found.
// Also prints each position as a side effect.
unsigned int findQueryOnes(char* query){
unsigned int ones_cnt = 0;
for(unsigned int i=0; i<D; i++) {
if(query[i] == '1')
{
query_ones[ones_cnt] = i;
printf("One position:\t%d\n", i);
ones_cnt++;
}
}
return ones_cnt;
}
// Write the names of all matched files to `report_f`, one per line.
// matches[] entries are 1-based file ids (0 = no match), hence the -1 offset.
__host__ void reportQuery(char* report_f){
FILE *rep = fopen(report_f, "w+");
for (int i=0; i < N_FILES; i++){
if (matches[i] != 0)
{
fprintf(rep, "%s\n", files[matches[i]-1]);
}
}
fclose(rep);
}
// Host driver: loads the inverted dictionary and query, runs the bitmap-AND
// query kernel on the GPU, and writes the matching file names to a report.
int main(int argc, char** argv){
// Command-line arguments:
// D, itemmem input file, dictionary input file, query
if(argc != (ARG_COUNT+1)){
printf("Requires arguments: <files file> <dictionary input file> <query input file> <output directory>\n");
return 1;
}
char dict_file[MAX_FILE_NAME];
char files_file[MAX_FILE_NAME];
char query_file[MAX_FILE_NAME];
char output_dir[MAX_FILE_NAME];
char report_file[MAX_FILE_NAME];
sprintf(files_file, "%s", argv[1]);
sprintf(dict_file, "%s", argv[2]);
sprintf(query_file, "%s", argv[3]);
sprintf(output_dir, "%s", argv[4]);
printf("INV_DICT_SIZE:\t%u\nINV_DICT_WIDTH:\t%u\nINV_DICT_WIDTH_PAD:\t%u\nN_BLOCKS:\t%u\nBLOCK_SIZE:\t%d\n", INV_DICT_SIZE, INV_DICT_WIDTH, INV_DICT_WIDTH_PAD, N_BLOCKS, BLOCK_SIZE);
sprintf(report_file, "%s/cu_query_report.txt", output_dir);
unsigned int *d_inv_dict, *d_matches;
unsigned int *d_query_ones;
unsigned int ones_cnt;
inv_dictionary = (unsigned int *)malloc(INV_DICT_SIZE*sizeof(unsigned int));
load_inv_dictionary(dict_file);
load_query(query_file);
ones_cnt = findQueryOnes(query);
#ifdef PROFILING
// NOTE(review): the profiling loop re-allocates and frees all device
// buffers on every iteration, so it measures allocation cost as well.
for (int i = 0; i < 100; ++i)
{
#endif
// const int num_streams = 8;
// cudaStream_t streams[num_streams];
cudaMalloc((void **)&d_inv_dict, INV_DICT_SIZE*sizeof(unsigned int));
cudaMalloc((void **)&d_query_ones, ones_cnt*sizeof(unsigned int));
cudaMalloc((void **)&d_matches, N_FILES*sizeof(unsigned int));
cudaMemset(d_matches, 0, N_FILES*sizeof(unsigned int));
// printf("Allocated arrays...\n");
cudaMemcpy(d_inv_dict, inv_dictionary, INV_DICT_SIZE*sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaError_t err = cudaMemcpy(d_query_ones, query_ones, ones_cnt*sizeof(unsigned int), cudaMemcpyHostToDevice);
printf("Err: %d\n", err);
// printf("Query...\n");
// for(int i=0; i < num_streams; i++) {
// cudaStreamCreate(&streams[i]);
// }
// for(int i=0; i < num_streams; i++) {
queryKernel<<<N_BLOCKS, BLOCK_SIZE>>>((unsigned int *)d_inv_dict, INV_DICT_WIDTH, INV_DICT_WIDTH_PAD, d_query_ones, ones_cnt, d_matches, sizeof(int), N_THREADS);
cudaMemcpy(matches, d_matches, N_FILES*sizeof(unsigned int), cudaMemcpyDeviceToHost);
// }
// printMatches();
load_files(files_file);
reportQuery(report_file);
cudaFree(d_inv_dict);
cudaFree(d_query_ones);
cudaFree(d_matches);
#ifdef PROFILING
}
#endif
free(inv_dictionary);
printf("Ended! :)\n");
return 0;
} |
5,056 | #include "includes.h"
// Elementwise vector addition c[i] = a[i] + b[i]; one thread per element,
// threads with i >= n do nothing.
__global__ void VecAdd(int *a, int *b, int *c, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n) {
c[i] = a[i] + b[i];
}
} |
5,057 | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include <iostream>
#include <vector>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
// DELIBERATELY faulty kernel: every surviving thread stores to data[id + 1],
// so the thread with id == size - 1 writes data[size] — one element past the
// end of the buffer. It exists to trigger an out-of-bounds access for the
// sanitizer/segfault demonstration driven by intraBlockTest below.
template <unsigned int BLOCK_SIZE>
__global__ static void oob(int* data, int size)
{
    auto id = threadIdx.x + BLOCK_SIZE * blockIdx.x;
    if (id >= size) return;
    data[id + 1] = 0;  // intentional OOB when id == size - 1
}
// Launches the intentionally out-of-bounds kernel on an N-element device
// vector. The launch writes one element past the buffer (see oob above) —
// intended to be caught by compute-sanitizer / cuda-memcheck.
static void intraBlockTest(int N)
{
    // Compute launch arguments
    const unsigned int BLOCK_SIZE = 128;
    const unsigned int BLOCKS = N / BLOCK_SIZE;  // assumes N % 128 == 0
    thrust::device_vector<int> d_data(N);
    oob<BLOCK_SIZE><<<BLOCKS, BLOCK_SIZE>>>(d_data.data().get(), N);
    // Synchronize so the fault (if reported) surfaces here.
    cudaDeviceSynchronize();
}
// nvcc -arch=sm_61 -g main.cu -o segfault
int main(int argc, char* argv[])
{
    // Runs the out-of-bounds demo on 2048 elements. Without a sanitizer the
    // OOB write may or may not visibly crash; "Done." still prints then.
    intraBlockTest(1024 * 2);
    std::cout << "Done." << std::endl;
}
|
5,058 | #include <stdio.h>
#include <stdlib.h>
// Debug-only helper: synchronizes the device and reports any pending CUDA
// error with its source location, then aborts. Compiled out unless DEBUG is
// defined, so release builds pay no synchronization cost.
inline void check_cuda_errors(const char *filename, const int line_number){
#ifdef DEBUG
    // Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the supported replacement and surfaces asynchronous kernel errors.
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess){
        printf("CUDA error at %s:%i: %s\n", filename, line_number, cudaGetErrorString(error));
        exit(-1);
    }
#endif
}
// Stores 7 through the given device pointer. In this file it is launched
// with a null pointer (see main), presumably as a deliberate fault generator
// for the error-checking demo.
__global__ void foo(int *ptr){
    *ptr = 7;
}
int main(){
    // Intentionally launches foo with a null device pointer; the resulting
    // fault is only reported when DEBUG is defined, since check_cuda_errors
    // is a no-op otherwise.
    foo<<<1,1>>>(0);
    check_cuda_errors(__FILE__, __LINE__);
    return 0;
}
5,059 | #include<stdio.h>
#define NUM_THREADS_PER_BLOCK 256
// Each thread of the block prints a greeting tagged with its thread index.
__global__
void print_hello()
{
    const int tid = threadIdx.x;
    printf("Hello World! My threadId is %d\n", tid);
}
int main()
{
    // One block of NUM_THREADS_PER_BLOCK threads; each prints one line.
    print_hello<<<1, NUM_THREADS_PER_BLOCK>>>();
    // Required: device printf output is flushed when the host synchronizes.
    cudaDeviceSynchronize();
    return 0;
}
5,060 | // Array reversing in CUDA using shared memory.
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
// Reverses the N-element array A in place.
//
// Bug fixed: the original staged A into dynamic shared memory indexed by the
// GLOBAL thread id and then read the mirrored slot. Shared memory is private
// to each block, so for any multi-block launch a thread read a slot its own
// block never wrote (and indexed past the per-block allocation), producing
// garbage. A cross-block reversal cannot exchange data through shared memory;
// instead, let the first N/2 threads swap their element with its mirror
// directly in global memory. Each pair is touched by exactly one thread, so
// no synchronization is required. The kernel no longer needs dynamic shared
// memory; a shared-memory size passed at launch is simply unused.
__global__ void reverseKernel(float* A, int N) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N / 2) {
        int r = N - i - 1;
        float tmp = A[i];
        A[i] = A[r];
        A[r] = tmp;
    }
}
// Prints the N elements of a to stdout, space-separated, ending with '\n'.
void displayArray(float* a, int N) {
    const char* sep = "";
    for (int i = 0; i < N; i++) {
        std::cout << sep << a[i];
        sep = " ";
    }
    std::cout << "\n";
}
// Benchmarks array reversal: CPU reference copy vs. GPU kernel, then checks
// the two results agree element-wise. Usage: prog <N>.
int main(int argc, char** argv) {
    if (argc == 2) {
        const int N = atoi(argv[1]);
        size_t size = N * sizeof(float);
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        float* h_A = (float*)malloc(size);
        if (h_A == NULL) {
            std::cerr << "Failed malloc for h_A!\n";
            return 1;
        }
        float* h_B = (float*)malloc(size);
        if (h_B == NULL) {
            std::cerr << "Failed malloc for h_B!\n";
            return 2;
        }
        // h_A = 1..N; h_B becomes the CPU-reversed reference.
        for (int i = 0; i < N; i++) {
            h_A[i] = i + 1;
        }
        auto begin = std::chrono::high_resolution_clock::now();
        for (int i = 0; i < N; i++) {
            h_B[i] = h_A[N - i - 1];
        }
        auto end = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double, std::milli> cputime = end - begin;
        std::cout << "CPU Elapsed Time: " << cputime.count() << " ms" << std::endl;
        // displayArray(h_A, N);
        // displayArray(h_B, N);
        float* d_A = NULL;
        cudaMalloc((void**)&d_A, size);
        cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
        const int BLOCK_SIZE = 1024;
        const int GRID_SIZE = (N - 1) / BLOCK_SIZE + 1;  // ceil(N / 1024)
        cudaEventRecord(start);
        // NOTE(review): `size` bytes of dynamic shared memory = the WHOLE
        // array per block; this exceeds the per-block shared-memory limit
        // for large N — verify N stays small enough for the launch to work.
        reverseKernel<<<GRID_SIZE, BLOCK_SIZE, size>>>(d_A, N);
        cudaDeviceSynchronize();
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        float msecs = 0;
        cudaEventElapsedTime(&msecs, start, stop);
        std::cout << "GPU Elapsed Time: " << msecs << " ms.\n";
        cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
        // displayArray(h_A, N);
        // Element-wise comparison against the CPU reference.
        for (int i = 0; i < N; i++) {
            if (h_A[i] != h_B[i]) {
                std::cerr << "TEST FAILED...\n";
                return 5;
            }
        }
        std::cout << "TEST PASSED!\n";
        cudaFree(d_A);
        free(h_A);
    }
    return 0;
}
|
5,061 | /*------------vec_add.cu------------------------------------------------------//
*
* Purpose: This is a simple cuda file for vector addition
*
*-----------------------------------------------------------------------------*/
#include <iostream>
#include <math.h>
// Computes c = a + b element-wise over n doubles; one thread per element.
__global__ void vecAdd(double *a, double *b, double *c, int n){
    // Flat global thread index.
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    // Threads past the end of the arrays do nothing.
    if (gid >= n) return;
    c[gid] = a[gid] + b[gid];
}
// Adds two length-1000 vectors of ones on the GPU and prints the sum of the
// result (expected: 2000).
int main(){
    // size of vectors
    int n = 1000;
    // Host vectors
    double *h_a, *h_b, *h_c;
    // Device vectors
    double *d_a, *d_b, *d_c;
    // allocating space on host and device
    h_a = (double*)malloc(sizeof(double)*n);
    h_b = (double*)malloc(sizeof(double)*n);
    h_c = (double*)malloc(sizeof(double)*n);
    // Allocating space on GPU
    cudaMalloc(&d_a, sizeof(double)*n);
    cudaMalloc(&d_b, sizeof(double)*n);
    cudaMalloc(&d_c, sizeof(double)*n);
    //initializing host vectors
    for (int i = 0; i < n; ++i){
        h_a[i] = 1;
        h_b[i] = 1;
    }
    // copying these components to the GPU
    cudaMemcpy(d_a, h_a, sizeof(double)*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(double)*n, cudaMemcpyHostToDevice);
    // Creating blocks and grid ints
    int threads, grid;
    threads = 64;
    // Ceil-division so the grid covers all n elements.
    grid = (int)ceil((float)n/threads);
    vecAdd<<<grid, threads>>>(d_a, d_b, d_c, n);
    // Now to copy c back (the blocking copy also synchronizes with the kernel)
    cudaMemcpy(h_c, d_c, sizeof(double)*n, cudaMemcpyDeviceToHost);
    double sum = 0;
    for (int i = 0; i < n; ++i){
        sum += h_c[i];
    }
    std::cout << "Sum is: " << sum << '\n';
    // Release memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
}
|
5,062 | #include <stdio.h>
#include <stdlib.h>
//#include <sys/time.h>
#define NUM_PARTICLES 10000 // Third argument
#define NUM_ITERATIONS 100 // Second argument
#define BLOCK_SIZE 16 // First argument
typedef struct
{
float3 position;
float3 velocity;
} Particle;
// Advances one particle per thread by a single step: the velocity is a
// deterministic function of `time`, then the position is integrated by one
// step of that velocity. Mirrors timeStepCPU exactly so results compare.
// NOTE(review): the guard uses the compile-time NUM_PARTICLES, not a runtime
// count — runs where main's numParticles differs from NUM_PARTICLES leave a
// tail unprocessed (or under-read); confirm intended.
__global__ void timeStep(Particle *particles, int time) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < NUM_PARTICLES) {
        // Integer % and / bind first, then the double division.
        particles[i].velocity.x = (time % 3 == 0) ? time % 5 / 1.7 : -time % 5 / 2.8;
        particles[i].velocity.y = (time % 7 == 0) ? time % 4 / 4.5: -time % 3 / 2.3;
        particles[i].velocity.z = (time % 2 == 0) ? time % 3 * 1.6 : -time % 7 / 1.2;
        particles[i].position.x += particles[i].velocity.x;
        particles[i].position.y += particles[i].velocity.y;
        particles[i].position.z += particles[i].velocity.z;
    }
}
// CPU reference for timeStep: identical per-particle update, applied
// sequentially to the first NUM_PARTICLES particles.
void timeStepCPU(Particle *particles, int time) {
    for (int i = 0; i < NUM_PARTICLES; ++i) {
        particles[i].velocity.x = (time % 3 == 0) ? time % 5 / 1.7 : -time % 5 / 2.8;
        particles[i].velocity.y = (time % 7 == 0) ? time % 4 / 4.5: -time % 3 / 2.3;
        particles[i].velocity.z = (time % 2 == 0) ? time % 3 * 1.6 : -time % 7 / 1.2;
        particles[i].position.x += particles[i].velocity.x;
        particles[i].position.y += particles[i].velocity.y;
        particles[i].position.z += particles[i].velocity.z;
    }
}
// Simulates particles for numIterations steps on both CPU and GPU, then
// compares the two results element-wise.
// Usage: prog [blockSize [numIterations [numParticles]]]
int main(int argc, char *argv[]){
    int numParticles, numIterations, blockSize;
    if (argc == 1) {
        numParticles = NUM_PARTICLES;
        numIterations = NUM_ITERATIONS;
        blockSize = BLOCK_SIZE;
    } else if (argc == 2) {
        numParticles = NUM_PARTICLES;
        numIterations = NUM_ITERATIONS;
        blockSize = atoi(argv[1]);
    } else if (argc == 3) {
        numParticles = NUM_PARTICLES;
        numIterations = atoi(argv[2]);
        blockSize = atoi(argv[1]);
    } else {
        numParticles = atoi(argv[3]);
        numIterations = atoi(argv[2]);
        blockSize = atoi(argv[1]);
    }
    //struct timeval start, end;
    Particle *particles = (Particle*)malloc(numParticles * sizeof(Particle));
    Particle *particles_GPU = (Particle*)malloc(numParticles * sizeof(Particle));
    Particle *d_particles;
    for (int i = 0; i < numParticles; ++i) {
        particles[i].velocity.x = 1;
        particles[i].velocity.y = 1;
        particles[i].velocity.z = 1;
        particles[i].position.x = 0;
        particles[i].position.y = 0;
        particles[i].position.z = 0;
    }
    cudaMalloc(&d_particles, numParticles * sizeof(Particle));
    cudaMemcpy(d_particles, particles, numParticles * sizeof(Particle), cudaMemcpyHostToDevice);
    //gettimeofday(&start, NULL);
    for (int t = 0; t < numIterations; ++t){
        timeStepCPU(particles, t);
    }
    //gettimeofday(&end, NULL);
    //printf("Computing %d iterations on the CPU... Done in %ld microseconds!\n", numIterations, ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)));
    //gettimeofday(&start, NULL);
    for (int t = 0; t < numIterations; ++t){
        // Bug fixed: the launch configuration was reversed
        // (<<<blockSize, numParticles/blockSize + 1>>>), which made blockSize
        // the GRID size and packed numParticles/blockSize+1 threads into one
        // block — exceeding the 1024-thread block limit for small blockSize
        // values and silently failing to launch. Grid count goes first,
        // threads-per-block second.
        timeStep<<<numParticles / blockSize + 1, blockSize>>>(d_particles, t);
    }
    //gettimeofday(&end, NULL);
    //printf("Computing %d iterations on the GPU... Done in %ld microseconds!\n", numIterations, ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)));
    cudaMemcpy(particles_GPU, d_particles, numParticles * sizeof(Particle), cudaMemcpyDeviceToHost);
    // NOTE(review): timeStep guards with the compile-time NUM_PARTICLES, so
    // runs with numParticles != NUM_PARTICLES may diverge here — confirm.
    int sameArray = 1;
    for (int i = 0; i < numParticles; ++i) {
        if ((particles[i].position.x != particles_GPU[i].position.x) || (particles[i].position.y != particles_GPU[i].position.y)|| (particles[i].position.z != particles_GPU[i].position.z)) {
            printf("Comparing the output for each implementation… Wrong at %d!\n", i);
            // Bug fixed: the labels were swapped — particles[] holds the CPU
            // result, particles_GPU[] the GPU result.
            printf("GPU.x: %f, CPU.x: %f\n", particles_GPU[i].position.x, particles[i].position.x);
            sameArray = 0;
            break;
        }
    }
    if (sameArray == 1) {
        printf("Comparing the output for each implementation… Correct!\n");
    }
    free(particles);
    free(particles_GPU);
    cudaFree(d_particles);
}
5,063 | // #include <bits/stdc++.h>
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
#include<vector>
#include<algorithm>
#include <climits>
#include <thrust/swap.h>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
using namespace std;
typedef long long ll;
#define CUDA_ERROR(err) { \
if (err != cudaSuccess) { \
fprintf(stderr, "ERROR: CUDA failed in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); \
return(1); \
} \
} \
// Orders doubles by absolute value; callable from host and device code
// (thrust::max_element uses it below to pick the pivot row).
struct abs_comparator{
    __host__ __device__ bool operator()(double x, double y) {
        return fabs(x) < fabs(y);
    }
};
// Pivot step i of LU: divides the sub-diagonal entries of column i by the
// pivot, producing the L-factor entries for that column. Column-major
// layout: element (row j, col i) lives at data[j + i * n]. Grid-stride over
// rows j > i so any 1-D launch shape covers the column.
__global__ void kernel_compute_L (double* data, int n, int i) {
    const int start = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = gridDim.x * blockDim.x;
    const double pivot = data[i + i * n];  // untouched by this kernel
    for (int j = start + i + 1; j < n; j += stride) {
        data[j + i * n] /= pivot;
    }
}
// Pivot step i of LU: rank-1 update of the trailing submatrix,
//   A[j][q] -= L[j][i] * U[i][q]   for all j, q > i
// (column-major: element (row j, col q) is data[j + q * n]).
// 2-D grid-stride loops let any launch shape cover the submatrix.
__global__ void kernel_compute_U (double* data, int n, int i) {
    const int x0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int y0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int sx = gridDim.x * blockDim.x;
    const int sy = gridDim.y * blockDim.y;
    for (int j = x0 + i + 1; j < n; j += sx) {
        for (int q = y0 + i + 1; q < n; q += sy) {
            data[j + q * n] -= data[j + i * n] * data[i + q * n];
        }
    }
}
// for (int j = i; j < n; ++j) {
// for (int q = 0; q < i; ++q) {
// data[i + j * n] -= data[i + q * n] * data[q + j * n];
// }
// }
// Exchanges rows i and max_idx across all n columns (partial-pivot row
// swap). Column-major layout; grid-stride over columns.
__global__ void kernel_swap(double* data, int n, int i, int max_idx) {
    const int start = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = gridDim.x * blockDim.x;
    for (int col = start; col < n; col += stride) {
        double tmp = data[i + col * n];
        data[i + col * n] = data[max_idx + col * n];
        data[max_idx + col * n] = tmp;
    }
}
// Reads an n x n matrix (stored column-major in `data`), performs LU
// decomposition with partial pivoting on the GPU, then prints the combined
// LU factors and the pivot permutation vector.
int main(int argc, char const *argv[]) {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    cout.tie(nullptr);
    int n;
    cin >> n;
    double* data, *dev_data;
    CUDA_ERROR(cudaMalloc((void**)&dev_data, sizeof(double) * n * n));
    int* p = (int*)malloc(sizeof(int) * n);
    data = (double*)malloc(sizeof(double) * n * n);
    // Data input + pivot-vector initialization (identity permutation).
    for (int i = 0; i < n; ++i) {
        p[i] = i;
        for (int j = 0; j < n; ++j) {
            cin >> data[i + j * n];
        }
    }
    // cudaEvent_t start, stop;
    // float gpu_time = 0.0;
    // cudaEventCreate(&start);
    // cudaEventCreate(&stop);
    // cudaEventRecord(start, 0);
    CUDA_ERROR(cudaMemcpy(dev_data, data, sizeof(double) * n * n, cudaMemcpyHostToDevice));
    // fprintf(stderr, "Got data\n");
    dim3 BLOCKS(32, 32);
    dim3 THREADS(32, 32);
    int max_idx;
    abs_comparator cmp;
    thrust::device_ptr<double> data_ptr;
    thrust::device_ptr<double> max_ptr;
    for (int i = 0; i < n - 1; ++i) {
        max_idx = i;
        // Find the largest |value| in column i, rows i..n-1, on the device.
        data_ptr = thrust::device_pointer_cast(dev_data + i * n);
        max_ptr = thrust::max_element(data_ptr + i, data_ptr + n, cmp);
        max_idx = max_ptr - data_ptr;
        p[i] = max_idx;
        if (max_idx != i) {
            kernel_swap<<<32, 32>>>(dev_data, n, i, max_idx);
            CUDA_ERROR(cudaGetLastError());
        }
        kernel_compute_L<<<32, 32>>>(dev_data, n, i);
        CUDA_ERROR(cudaGetLastError());
        // Fix: cudaThreadSynchronize() is deprecated — use
        // cudaDeviceSynchronize(). The barrier is needed so the U update
        // sees the finished L column.
        CUDA_ERROR(cudaDeviceSynchronize());
        kernel_compute_U<<<BLOCKS, THREADS>>>(dev_data, n, i);
        CUDA_ERROR(cudaGetLastError());
        CUDA_ERROR(cudaDeviceSynchronize());
        fprintf(stderr, "Iter=%d\n", i);
    }
    CUDA_ERROR(cudaMemcpy(data, dev_data, sizeof(double) * n * n, cudaMemcpyDeviceToHost));
    CUDA_ERROR(cudaFree(dev_data));
    // Print LU factors (row-major traversal of the column-major storage).
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            printf("%.10e ", data[i + j * n]);
        }
        printf("\n");
    }
    // Print the pivot permutation.
    for (int j = 0; j < n; ++j) {
        printf("%d ", p[j]);
    }
    printf("\n");
    free(data);
    free(p);
    // cudaEventRecord(stop, 0);
    // cudaEventSynchronize(stop);
    // cudaEventElapsedTime(&gpu_time, start, stop);
    // fprintf(stderr, "Time %f\n", gpu_time);
    return 0;
}
|
5,064 | #include "includes.h"
// Rotates each square filter by 180 degrees (flip in both axes), reading
// from `filters` and writing to `targets`; one block per filter (blockIdx.x
// selects the filter), a 16x16 thread tile striding over the filter area.
__global__ void kRotate180(float* filters, float* targets, const int filterSize) {
    // __shared__ float shFilter[16][16];
    const int filtIdx = blockIdx.x;
    // MUL24 comes from includes.h — presumably a 24-bit integer multiply
    // (fast path on old GPUs); TODO confirm it equals plain * for these sizes.
    const int readStart = MUL24(MUL24(filterSize, filterSize), filtIdx);
    filters += readStart;
    targets += readStart;
    // 16-stride loops cover filters larger than the 16x16 thread tile.
    for(int y = threadIdx.y; y < filterSize; y += 16) {
        for(int x = threadIdx.x; x < filterSize; x += 16) {
            const int writeX = filterSize - 1 - x;
            const int writeY = filterSize - 1 - y;
            targets[MUL24(writeY, filterSize) + writeX] = filters[MUL24(y, filterSize) + x];
        }
    }
}
5,065 | #include "includes.h"
// Adjoint of the 2-D forward-difference operator: for each interior grid
// point, writes -dx[i] + dx[i-1] - dy[i] + dy[i-rows] into target.
// `rows` is the fastest-varying dimension (index = idx + rows * idy);
// 2-D grid-stride loops; the one-element border is left untouched.
__global__ void forwardDifference2DAdjointKernel(const int cols, const int rows, const float* dx, const float* dy, float* target) {
    for (auto idy = blockIdx.y * blockDim.y + threadIdx.y + 1; idy < cols - 1;
            idy += blockDim.y * gridDim.y) {
        for (auto idx = blockIdx.x * blockDim.x + threadIdx.x + 1;
                idx < rows - 1; idx += blockDim.x * gridDim.x) {
            const auto index = idx + rows * idy;
            target[index] =
                -dx[index] + dx[index - 1] - dy[index] + dy[index - rows];
        }
    }
}
5,066 | #include "cuda_runtime.h"
#include "stdio.h"
__device__ float devData[5];
// Adds 2.0f to this thread's slot of the __device__ global array; expects a
// launch with at most 5 threads (devData has 5 elements).
__global__ void checkGlobalVariable(){
    devData[threadIdx.x] += 2.0f;
}
int main(void){
    float value[5] = {3.14, 3.14, 3.14, 3.14, 3.14};
    // Initialize the __device__ global array through the symbol API.
    cudaMemcpyToSymbol(devData, &value, sizeof(float)*5);
    printf("Copy \n");
    // Each of the 5 threads bumps its own slot by 2.0f.
    checkGlobalVariable<<<1, 5>>>();
    // Read the updated values back (blocking copy; synchronizes with kernel).
    cudaMemcpyFromSymbol(&value, devData, sizeof(float)*5);
    for(int i=0; i<5; i++){
        printf("%d num is %f \n", i, value[i]);
    }
    cudaDeviceReset();
    return EXIT_SUCCESS;
}
5,067 | #include <assert.h>
#include <stdio.h>
#include <stdlib.h>
// Escape-time iteration for a Julia set: starting from z = (x, y), applies
// z <- z^2 + c until |z|^2 > 4 or 255 iterations pass; returns the count.
__device__ int iterate_pixel(float x, float y, float c_re, float c_im)
{
    float z_re = x;
    float z_im = y;
    int count;
    for (count = 0; count < 255; count++) {
        float re2 = z_re * z_re;
        float im2 = z_im * z_im;
        if (re2 + im2 > 4) break;
        // Compute the new imaginary part before clobbering z_re.
        float new_im = 2 * z_re * z_im + c_im;
        z_re = re2 - im2 + c_re;
        z_im = new_im;
    }
    return count;
}
// Renders one Julia-set pixel per thread into dest (row-major, 1 byte per
// pixel), mapping the pixel grid onto [-0.8, 0) x [-0.8, 0).
// Robustness fix: added a bounds guard so launches whose grid overshoots
// width/height cannot write out of bounds (the original relied on the grid
// exactly tiling the image).
__global__ void calc_fractal(int width, int height, float c_re, float c_im, unsigned char* dest)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    float f_x=(float)(x*0.8)/(float)(width)-0.8;
    float f_y=(float)(y*0.8)/(float)(height)-0.8;
    dest[y*width+x]=iterate_pixel(f_x,f_y,c_re,c_im);
}
/*void calc_fractal(int width, int height, float c_re, float c_im, unsigned char* dest)
{
int x;
int y;
for (y=0;y<height;y++)
for (x=0;x<width;x++)
{
float f_x=(float)(x*0.8)/(float)(width)-0.8;
float f_y=(float)(y*0.8)/(float)(height)-0.8;
dest[x+y*width]=iterate_pixel(f_x,f_y,c_re,c_im);
}
}*/
// Write a width by height 8-bit color image into File "filename"
// Writes a width x height 8-bit image as a binary PPM (P6) file. Each input
// byte v is expanded to an RGB triple (v<<0, v<<1, v<<2), clamped to 255 —
// i.e. green and blue channels brightened relative to red.
// Fix: the file handle was leaked on the write-failure paths; fclose() is
// now called before every early return once the file is open. The filename
// parameter is also const-correct now (callers passing char* still work).
void write_ppm(unsigned char* data,unsigned int width,unsigned int height,const char* filename)
{
    if (data == NULL) {
        printf("Provide a valid data pointer!\n");
        return;
    }
    if (filename == NULL) {
        printf("Provide a valid filename!\n"); return;
    }
    if ( (width>4096) || (height>4096)) {
        printf("Only pictures upto 4096x4096 are supported!\n");
        return;
    }
    FILE *f=fopen(filename,"wb");
    if (f == NULL)
    {
        printf("Opening File %s failed!\n",filename);
        return;
    }
    if (fprintf(f,"P6 %i %i 255\n",width,height) <= 0) {
        printf("Writing to file failed!\n");
        fclose(f);  // was leaked before
        return;
    };
    int i;
    for (i=0;i<height;i++) {
        unsigned char buffer[4096*3];
        int j;
        for (j=0;j<width;j++) {
            int v=data[i*width+j];
            int s;
            s= v << 0;
            s=s > 255? 255 : s;
            buffer[j*3+0]=s;
            s= v << 1;
            s=s > 255? 255 : s;
            buffer[j*3+1]=s;
            s= v << 2;
            s=s > 255? 255 : s;
            buffer[j*3+2]=s;
        }
        if (fwrite(buffer,width*3,1,f) != 1) {
            printf("Writing of line %i to file failed!\n",i);
            fclose(f);  // was leaked before
            return;
        }
    }
    fclose(f);
}
// Renders a 256x256 Julia-set image on the GPU and writes it to julia.ppm.
// Usage: prog <blockDimX> [blockDimY]; the grid is 256/Xnum x 256/Ynum, so
// the arguments should divide 256 evenly.
int main(int argc, char** args)
{
    unsigned char image [256*256];    // host copy of the rendered image
    unsigned char* image_GPU;
    int size_image = sizeof(char) * 256*256;
    int Xnum;
    int Ynum;
    switch (argc){
    case 2:
        Xnum = atoi(args[1]);
        Ynum = 1;
        break;
    case 3:
        Xnum = atoi(args[1]);
        Ynum = atoi(args[2]);
        break;
    default:
        // Bug fixed: the original fell through here with Xnum/Ynum
        // uninitialized (undefined behavior in the launch config below).
        printf("ERROR!!! Correct usage: ./output <**> (<**>)\n");
        return 1;
    }
    cudaMalloc((void**)&image_GPU, size_image);
    // NOTE: one thread to process one pixel,
    // say we have 16 block, then we need 256*256/16 = 4096 = 16*16*16 threads per block
    dim3 threadsPerBlock(Xnum,Ynum);
    dim3 numBlocks(256/Xnum,256/Ynum);
    calc_fractal <<<numBlocks, threadsPerBlock>>> (256,256,0.28,0.008,image_GPU);
    cudaMemcpy(image, image_GPU, size_image, cudaMemcpyDeviceToHost);
    write_ppm(image,256,256,"julia.ppm");
    cudaFree(image_GPU);  // fix: device buffer was leaked before
    return 0;
}
|
5,068 | // Copyright (c) 2020 Saurabh Yadav
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
#include <stdio.h>
#include <cuda_runtime.h>
#define NUM_OF_ELEMENTS 40000U
#define ARRAY_A_ELEMENT ((int) 'A')
#define ARRAY_B_ELEMENT ((int) 'B')
// Computes the vector sum C = A + B; each thread handles one pair-wise
// addition, with a guard for threads past the end.
__global__
void vecAddKernel(const int *arr_A, const int *arr_B, int *arr_C, int n) {
    const int gid = threadIdx.x + blockDim.x*blockIdx.x;
    if (gid >= n) {
        return;
    }
    arr_C[gid] = arr_A[gid] + arr_B[gid];
}
// Vector-addition demo with full error checking: fills two constant int
// arrays on the host, adds them on the GPU, verifies the result on the host.
int main() {
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    size_t arr_size = NUM_OF_ELEMENTS * sizeof(int);
    // Allocate the host input and output vectors
    int *host_arr_A = (int *)malloc(arr_size);
    int *host_arr_B = (int *)malloc(arr_size);
    int *host_arr_C = (int *)malloc(arr_size);
    // Verify that all allocations succeeded
    if (host_arr_A == NULL || host_arr_B == NULL || host_arr_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    // Initialize the host input vectors
    for (int i = 0; i < NUM_OF_ELEMENTS; ++i)
    {
        host_arr_A[i] = ARRAY_A_ELEMENT;
        host_arr_B[i] = ARRAY_B_ELEMENT;
    }
    // Allocate the device input vector A
    int *dev_arr_A = NULL;
    err = cudaMalloc((void **)&dev_arr_A, arr_size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Allocate the device input vector B
    int *dev_arr_B = NULL;
    err = cudaMalloc((void **)&dev_arr_B, arr_size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Allocate the device output vector C
    int *dev_arr_C = NULL;
    err = cudaMalloc((void **)&dev_arr_C, arr_size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy the host input vectors A and B in host memory to the device input vectors in
    // device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(dev_arr_A, host_arr_A, arr_size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(dev_arr_B, host_arr_B, arr_size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Launch the Vector Add CUDA Kernel
    int threadsPerBlock = 256;
    // Ceil-division so a partial final block covers the tail.
    int blocksPerGrid =(NUM_OF_ELEMENTS + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    vecAddKernel<<<blocksPerGrid, threadsPerBlock>>>(dev_arr_A, dev_arr_B, dev_arr_C, NUM_OF_ELEMENTS);
    // Kernel launches don't return errors directly; query the error state.
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy the device result vector in device memory to the host result vector
    // in host memory. (The blocking memcpy also synchronizes with the kernel.)
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(host_arr_C, dev_arr_C, arr_size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Verify that the result vector is correct
    // NOTE(review): fabs() on int arithmetic relies on an overload pulled in
    // through the included headers — confirm <math.h> is reachable here.
    for (int i = 0; i < NUM_OF_ELEMENTS; ++i)
    {
        if (fabs(host_arr_A[i] + host_arr_B[i] - host_arr_C[i]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Test PASSED\n");
    // Free device global memory
    err = cudaFree(dev_arr_A);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(dev_arr_B);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(dev_arr_C);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Free host memory
    free(host_arr_A);
    free(host_arr_B);
    free(host_arr_C);
    printf("Done\n");
    return EXIT_SUCCESS;
}
5,069 |
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <iostream>
#include <cstring>
using namespace std;
// Fills one cell of the partial-product matrix: thread (row, col) multiplies
// ASCII digit `row` of str2 with ASCII digit `col` of str1.
// The flattened index simplifies to
//   idx = row * (str1_len + str2_len + 1) + col + 1
// i.e. each row is offset one extra column to the right of the previous one,
// which place-value-aligns row r's products at columns r+1..r+str1_len of a
// (str1_len + str2_len)-wide matrix — derived from the expression only;
// confirm against the layout sumCols expects.
__global__ void multiplyDigits(char* d_str1, char* d_str2, int* d_matrix, int str1_len, int str2_len) {
    int row = blockDim.y * blockIdx.x + threadIdx.y;
    int col = blockDim.x * blockIdx.y + threadIdx.x;
    int idx = row * str1_len + (col + (str2_len * row)) + 1 + (row);
    d_matrix[idx] = (d_str2[row] - '0') * (d_str1[col] - '0');
}
// Propagates decimal carries within each row of the partial-product matrix:
// one thread per row, sweeping from the least-significant (rightmost) column.
__global__ void propagateCarries(int* d_matrix, int numCols) {
    // Bug fixed: the original computed
    //   idx = blockDim.x * blockIdx.x + threadIdx.x * numCols;
    // which scales only the thread-local part by the row stride, so any
    // thread in a block with blockIdx.x > 0 started mid-row (e.g. block 1,
    // thread 0 began at element 2 instead of row 2). The whole flat thread
    // id must be multiplied by numCols to address a row start.
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int idx = row * numCols;
    int carry = 0;
    for (int i = numCols - 1; i >= 0; i--) {
        int rowVal = (d_matrix[idx + i] + carry) % 10;
        carry = (d_matrix[idx + i] + carry) / 10;
        d_matrix[idx + i] = rowVal;
    }
}
// Sums each column of the numRows x numCols matrix into d_result; one
// thread per output column.
__global__ void sumCols(int* d_matrix, int* d_result, int numRows, int numCols) {
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    int total = 0;
    for (int row = 0; row < numRows; row++) {
        total += d_matrix[col + (numCols * row)];
    }
    d_result[col] = total;
}
// Final host-side carry sweep over the column sums: walks from the least-
// to the most-significant digit, keeping each slot in [0, 9].
__host__ void propagateCarryInFinalResult(int* h_result, int numCols) {
    int carry = 0;
    for (int pos = numCols - 1; pos >= 0; --pos) {
        int total = h_result[pos] + carry;
        h_result[pos] = total % 10;
        carry = total / 10;
    }
}
// Multiplies two decimal strings digit-by-digit on the GPU (schoolbook
// partial-product matrix, per-row carries, column sums, final host carry)
// and prints the product digits.
int main() {
    // Fix: string literals are const; the char* initializers used a
    // deprecated conversion.
    const char* h_str1 = "111111";
    const char* h_str2 = "111111";
    char* d_str1;
    char* d_str2;
    int* h_matrix;    // NOTE(review): allocated but never filled or copied
    int* h_result;
    int* d_matrix;
    int* d_result;
    int row = strlen(h_str2);
    int col = strlen(h_str1) + row;   // product has at most len1+len2 digits
    h_matrix = new int[row * col];
    h_result = new int[col];
    cudaMalloc(&d_str1, sizeof(char) * strlen(h_str1));
    cudaMalloc(&d_str2, sizeof(char) * strlen(h_str2));
    cudaMalloc(&d_matrix, sizeof(int) * (row * col));
    // Bug fixed: cudaMemset was given &d_matrix (the address of the host
    // pointer variable) instead of d_matrix (the device buffer), so the
    // matrix was never zeroed on the device.
    cudaMemset(d_matrix, 0, sizeof(int) * (row * col));
    cudaMalloc(&d_result, sizeof(int) * col);
    cudaMemcpy(d_str1, h_str1, sizeof(char) * strlen(h_str1), cudaMemcpyHostToDevice);
    cudaMemcpy(d_str2, h_str2, sizeof(char) * strlen(h_str2), cudaMemcpyHostToDevice);
    // 2x2 thread blocks tiling the digit grid (assumes even digit counts).
    dim3 gridDim(strlen(h_str1) / 2, strlen(h_str2) / 2);
    dim3 blockDim(2, 2);
    multiplyDigits<<<gridDim, blockDim>>>(d_str1, d_str2, d_matrix, strlen(h_str1), strlen(h_str2));
    propagateCarries<<<row / 2, 2>>>(d_matrix, col);
    sumCols<<<col / 2, 2>>>(d_matrix, d_result, row, col);
    cudaMemcpy(h_result, d_result, sizeof(int) * col, cudaMemcpyDeviceToHost);
    propagateCarryInFinalResult(h_result, col);
    for (int i = 0; i < col; i++) {
        cout << h_result[i];
    }
    cout << endl;
    cudaFree(d_str1);
    cudaFree(d_str2);
    cudaFree(d_matrix);   // fix: was leaked before
    cudaFree(d_result);
    delete[] h_matrix;
    delete[] h_result;
    cin.get();
    return 0;
}
5,070 | #include "includes.h"
// Two-level atomic maximum: each value first contends on its region's
// running max (region = i % num_regions); only threads that actually raised
// their region's max go on to update the single global *max, cutting
// contention on that one address.
__global__ void global_max(int *values, int *max, int *reg_maxes, int num_regions, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int region = i % num_regions;
    if(i < n)
    {
        int val = values[i];
        // Fix: the source contained the mojibake "atomicMax(®_maxes[region]...",
        // a mis-encoding of "&reg_maxes[region]". atomicMax returns the OLD
        // value, so old < val means this thread raised the region max.
        if(atomicMax(&reg_maxes[region], val) < val)
        {
            atomicMax(max, val);
        }
    }
}
5,071 | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <device_launch_parameters.h>
#define ARRAY_SIZE 1024*1024
#define NUM_THREADS 1024
// Saxpi 1 - Versin en C
// Host SAXPY reference: y[i] = a * x[i] + y[i] for every i in [0, n).
void saxpi_c(int n, float a, float* x, float* y)
{
    int i = 0;
    while (i < n) {
        y[i] = a * x[i] + y[i];
        ++i;
    }
}
// Serial SAXPY on the device: the launching thread walks the whole array.
// NOTE(review): there is no thread indexing — if launched with more than one
// thread, every thread performs the same read-modify-write of y[i]
// concurrently (a data race). Presumably meant for a <<<1,1>>> launch.
__global__ void saxpi_1(int n, float a, float* x, float* y)
{
    for (int i = 0; i < n; i++)
        y[i] = a * x[i] + y[i];
}
// Single-block SAXPY: thread idx handles the contiguous chunk
// [idx*numElem, (idx+1)*numElem) of the ARRAY_SIZE-element vectors.
__global__ void saxpi_1Block(int n, float a, float* x, float* y)
{
    int idx = threadIdx.x;
    int numElem = ARRAY_SIZE / NUM_THREADS;
    int offset = idx * numElem;
    // Bug fixed: the guard used '<', which rejected the LAST thread's chunk
    // (offset + numElem == n exactly when the array divides evenly), leaving
    // the final numElem elements unprocessed. '<=' admits a chunk whose end
    // coincides with n while still rejecting genuinely out-of-range chunks.
    if (offset + numElem <= n) {
        for (int i = 0; i < numElem; i++)
            y[offset + i] = a * x[offset + i] + y[offset + i];
    }
}
// Grid-wide SAXPY: one element per thread, with a tail guard.
__global__ void saxpi_nBlock(int n, float a, float* x, float* y)
{
    int gid = threadIdx.x + (blockIdx.x * blockDim.x);
    if (gid >= n) return;
    y[gid] = a * x[gid] + y[gid];
}
// Runs the grid-wide SAXPY variant on a 1M-element array, times it with CUDA
// events, and prints the first few results plus the elapsed time.
int main(void)
{
    float* h_x, * h_y; // host pointers
    float* d_x, * d_y; // device pointers
    cudaEvent_t inicioG, finG;   // GPU timing events (start / end)
    cudaEventCreate(&inicioG);
    cudaEventCreate(&finG);
    size_t sz = ARRAY_SIZE * sizeof(float);
    clock_t inicio, fin;   // only used by the commented-out CPU timing below
    // Allocate host and device buffers.
    h_x = (float*)malloc(sz);
    h_y = (float*)malloc(sz);
    cudaMalloc(&d_x, sz);
    cudaMalloc(&d_y, sz);
    // Initialize the arrays: expected result is 2*1 + 2 = 4 everywhere.
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_x[i] = 1.0f;
        h_y[i] = 2.0f;
    }
    cudaMemcpy(d_x, h_x, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, sz, cudaMemcpyHostToDevice);
    cudaEventRecord(inicioG);
    //saxpi_1Block <<<1, NUM_THREADS>>> (ARRAY_SIZE, 2.0, d_x, d_y);
    saxpi_nBlock <<< ARRAY_SIZE / NUM_THREADS, NUM_THREADS>>> (ARRAY_SIZE, 2.0, d_x, d_y);
    cudaEventRecord(finG);
    /*
    inicio = clock();
    saxpi_c(ARRAY_SIZE, 2.0, h_x, h_y);
    fin = clock();
    double t_exec = (double)(fin - inicio) / CLOCKS_PER_SEC;
    */
    // Blocking copy back; also synchronizes with the kernel.
    cudaMemcpy(h_y, d_y, sz, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(finG);
    float t_exec = 0;
    cudaEventElapsedTime(&t_exec, inicioG, finG);
    printf("Algunos resultados: ");
    for (int i = 0; i < 10; i++) {
        printf("%3.2f, ", h_y[i]);
    }
    printf("\n Tiempo de ejecucion: %2.7f\n", t_exec);
    // Free memory
    free(h_x);
    free(h_y);
    cudaFree(d_x);
    cudaFree(d_y);
    return 0;
}
5,072 | /**********************************************************************\
* Author: Jose A. Iglesias-Guitian *
* C/C++ code *
* Introduction to CUDA *
 \**********************************************************************/
// Instructions: How to compile this program.
// nvcc 0_hello_world.cu -L /usr/local/cuda/lib -lcudart -o 0_hello-world
#include<stdio.h>
// Minimal host-only program: prints a greeting and exits successfully.
int main(void) {
    printf("Hello World! \n");
    return 0;
}
|
5,073 | #include <cassert>
#include <cstdlib>
#include <iostream>
#define MASK_DIM 7
#define MASK_OFFSET (MASK_DIM / 2)
__constant__ int mask[7 * 7];
// 2-D convolution of an N x N matrix with the 7x7 mask held in __constant__
// memory; one thread per output element, out-of-image taps treated as zero.
__global__ void conv2d(int *matrix, int *result, int N) {
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    // Robustness fix: guard the output write so launches whose grid
    // overshoots N (N not a multiple of the block size) cannot write out of
    // bounds. The original relied on the grid exactly tiling the matrix.
    if (x >= N || y >= N) return;
    int s_y = y - MASK_OFFSET;    // top-left corner of the mask footprint
    int s_x = x - MASK_OFFSET;
    int temp = 0;
    for (int i = 0; i < MASK_DIM; i++) {
        for (int j = 0; j < MASK_DIM; j++) {
            // Skip taps that fall outside the image (zero padding).
            if((s_y + i) >= 0 && (s_y + i) < N) {
                if((s_x + j) >= 0 && (s_x + j) < N) {
                    temp += matrix[N * (s_y + i) + (s_x + j)] * mask[MASK_DIM * i + j];
                }
            }
        }
    }
    result[N * y + x] = temp;
}
// Fills an n x n row-major matrix with pseudo-random values in [0, 100).
void init_matrix(int *m, int n) {
    const int total = n * n;
    for (int k = 0; k < total; k++) {
        m[k] = rand() % 100;
    }
}
// CPU reference check: recomputes the full convolution for every output
// element and asserts it matches the GPU result; prints the last element
// compared on success.
// NOTE(review): printf relies on <cstdio> reaching here via the includes
// above — confirm.
void verify_result(int *matrix, int *result, int *mask, int N) {
    int temp, o_y, o_x;
    for(int y = 0; y < N; y++) {
        for(int x = 0; x < N; x++) {
            temp = 0;
            for (int i = 0; i < MASK_DIM; i++) {
                o_y = y - MASK_OFFSET + i;   // source row for this tap
                for (int j = 0; j < MASK_DIM; j++) {
                    o_x = x - MASK_OFFSET + j;   // source column for this tap
                    // Out-of-image taps contribute zero (same as the kernel).
                    if(o_y >= 0 && o_y < N) {
                        if(o_x >=0 && o_x < N) {
                            temp += matrix[N * o_y + o_x] * mask[MASK_DIM * i + j];
                        }
                    }
                }
            }
            assert(result[N * y + x] == temp);
        }
    }
    printf("result[%d] = %d == %d = temp \n", N * N - 1, result[N * N - 1], temp);
}
int main() {
    // 1024 x 1024 image, 7x7 mask.
    int N = 1 << 10;
    size_t bytes_n = sizeof(int) * N * N;
    size_t bytes_m = sizeof(int) * MASK_DIM * MASK_DIM;
    int *matrix = new int[N * N];
    init_matrix(matrix, N);
    int *result = new int[N * N];
    int *h_mask = new int[MASK_DIM * MASK_DIM];
    init_matrix(h_mask, MASK_DIM);
    int *d_matrix, *d_result;
    cudaMalloc(&d_matrix, bytes_n); cudaMalloc(&d_result, bytes_n);
    cudaMemcpy(d_matrix, matrix, bytes_n, cudaMemcpyHostToDevice);
    // The mask goes to __constant__ memory (broadcast-friendly reads).
    cudaMemcpyToSymbol(mask, h_mask, bytes_m);
    // 32x32 thread blocks; ceil-division grid covers the whole image.
    int n_threads = 32;
    int n_blocks = (N + n_threads - 1) / n_threads;
    printf("%d, %d; ", n_threads, n_blocks);
    dim3 block_dim(n_threads, n_threads);
    dim3 grid_dim(n_blocks, n_blocks);
    conv2d<<<grid_dim, block_dim>>>(d_matrix, d_result, N);
    // Blocking copy doubles as synchronization with the kernel.
    cudaMemcpy(result, d_result, bytes_n, cudaMemcpyDeviceToHost);
    verify_result(matrix, result, h_mask, N);
    std::cout << "COMPLETED SUCCESSFULLY! \n";
    delete[] matrix; delete[] result; delete[] h_mask;
    cudaFree(d_matrix); cudaFree(d_result);
    return 0;
}
|
5,074 | #define NSTREAM 4
#include<stdio.h>
// Element-wise integer vector addition: c = a + b over len elements.
__global__ void addVec(int* a, int* b, int* c, int const len){
    int gid = blockDim.x*blockIdx.x + threadIdx.x;
    if (gid >= len) return;
    c[gid] = a[gid] + b[gid];
}
// Splits a 2^16-element vector addition across NSTREAM CUDA streams,
// overlapping H2D copies, kernel work and D2H copies per chunk, then checks
// every element of the result (a[i] + b[i] == totalLen by construction).
int main(){
    int const totalLen = 1<<16;
    int const mSize = totalLen*sizeof(int);
    int* h_a;
    int* h_b;
    int* h_c;
    // Pinned host memory is required for cudaMemcpyAsync to truly overlap.
    cudaHostAlloc((void**)&h_a, mSize, cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_b, mSize, cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_c, mSize, cudaHostAllocDefault);
    for (int i=0; i<totalLen; i++){
        h_a[i] = i;
        h_b[i] = totalLen - i;
    }
    int* d_a;
    int* d_b;
    int* d_c;
    cudaMalloc((void**)&d_a, mSize);
    cudaMalloc((void**)&d_b, mSize);
    cudaMalloc((void**)&d_c, mSize);
    int const lenPerStream = totalLen/NSTREAM;
    int const mSizePerStream = mSize/NSTREAM;
    cudaStream_t lsStream[NSTREAM];
    for (int i=0; i<NSTREAM; i++){
        cudaStreamCreate(&lsStream[i]);
    }
    int const block = 256;
    int const grid = lenPerStream/block;
    // Per-stream pipeline: copy in, compute, copy out — chunks overlap.
    for (int i=0; i<NSTREAM; i++){
        int offset = i*lenPerStream;
        cudaMemcpyAsync(&d_a[offset], &h_a[offset], mSizePerStream, cudaMemcpyHostToDevice, lsStream[i]);
        cudaMemcpyAsync(&d_b[offset], &h_b[offset], mSizePerStream, cudaMemcpyHostToDevice, lsStream[i]);
        addVec<<<grid, block, 0, lsStream[i]>>>(&d_a[offset], &d_b[offset], &d_c[offset], lenPerStream);
        cudaMemcpyAsync(&h_c[offset], &d_c[offset], mSizePerStream, cudaMemcpyDeviceToHost, lsStream[i]);
    }
    for (int i=0; i<NSTREAM; i++){
        cudaStreamSynchronize(lsStream[i]);
    }
    for (int i=0; i<totalLen; i++){
        if (h_c[i]!=totalLen) {
            printf("error, %d, %d \n", h_c[i], i);
            break;
        }
    }
    for (int i=0; i<NSTREAM; i++){
        cudaStreamDestroy(lsStream[i]);
    }
    // Fix: release device buffers and pinned host allocations, which were
    // previously leaked (pinned memory is a scarce system resource).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    return 0;
}
|
5,075 | #include <stdio.h>
#include <string.h>
#include <cmath>
#include <iostream>
#include <fstream>
#include <ctime>
#include <random>
using namespace std;
//use seed from time to generate random values
std::mt19937 rng(time(0));
//used to set time to collision as a high value to indicate no collision
int const NO_VALUE = std::numeric_limits<int>::max();
//type of collision
enum CollType{NOCOLL, LEFTWALL, RIGHTWALL, TOPWALL, BOTTOMWALL, PARTCOLL};
//used to return what kind of collision happened with default values if
//a collision did not happen
//used to return what kind of collision happened with default values if
//a collision did not happen
struct Collision{
    //colliding particle #1 (only one colliding if hitting a wall)
    int partId1 = NO_VALUE;
    //colliding particle #2 (unset for wall collisions)
    int partId2 = NO_VALUE;
    //tells if we collided with a particle, with which wall
    //or didn't collide
    CollType type = NOCOLL;
    //time it took to collide; NO_VALUE (INT_MAX, widened to double)
    //acts as "no collision" so any real time compares smaller
    double t = NO_VALUE;
};
typedef enum {
MODE_PRINT,
MODE_PERF
} simulation_mode_t;
//used to present particles in the simulation
struct particle_t {
int i = -1; // particle id; -1 marks an uninitialized slot (checked in main)
double x = NO_VALUE; // position
double y = NO_VALUE;
double vx = NO_VALUE; // velocity
double vy = NO_VALUE;
int p_collisions = 0; // number of particle-particle collisions so far
int w_collisions = 0; // number of wall collisions so far
bool collided = false ; // already collided during the current step
bool stopPart = false; // stuck at a wall; skipped by movement/collision kernels
};
__constant__ int l, r, s;
__constant__ int n;
__managed__ particle_t* particles;
__managed__ Collision wallCol;
__managed__ Collision partCol;
int host_n;
// device method to atomically set minimum float value
// device method to atomically set minimum float value
// Atomically updates *addr to min(*addr, value) and returns the previous
// value using the integer-reinterpretation trick: non-negative IEEE-754
// floats order like signed ints (use atomicMin on int bits), negative ones
// order in reverse like unsigned ints (use atomicMax on unsigned bits).
// NOTE(review): the trick assumes no NaNs and that *addr holds a value of a
// comparable sign class — callers here seed it with a large positive
// sentinel, so the positive branch dominates; confirm negative times never occur.
__device__ __forceinline__ float atomicMinD (float * addr, float value) {
float old;
old = (value >= 0) ? __int_as_float(atomicMin((int *)addr, __float_as_int(value))) :
__uint_as_float(atomicMax((unsigned int *)addr, __float_as_uint(value)));
return old;
}
// device method to calculate wall collision time
// device method to calculate wall collision time
// Returns the earliest wall collision for particle p1 within the remaining
// time budget t, or a default Collision (type NOCOLL, t = NO_VALUE) if no
// wall is reached in time. iL is the box side length, iR the particle
// radius; each wall therefore sits at distance (l - r) from the origin.
__device__ Collision calcCollisionTime(particle_t &p1, int iL, int iR, double t){
    Collision rCol;
    double l = iL;
    double r = iR;
    if(p1.vx < 0){
        // (l - r - (l - p1.x)) simplifies to (p1.x - r): distance to left wall
        double t_help = (l - r - (l - p1.x))/(-p1.vx);
        if(t_help >= 0. && t_help < t){
            rCol.t = t_help;
            rCol.type = LEFTWALL;
            rCol.partId1 = p1.i;
        }
    }
    if(p1.vx > 0){
        double t_help = (l - r - p1.x)/p1.vx;
        if(t_help >= 0. && t_help < t){
            rCol.t = t_help;
            rCol.type = RIGHTWALL;
            rCol.partId1 = p1.i;
        }
    }
    if(p1.vy < 0){
        double t_help = (l - r - (l - p1.y))/(-p1.vy);
        if(t_help >= 0. && t_help < t && t_help < rCol.t){
            rCol.t = t_help;
            rCol.type = BOTTOMWALL;
            rCol.partId1 = p1.i;
        }
    }
    if(p1.vy > 0){
        // BUG FIX: original divided by p1.y (position) instead of p1.vy
        // (velocity), producing wrong top-wall collision times.
        double t_help = (l - r - p1.y)/p1.vy;
        if(t_help >= 0. && t_help < t && t_help < rCol.t){
            rCol.t = t_help;
            rCol.type = TOPWALL;
            rCol.partId1 = p1.i;
        }
    }
    return rCol;
}
// device method to calculate particle collision time
// device method to calculate particle collision time
// Solves |(P2 + V2*s) - (P1 + V1*s)| = 2r for the earliest s in [0, t):
// a quadratic a*s^2 + b*s + c = 0 in the relative motion frame. Returns a
// default Collision (NOCOLL) when the particles do not meet in time.
// Uses the __constant__ radius r declared at file scope.
__device__ Collision calcCollisionTime(particle_t &p1, particle_t &p2, double t){
    Collision rCol;
    double dvx = p2.vx - p1.vx;
    double dvy = p2.vy - p1.vy;
    double dx = p2.x - p1.x;
    double dy = p2.y - p1.y;
    // BUG FIX: original computed a = (dvx)*(p2.vx - p1.vy) + dvy*dvy,
    // mixing an x-velocity with a y-velocity; a must be |dV|^2.
    double a = dvx*dvx + dvy*dvy;
    double b = 2.0*(dx*dvx + dy*dvy);
    double c = dx*dx + dy*dy - (2*r)*(2*r);
    double det = b*b - 4*a*c;
    // det < 0 means the paths never come within 2r (sqrt would be NaN; the
    // original's NaN comparisons also rejected it, this just makes it explicit).
    if (a != 0 && det >= 0){
        double t_help = (-b - sqrt(det))/(2.0*a);
        if (t_help >= 0. && t_help < t){
            rCol.type = PARTCOLL;
            rCol.t = t_help;
            rCol.partId1 = p1.i;
            rCol.partId2 = p2.i;
        }
    }
    return rCol;
}
// Method updates fastest wall or particle collision depending on thread block
// Launched with exactly 2 blocks of num_threads: block 0 scans wall
// collisions, block 1 scans particle-particle collisions, for the particle
// range [offset, offset + num_threads). The fastest times are folded into
// the shared float `blocktime` per block, then the winning threads publish
// into the __managed__ globals wallCol / partCol.
__global__ void find_first_collisions(int num_threads, double t, int offset)
{
__shared__ int helpN; // collision indexes that only gets a value in block 2
int newN = NO_VALUE;
Collision retCol; // fastest collision within thread
__shared__ float blocktime; // fastest time in block
if (threadIdx.x == 0)
blocktime = NO_VALUE;
// NOTE(review): helpN is written by every thread (the if above has no
// braces); all write the same sentinel so the race is benign, but the
// indentation suggests single-thread init was intended — confirm.
helpN = NO_VALUE;
//barrier to ensure blocktime is initialized to a comparable default before continuing
__syncthreads();
int index = threadIdx.x;
if (n > num_threads){
index += offset;
}
//wall collision, checked by 1st block
if(blockIdx.x == 0 && index < n){
//If particle is stopped at a wall it can be ignored
if(!particles[index].stopPart){
retCol = calcCollisionTime(particles[index], l, r, t);
// comparing to remove collisions that are too slow to need atomic comparing
if (retCol.type != NOCOLL && (float) retCol.t < blocktime) {
atomicMinD(&blocktime, (float) retCol.t);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////
//Block2 used to calculate particle-particle collisions
else if(blockIdx.x == 1 && index < n){
//if hasn't collided yet
if(!particles[index].collided){
//find collision time with each particle
// NOTE(review): retCol is overwritten on every loop iteration, so only
// the LAST candidate pairing survives, not the minimum over j — the
// per-thread minimum is never kept. Verify this is intended.
for(int j=0; j<n; j++){
if (particles[index].i == particles[j].i) {
continue;
}
//neither particle has collided yet
if(!particles[index].collided && !particles[j].collided){
retCol = calcCollisionTime(particles[index], particles[j], t);
}
}
}
if (retCol.type == PARTCOLL && (float) retCol.t < blocktime){
// atomic compare & replace fastest block time to found time if sooner
// float conversion needed to further convert into int for atomicMin
atomicMinD(&blocktime, (float) retCol.t);
}
}
// barrier must be done outside of conditional
__syncthreads();
// if there was a particle collision
// and this thread's collision time = the block minimum
// (exact float equality is deliberate: the winner re-identifies itself by
// matching the value it folded into blocktime above)
if(retCol.type == PARTCOLL && (float) retCol.t == blocktime){
// calculate and compare index of collision
newN = retCol.partId1 + retCol.partId2;
//if new N is the same as current (reverse of existing collision or different collision with the same col index), void N
if(atomicMin(&helpN, newN) == newN){
newN = NO_VALUE;
} else {
//printf("th%i p%i t%f N%i compared with N%i\n", threadIdx.x, index, blocktime, newN, helpN);
}
}
// else if particle did collide with a wall (retCol has a value)
// and thread's collision time = the block minimum
else if (retCol.type != NOCOLL && (float) retCol.t == blocktime){
// if kernel is being run the first time (shortcircuits to save global access)
// or global wall collision is later
if(offset == 0 || wallCol.t > retCol.t){
// update rest of wallCol struct
wallCol.t = retCol.t;
wallCol.type = retCol.type;
wallCol.partId1 = retCol.partId1;
}
}
// make sure all the fastest threads in particle block have compared indexes
__syncthreads();
// if this thread has a collision index and it's the chosen one
// not accounting for identical indexes here, only 1 col should execute
if (newN != NO_VALUE && newN == helpN) {
// if kernel is being run the first time (shortcircuits to save global access)
// or global particle collision is later
if(offset == 0 || partCol.t > retCol.t){
// update partCol
partCol.type = PARTCOLL;
partCol.t = retCol.t;
partCol.partId1 = retCol.partId1;
partCol.partId2 = retCol.partId2;
//printf("partCol updated\n");
}
}
}
// advances particles in parallel
// Moves every non-stopped particle forward by time t along its velocity.
// Thread layout: particle index = offset + block * num_threads + thread.
__global__ void advanceParticlesP(int num_threads, double t, int offset){
    int idx = offset + blockIdx.x * num_threads + threadIdx.x;
    if (idx >= n || particles[idx].stopPart)
        return;
    particles[idx].x += particles[idx].vx * t;
    particles[idx].y += particles[idx].vy * t;
}
// Prints the most recent CUDA error, if any, without aborting the program.
__host__ void checkCudaErrors() {
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess) {
        printf("Last CUDA error %s\n", cudaGetErrorString(status));
    }
}
// Prints one "<step> <index> <x> <y> <vx> <vy>" line per particle.
__host__ void print_particles(int step)
{
    for (int idx = 0; idx < host_n; idx++) {
        printf("%d %d %f %f %f %f\n", step, idx, particles[idx].x,
               particles[idx].y, particles[idx].vx, particles[idx].vy);
    }
}
// Prints the final per-particle state plus collision counters, one line per
// particle: "<steps> <index> <x> <y> <vx> <vy> <p_colls> <w_colls>".
__host__ void print_statistics(int num_step)
{
    for (int idx = 0; idx < host_n; idx++) {
        printf("%d %d %f %f %f %f %d %d\n", num_step, idx,
               particles[idx].x, particles[idx].y,
               particles[idx].vx, particles[idx].vy,
               particles[idx].p_collisions, particles[idx].w_collisions);
    }
}
// returns soonest out of fastest wall and particle collisions
// Picks the sooner of the two collision candidates recorded by the kernels.
// When both happened, ties favour the wall collision. When neither happened,
// the returned partCol still holds its NOCOLL defaults, which the caller
// checks before acting.
__host__ Collision whichCollision()
{
    bool wallHit = (wallCol.type != NOCOLL);
    bool partHit = (partCol.type == PARTCOLL);
    if (wallHit && partHit)
        return (wallCol.t <= partCol.t) ? wallCol : partCol;
    return wallHit ? wallCol : partCol;
}
// Restores both unified-memory collision records to their "no collision"
// defaults so the next kernel pass starts from a clean slate.
__host__ void resetUnifiedCols(){
    wallCol.type    = NOCOLL;
    wallCol.t       = NO_VALUE;
    wallCol.partId1 = NO_VALUE;

    partCol.type    = NOCOLL;
    partCol.t       = NO_VALUE;
    partCol.partId1 = NO_VALUE;
    partCol.partId2 = NO_VALUE;
}
// Performs an elastic collision between two equal-mass particles: project
// each velocity onto the unit normal (line of centres) and unit tangent,
// swap the normal components, keep the tangential ones, then mark both
// particles as collided and bump their counters.
__host__ void collideParticles(particle_t &p1, particle_t &p2)
{
    // Vector between centres and its length.
    double dx = p2.x - p1.x;
    double dy = p2.y - p1.y;
    double dist = std::sqrt(dx*dx + dy*dy);
    // Unit normal (left zero when the centres coincide) and unit tangent.
    double nx = 0;
    double ny = 0;
    if (dist != 0) {
        nx = dx / dist;
        ny = dy / dist;
    }
    double tx = -ny;
    double ty = nx;
    // Scalar projections of each velocity onto the normal and tangent axes.
    double v1n = nx*p1.vx + ny*p1.vy;
    double v2n = nx*p2.vx + ny*p2.vy;
    double v1t = tx*p1.vx + ty*p1.vy;
    double v2t = tx*p2.vx + ty*p2.vy;
    // Equal masses: the 1D collision formulas reduce to swapping the normal
    // components while tangential components are unchanged. Recompose.
    p1.vx = v2n*nx + v1t*tx;
    p1.vy = v2n*ny + v1t*ty;
    p2.vx = v1n*nx + v2t*tx;
    p2.vy = v1n*ny + v2t*ty;
    // Record the collision on both particles.
    p1.collided = true;
    p1.p_collisions += 1;
    p2.collided = true;
    p2.p_collisions += 1;
}
//collides given particle with the given wall (changes the velociy)
// Reflects particle i off the given wall (flips the velocity component
// perpendicular to it so the particle heads back into the box) and records
// the wall collision.
__host__ void collideWall(int i, CollType col)
{
    particles[i].collided = true;
    particles[i].w_collisions += 1;
    // fabs sets the sign directly: positive away from left/bottom walls,
    // negative away from right/top walls.
    switch (col) {
    case LEFTWALL:
        particles[i].vx = fabs(particles[i].vx);
        break;
    case RIGHTWALL:
        particles[i].vx = -fabs(particles[i].vx);
        break;
    case TOPWALL:
        particles[i].vy = -fabs(particles[i].vy);
        break;
    case BOTTOMWALL:
        particles[i].vy = fabs(particles[i].vy);
        break;
    default:
        // NOCOLL / PARTCOLL never reach here; nothing to do.
        break;
    }
}
///////////////////////////////////////////////////////////////
// Entry point: reads the simulation header (and optionally explicit
// particles) from stdin, runs host_s steps of the event-driven collision
// simulation on the GPU, and prints per-step positions plus final statistics.
int main(int argc, char** argv)
{
int i, x, y, vx, vy;
int num_blocks, num_threads;
int step, offset;
int host_l, host_r, host_s;
simulation_mode_t mode;
char mode_buf[6];
// optional argv[1] overrides the default of 512 threads per block
if (argc == 2){
num_threads = atoi(argv[1]);
} else if (argc == 1){
num_threads = 512;
} else {
printf("Usage:\n%s (optional) num_threads < input\n", argv[0]);
return 1;
}
// input header: particle count, box size, particle radius, steps, mode word
scanf("%d", &host_n);
scanf("%d", &host_l);
scanf("%d", &host_r);
scanf("%d", &host_s);
scanf("%5s", mode_buf);
// blocks to address all particles with the given number of threads = ratio rounded up
// NOTE(review): this is ceil(threads/particles), not ceil(particles/threads);
// the offset loops below compensate when n > threads, and the guard in the
// kernels tolerates the excess when n < threads — confirm this is intended.
num_blocks = ceil((float)num_threads/(float)host_n);
printf("%d particles, %d threads per block, %d blocks.\n", host_n, num_threads, num_blocks);
// unified memory: both host helpers and kernels touch the particle array
cudaMallocManaged(&particles, sizeof(particle_t) * host_n);
for (i = 0; i < host_n; i++) {
particles[i].i = -1;
particles[i].p_collisions = 0;
particles[i].w_collisions = 0;
}
i = 0; //added
// read explicit particle definitions, if the input provides any
while (scanf("%d %d %d %d %d", &i, &x, &y, &vx, &vy) != EOF) {
particles[i].i = i;
particles[i].x = x;
particles[i].y = y;
particles[i].vx = vx;
particles[i].vy = vy;
}
// slot 0 still unset means the input had no particle lines: randomize them
if (particles[0].i == -1) {
//generate a distribution for random values in given range
std::uniform_int_distribution<int> dist_pos(host_r,host_l-host_r);
std::uniform_int_distribution<int> dist_v(host_l/(8*host_r), host_l/4);
//Generate particles and store into data structure
for(int j = 0; j < host_n; j++){
x=dist_pos(rng);
y=dist_pos(rng);
vx=dist_v(rng);
vy=dist_v(rng);
particles[j].i = j;
particles[j].x = x;
particles[j].y = y;
particles[j].vx = vx;
particles[j].vy = vy;
}
}
mode = strcmp(mode_buf, "print") == 0 ? MODE_PRINT : MODE_PERF;
/* Copy to GPU constant memory */
cudaMemcpyToSymbol(n, &host_n, sizeof(n));
cudaMemcpyToSymbol(l, &host_l, sizeof(l));
cudaMemcpyToSymbol(r, &host_r, sizeof(r));
cudaMemcpyToSymbol(s, &host_s, sizeof(s));
//printf("particles%i size%i radius%i steps%i\n", host_n, host_l, host_r, host_s);
//simulation of step
for (step = 0; step < host_s; step++) {
if (mode == MODE_PRINT || step == 0) {
print_particles(step);
}
// every step starts with each particle free to collide and move
for (i = 0; i < host_n; i++) {
particles[i].collided = false;
particles[i].stopPart = false;
}
double tMoved = 0.0;
Collision deviceCol;
//loop while within the step
while(tMoved < 1.0){
// if number of particles is less than threads:
// one block for each calculation will suffice
// one block of N threads for wall collisions, one for particle
// kernel is called with 2 blocks, numthreads in 1D each block
// if block1/block2 statement seperates wall and particle collisions
// syncthreads breaks up block seperation, then it returns to update wall/partCol
// if particles exceeds threads:
// a new grid is created with code to start at offset of particles already addressed.
// each section of [numthreads] particles is calculated sequentially.
for (offset = 0; offset < host_n; offset += num_threads) {
/* Call the kernel */
find_first_collisions<<<2, num_threads>>>(num_threads, 1.0-tMoved, offset);
//printf("Step %i tMoved %lf\n", step, tMoved);
/* Barrier */
cudaDeviceSynchronize();
checkCudaErrors();
}
deviceCol = whichCollision();
resetUnifiedCols();
//we had a collision
if(deviceCol.type != NOCOLL && deviceCol.t >= 0){
//printf("returned collision type %i in %f\n", deviceCol.type, deviceCol.t);
//move particles until collision time, using particle index offset if threads < n
for (offset = 0; offset < host_n; offset += num_blocks*num_threads){
advanceParticlesP<<<num_blocks, num_threads>>>(num_threads, deviceCol.t, offset);
cudaDeviceSynchronize();
checkCudaErrors();
//printf("advancing: t%d b%d time%lf, offset%d\n", num_threads, num_blocks, deviceCol.t, offset);
}
//advanceParticles(deviceCol.t);
tMoved += deviceCol.t;
//Collision between 2 particles
if(deviceCol.type == PARTCOLL){
collideParticles(particles[deviceCol.partId1], particles[deviceCol.partId2]);
}
//wall collision
else {
//If particle hasn't collided yet
if(!particles[deviceCol.partId1].collided){
collideWall(deviceCol.partId1, deviceCol.type);
}
//If it has, we need to stop it's movement at the wall
else{
particles[deviceCol.partId1].stopPart = true;
}
}
}
else {
//no remaining collisions so advance to end of step
for (offset = 0; offset < host_n; offset += num_blocks*num_threads){
advanceParticlesP<<<num_blocks, num_threads>>>(num_threads, 1 - tMoved, offset);
cudaDeviceSynchronize();
checkCudaErrors();
}
tMoved = 1;
}
}
}
print_statistics(host_s);
return 0;
}
|
5,076 | #include "includes.h"
// Computes biasTerm[c] = cFactor * (1/activeCells - winningFraction[c]) for
// each of maxCells cells, i.e. a bias proportional to how far each cell's
// winning fraction sits from the uniform share. Launched over a 2D grid of
// 1D blocks; the flattened thread id selects the cell.
__global__ void ComputeBiasTermKernel( float *biasTerm, float cFactor, float *winningFraction, int activeCells, int maxCells )
{
    // Flatten (blockIdx.y, blockIdx.x, threadIdx.x) into one global index.
    int cell = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    if (cell >= maxCells)
        return;
    biasTerm[cell] = cFactor * (1.00f / activeCells - winningFraction[cell]);
}
5,077 | #include "includes.h"
// Overwrites dev_w with (I - dev_w) for a flattened n_patch x n_patch
// matrix: every (n_patch+1)-th element is a diagonal entry and subtracts
// from 1.0, all others subtract from 0.0. Grid-stride loop over all cells.
__global__ void compute_l(double *dev_w, int n_patch)
{
    const int total = n_patch * n_patch;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x;
         idx < total;
         idx += blockDim.x * gridDim.x) {
        double identity = (idx % (n_patch + 1) == 0) ? 1.0 : 0.0;
        dev_w[idx] = identity - dev_w[idx];
    }
}
5,078 | #include <iostream>
#include <vector>
#include <cmath>
#include <string>
using namespace std::string_literals;
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/count.h>
// Element-wise a[i] += b[i] over N floats via a grid-stride loop. Takes
// thrust device_ptr wrappers and unwraps them to raw device pointers.
__global__
void add(unsigned int N, thrust::device_ptr<float> a, thrust::device_ptr<float> b)
{
    float* dst = thrust::raw_pointer_cast(a);
    const float* src = thrust::raw_pointer_cast(b);
    auto step = blockDim.x * gridDim.x;
    for (auto i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += step)
        dst[i] += src[i];
}
// Host driver: fills two N-element device vectors with 1s and 2s, adds them
// on the GPU, then reports whether the result differs from all-threes.
void gpgpuWrapper()
{
    auto constexpr N = 1'048'576u;
    auto constexpr kBLOCK_DIM = 256;
    auto constexpr kGRID_DIM = (N + kBLOCK_DIM - 1) / kBLOCK_DIM;

    thrust::device_vector<float> device_a(N, 1.f);
    thrust::device_vector<float> device_b(N, 2.f);

    add<<<kGRID_DIM, kBLOCK_DIM>>>(N, device_a.data(), device_b.data());

    // Synchronize to surface any launch/execution error before checking.
    auto const status = cudaDeviceSynchronize();
    if (status != cudaError::cudaSuccess)
        std::cout << "An error is happened: "s << std::string{cudaGetErrorString(status)} << '\n';

    // Every element should now be 3; count != N flags a mismatch.
    auto const threes = thrust::count(device_a.begin(), device_a.end(), 3.f);
    std::cout << std::boolalpha << "Range is different: "s << (threes != N) << '\n';
}
|
5,079 | // vAdd.cu
//
// driver and kernel call
#include <stdio.h>
#define THREADS_PER_BLOCK 32
// Element-wise c = a + b over n ints; one thread per element, tail guarded.
__global__ void vAdd_d (int *a_d, int *b_d, int *c_d, int n)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    c_d[idx] = a_d[idx] + b_d[idx];
}
// Adds two int arrays on the GPU: c[i] = a[i] + b[i] for i in [0, arraySize).
// a and b are host inputs, c receives the result. Any CUDA error (allocation,
// launch, or copy) is reported to stdout; device buffers are always released.
// Improvements over the original: cudaMalloc/cudaMemcpy results are checked
// (previously only the launch was), and the block count uses integer
// ceil-division instead of float ceil().
extern "C" void gpuAdd (int *a, int *b, int *c, int arraySize)
{
    size_t bytes = sizeof(int) * (size_t) arraySize;
    int *a_d = NULL, *b_d = NULL, *c_d = NULL;
    cudaError_t err;

    // Allocate device buffers, bailing out on the first failure.
    if ((err = cudaMalloc ((void**) &a_d, bytes)) == cudaSuccess &&
        (err = cudaMalloc ((void**) &b_d, bytes)) == cudaSuccess &&
        (err = cudaMalloc ((void**) &c_d, bytes)) == cudaSuccess) {
        cudaMemcpy (a_d, a, bytes, cudaMemcpyHostToDevice);
        cudaMemcpy (b_d, b, bytes, cudaMemcpyHostToDevice);
        // Ceil-divide so a partial tail block still covers every element.
        int blocks = (arraySize + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
        vAdd_d <<< blocks, THREADS_PER_BLOCK >>> (a_d, b_d, c_d, arraySize);
        err = cudaGetLastError();
        if (err == cudaSuccess)
            // Blocking copy also synchronizes with the kernel.
            err = cudaMemcpy (c, c_d, bytes, cudaMemcpyDeviceToHost);
    }
    if (err != cudaSuccess)
        printf ("CUDA error: %s\n", cudaGetErrorString(err));
    cudaFree (a_d);
    cudaFree (b_d);
    cudaFree (c_d);
}
|
5,080 | #include <stdio.h>
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
// Prints a greeting identifying this thread's block; with BLOCK_WIDTH == 1
// exactly one line is printed per block.
__global__ void hello()
{
printf("Hello world! I'm a thread in block %d\n", blockIdx.x);
// It has 16! different ways in which the thread blocks can be run
// (block scheduling order is unspecified, so the output order varies per run)
}
// Launches the hello kernel across NUM_BLOCKS blocks and waits so all
// device-side printf output appears before the final host message.
int main(int argc,char **argv)
{
    hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
    // Device printf is buffered; synchronizing flushes it to the host.
    cudaDeviceSynchronize();
    printf("That's all!\n");
    return 0;
}
5,081 | /*
Daniel Sá Barretto Prado Garcia 10374344
Tiago Marino Silva 10734748
Felipe Guilermmo Santuche Moleiro 10724010
Laura Genari Alves de Jesus 10801180
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define THREADS 32
#define INF 0x7fffffff
// Accumulates the element-wise (Frobenius) inner product of two dim x dim
// int matrices into *somaDosProd via atomicAdd. 2D launch, one thread per cell.
__global__ void prodEscalar(int* A, int* B, int* somaDosProd, int dim) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= dim || col >= dim)
        return;
    int idx = row * dim + col;
    atomicAdd(somaDosProd, A[idx] * B[idx]);
}
// For each cell, takes the larger and smaller of the two matrices' entries
// and folds them into the global *max_comp / *min_comp with atomics.
// 2D launch, one thread per cell of the dim x dim matrices.
__global__ void min_max_elementos(int* A, int* B, int* max_comp, int* min_comp, int dim)
{
    // Global 2D thread coordinates.
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= dim || col >= dim)
        return;
    int idx = row * dim + col;
    int a = A[idx];
    int b = B[idx];
    atomicMax(max_comp, a > b ? a : b);
    atomicMin(min_comp, a < b ? a : b);
}
// Reads a dimension and two dim x dim int matrices from the file named in
// argv[1], then computes on the GPU: the sum of element-wise products and the
// global min/max over the element-wise min/max of the two matrices.
// Prints "<sum> <min> <max>".
// Fixes over the original: the filename buffer is no longer heap-allocated
// and leaked (nor copied with an unbounded strcpy from argv), kernel launch
// errors are reported, and main returns explicitly.
int main(int argc, char **argv)
{
    if (argc < 2) {
        printf("Please run with input file name, i.e., num_perf_mpi inputfile.ext\n");
        exit(-1);
    }
    // Open the input file directly from argv[1].
    FILE *inputfile = fopen(argv[1], "r");
    if (inputfile == 0) {
        printf("Error openning input file.\n");
        exit(-1);
    }
    int dim;
    fscanf(inputfile, "%d\n", &dim); // matrix dimension
    // Host matrices, row-major.
    int *A = (int *)malloc(dim * dim * sizeof(int));
    int *B = (int *)malloc(dim * dim * sizeof(int));
    // Read matrix A, then matrix B.
    for (int i = 0; i < dim; i++)
        for (int j = 0; j < dim; j++)
            fscanf(inputfile, "%d ", &A[i * dim + j]);
    for (int i = 0; i < dim; i++)
        for (int j = 0; j < dim; j++)
            fscanf(inputfile, "%d ", &B[i * dim + j]);
    fclose(inputfile);
    // Device copies of both matrices.
    int *A_d, *B_d;
    cudaMalloc(&A_d, dim * dim * sizeof(int));
    cudaMalloc(&B_d, dim * dim * sizeof(int));
    cudaMemcpy(A_d, A, dim * dim * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B, dim * dim * sizeof(int), cudaMemcpyHostToDevice);
    // Scalars seeded so atomicMax/atomicMin/atomicAdd start from identities.
    int max = -INF, min = INF, soma = 0;
    int *min_D, *max_D, *soma_D;
    cudaMalloc(&min_D, sizeof(int));
    cudaMalloc(&max_D, sizeof(int));
    cudaMalloc(&soma_D, sizeof(int));
    cudaMemcpy(max_D, &max, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(min_D, &min, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(soma_D, &soma, sizeof(int), cudaMemcpyHostToDevice);
    // 2D launch: THREADS x THREADS per block, ceil-divided grid.
    dim3 threadsPerBlock(THREADS, THREADS);
    dim3 blocksPerGrid((dim + (threadsPerBlock.x - 1)) / threadsPerBlock.x,
                       (dim + (threadsPerBlock.y - 1)) / threadsPerBlock.y);
    min_max_elementos<<<blocksPerGrid, threadsPerBlock>>>(A_d, B_d, max_D, min_D, dim);
    prodEscalar<<<blocksPerGrid, threadsPerBlock>>>(A_d, B_d, soma_D, dim);
    // Surface launch errors instead of silently printing garbage results.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("CUDA error: %s\n", cudaGetErrorString(err));
    // Blocking copies also synchronize with the kernels above.
    cudaMemcpy(&max, max_D, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&min, min_D, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&soma, soma_D, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d %d %d\n", soma, min, max);
    // Release host and device memory.
    free(A);
    free(B);
    cudaFree(B_d);
    cudaFree(A_d);
    cudaFree(min_D);
    cudaFree(max_D);
    cudaFree(soma_D);
    return 0;
}
5,082 | #include "includes.h"
// Loads a padded tile of the volume into shared memory (with left/right
// halos along X), computes the 3x3 Hessian of each voxel by central
// differences, and writes 1 to d_hessian_pd where the leading-minor test
// (xx < 0, 2x2 minor < 0, full determinant < 0) passes, else 0.
// NOTE(review): verify that this sign pattern matches the intended
// definiteness test implied by the kernel's name.
// NOTE(review): threads whose baseY/baseZ fall outside the volume, and the
// y/z edge threads, return before the __syncthreads() below; __syncthreads
// under divergent control flow is undefined behaviour — confirm this is
// safe on the target architecture.
__global__ void HessianPositiveDefiniteKernel( char *d_hessian_pd, float *d_Src, int imageW, int imageH, int imageD )
{
__shared__ float s_Data[HES_BLOCKDIM_Z+2][HES_BLOCKDIM_Y+2][(HES_RESULT_STEPS + 2 * HES_HALO_STEPS) * HES_BLOCKDIM_X];
//Offset to the left halo edge
const int baseX = (blockIdx.x * HES_RESULT_STEPS - HES_HALO_STEPS) * HES_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * HES_BLOCKDIM_Y + threadIdx.y-1;
const int baseZ = blockIdx.z * HES_BLOCKDIM_Z + threadIdx.z-1;
const int idx = (baseZ * imageH + baseY) * imageW + baseX;
d_Src += idx; d_hessian_pd += idx;
// out-of-volume rows/slices contribute zeros to the tile and do no compute
if(baseZ < 0 || baseZ >= imageD || baseY < 0 || baseY >= imageH) {
for (int i = 0; i < HES_HALO_STEPS + HES_RESULT_STEPS + HES_HALO_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X] = 0;
}
return;
}
//Load main data
#pragma unroll
for (int i = HES_HALO_STEPS; i < HES_HALO_STEPS + HES_RESULT_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X] = (float)d_Src[i * HES_BLOCKDIM_X];
}
//Load left halo
#pragma unroll
for (int i = 0; i < HES_HALO_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X] = (baseX + i * HES_BLOCKDIM_X >= 0) ? (float)d_Src[i * HES_BLOCKDIM_X] : 0;
}
//Load right halo
#pragma unroll
for (int i = HES_HALO_STEPS + HES_RESULT_STEPS; i < HES_HALO_STEPS + HES_RESULT_STEPS + HES_HALO_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X] = (baseX + i * HES_BLOCKDIM_X < imageW) ? (float)d_Src[i * HES_BLOCKDIM_X] : 0;
}
// yz edge is no need to compute
if (threadIdx.z == 0 || threadIdx.z == HES_BLOCKDIM_Z+1 || threadIdx.y == 0 || threadIdx.y == HES_BLOCKDIM_Y+1)
return;
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = HES_HALO_STEPS; i < HES_HALO_STEPS + HES_RESULT_STEPS; i++)
{
// second derivatives by central differences; mixed terms use the four
// diagonal neighbours and are scaled by 1/4 below
float xx,xy,xz,yy,yz,zz;
xx = s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X - 1]
+ s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X + 1]
- s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X] * 2;
xy = s_Data[threadIdx.z][threadIdx.y + 1][threadIdx.x + i * HES_BLOCKDIM_X + 1]
+ s_Data[threadIdx.z][threadIdx.y - 1][threadIdx.x + i * HES_BLOCKDIM_X - 1]
- s_Data[threadIdx.z][threadIdx.y + 1][threadIdx.x + i * HES_BLOCKDIM_X - 1]
- s_Data[threadIdx.z][threadIdx.y - 1][threadIdx.x + i * HES_BLOCKDIM_X + 1];
xz = s_Data[threadIdx.z + 1][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X + 1]
+ s_Data[threadIdx.z - 1][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X - 1]
- s_Data[threadIdx.z + 1][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X - 1]
- s_Data[threadIdx.z - 1][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X + 1];
yy = s_Data[threadIdx.z][threadIdx.y + 1][threadIdx.x + i * HES_BLOCKDIM_X]
+ s_Data[threadIdx.z][threadIdx.y - 1][threadIdx.x + i * HES_BLOCKDIM_X]
- s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X] * 2;
yz = s_Data[threadIdx.z + 1][threadIdx.y + 1][threadIdx.x + i * HES_BLOCKDIM_X]
+ s_Data[threadIdx.z - 1][threadIdx.y - 1][threadIdx.x + i * HES_BLOCKDIM_X]
- s_Data[threadIdx.z + 1][threadIdx.y - 1][threadIdx.x + i * HES_BLOCKDIM_X]
- s_Data[threadIdx.z - 1][threadIdx.y + 1][threadIdx.x + i * HES_BLOCKDIM_X];
zz = s_Data[threadIdx.z + 1][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X]
+ s_Data[threadIdx.z - 1][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X]
- s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * HES_BLOCKDIM_X] * 2;
xy *= 0.25; xz *= 0.25; yz *= 0.25;
d_hessian_pd[i * HES_BLOCKDIM_X] = (xx < 0 && xx*yy-xy*xy < 0 && xx*yy*zz + 2*xy*yz*xz - xx*yz*yz - yy*xz*xz - zz*xy*xy < 0) ? 1 : 0;
}
}
5,083 | #include "includes.h"
__global__ void profilePhaseSolve_kernel() {} |
5,084 | #include <assert.h>
#include <iostream>
#include <cstdlib>
#include<sys/time.h>
#include <cmath>
#include "cuda_runtime.h"
const int LANGE = 16;
// Each thread handles LANGE elements strided by chunk = N/LANGE:
// even-numbered threads store a+b, odd-numbered threads store a-b.
__global__ void vecAdd(double *d_a, double *d_b, double *d_c, int N) {
    const int chunk = N / LANGE;
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= chunk)
        return;
    const bool doSum = (idx % 2 == 0);
    for (int k = 0; k < LANGE; k++) {
        const int pos = idx + k * chunk;
        d_c[pos] = doSum ? d_a[pos] + d_b[pos] : d_a[pos] - d_b[pos];
    }
}
// Reads n (a multiple of LANGE) from stdin, builds sin^2/cos^2 input
// vectors, runs vecAdd on the GPU timed with CUDA events, then times a
// plain CPU addition and reports which was faster.
// BUG FIX: the CPU elapsed time was computed in seconds but printed and
// compared against timeGPU (milliseconds); it is now converted to ms.
// Also destroys the CUDA events before exiting.
int main (void) {
    int n;
    std::cin>>n;
    assert(n % LANGE == 0);
    // Host buffers; sin^2 + cos^2 == 1, so a plain element-wise sum is all ones.
    double *H_a, *H_b, *H_c;
    size_t bytes = n * sizeof(double);
    H_a = (double*)malloc(bytes);
    H_b = (double*)malloc(bytes);
    H_c = (double*)malloc(bytes);
    for (int i = 0; i < n; i++) {
        H_a[i] = sin(i) * sin(i);
        H_b[i] = cos(i) * cos(i);
    }
    // Device buffers and input upload.
    double *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    cudaMemcpy(d_a, H_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, H_b, bytes, cudaMemcpyHostToDevice);
    // One thread per LANGE-element chunk.
    int blockSize = 1024;
    int gridSize = ((n-1)/LANGE)/blockSize + 1;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Time the kernel with CUDA events (elapsed time is in milliseconds).
    cudaEventRecord(start);
    vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    cudaMemcpy(H_c, d_c, bytes, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float timeGPU = 0;
    cudaEventElapsedTime(&timeGPU, start, stop);
    std::cout << "Runtime for GPU is: " << timeGPU<<" ms "<< std::endl;
    // Time the equivalent CPU loop with gettimeofday.
    struct timeval startCPU,endCPU;
    gettimeofday(&startCPU,NULL);
    for (int i = 0; i < n; i++) {
        H_c[i] = H_a[i] + H_b[i];
    }
    gettimeofday(&endCPU,NULL);
    // Convert seconds + microseconds to milliseconds so the units match timeGPU.
    double timeCPU = (endCPU.tv_sec - startCPU.tv_sec) * 1000.0
                   + (double)(endCPU.tv_usec - startCPU.tv_usec) / 1000.0;
    std::cout << "Runtime for CPU is: " << timeCPU <<" ms "<< std::endl;
    // Summary: both figures are now in milliseconds.
    if(timeGPU<timeCPU){
        std::cout<<"GPU is faster than CPU for "<<timeCPU-timeGPU<<" ms "<<std::endl;
    }else if(timeGPU>timeCPU){
        std::cout<<"CPU is faster than GPU for "<<timeGPU-timeCPU<<" ms "<<std::endl;
    }else{
        std::cout<<"same time for GPU and CPU"<<std::endl;
    }
    // Release host/device memory and the timing events.
    free(H_a);
    free(H_b);
    free(H_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
5,085 | #include <iostream>
#include <cmath>
using namespace std;
// y[i] += x[i] over N doubles using a grid-stride loop.
__global__
void add(double *x, double *y, int N)
{
    int first = blockIdx.x*blockDim.x + threadIdx.x;
    int step = gridDim.x * blockDim.x;
    for (int k = first; k < N; k += step)
        y[k] += x[k];
}
// Adds two 1e6-element vectors (x = 1, y = 2) on the device and prints the
// sum of squared deviations of the result from the expected value 3.
int main()
{
    const int N = 1e6;
    const int size = N*sizeof(double);

    double *d_x, *d_y;
    cudaMalloc((void **)&d_x, size);
    cudaMalloc((void **)&d_y, size);

    double *x = new double[N];
    double *y = new double[N];
    for (int k = 0; k < N; ++k) {
        x[k] = 1.;
        y[k] = 2.;
    }

    cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
    add<<<20,512>>>(d_x, d_y, N);
    // The blocking copy back also synchronizes with the kernel.
    cudaMemcpy(y, d_y, size, cudaMemcpyDeviceToHost);

    // Accumulate the squared error against the expected all-threes result.
    double err{0.};
    for (int k = 0; k < N; ++k) {
        double diff = y[k] - 3.;
        err += diff * diff;
    }
    cout << "err = " << err << endl;

    delete [] x; delete [] y;
    cudaFree(d_x); cudaFree(d_y);
}
|
5,086 | #include "includes.h"
#define NOMINMAX
const unsigned int BLOCK_SIZE = 512;
// c[i] = a[i] + b[i] with one element per thread. Only threadIdx.x is used
// and there is no bounds check, so a single block sized to the array is assumed.
__global__ void addKernel(float *c, const float *a, const float *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
5,087 | #include "includes.h"
// xx1 = xx0 + xxW element-wise over a dim x nwl layout (x indexes the
// dimension, y indexes the walker; element (i, j) lives at i + j*dim).
__global__ void addWalkers ( const int dim, const int nwl, const float *xx0, const float *xxW, float *xx1 ) {
    int ix = threadIdx.x + blockDim.x * blockIdx.x;
    int iw = threadIdx.y + blockDim.y * blockIdx.y;
    if (ix >= dim || iw >= nwl)
        return;
    int flat = ix + iw * dim;
    xx1[flat] = xx0[flat] + xxW[flat];
}
5,088 | #include <sstream>
#include <iomanip>
#include <cuda.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
using namespace std;
#define NUM_POINTS_PER_THREAD 1000
// Seeds one curand generator state per thread (sequence number = thread id)
// so later kernels draw independent streams.
__global__ void kernel_initializeRand( curandState * randomGeneratorStateArray, unsigned long seed, int totalNumThreads)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < totalNumThreads)
        curand_init(seed, tid, 0, &randomGeneratorStateArray[tid]);
}
// Monte-Carlo pi sampling: each thread draws NUM_POINTS_PER_THREAD uniform
// points in the unit square and adds the count inside the quarter circle to
// its counts[] slot.
__global__ void kernel_generatePoints( curandState * globalState, int* counts, int totalNumThreads)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= totalNumThreads)
        return;
    // Work on a register-local copy of the RNG state; write it back once.
    curandState state = globalState[tid];
    int hits = 0;
    for (int k = 0; k < NUM_POINTS_PER_THREAD; k++) {
        float px = curand_uniform(&state);
        float py = curand_uniform(&state);
        if (px * px + py * py <= 1)
            hits++;
    }
    counts[tid] += hits;
    globalState[tid] = state;
}
// Monte-Carlo pi estimate: argv[1] gives the thread count; each thread
// samples NUM_POINTS_PER_THREAD points and hits are summed with thrust.
int main(int argc, char** argv)
{
    if (argc < 2) {
        std::cerr << "error, incorrect param" << endl;
        exit(0);
    }
    int numThreads;
    {
        stringstream ss1(argv[1]);
        ss1 >> numThreads;
    }
    // 1D launch: 1024 threads per block, enough blocks to cover numThreads.
    dim3 threadsPerBlock(1024, 1, 1);
    dim3 numberofBlocks((numThreads + threadsPerBlock.x - 1) / threadsPerBlock.x, 1, 1);

    // Per-thread RNG states and zero-initialized hit counters.
    curandState* devRandomGeneratorStateArray;
    cudaMalloc(&devRandomGeneratorStateArray, numThreads * sizeof(curandState));
    thrust::host_vector<int> hostCounts(numThreads, 0);
    thrust::device_vector<int> deviceCounts(hostCounts);
    int* dcountsptr = thrust::raw_pointer_cast(&deviceCounts[0]);

    kernel_initializeRand <<< numberofBlocks, threadsPerBlock >>> ( devRandomGeneratorStateArray, time(NULL), numThreads);
    kernel_generatePoints <<< numberofBlocks, threadsPerBlock >>> (devRandomGeneratorStateArray, dcountsptr, numThreads);

    // hits / samples approximates pi/4; thrust::reduce syncs with the kernels.
    int sum = thrust::reduce(deviceCounts.begin(), deviceCounts.end(), 0, thrust::plus<int>());
    std::cout << "our approx of pi = " <<std::setprecision(10) << (float(sum)/(numThreads*NUM_POINTS_PER_THREAD))*4 << std::endl;
}
|
// Stores a + b into x[0]; the host wrapper below launches a single thread.
__global__ void add_kernel(int *x, int a, int b) {
    x[0] = b + a;
}

// Host-callable wrapper: launches add_kernel with 1 block of 1 thread.
void add(int *x, int a, int b) {
    add_kernel<<<1, 1>>>(x, a, b);
}
5,090 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
// #include <stdexcept>
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
// Samples f(x) = exp(-x^2) at n_points evenly spaced abscissae starting at
// x_min; thread id selects the sample index. There is no bounds check, so
// the caller must launch exactly n_points threads (as main does).
// Improvement: use expf and x*x instead of exp()/pow(), which silently
// promoted the whole computation to double precision in a float kernel.
__global__ void prepare_function(float * d_out, int n_points,
float x_min, float x_max)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    float x = x_min + (x_max - x_min) * id / n_points;
    d_out[id] = expf(-(x * x));
    // d_out[id] = (float) id;
}
// __global__ void blelloch_reduce(float * d_in, int n_points)
// {
// /* Assuming n_points is a power of two */
// int n_current = 2;
// int id = threadIdx.x + blockIdx.x * blockDim.x;
//
// while(n_current <= n_points)
// {
// if ((id + 1) % n_current == 0)
// {
// d_in[id] += d_in[id - n_current/2];
// }
// __syncthreads();
// n_current = n_current * 2;
// }
//
// }
// One up-sweep (reduce) step of the Blelloch scan: every stride_step-th
// element accumulates its partner half a stride behind it. The host drives
// successive steps with doubling strides and a kernel launch per step.
__global__ void blelloch_reduce_step(float * d_in, int n_points, int stride_step)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    bool isStrideEnd = ((tid + 1) % stride_step == 0);
    if (isStrideEnd)
        d_in[tid] += d_in[tid - stride_step / 2];
}
// Zeroes the final element of d_in, preparing the down-sweep phase of an
// exclusive Blelloch scan. Only the single thread mapped to the last index acts.
__global__ void kill_last(float * d_in, int n_points)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid == n_points - 1)
        d_in[tid] = 0;
}
/*
__global__ void blelloch_downsweep(float * d_in, int n_points)
{
int n_current = n_points;
int id = threadIdx.x + blockIdx.x * blockDim.x;
float tmp;
if (id == n_points - 1)
{
d_in[id] = 0;
}
__syncthreads();
while(n_current >= 2)
{
if ((id + 1) % n_current == 0)
{
tmp = d_in[id];
d_in[id] += d_in[id - n_current/2];
d_in[id - n_current/2] = tmp;
}
n_current = n_current / 2;
__syncthreads();
}
}*/
// One down-sweep step of a Blelloch exclusive scan: each stride-end
// element swaps with its partner half a stride below while accumulating
// the partner's value. Host calls with stride_step = n_points, ..., 2.
__global__ void blelloch_downsweep_step(float * d_in, int n_points, int stride_step)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ((tid + 1) % stride_step == 0)
    {
        const int partner = tid - stride_step / 2;
        const float right = d_in[tid];
        d_in[tid] = right + d_in[partner];
        d_in[partner] = right;
    }
}
// Integrate exp(-x^2) from (effectively) -infinity to x_max:
// tabulate the integrand on the GPU, run a Blelloch exclusive scan over
// it, then read the last prefix sum and multiply by the grid spacing dx.
// Fixes vs. original: kernel launches are now checked with
// cudaGetLastError(), and the host calloc results are validated.
int main(int argc, char* argv[])
{
    float minus_infty = -8;   /* lower cutoff standing in for -infinity */
    float x_max = 0;
    int n_blocks = 1024;
    int n_points_per_block = 1024;
    int n_points = n_points_per_block * n_blocks;
    int stride_step;
    float dx;
    float *devFunVals;
    float *hostFunVals;
    float *hostFunVals2;
    if (argc > 1)
    {
        sscanf(argv[1], "%f", &x_max);
        if (x_max < minus_infty)
        {
            /* Below the cutoff the integral is numerically zero. */
            printf("0\n");
            return 0;
        }
    }
    else
    {
        printf("Usage: ./scan <number> \n");
        return 0;
    }
    dx = (x_max - minus_infty) / (float) n_points;
    /* The scan implementation assumes a power-of-two element count. */
    if (n_points < 0 || ((n_points & (n_points - 1)) != 0))
    {
        printf("n_points is not a power of two\n");
        return 1;
    }
    hostFunVals = (float *)calloc(n_points, sizeof(float));
    hostFunVals2 = (float *)calloc(n_points, sizeof(float));
    if (hostFunVals == NULL || hostFunVals2 == NULL)
    {
        printf("host allocation failed\n");
        free(hostFunVals);
        free(hostFunVals2);
        return 1;
    }
    CUDA_CALL(cudaMalloc((void **)&devFunVals, n_points*sizeof(float)));
    prepare_function<<<n_blocks, n_points_per_block>>>(devFunVals, n_points, minus_infty, x_max);
    CUDA_CALL(cudaGetLastError());   /* catch launch-configuration errors */
    /* Up-sweep (reduce) phase of the Blelloch scan. */
    stride_step = 2;
    while(stride_step <= n_points)
    {
        blelloch_reduce_step<<<n_blocks, n_points_per_block>>>(devFunVals, n_points, stride_step);
        stride_step = stride_step * 2;
    }
    CUDA_CALL(cudaGetLastError());
    stride_step = n_points;
    /* Debug snapshot of the reduced tree (cudaMemcpy also synchronizes). */
    CUDA_CALL(cudaMemcpy(hostFunVals, devFunVals, n_points*sizeof(float), cudaMemcpyDeviceToHost));
    kill_last<<<n_blocks, n_points_per_block>>>(devFunVals, n_points);
    /* Down-sweep phase: turns the reduce tree into an exclusive scan. */
    while(stride_step >= 2)
    {
        blelloch_downsweep_step<<<n_blocks, n_points_per_block>>>(devFunVals, n_points, stride_step);
        stride_step = stride_step / 2;
    }
    CUDA_CALL(cudaGetLastError());
    CUDA_CALL(cudaMemcpy(hostFunVals2, devFunVals, n_points*sizeof(float), cudaMemcpyDeviceToHost));
    /* Exclusive scan: slot n-1 holds the sum of the first n-1 samples. */
    printf("Integral value: %1.5e\n", hostFunVals2[n_points - 1] * dx);
    free(hostFunVals);
    free(hostFunVals2);
    CUDA_CALL(cudaFree(devFunVals));
    return 0;
}
|
5,091 | #include <stdio.h>
// Each thread writes the constant 7 into its own global slot.
__global__ void kernel1( int *a )
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    a[gid] = 7;   // expected output: 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
}
// Each thread stores its block index — demonstrates blockIdx layout.
__global__ void kernel2( int *a )
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    a[gid] = blockIdx.x;   // expected output: 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3
}
// Each thread stores its in-block thread index — demonstrates threadIdx layout.
__global__ void kernel3( int *a )
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
a[idx] = threadIdx.x; // output: 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 (original comment listed "1 2 3 4" for block 1, which is wrong)
}
// Demo driver: zero a 16-int device buffer, run kernel1 over a 4x4
// launch, and print the result.
// Fix vs. original: cudaMemset(dev_array, 0, 16) cleared only 16 BYTES
// (4 ints), leaving the rest of the buffer uninitialized; clear all
// 16 ints.
int main()
{
    int *host_array;
    int *dev_array;
    const size_t bytes = sizeof(int) * 16;
    host_array = (int *) malloc(bytes);
    cudaMalloc(&dev_array, bytes);
    cudaMemset(dev_array, 0, bytes);   // zero the whole buffer, not 16 bytes
    kernel1<<<4, 4>>>(dev_array);
    cudaMemcpy(host_array, dev_array, bytes, cudaMemcpyDeviceToHost);
    for(int i = 0; i < 16; i++) printf(" %d ", host_array[i]);
    printf("\n");
    free(host_array);
    cudaFree(dev_array);
    cudaDeviceReset();
    return 0;
}
|
5,092 | #include <cuda.h>
#include <cuda_runtime_api.h>
#define N_MEM_OPS_PER_KERNEL 2
//-----------------------------------------------------------------------------
// Simple streaming-copy kernel used for memory-bandwidth tests
// @param d_in   - Source buffer
// @param d_out  - Destination buffer
// @param offset - Index offset applied to every thread's element
//-----------------------------------------------------------------------------
// Streaming copy: each thread copies one element of d_in to d_out at its
// (offset-shifted) global index.
// NOTE(review): no bounds check — the launch configuration must guarantee
// that gridDim.x*blockDim.x + offset stays within both buffers; confirm
// at the call site.
__global__
void max_flops_kernel(float* d_in, float* d_out, int offset) {
    // Global element index, shifted by the caller-supplied offset.
    int it = blockIdx.x * blockDim.x + threadIdx.x + offset;
    d_out[it] = d_in[it];
}
|
5,093 | /* Kernel for vector squaring */
/* Square each of the n input elements into out (one thread per element;
 * excess threads beyond n do nothing). */
__global__ void gpusquare(float in[], float out[], int n)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid < n)
    {
        const float v = in[gid];
        out[gid] = v * v;
    }
} |
5,094 | #include "includes.h"
// Flatten 3-D grid and 3-D block coordinates into one global thread id
// (unique per thread across the whole launch).
__device__ unsigned int getGid3d3d(){
    const int block_rank = blockIdx.x
                         + blockIdx.y * gridDim.x
                         + blockIdx.z * gridDim.x * gridDim.y;
    const int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    const int rank_in_block = threadIdx.x
                            + threadIdx.y * blockDim.x
                            + threadIdx.z * blockDim.x * blockDim.y;
    return block_rank * threads_per_block + rank_in_block;
}
// Normalize a complex field: divide both components of each element of
// `in` by sqrt(pSum[0] * dr), where pSum[0] holds the reduced sum, and
// store the result in `out`. One thread per element via getGid3d3d().
__global__ void scalarDiv_wfcNorm(double2* in, double dr, double* pSum, double2* out){
    const unsigned int gid = getGid3d3d();
    const double norm = sqrt((pSum[0])*dr);
    double2 scaled;
    scaled.x = in[gid].x / norm;
    scaled.y = in[gid].y / norm;
    out[gid] = scaled;
} |
5,095 | #include "includes.h"
// Naive dense matrix multiply: C = A * B for w x w row-major matrices;
// one thread computes one output element C[ty][tx].
// Fixes vs. original:
//   * the inner loop accumulated `a + b` instead of `a * b`, so the
//     kernel did not compute a matrix product at all;
//   * threads outside the w x w range wrote out of bounds — now guarded.
__global__ void cuda_mul(int* A, int* B, int* C, int w)
{
    int tx = blockDim.x * blockIdx.x + threadIdx.x;  // column of C
    int ty = blockDim.y * blockIdx.y + threadIdx.y;  // row of C
    if (tx >= w || ty >= w)
        return;  // grid may overshoot the matrix
    int v = 0;
    for (int i = 0; i < w; i++)
    {
        // dot product of row ty of A with column tx of B
        v += A[ty * w + i] * B[i * w + tx];
    }
    C[ty * w + tx] = v;
} |
5,096 | #include <stdio.h>
// add executes on the device
// add is called from the host
// add runs on the device, so a, b and c must point to device memory
// Single-thread device addition: store *a + *b into *c (device pointers).
__global__ void add(int *a, int *b, int *c){
    c[0] = a[0] + b[0];
}
// Minimal host driver: copy two ints to the device, add them with a
// single-thread kernel, copy the sum back and print it.
int main(void){
int a, b, c; // Host copies of a, b and c
int *d_a, *d_b, *d_c; // Device copies of a, b and c
int size = sizeof(int);
// Allocate device memory for the copies of a, b and c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Input values
a = 2;
b = 7;
// Copy the inputs to the device
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
// Launch the add() kernel on the GPU
add<<<1,1>>>(d_a, d_b, d_c);
// Copy the result back to the host (this blocking copy also synchronizes)
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
// Clean up
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
printf("%d",c);
return 0;
}
|
5,097 | //nvcc -ptx EM2.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"
// Half-step drift plus boundary handling for particles in a wedge-shaped
// cylinder (radius Rp, length Lp, wedge angle PHIp).
// Per-particle arrays (length parNum): x,y,z / vx,vy,vz are Cartesian
// position and velocity, updated in place; r, phi, vr receive the
// cylindrical radius, azimuth and radial velocity; parDelete[n] == 1
// marks particle n as removed (also set here when r exceeds Rp).
// NOTE(review): dr and dz are accepted but never used in this routine.
// NOTE(review): phi = atan(y/x) folds quadrants II/III onto IV/I —
// presumably acceptable for a narrow wedge; confirm against atan2.
__device__ void EM1( double * x,
double * y,
double * z,
double * vx,
double * vy,
double * vz,
double * r,
double * phi,
double * vr,
int * parDelete,
const int parNum,
const double Rp,
const double Lp,
const double PHIp,
const double dr,
const double dz,
const double dt ) {
// Flatten the 2-D grid / 2-D block into one particle index.
int globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
int localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
int threadsPerBlock = blockDim.x*blockDim.y;
int n = localThreadIdx + globalBlockIndex*threadsPerBlock;
// Skip excess threads and already-deleted particles.
if ( n >= parNum || parDelete[n]==1 ){
return;
}
// Drift: advance the position by half a time step.
x[n] = x[n] + 0.5 * vx[n] * dt;
y[n] = y[n] + 0.5 * vy[n] * dt;
z[n] = z[n] + 0.5 * vz[n] * dt;
// Cylindrical coordinates of the new position.
r[n] = sqrt( x[n]*x[n] + y[n]*y[n] );
phi[n] = atan( y[n]/x[n] );
// Particles that leave the outer radius are flagged as deleted.
if (r[n] > Rp){
parDelete[n] = 1;
}
double vx1;
// Rotational symmetry: wrap phi back into [0, PHIp], rotating the
// velocity vector by the wedge angle once per wrap.
while(phi[n]<0){
phi[n] = phi[n] + PHIp;
vx1 = vx[n] * cos(PHIp) - vy[n] * sin(PHIp);
vy[n] = vx[n] * sin(PHIp) + vy[n] * cos(PHIp);
vx[n] = vx1;
}
while(phi[n]>PHIp){
phi[n] = phi[n] - PHIp;
vx1 = vx[n] * cos(PHIp) + vy[n] * sin(PHIp);
vy[n] = -vx[n] * sin(PHIp) + vy[n] * cos(PHIp);
vx[n] = vx1;
}
// Rebuild the Cartesian position from the wrapped angle.
x[n] = r[n] * cos(phi[n]);
y[n] = r[n] * sin(phi[n]);
// Axial boundaries: specular reflection at z = Lp and z = 0.
if (z[n]>Lp){
z[n] = 2*Lp - z[n];
vz[n] = -vz[n];
}
if (z[n]<0){
z[n] = -z[n];
vz[n] = -vz[n];
}
// Radial component of the (possibly rotated) velocity.
vr[n] = vx[n]*cos(phi[n]) + vy[n]*sin(phi[n]) ;
}
// Kernel entry point: forwards all particle arrays and scalar parameters
// to the EM1 device routine (one thread per particle; EM1 does the
// bounds check). The name is a leftover from the MATLAB GPU template
// this file was derived from.
__global__ void processMandelbrotElement(
        double * x,  double * y,   double * z,
        double * vx, double * vy,  double * vz,
        double * r,  double * phi, double * vr,
        int * parDelete,
        const int parNum,
        const double Rp, const double Lp, const double PHIp,
        const double dr, const double dz, const double dt ) {
    EM1(x, y, z, vx, vy, vz, r, phi, vr, parDelete, parNum, Rp, Lp, PHIp, dr, dz, dt);
}
|
5,098 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
/*#define M(row, col) *(M.elements + (row) * M.width + (col))*/
// Row-major matrix: element (row, col) lives at elements[row * width + col].
typedef struct {
int width;        // number of columns
int height;       // number of rows
float* elements;  // row-major data, length width * height
} Matrix;
//a h w B h w C
// Host-side triple-loop matrix product: C = A * B (row-major).
// Assumes A.width == B.height and C sized A.height x B.width.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    for (int row = 0; row < A.height; row++) {
        for (int col = 0; col < B.width; col++) {
            C.elements[row * C.width + col] = 0;
            // accumulate the dot product of row `row` of A and column `col` of B
            for (int k = 0; k < A.width; k++) {
                C.elements[row * C.width + col] +=
                        A.elements[row * A.width + k]
                        * B.elements[k * B.width + col];
            }
        }
    }
}
// Fill A with pseudo-random floats uniform in [0, 10].
void fill_Matrix(Matrix A)
{
    for (int row = 0; row < A.height; row++) {
        for (int col = 0; col < A.width; col++) {
            const float sample = rand() / (float)RAND_MAX * 10;
            A.elements[row * A.width + col] = sample;
        }
    }
}
// Intentionally a no-op: the printing loop is commented out — presumably
// to keep large benchmark runs from flooding stdout. Uncomment the body
// to inspect small matrices.
void print_Matrix(Matrix A)
{
/*for (int i = 0; i < A.height; i++) {*/
/*for (int j = 0; j < A.width; j++) {*/
/*printf("%4.1f ", A.elements[i * A.width + j]);*/
/*}*/
/*printf("\n");*/
/*}*/
}
// Driver: build two random n x n matrices (n = 2^argv[1]), multiply them
// on the CPU, and (optionally) print the operands and result.
// Fixes vs. original: the three element buffers were never freed (leak),
// and the mallocs were never checked even though n comes from the
// command line and can be made arbitrarily large.
int main(int argc, char **argv)
{
    if (argc != 2) {
        printf("usage: n\n");
        return -1;
    }
    int nnn = atoi(argv[1]);
    int n = 1 << nnn;            /* matrices are n x n, n a power of two */
    srand(time(0));
    Matrix A, B, C;
    A.width = A.height = n;
    A.elements = (float *)malloc(sizeof(float) * n * n);
    B.width = B.height = n;
    B.elements = (float *)malloc(sizeof(float) * n * n);
    C.width = C.height = n;
    C.elements = (float *)malloc(sizeof(float) * n * n);
    if (A.elements == NULL || B.elements == NULL || C.elements == NULL) {
        printf("allocation failed\n");
        free(A.elements);
        free(B.elements);
        free(C.elements);
        return -1;
    }
    fill_Matrix(A);
    print_Matrix(A);
    printf("\n");
    fill_Matrix(B);
    print_Matrix(B);
    printf("\n");
    MatMul(A, B, C);
    print_Matrix(C);
    /* release the matrices (the original leaked all three) */
    free(A.elements);
    free(B.elements);
    free(C.elements);
    return 0;
}
|
5,099 | #include <cstdio>
#include <cstdlib>
#include <cmath>
#define N 9999 // number of bodies
#define MASS 0 // row in array for mass
#define X_POS 1 // row in array for x position
#define Y_POS 2 // row in array for y position
#define Z_POS 3 // row in array for z position
#define X_VEL 4 // row in array for x velocity
#define Y_VEL 5 // row in array for y velocity
#define Z_VEL 6 // row in array for z velocity
#define G 10 // "gravitational constant" (not really)
float dt = 0.05; // time interval
// JJ: one could compute the distance matrix using tiles first in pararrel, then compute forces, and finnaly update the velocities and positions. In this naive project, I chose not to implement it.
// each thread computes new position of one body
// One thread per body: accumulate pairwise forces on body i from every
// other body, then integrate velocity and position (explicit Euler, dt).
// Layout: dev_body is N rows of 7 floats [MASS, X/Y/Z_POS, X/Y/Z_VEL].
// Fix vs. original: the inner loop condition `j < N && j != i` STOPPED
// the whole loop at j == i, so body i only felt bodies 0..i-1; the loop
// must instead skip j == i and continue. Also use float math (sqrtf,
// float literals) in this float kernel.
__global__ void nbody(float *dev_body, float dt) {
    // Index of body i
    int i = blockDim.x*blockIdx.x + threadIdx.x;
    if (i < N)  // excess threads past the body count do nothing
    {
        float x_diff, y_diff, z_diff, r;
        float Fx_dir = 0, Fy_dir = 0, Fz_dir = 0;
        for (int j = 0; j < N; j++)  // force on body i due to body j
        {
            if (j == i) continue;    // no self-interaction
            x_diff = dev_body[j*7+X_POS] - dev_body[i*7+X_POS];
            y_diff = dev_body[j*7+Y_POS] - dev_body[i*7+Y_POS];
            z_diff = dev_body[j*7+Z_POS] - dev_body[i*7+Z_POS];
            // distance between bodies i and j
            r = sqrtf(x_diff * x_diff + y_diff * y_diff + z_diff * z_diff);
            // NOTE(review): Newtonian gravity would divide by r*r; the
            // original divides by r (flagged by the in-source "JJ"
            // comment too) — kept to preserve the simulation's behavior.
            float F = G * dev_body[i*7+MASS] * dev_body[j*7+MASS] / r;
            if (r > 10.0f)
            {
                Fx_dir += F * x_diff / r;  // resolve and accumulate forces
                Fy_dir += F * y_diff / r;
                Fz_dir += F * z_diff / r;
            }
            else
            {
                // close encounter: repulsive force to avoid the singularity
                Fx_dir -= F * x_diff / r;
                Fy_dir -= F * y_diff / r;
                Fz_dir -= F * z_diff / r;
            }
        }
        // velocity update: a = F / m
        dev_body[i*7+X_VEL] += Fx_dir * dt / dev_body[i*7+MASS];
        dev_body[i*7+Y_VEL] += Fy_dir * dt / dev_body[i*7+MASS];
        dev_body[i*7+Z_VEL] += Fz_dir * dt / dev_body[i*7+MASS];
        // position update
        dev_body[i*7+X_POS] += dev_body[i*7+X_VEL] * dt;
        dev_body[i*7+Y_POS] += dev_body[i*7+Y_VEL] * dt;
        dev_body[i*7+Z_POS] += dev_body[i*7+Z_VEL] * dt;
    }
}
// Driver: random initial positions, then `tmax` Euler steps of the nbody
// kernel, writing a PDB frame per step.
// Fixes vs. original: deprecated cudaThreadSynchronize() replaced with
// cudaDeviceSynchronize(); grid size computed with integer ceil-divide
// instead of ceil() over an integer division; file/host/device resources
// released on the error path.
int main(int argc, char **argv) {
    float *body;      // host array: N rows of 7 floats per body
    float *dev_body;  // device copy of the body array
    int tmax = 0;
    if (argc != 2) {
        fprintf(stderr, "Format: %s { number of timesteps }\n", argv[0]);
        exit (-1);
    }
    tmax = atoi(argv[1]);
    printf("Requested Timesteps: %d.\n", tmax);
    // allocate memory for the bodies (7 floats per body)
    int bodysize = N * 7 * sizeof(float);
    body = (float *)malloc(bodysize);
    cudaMalloc((void**) &dev_body, bodysize);
    // give each body mass 10, a random position and zero velocity
    for (int i = 0; i < N; i++) {
        body[i * 7 + MASS] = 10;
        body[i * 7 + X_POS] = drand48() * 100.0;
        body[i * 7 + Y_POS] = drand48() * 100.0;
        body[i * 7 + Z_POS] = drand48() * 100.0;
        body[i * 7 + X_VEL] = 0.0;
        body[i * 7 + Y_VEL] = 0.0;
        body[i * 7 + Z_VEL] = 0.0;
    }
    // print out initial positions in PDB format
    FILE * fp;
    fp = fopen ("NBody.pdb", "w");
    fprintf(fp, "MODEL %8d\n", 0);
    for (int i = 0; i < N; i++) {
        fprintf(fp, "%s%7d %s %s %s%4d %8.3f%8.3f%8.3f %4.2f %4.3f\n",
        "ATOM", i+1, "CA ", "GLY", "A", i+1, body[i * 7 + X_POS], body[i * 7 + Y_POS], body[i * 7 + Z_POS], 1.00, 0.00);
        fflush(fp);
    }
    fprintf(fp, "TER\nENDMDL\n");
    // step through each time step
    for (int t = 0; t < tmax; t++) {
        // copy nbody info over to the GPU
        cudaMemcpy(dev_body, body, bodysize, cudaMemcpyHostToDevice);
        dim3 blockDim(1024);
        // integer ceil-divide covers all N bodies without an extra block
        dim3 gridDim((N + blockDim.x - 1) / blockDim.x);
        // run nbody calculation
        nbody<<<gridDim, blockDim>>>(dev_body, dt);
        cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess)
        {
            printf("Error: %s when t=%d\n", cudaGetErrorString(err),t);
            // release resources before bailing out
            fclose(fp);
            free(body);
            cudaFree(dev_body);
            return -1;
        }
        // copy nbody info back to the CPU
        cudaMemcpy(body, dev_body, bodysize, cudaMemcpyDeviceToHost);
        // print out positions in PDB format
        fprintf(fp, "MODEL %8d\n", t+1);
        for (int i = 0; i < N; i++) {
            int error = fprintf(fp, "%s%7d %s %s %s%4d %8.3f%8.3f%8.3f %4.2f %4.3f\n","ATOM", i+1, "CA ", "GLY", "A", i+1, body[i * 7 + X_POS], body[i * 7 + Y_POS], body[i * 7 + Z_POS], 1.00, 0.00);
            if (error ==-1)
            {
                printf("printf error when t=%d,i=%d\n", t+1,i);
                break;
            }
            else
                fflush(fp);
        }
        fprintf(fp, "TER\nENDMDL\n");
    } // end of time period loop
    fclose(fp);
    free(body);
    cudaFree(dev_body);
    return 0;
}
|
5,100 | // write your code into this file
#define TILE_X 16
#define TILE_Y 8
#define TILE_Z 8
#define PADDING 1
__global__ void compute_cell(int* in_array, int* out_array, int dim);
// Run `iters` generations of the 3-D cellular automaton over a dim^3 grid.
// Ping-pongs between the caller's buffer and one temporary; on return,
// *dCells points at the buffer holding the final state and the other
// buffer has been freed (so the caller always owns exactly one buffer).
// Fix vs. original: with iters == 0 the old code set *dCells to a freshly
// allocated, UNINITIALIZED buffer and simultaneously freed that same
// buffer; now iters <= 0 leaves *dCells untouched.
void solveGPU(int **dCells, int dim, int iters)
{
    dim3 threadsPerBlock(TILE_X, TILE_Y, TILE_Z);
    // Each block computes a (TILE-2)^3 interior; the outer layer of
    // threads only loads halo cells, hence the TILE-2 strides.
    dim3 numBlocks((int)ceil(dim/(float)(TILE_X-2)), (int)ceil(dim/(float)(TILE_Y-2)), (int)ceil(dim/(float)(TILE_Z-2)));
    if (iters <= 0)
        return;  // nothing to compute; *dCells already holds the state
    int* result_array;
    cudaMalloc((void**)&result_array, dim*dim*dim*sizeof(int));
    int* tmp;
    int* array_in = *dCells;
    int* array_out = result_array;
    cudaFuncSetCacheConfig(compute_cell, cudaFuncCachePreferShared);
    for (int i = 0; i < iters; i++)
    {
        compute_cell<<<numBlocks, threadsPerBlock>>>(array_in, array_out, dim);
        // the buffer just written holds the newest generation
        result_array = array_out;
        tmp = array_in;
        array_in = array_out;
        array_out = tmp;
    }
    *dCells = result_array;  // buffer with the final generation
    cudaFree(array_out);     // the other ping-pong buffer
}
// One generation of the 3-D automaton over a dim^3 grid.
// Each TILE_Z x TILE_Y x TILE_X block covers a (TILE-2)^3 interior
// region; the outermost layer of threads (the "hull") only stages halo
// cells into shared memory and writes nothing back.
// Rule (26-neighbour sum s): s < 4 or s > 5 -> 0; s == 5 -> 1;
// s == 4 -> keep the current value.
__global__ void compute_cell(int* in_array, int* out_array, int dim)
{
__shared__ int tile[TILE_Z][TILE_Y][TILE_X];
// Global cell coordinates, shifted by -1 so hull threads map onto the halo.
int mat_idx_x = blockIdx.x*(blockDim.x-2) + threadIdx.x-1;
int mat_idx_y = blockIdx.y*(blockDim.y-2) + threadIdx.y-1;
int mat_idx_z = blockIdx.z*(blockDim.z-2) + threadIdx.z-1;
int dim2 = dim*dim;
unsigned short thread_exceeds_matrix = 1;
// Stage this thread's cell into shared memory; cells outside the grid
// contribute 0 (dead) to their neighbours.
if ((mat_idx_x < dim) && (mat_idx_y < dim) && (mat_idx_z < dim) && (mat_idx_x >= 0) && (mat_idx_y >= 0) && (mat_idx_z >= 0))
{
thread_exceeds_matrix = 0;
tile[threadIdx.z][threadIdx.y][threadIdx.x] = in_array[mat_idx_x+(mat_idx_y*dim)+(mat_idx_z*dim2)];
}
else
tile[threadIdx.z][threadIdx.y][threadIdx.x] = 0;
__syncthreads();  // the whole tile (incl. halo) must be loaded before reading
int result = 0;
int shared_value = tile[threadIdx.z][threadIdx.y][threadIdx.x];
// Hull threads sit on the tile border and have no full neighbourhood.
int is_hull = 1;
if (threadIdx.x > 0 && threadIdx.y > 0 && threadIdx.z > 0
&& threadIdx.x < (TILE_X-1) && threadIdx.y < (TILE_Y-1) && threadIdx.z < (TILE_Z-1) )
{
is_hull = 0;
}
if (!is_hull)
{
// Sum the 3x3x3 neighbourhood, then subtract the centre cell.
for (int i = -1; i < 2; i++)
{
for (int j = -1; j < 2; j++)
{
for (int k = -1; k < 2; k++)
{
result += tile[threadIdx.z+i][threadIdx.y+j][threadIdx.x+k];
}
}
}
result -= shared_value;
// Apply the survival rule to the neighbour count.
if ((result < 4) || (result > 5))
{
result = 0;
}
else if (result == 5)
{
result = 1;
}
else
{
result = shared_value;
}
}
__syncthreads();
// Write interior, in-bounds cells back to global memory.
if (!is_hull && !thread_exceeds_matrix)
{
out_array[mat_idx_x+(mat_idx_y*dim)+(mat_idx_z*dim2)] = result;
}
/* __syncthreads();
if (mat_idx_x == 0 && mat_idx_y == 0 && mat_idx_z == 0)
{
printf("gpu:\n");
for (int i = 0; i<TILE_X;i++)
for (int j = 0; j<TILE_Y; j++)
{
for (int k = 0;k< TILE_Z; k++)
printf("%d ", tile[i][j][k]);
printf("\n");
}
}
*/
// cell life computation
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.