serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
8,701 | #include<stdio.h>
#include<cuda_runtime.h>
#include<iostream>
// Queries device 0 and prints a short capability summary (name, SM count,
// shared memory per block, thread limits) to stdout.
int main(){
int dev = 0;
cudaDeviceProp devProp;
//FIX: the original ignored the API return code; on a machine without a usable
//CUDA device it would print fields of an uninitialised struct.
cudaError_t err = cudaGetDeviceProperties(&devProp, dev);
if (err != cudaSuccess) {
std::cout << "cudaGetDeviceProperties failed: " << cudaGetErrorString(err) << std::endl;
return 1;
}
std::cout << "GPU device " << dev << ": " << devProp.name << std::endl;
std::cout << "Number of Multiprocessors(SM):" << devProp.multiProcessorCount << std::endl;
std::cout << "sharedMemPerBlock:" << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
std::cout << "maxThreadsPerBlock:" << devProp.maxThreadsPerBlock << std::endl;
std::cout << "maxThreadsPerMultiProcessor(SM):" << devProp.maxThreadsPerMultiProcessor << std::endl;
}
8,702 | /**************************************************
*******PROGRAM FOR ARRAY REDUCTION IN CUDA*********
***************************************************
* EXTERNAL PROGRAMS INCLUDED: *
* *
* SUMS UPTO 2^24 INTS BEFORE REACHING BLOCK LIMIT *
* ARRAY MUST CONTAIN POWER OF TWO ELEMENTS *
* PROGRAM INCLUDES INTS,FLOATS AND DOUBS *
* *
* Call INTS: cuda_asum_intm(ipt/opt array, N#s) *
* Call FLTS: cuda_asum_fltm(ipt/opt array, N#s) *
* Call DOUBS: cuda_asum_doubm(ipt/opt array, N#s) *
* *
***************************************************/
// *** INCLUDED LIBRARIES*** //
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
// ***CUDA ERROR HANDLER*** //
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
// ********************************************************************* //
// **********************INTEGER ARRAY SUMMATION************************ //
// ********************************************************************* //
// ****************************************** //
// ***INTEGERS DEVICE SIDE KERNEL PROGRAM**** //
// ****************************************** //
// Block-level int reduction (tree reduction in dynamic shared memory).
// Each thread first sums TWO global elements, so a block of blockSize threads
// consumes 2*blockSize inputs; the block's partial sum goes to
// d_odat[blockIdx.x]. Dynamic shared memory required: blockSize*sizeof(int).
// Launch with blockDim.x == blockSize.
template <unsigned int blockSize>
__global__ void cuda_asum_int(int *d_idat, int *d_odat)
{
extern __shared__ int sdata[];
//LOAD ELEMENT FROM GLOBAL TO SHARED MEM
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + tid;
sdata[tid] = d_idat[i] + d_idat[i + blockDim.x];
__syncthreads();
//REDUCTION (the barriers stay in uniform control flow: only the store is
//guarded by the tid test, never the __syncthreads itself)
if (blockSize >= 512)
{
if (tid < 256) {sdata[tid] += sdata[tid + 256];} __syncthreads();
}
if (blockSize >= 256)
{
if (tid < 128) {sdata[tid] += sdata[tid + 128];} __syncthreads();
}
if (blockSize >= 128)
{
if (tid < 64) {sdata[tid] += sdata[tid + 64];} __syncthreads();
}
if (tid < 32)
{
//FIX: the final warp-synchronous steps must go through a volatile pointer,
//otherwise the compiler may cache sdata[tid] in a register and lanes never
//see their neighbours' updates. NOTE: on Volta+ (independent thread
//scheduling) prefer explicit __syncwarp() between steps.
volatile int *vsmem = sdata;
if (blockSize >= 64) vsmem[tid] += vsmem[tid + 32];
if (blockSize >= 32) vsmem[tid] += vsmem[tid + 16];
if (blockSize >= 16) vsmem[tid] += vsmem[tid + 8];
if (blockSize >= 8) vsmem[tid] += vsmem[tid + 4];
if (blockSize >= 4) vsmem[tid] += vsmem[tid + 2];
if (blockSize >= 2) vsmem[tid] += vsmem[tid + 1];
}
//WRITE RESULT TO GLOB MEM
if(tid == 0) d_odat[blockIdx.x] = sdata[0];
}
// ****************************************** //
// ***INTS HOST SIDE KERNEL CALLER PROGRAM*** //
// ****************************************** //
//DEVICE ARRAY (d_idat) NEEDS TO BE DEFINED IN PARENT PROGRAM
// Host-side driver: reduces the device array d_idat IN PLACE by repeatedly
// launching cuda_asum_int; the final sum ends up in d_idat[0].
// First-pass geometry: 256 threads per block, each block consumes 512 inputs
// (every thread loads two elements). Assumes N_elem is a power of two, per
// the file header.
// NOTE(review): if 512 < N_elem < 1024 the while loop below never runs and
// FINAL_THREAD is read uninitialized — presumably such sizes are ruled out by
// the power-of-two precondition; confirm.
extern void cuda_asum_intm(int *d_idat, int N_elem) //sqrt(N_elem) must be an int
{
//THREAD AND BLOCK SIZES
unsigned int THREAD_SIZE = 512/2;
unsigned int BLOCK_SIZE = N_elem/512;
unsigned int FINAL_THREAD;
if (N_elem > 512)
{
//SUM THE ARRAY THROUGH MULTIPLE KERNEL CALLS
//Each pass writes BLOCK_SIZE partial sums back into d_idat; the next pass
//reduces those partials, until a single block is enough.
while(BLOCK_SIZE > 1)
{
//RUN REDUCTIONS
switch (THREAD_SIZE)
{
case 512:
cuda_asum_int<512><<<BLOCK_SIZE,THREAD_SIZE,512*sizeof(int)>>>(d_idat,d_idat); break;
case 256:
cuda_asum_int<256><<<BLOCK_SIZE,THREAD_SIZE,256*sizeof(int)>>>(d_idat,d_idat); break;
case 128:
cuda_asum_int<128><<<BLOCK_SIZE,THREAD_SIZE,128*sizeof(int)>>>(d_idat,d_idat); break;
case 64:
cuda_asum_int< 64><<<BLOCK_SIZE,THREAD_SIZE,64*sizeof(int)>>>(d_idat,d_idat); break;
case 32:
cuda_asum_int< 32><<<BLOCK_SIZE,THREAD_SIZE,32*sizeof(int)>>>(d_idat,d_idat); break;
case 16:
cuda_asum_int< 16><<<BLOCK_SIZE,THREAD_SIZE,16*sizeof(int)>>>(d_idat,d_idat); break;
case 8:
cuda_asum_int< 8><<<BLOCK_SIZE,THREAD_SIZE,8*sizeof(int)>>>(d_idat,d_idat); break;
case 4:
cuda_asum_int< 4><<<BLOCK_SIZE,THREAD_SIZE,4*sizeof(int)>>>(d_idat,d_idat); break;
case 2:
cuda_asum_int< 2><<<BLOCK_SIZE,THREAD_SIZE,2*sizeof(int)>>>(d_idat,d_idat); break;
case 1:
cuda_asum_int< 1><<<BLOCK_SIZE,THREAD_SIZE,1*sizeof(int)>>>(d_idat,d_idat); break;
}
//cuda_asum_int<<<BLOCK_SIZE,THREAD_SIZE,512*sizeof(int)>>>(d_idat,d_idat);
//Number of partials left after this pass; it becomes the element count of
//the final single-block pass.
FINAL_THREAD = BLOCK_SIZE;
BLOCK_SIZE = BLOCK_SIZE/512;
}
} else {
FINAL_THREAD = (unsigned int)N_elem;
}
//Final single-block pass: FINAL_THREAD elements, half as many threads (each
//thread loads two elements).
THREAD_SIZE = FINAL_THREAD/2;
BLOCK_SIZE = 1;
//RUN REDUCTIONS
switch (THREAD_SIZE)
{
case 512:
cuda_asum_int<512><<<BLOCK_SIZE,THREAD_SIZE,512*sizeof(int)>>>(d_idat,d_idat); break;
case 256:
cuda_asum_int<256><<<BLOCK_SIZE,THREAD_SIZE,256*sizeof(int)>>>(d_idat,d_idat); break;
case 128:
cuda_asum_int<128><<<BLOCK_SIZE,THREAD_SIZE,128*sizeof(int)>>>(d_idat,d_idat); break;
case 64:
cuda_asum_int< 64><<<BLOCK_SIZE,THREAD_SIZE,64*sizeof(int)>>>(d_idat,d_idat); break;
case 32:
cuda_asum_int< 32><<<BLOCK_SIZE,THREAD_SIZE,32*sizeof(int)>>>(d_idat,d_idat); break;
case 16:
cuda_asum_int< 16><<<BLOCK_SIZE,THREAD_SIZE,16*sizeof(int)>>>(d_idat,d_idat); break;
case 8:
cuda_asum_int< 8><<<BLOCK_SIZE,THREAD_SIZE,8*sizeof(int)>>>(d_idat,d_idat); break;
case 4:
cuda_asum_int< 4><<<BLOCK_SIZE,THREAD_SIZE,4*sizeof(int)>>>(d_idat,d_idat); break;
case 2:
cuda_asum_int< 2><<<BLOCK_SIZE,THREAD_SIZE,2*sizeof(int)>>>(d_idat,d_idat); break;
case 1:
cuda_asum_int< 1><<<BLOCK_SIZE,THREAD_SIZE,1*sizeof(int)>>>(d_idat,d_idat); break;
}
}
// ********************************************************************* //
// ************************FLOAT ARRAY SUMMATION************************ //
// ********************************************************************* //
// ****************************************** //
// ****FLOATS DEVICE SIDE KERNEL PROGRAM***** //
// ****************************************** //
// Block-level float reduction (tree reduction in dynamic shared memory).
// Each thread first sums TWO global elements, so a block of blockSizeflt
// threads consumes 2*blockSizeflt inputs; the block's partial sum goes to
// d_odat[blockIdx.x]. Dynamic shared memory: blockSizeflt*sizeof(float).
//
// FIX: the original called __syncthreads() inside the divergent
// `if (tid < 64)` branch — undefined behavior, since not every thread of the
// block reaches the barrier. Restructured to match the int/double kernels:
// barriers only in uniform control flow, volatile pointer for the final warp.
template <unsigned int blockSizeflt>
__global__ void cuda_asum_flt(float *d_idat, float *d_odat)
{
extern __shared__ float sdataf[];
//LOAD ELEMENT FROM GLOBAL TO SHARED MEM
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + tid;
sdataf[tid] = d_idat[i] + d_idat[i + blockDim.x];
__syncthreads();
//REDUCTION (only the store is guarded by the tid test, never the barrier)
if (blockSizeflt >= 1024)
{
if (tid < 512) {sdataf[tid] += sdataf[tid + 512];} __syncthreads();
}
if (blockSizeflt >= 512)
{
if (tid < 256) {sdataf[tid] += sdataf[tid + 256];} __syncthreads();
}
if (blockSizeflt >= 256)
{
if (tid < 128) {sdataf[tid] += sdataf[tid + 128];} __syncthreads();
}
if (blockSizeflt >= 128)
{
if (tid < 64) {sdataf[tid] += sdataf[tid + 64];} __syncthreads();
}
if (tid < 32)
{
//Final warp: volatile forces each += through shared memory so lanes see
//their neighbours' updates. NOTE: on Volta+ prefer __syncwarp() steps.
volatile float *vsmem = sdataf;
if (blockSizeflt >= 64) vsmem[tid] += vsmem[tid + 32];
if (blockSizeflt >= 32) vsmem[tid] += vsmem[tid + 16];
if (blockSizeflt >= 16) vsmem[tid] += vsmem[tid + 8];
if (blockSizeflt >= 8) vsmem[tid] += vsmem[tid + 4];
if (blockSizeflt >= 4) vsmem[tid] += vsmem[tid + 2];
if (blockSizeflt >= 2) vsmem[tid] += vsmem[tid + 1];
}
//WRITE RESULT TO GLOB MEM
if(tid == 0) d_odat[blockIdx.x] = sdataf[0];
}
// ****************************************** //
// **FLOATS HOST SIDE KERNEL CALLER PROGRAM** //
// ****************************************** //
//DEVICE ARRAY (d_idat) NEEDS TO BE DEFINED IN PARENT PROGRAM
// Host-side driver: reduces the device float array d_idat IN PLACE by
// repeatedly launching cuda_asum_flt; the final sum ends up in d_idat[0].
// First-pass geometry: MAX_THREADS threads per block, each block consumes
// 2*MAX_THREADS inputs. Assumes N_elem is a power of two (file header).
// NOTE(review): the shared-memory byte counts below use sizeof(int) for a
// float array — same size on all CUDA platforms, but sizeof(float) would
// state the intent.
// NOTE(review): the else branch computes FINAL_THREAD = N_elem/2, so the
// final pass runs with N_elem/4 threads — the int/double drivers use N_elem
// (final pass N_elem/2 threads). Confirm which is intended.
// NOTE(review): as in the int driver, FINAL_THREAD is read uninitialized when
// the while loop never executes; presumably ruled out by the preconditions.
extern void cuda_asum_fltm(float *d_idat, int N_elem,int MAX_THREADS) //sqrt(N_elem) must be an int
{
//THREAD AND BLOCK SIZES
unsigned int THREAD_SIZE = MAX_THREADS;
unsigned int BLOCK_SIZE = N_elem/(MAX_THREADS*2);
unsigned int FINAL_THREAD;
//Signed/unsigned comparison: N_elem is promoted to unsigned here — fine for
//the positive sizes this expects.
if (N_elem > THREAD_SIZE)
{
//SUM THE ARRAY THROUGH MULTIPLE KERNEL CALLS
//Each pass writes BLOCK_SIZE partial sums back into d_idat.
while(BLOCK_SIZE > 1)
{
//RUN REDUCTIONS
switch (THREAD_SIZE)
{
case 1024:
cuda_asum_flt<1024><<<BLOCK_SIZE,THREAD_SIZE,1024*sizeof(int)>>>(d_idat,d_idat); break;
case 512:
cuda_asum_flt<512><<<BLOCK_SIZE,THREAD_SIZE,512*sizeof(int)>>>(d_idat,d_idat); break;
case 256:
cuda_asum_flt<256><<<BLOCK_SIZE,THREAD_SIZE,256*sizeof(int)>>>(d_idat,d_idat); break;
case 128:
cuda_asum_flt<128><<<BLOCK_SIZE,THREAD_SIZE,128*sizeof(int)>>>(d_idat,d_idat); break;
case 64:
cuda_asum_flt< 64><<<BLOCK_SIZE,THREAD_SIZE,64*sizeof(int)>>>(d_idat,d_idat); break;
case 32:
cuda_asum_flt< 32><<<BLOCK_SIZE,THREAD_SIZE,32*sizeof(int)>>>(d_idat,d_idat); break;
case 16:
cuda_asum_flt< 16><<<BLOCK_SIZE,THREAD_SIZE,16*sizeof(int)>>>(d_idat,d_idat); break;
case 8:
cuda_asum_flt< 8><<<BLOCK_SIZE,THREAD_SIZE,8*sizeof(int)>>>(d_idat,d_idat); break;
case 4:
cuda_asum_flt< 4><<<BLOCK_SIZE,THREAD_SIZE,4*sizeof(int)>>>(d_idat,d_idat); break;
case 2:
cuda_asum_flt< 2><<<BLOCK_SIZE,THREAD_SIZE,2*sizeof(int)>>>(d_idat,d_idat); break;
case 1:
cuda_asum_flt< 1><<<BLOCK_SIZE,THREAD_SIZE,1*sizeof(int)>>>(d_idat,d_idat); break;
}
//Partials left after this pass feed the final single-block pass.
FINAL_THREAD = BLOCK_SIZE;
BLOCK_SIZE = BLOCK_SIZE/1024;
}
} else {
FINAL_THREAD = (unsigned int)N_elem/2;
}
//Final single-block pass.
THREAD_SIZE = FINAL_THREAD/2;
BLOCK_SIZE = 1;
//RUN REDUCTIONS
switch (THREAD_SIZE)
{
case 1024:
cuda_asum_flt<1024><<<BLOCK_SIZE,THREAD_SIZE,1024*sizeof(int)>>>(d_idat,d_idat); break;
case 512:
cuda_asum_flt<512><<<BLOCK_SIZE,THREAD_SIZE,512*sizeof(int)>>>(d_idat,d_idat); break;
case 256:
cuda_asum_flt<256><<<BLOCK_SIZE,THREAD_SIZE,256*sizeof(int)>>>(d_idat,d_idat); break;
case 128:
cuda_asum_flt<128><<<BLOCK_SIZE,THREAD_SIZE,128*sizeof(int)>>>(d_idat,d_idat); break;
case 64:
cuda_asum_flt< 64><<<BLOCK_SIZE,THREAD_SIZE,64*sizeof(int)>>>(d_idat,d_idat); break;
case 32:
cuda_asum_flt< 32><<<BLOCK_SIZE,THREAD_SIZE,32*sizeof(int)>>>(d_idat,d_idat); break;
case 16:
cuda_asum_flt< 16><<<BLOCK_SIZE,THREAD_SIZE,16*sizeof(int)>>>(d_idat,d_idat); break;
case 8:
cuda_asum_flt< 8><<<BLOCK_SIZE,THREAD_SIZE,8*sizeof(int)>>>(d_idat,d_idat); break;
case 4:
cuda_asum_flt< 4><<<BLOCK_SIZE,THREAD_SIZE,4*sizeof(int)>>>(d_idat,d_idat); break;
case 2:
cuda_asum_flt< 2><<<BLOCK_SIZE,THREAD_SIZE,2*sizeof(int)>>>(d_idat,d_idat); break;
case 1:
cuda_asum_flt< 1><<<BLOCK_SIZE,THREAD_SIZE,1*sizeof(int)>>>(d_idat,d_idat); break;
}
}
// ********************************************************************* //
// ***********************DOUBLE ARRAY SUMMATION************************ //
// ********************************************************************* //
// ****************************************** //
// ****DOUBLES DEVICE SIDE KERNEL PROGRAM**** //
// ****************************************** //
// Block-level double reduction (tree reduction in dynamic shared memory).
// Each thread first sums TWO global elements, so a block of blockSizedoub
// threads consumes 2*blockSizedoub inputs; the block's partial sum goes to
// d_odat[blockIdx.x]. Dynamic shared memory: blockSizedoub*sizeof(double).
template <unsigned int blockSizedoub>
__global__ void cuda_asum_doub(double *d_idat, double *d_odat)
{
extern __shared__ double sdatad[];
//LOAD ELEMENT FROM GLOBAL TO SHARED MEM
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + tid;
sdatad[tid] = d_idat[i] + d_idat[i + blockDim.x];
__syncthreads();
//REDUCTION (only the store is guarded by the tid test, never the barrier)
if (blockSizedoub >= 512)
{
if (tid < 256) {sdatad[tid] += sdatad[tid + 256];} __syncthreads();
}
if (blockSizedoub >= 256)
{
if (tid < 128) {sdatad[tid] += sdatad[tid + 128];} __syncthreads();
}
if (blockSizedoub >= 128)
{
if (tid < 64) {sdatad[tid] += sdatad[tid + 64];} __syncthreads();
}
if (tid < 32)
{
//FIX: the warp-synchronous tail must read shared memory through a volatile
//pointer, otherwise the compiler may keep sdatad[tid] in a register and
//lanes never observe their neighbours' updates. NOTE: on Volta+ prefer
//explicit __syncwarp() between steps.
volatile double *vsmem = sdatad;
if (blockSizedoub >= 64) vsmem[tid] += vsmem[tid + 32];
if (blockSizedoub >= 32) vsmem[tid] += vsmem[tid + 16];
if (blockSizedoub >= 16) vsmem[tid] += vsmem[tid + 8];
if (blockSizedoub >= 8) vsmem[tid] += vsmem[tid + 4];
if (blockSizedoub >= 4) vsmem[tid] += vsmem[tid + 2];
if (blockSizedoub >= 2) vsmem[tid] += vsmem[tid + 1];
}
//WRITE RESULT TO GLOB MEM
if(tid == 0) d_odat[blockIdx.x] = sdatad[0];
}
// ****************************************** //
// *DOUBLES HOST SIDE KERNEL CALLER PROGRAM** //
// ****************************************** //
//DEVICE ARRAY (d_idat) NEEDS TO BE DEFINED IN PARENT PROGRAM
// Host-side driver: reduces the device double array d_idat IN PLACE by
// repeatedly launching cuda_asum_doub; the final sum ends up in d_idat[0].
// First-pass geometry: 256 threads per block, each block consumes 512 inputs.
// Assumes N_elem is a power of two (file header).
// NOTE(review): N_elem is declared double here, unlike the int driver's int —
// the divisions/casts below truncate; presumably an int count is intended.
// NOTE(review): as in the int driver, FINAL_THREAD is read uninitialized when
// 512 < N_elem < 1024 (loop body never runs); presumably ruled out by the
// power-of-two precondition — confirm.
extern void cuda_asum_doubm(double *d_idat, double N_elem) //sqrt(N_elem) must be an int
{
//THREAD AND BLOCK SIZES
unsigned int THREAD_SIZE = 512/2;
unsigned int BLOCK_SIZE = N_elem/512;
unsigned int FINAL_THREAD;
if (N_elem > 512)
{
//SUM THE ARRAY THROUGH MULTIPLE KERNEL CALLS
//Each pass writes BLOCK_SIZE partial sums back into d_idat.
while(BLOCK_SIZE > 1)
{
//RUN REDUCTIONS
switch (THREAD_SIZE)
{
case 512:
cuda_asum_doub<512><<<BLOCK_SIZE,THREAD_SIZE,512*sizeof(double)>>>(d_idat,d_idat); break;
case 256:
cuda_asum_doub<256><<<BLOCK_SIZE,THREAD_SIZE,256*sizeof(double)>>>(d_idat,d_idat); break;
case 128:
cuda_asum_doub<128><<<BLOCK_SIZE,THREAD_SIZE,128*sizeof(double)>>>(d_idat,d_idat); break;
case 64:
cuda_asum_doub< 64><<<BLOCK_SIZE,THREAD_SIZE,64*sizeof(double)>>>(d_idat,d_idat); break;
case 32:
cuda_asum_doub< 32><<<BLOCK_SIZE,THREAD_SIZE,32*sizeof(double)>>>(d_idat,d_idat); break;
case 16:
cuda_asum_doub< 16><<<BLOCK_SIZE,THREAD_SIZE,16*sizeof(double)>>>(d_idat,d_idat); break;
case 8:
cuda_asum_doub< 8><<<BLOCK_SIZE,THREAD_SIZE,8*sizeof(double)>>>(d_idat,d_idat); break;
case 4:
cuda_asum_doub< 4><<<BLOCK_SIZE,THREAD_SIZE,4*sizeof(double)>>>(d_idat,d_idat); break;
case 2:
cuda_asum_doub< 2><<<BLOCK_SIZE,THREAD_SIZE,2*sizeof(double)>>>(d_idat,d_idat); break;
case 1:
cuda_asum_doub< 1><<<BLOCK_SIZE,THREAD_SIZE,1*sizeof(double)>>>(d_idat,d_idat); break;
}
//cuda_asum_int<<<BLOCK_SIZE,THREAD_SIZE,512*sizeof(int)>>>(d_idat,d_idat);
//Partials left after this pass feed the final single-block pass.
FINAL_THREAD = BLOCK_SIZE;
BLOCK_SIZE = BLOCK_SIZE/512;
}
} else {
FINAL_THREAD = (unsigned int)N_elem;
}
// THIS SECTION BEGINS WITH THREADS NEEDED ARE LESS THAN MAXIMUM BLOCK SIZE
//Final single-block pass: FINAL_THREAD elements, half as many threads.
THREAD_SIZE = FINAL_THREAD/2;
BLOCK_SIZE = 1;
//RUN REDUCTIONS
switch (THREAD_SIZE)
{
case 512:
cuda_asum_doub<512><<<BLOCK_SIZE,THREAD_SIZE,512*sizeof(double)>>>(d_idat,d_idat); break;
case 256:
cuda_asum_doub<256><<<BLOCK_SIZE,THREAD_SIZE,256*sizeof(double)>>>(d_idat,d_idat); break;
case 128:
cuda_asum_doub<128><<<BLOCK_SIZE,THREAD_SIZE,128*sizeof(double)>>>(d_idat,d_idat); break;
case 64:
cuda_asum_doub< 64><<<BLOCK_SIZE,THREAD_SIZE,64*sizeof(double)>>>(d_idat,d_idat); break;
case 32:
cuda_asum_doub< 32><<<BLOCK_SIZE,THREAD_SIZE,32*sizeof(double)>>>(d_idat,d_idat); break;
case 16:
cuda_asum_doub< 16><<<BLOCK_SIZE,THREAD_SIZE,16*sizeof(double)>>>(d_idat,d_idat); break;
case 8:
cuda_asum_doub< 8><<<BLOCK_SIZE,THREAD_SIZE,8*sizeof(double)>>>(d_idat,d_idat); break;
case 4:
cuda_asum_doub< 4><<<BLOCK_SIZE,THREAD_SIZE,4*sizeof(double)>>>(d_idat,d_idat); break;
case 2:
cuda_asum_doub< 2><<<BLOCK_SIZE,THREAD_SIZE,2*sizeof(double)>>>(d_idat,d_idat); break;
case 1:
cuda_asum_doub< 1><<<BLOCK_SIZE,THREAD_SIZE,1*sizeof(double)>>>(d_idat,d_idat); break;
}
}
|
8,703 | /**
Merges two arrays A and B into M using a merge path
@file pathMerge.cu
@author Dang Vu Laurent Durand Homer
@version 1.0 14/12/20
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctime>
#include <string>
#include <iostream>
#include <stdlib.h>
/**
Verify cuda calls and return cuda error if any
*/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error (with source location) on stderr; terminates the
// process with the error code unless abort is false. Used via gpuErrchk().
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code == cudaSuccess) return;
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
/**
Initialise ascendant array with random values
@param array : the array to fill with random ascendant values
size : Size of the arrays
adder : Each x_i is higher than x_i-1 of a random value between 0 and adder
*/
/**
 Initialise an ascending array with random values.
 @param array : the array to fill with random ascending values
 size : number of elements to write
 adder : each x_i exceeds x_i-1 by a random value between 0 and adder-1
*/
void init_array(int* array, int size, int const adder=10)
{
if (size <= 0) return; //nothing to fill
array[0] = rand()%adder;
//FIX: the original loop started at i = 0, reading array[-1] out of bounds
//and overwriting the seed value just written above.
for(int i = 1; i < size;i++)
{
array[i] = array[i-1] + rand()%adder;
}
}
/**
Print an array of size size
@param a : array to print
size : size of arrays
*/
/**
 Print the first `size` elements of `a` on one line, bracketed.
 @param a : array to print
 size : number of elements
*/
void print_array(int* a, int size)
{
printf("[");
int idx = 0;
while (idx < size)
{
printf("%d " , a[idx]);
++idx;
}
printf("]\n");
}
/**
Sequential version of merge
@param a_k, b_k : array to merge
m_k : merge of a and b
n_a, n_b, n_b : respective sizes of a_k, b_k, m_k
*/
/**
 Sequential (CPU reference) merge of two sorted arrays.
 @param a_k, b_k : sorted arrays to merge
 m_k : receives the merge of a_k and b_k
 n_a, n_b, n_m : respective sizes of a_k, b_k, m_k (n_m = n_a + n_b)
*/
void mergeSeq(int *a_k, int *b_k, int *m_k, int n_a, int n_b, int n_m)
{
int ia = 0;
int ib = 0;
while (ia + ib < n_m)
{
//Take from A when A still has elements and either B is exhausted or
//A's head is strictly smaller (ties go to B, as in the GPU version).
bool takeA = (ia < n_a) && (ib >= n_b || a_k[ia] < b_k[ib]);
if (takeA)
{
m_k[ia + ib] = a_k[ia];
ia++;
}
else
{
m_k[ia + ib] = b_k[ib];
ib++;
}
}
}
/**
Parallel version of merge of A and B with |A| + |B| <= 1024
@param d_a, d_b : device versions of arrays to merge
d_m : device version of merge of a and b
n_a, n_b, n_b : respective sizes of d_a, d_b, d_m
*/
/**
 Parallel merge of A and B with |A| + |B| <= 1024 (merge-path algorithm).
 One thread per output element: thread i binary-searches the cross-diagonal i
 of the merge grid for the point where its output element comes from, then
 writes exactly d_m[i]. No synchronization needed: every thread writes a
 distinct output slot.
 Convention in this code: the .y component indexes into A, the .x component
 into B (K and P are the low/high ends of the diagonal search interval,
 Q the probe point).
 @param d_a, d_b : device arrays (sorted) to merge
 d_m : device output, merge of d_a and d_b
 n_a, n_b, n_m : respective sizes of d_a, d_b, d_m
*/
__device__ void mergeSmall_k(int* d_a, int* d_b, int* d_m, int n_a, int n_b, int n_m){
int i = threadIdx.x;
if(i < n_m)
{
int2 K;
int2 P;
int2 Q;
//Clamp the search diagonal's endpoints to the grid when i exceeds n_a.
if(i > n_a)
{
K.x = i - n_a;
K.y = n_a;
P.x = n_a;
P.y = i - n_a;
}
else
{
K.x = 0;
K.y = i;
P.x = i;
P.y = 0;
}
int offset = 0;
//Binary search along the diagonal until the merge-path crossing is found.
while(1)
{
offset = abs(K.y - P.y)/2;
Q.x = K.x + offset;
Q.y = K.y - offset;
//Probe point is not above the path...
if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1]))
{
//...and not below it: crossing found, emit the smaller head element.
if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x])
{
if(Q.y < n_a && (Q.x == n_b || d_a[Q.y] <= d_b[Q.x]))
{
d_m[i] = d_a[Q.y];
}
else
{
d_m[i] = d_b[Q.x];
}
break;
}
else
{
//Crossing lies further down the diagonal.
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else
{
//Crossing lies further up the diagonal.
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
}
}
/**
Parallel version of merge of A and B of any sizes
@param a, b : device versions of arrays to merge
m : device version of merge of a and b
n_a, n_b, n_b : respective sizes of d_a, d_b, d_m
path : points of the path to cut A and B to pieces to merge
n_path : number of points in the path
nb_partition : number of pieces of A and B (a_k and b_k) to merge with mergeSmall_k
*/
/**
 Parallel merge of A and B of any sizes: one block per path partition.
 Each block copies its slice of A and B into shared memory, merges them with
 mergeSmall_k, and writes the merged slice back to global memory.
 Expected launch: one block per partition, blockDim.x == partition size (pas).
 @param a, b : device arrays to merge
 m : device output, merge of a and b
 n_a, n_b, n_m : respective sizes of a, b, m
 path : points of the path cutting A and B into pieces (path[k]..path[k+1])
 n_path : number of points in the path
 nbPartitions : number of pieces to merge
*/
__global__ void mergeBig_k(int *m, int n_m, int *a, int n_a, int *b, int n_b, int2 *path, int n_path, int nbPartitions)
{
int blockId = blockIdx.x;
int threadId = threadIdx.x;
int i = blockId * blockDim.x + threadId;
if (blockId <= nbPartitions)//one block handles each partition
{
//Partition bounds: (x0,y0)..(x1,y1) index into A (x) and B (y).
int x0, y0, x1, y1;
x0 = path[blockId].x;
y0 = path[blockId].y;
x1 = path[blockId+1].x;
y1 = path[blockId+1].y;
const int dimx=x1-x0;
const int dimy = y1-y0;
//TODO: size these as dimx / dimy / dimx+dimy instead of a fixed 1024
__shared__ int a_k[1024];
__shared__ int b_k[1024];
__shared__ int m_k[1024];
if (threadId < dimx) //fill a_k[i] for 0 <= i < dimx
{
a_k[threadId] = a[x0+threadId];
}
else if (threadId < dimy+dimx)//fill b_k[i] for dimx <= i < dimx+dimy
{
b_k[threadId-dimx] = b[y0+threadId-dimx];
}
__syncthreads();
mergeSmall_k(a_k, b_k, m_k, dimx, dimy, dimx+dimy);
//NOTE(review): every thread of the block writes m[i], even when
//threadId >= dimx+dimy (last, partial partition) — those lanes copy
//uninitialized shared memory. Presumably harmless only if i < n_m always
//covers exactly the merged range; confirm against the launch geometry.
m[i] = m_k[threadId];
}
}
/**
Generate the path to divide A and B into pieces that we'll give to mergeSmall_k
@param pas: size of pieces
path : store the points of the path
n_path : number of points in the path
nb_partition : number of pieces of A and B (a_k and b_k) to merge with mergeSmall_k
d_a, d_b : device versions of arrays to merge
n_a, n_b : respective sizes of d_a, d_b
*/
/**
 Generate the merge path dividing A and B into pieces for mergeBig_k.
 One thread per multiple of `pas`: thread t binary-searches the crossing of
 cross-diagonal t*pas, exactly like mergeSmall_k, but stores the crossing
 coordinates instead of emitting an element.
 @param pas : size of the pieces
 path : stores the points of the path (path[t].x = index in A, .y = index in B)
 n_path : number of points in the path
 d_a, d_b : device arrays to merge
 n_a, n_b : respective sizes of d_a, d_b
*/
__global__ void pathBig_k(int pas, int2* path, int n_path , int* d_a, int n_a ,int* d_b, int n_b)
{
int thread_i = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_i <= (n_a + n_b)/pas) //one thread per multiple of pas within the output size
{
int i = thread_i*pas;
int2 K;
int2 P;
int2 Q;
//Clamp the diagonal endpoints to the merge grid (same setup as mergeSmall_k).
if(i > n_a)
{
K.x = i - n_a;
K.y = n_a;
P.x = n_a;
P.y = i - n_a;
}
else
{
K.x = 0;
K.y = i;
P.x = i;
P.y = 0;
}
int offset = 0;
while(1)
{
//Compute the midpoint of P and K on the diagonal
offset = abs(K.y - P.y)/2;
Q.x = K.x + offset;
Q.y = K.y - offset;
//Probe not above the merge path...
if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1]))
{
//...and not below it: crossing found.
if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x])
{
break;
}
else
{
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else
{
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
//printf("thread : %d => (%d, %d)\n", thread_i, Q.y, Q.x);
//Beware the x/y order: path[t].x stores Q.y (the index in A) and
//path[t].y stores Q.x (the index in B), matching mergeBig_k's reader.
path[thread_i].x=Q.y;
path[thread_i].y=Q.x;
}
//If |m| is not a multiple of pas, thread 0 appends (n_a, n_b) as the last point
if (thread_i==0 && (n_a+n_b)%pas!=0)
{
//printf("thread : %d => (%d, %d)\n", thread_i, n_a, n_b);
path[n_path-1].x=n_a;
path[n_path-1].y=n_b;
}
}
/**
verify that A and B are correctly merge in M
*/
/**
 Verify that tab is sorted and matches the reference tab2.
 @return 1 on success, 0 (after printing a warning) on the first mismatch.
*/
int assertMerge(int *tab, int *tab2, int size)
{
for (int i = 0; i < size - 1; i++)
{
bool ordered = tab[i] <= tab[i+1];
bool matchesRef = tab[i] == tab2[i];
//Heuristic: a zero deep into the array suggests an unwritten slot.
bool suspiciousZero = (i > 10000) && (tab[i] == 0);
if (!ordered || !matchesRef || suspiciousZero)
{
printf("WARNING : Unsuccessful merge on indice %d ...\n", i);
return 0;
}
}
printf("Successful merge !\n");
return 1;
}
/**
Merge 2 lists of arrays {A_i} and {B_i} in {M_i}1<=i<=N
@param argv[1] : size of A
argv[2] : size of B
*/
/**
 Merge two random sorted arrays A and B into M, on GPU (pathBig_k +
 mergeBig_k) and on CPU (mergeSeq), compare results and report timings.
 @param argv[1] : size of A
 argv[2] : size of B
 argv[3] : step (partition size)
*/
int main(int argc, char *argv[])
{
std::clock_t startS, endS;
float seqMergeTime, parMergeTime, DoH, HoD;
srand(time(NULL));
int n_a, n_b;
int pas;
//FIX: the original tested argc >= 3 but read argv[3], an out-of-bounds read
//when exactly two user arguments were supplied; all three are required.
if(argc >= 4)
{
n_a = atoi(argv[1]);
n_b = atoi(argv[2]);
pas = atoi(argv[3]);
}
else
{
n_a = 100;
n_b = 100;
pas = 1024;
}
int n_m = n_a+n_b;
//pas must stay <= 1024 (mergeBig_k's shared buffers and block size)
int nbPartitions = n_m/pas+(n_m%pas!=0); //add 1 when n_m is not a multiple of pas
int n_path = (1 + nbPartitions); //1 (for (0,0)) + one point per piece
printf("========== Merge of A and B ==========\n");
printf("* Size of A : %d\n", n_a);
printf("* Size of B : %d\n", n_b);
printf("* Step : %d\n* Nbr of partitions : %d\n\n", pas, nbPartitions);
//Allocate and initialise a and b on host and device
int *a, *aGPU;
a = (int*)malloc(n_a*sizeof(int));
init_array(a, n_a, 10);
gpuErrchk(cudaMalloc(&aGPU, n_a*sizeof(int)));
int *b, *bGPU;
b = (int*)malloc(n_b*sizeof(int));
init_array(b, n_b, 10);
gpuErrchk(cudaMalloc(&bGPU, n_b*sizeof(int)));
int *m, *mGPU, *mseq;
m = (int*)malloc(n_m*sizeof(int));
mseq = (int*)malloc(n_m*sizeof(int));
gpuErrchk(cudaMalloc(&mGPU, n_m*sizeof(int)));
//Allocate the path
int2 *pathGPU;
gpuErrchk(cudaMalloc(&pathGPU, n_path*sizeof(int2)));
startS = std::clock();
gpuErrchk(cudaMemcpy(aGPU, a, n_a*sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(bGPU, b, n_b*sizeof(int), cudaMemcpyHostToDevice));
endS = std::clock();
HoD = (endS - startS) / (float) CLOCKS_PER_SEC;
printf("Merge of A and B of size %d and %d runing...\n", n_a, n_b);
startS = std::clock();
//================ Parallel : =======================
//FIX: the original banner comments ended with a backslash, so the C++
//line-continuation spliced the NEXT source line into the comment — the
//pathBig_k launch and one clock() call were silently deleted.
pathBig_k<<<nbPartitions/1024+1, 1024>>>(pas, pathGPU, n_path, aGPU, n_a, bGPU, n_b);
mergeBig_k<<<nbPartitions, pas>>>(mGPU, n_m, aGPU, n_a, bGPU, n_b, pathGPU, n_path, nbPartitions);
gpuErrchk(cudaGetLastError()); //catch launch-configuration errors
cudaDeviceSynchronize();
endS = std::clock();
parMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
//Copy device to host
startS = std::clock();
cudaMemcpy(m, mGPU, n_m*sizeof(int), cudaMemcpyDeviceToHost);
endS = std::clock();
DoH = (endS - startS) / (float) CLOCKS_PER_SEC;
printf("Merge done !\n\n");
//================ Sequential : =======================
startS = std::clock();
mergeSeq(a, b, mseq, n_a, n_b, n_m);
endS = std::clock();
seqMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
printf("\n========= Sequential merge : =============\n");
printf("Total time elapsed : %f s\n", seqMergeTime);
printf("\n");
printf("========= Parallel merge : =============\n");
printf("Total time elapsed : %f s\n", parMergeTime+DoH+HoD);
printf("Time running algorithm : %f s\n", parMergeTime);
printf("Time to copy Host to Device : %f s\n", HoD);
printf("Time to copy Device to Host : %f s\n", DoH);
assertMerge(m, mseq, n_m);
printf("Parrallel algorithm is %f times faster than sequential merge !\n", seqMergeTime/parMergeTime);
printf("Parrallel merge is %f times faster than sequential merge !\n", seqMergeTime/(parMergeTime+HoD+DoH));
//FIX: release every allocation (the original leaked a, b, m, mseq and mGPU)
cudaFree(aGPU);
cudaFree(bGPU);
cudaFree(mGPU);
cudaFree(pathGPU);
free(a);
free(b);
free(m);
free(mseq);
return 0;
}
|
8,704 | extern "C" {
#define TILE_SIZE 12
#define NUMBER_THREAD_X 16
#define NUMBER_THREAD_Y 16
#define TILE_SIZE NUMBER_THREAD_X * NUMBER_THREAD_Y * 3 // each block matches with the input tile
// Tiled 2D convolution of a `channels`-deep image I with a mask, writing to P.
// Design: each thread block matches one INPUT tile — every thread loads one
// input pixel into shared memory, but only the central
// (blockDim - 2*radius) threads compute an output pixel. Expected launch:
// blockDim = (NUMBER_THREAD_X, NUMBER_THREAD_Y, channels).
// NOTE(review): TILE_SIZE is #defined twice above this kernel (12, then the
// expression); the redefinition is at best a compiler warning — confirm and
// drop the first definition.
__global__ void convolution_tile(float *I, float *P,float * deviceMaskData,int width, int height,int channels, int maskRows,int maskColumns) {
const int maskRowsRadius = maskRows / 2;
const int maskColumnsRadius = maskColumns / 2;
// Original columns/rows index before shifting
int colOriginal = blockIdx.x * (blockDim.x - maskColumnsRadius*2) + threadIdx.x;
int rowOriginal = blockIdx.y * (blockDim.y - maskRowsRadius*2) + threadIdx.y;
// Thread columns and rows
// (Original cols/rows shifted by the mask radius backwards)
int colT = colOriginal - maskColumnsRadius;
int rowT = rowOriginal - maskRowsRadius;
int depth = threadIdx.z;
// 1st phase: copy from global memory to shared memory (tiling)
// As design choice, we assume that each block matches each input tile
// meaning that each thread loads its own input pixel
// but only the central ones computes the output pixel
__shared__ float Ids[TILE_SIZE];
// NOTE(review): row stride uses blockDim.y — correct only because the block
// is square (16x16); presumably blockDim.x was intended. Confirm.
int sharedMemoryPos = (threadIdx.y * blockDim.y + threadIdx.x)*channels + depth;
// Actual tiling
if (rowT >= 0 && rowT < height && colT >= 0 && colT < width) {
Ids[sharedMemoryPos] = I[(rowT * width + colT) * channels + depth];
}
else { // check for ghost elements
Ids[sharedMemoryPos] = 0.0f;
}
// Wait for other threads in the same block
__syncthreads();
// 2nd phase: evaluate convolution
// This first IF is to check whether we're still inside the image boundaries or not
if (rowT >= 0 && rowT < height && colT >= 0 && colT < width) {
// This second IF is to check whether we're inside the central block area or not (border threads do not compute anything)
// NOTE(review): the upper bounds use a literal 2 — correct only when the
// mask radius is 2 (a 5x5 mask); presumably blockDim - radius was intended.
if (threadIdx.x >= maskColumnsRadius && threadIdx.x < (blockDim.x - 2) && threadIdx.y >= maskRowsRadius && threadIdx.y < (blockDim.y - 2)) {
float pValue = 0;
int startCol = threadIdx.x - maskColumnsRadius;
int startRow = threadIdx.y - maskRowsRadius;
for (int i = 0; i < maskRows; i++) {
for (int j = 0; j < maskColumns; j++) {
int currentCol = startCol + j;
int currentRow = startRow + i;
// Check for ghost elements already done during tiling
// NOTE(review): shared row stride again uses blockDim.y (square-block
// assumption, see above).
float iValue = Ids[(currentRow * blockDim.y + currentCol) * channels + depth];
// NOTE(review): mask row stride is maskRows — correct only for square
// masks; presumably maskColumns was intended. Confirm.
pValue += iValue * deviceMaskData[i * maskRows + j];
}
}
// Store the result inside the output vector P in the global memory
P[(rowT * width + colT) * channels + depth] = pValue;
}
}
}
} |
8,705 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define VEC_LENGTH 20*1024*32
#define NUM_THREADS 32
// Element-wise kernel: dev_c[i] = exp(sin(dev_a[i]) + sin(dev_b[i])).
// One thread per element; threads whose id reaches VEC_LENGTH do nothing.
__global__ void add(float* dev_a, float* dev_b, float* dev_c){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx >= VEC_LENGTH) return;
float s = sin(dev_a[idx]) + sin(dev_b[idx]);
dev_c[idx] = exp(s);
}
// Computes exp(sin(a)+sin(b)) element-wise on CPU and GPU and prints the
// wall-clock time of each.
int main(void){
//FIX: static storage — three 2.5 MB automatic arrays (~7.5 MB) can easily
//overflow a default thread stack.
static float a[VEC_LENGTH];
static float b[VEC_LENGTH];
static float c[VEC_LENGTH];
for(int i=0; i<VEC_LENGTH; i++){
a[i] = (float) rand() / (float) 0xffffffff;
b[i] = (float) rand() / (float) 0xffffffff;
}
clock_t begin, end;
begin = clock();
for(int i=0; i<VEC_LENGTH; i++){
c[i] = exp(sin(a[i]) + sin(b[i]));
}
end = clock();
printf("time spent by CPU is %f sec\n", (double)(end - begin) / CLOCKS_PER_SEC);
begin = clock();
float *dev_a, *dev_b, *dev_c;
cudaMalloc((void **)& dev_a, VEC_LENGTH*sizeof(float));
cudaMalloc((void **)& dev_b, VEC_LENGTH*sizeof(float));
cudaMalloc((void **)& dev_c, VEC_LENGTH*sizeof(float));
cudaMemcpy(dev_a, a, VEC_LENGTH*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, VEC_LENGTH*sizeof(float), cudaMemcpyHostToDevice);
//Ceil-division keeps the launch correct even if VEC_LENGTH stops being a
//multiple of NUM_THREADS.
add<<<(VEC_LENGTH + NUM_THREADS - 1)/NUM_THREADS, NUM_THREADS>>>(dev_a, dev_b, dev_c);
//FIX: the original had src/dst swapped — cudaMemcpy(dev_c, c, ...,
//cudaMemcpyDeviceToHost) — so the results never reached the host array.
cudaMemcpy(c, dev_c, VEC_LENGTH*sizeof(float), cudaMemcpyDeviceToHost);
end = clock();
printf("time spent by GPU is %f sec\n", (double)(end - begin) / CLOCKS_PER_SEC);
//FIX: release device allocations (the original leaked all three buffers).
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
8,706 | /*
autor fredy m
uaem
desonses@gmail.com para mas comentarios
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define N 24 //tamano de los vectores
#define BLOCKS 6 // tamano del bloque(numero de hilos en cada bloque)
/*
suma de vectores de 3 dim, mide el tiempo de ejecucion
*/
// gridDim.x: La primera nos da el n�mero de bloques (M)
// blockDim.x: la segunda el n�mero de hilos que tiene cada bloque (N)
//Global: funcion llamada desde el host y ejecutada en el device(kernel)
// Demo kernel: for every element below N, records the in-block thread index
// in a, the block index in b and the global index in c.
__global__ void Add(float *a, float *b, float *c)
{
int globalId = blockDim.x * blockIdx.x + threadIdx.x;
if (globalId >= N) return;
a[globalId] = threadIdx.x; //thread index within its block
b[globalId] = blockIdx.x;  //block index within the grid
c[globalId] = globalId;    //global (flattened) index
}
// funcion para revision de errores en las funciones de CUDA
// Waits for all queued device work, then reports (without aborting) the most
// recent CUDA error, tagging the report with the caller-supplied message.
__host__ void check_CUDA_Error(const char *mensaje) {
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error == cudaSuccess) return;
printf("ERROR %d: %s (%s)\n", error, cudaGetErrorString(error), mensaje);
}
int main(int argc, char **argv)
{
float *resultado1, *resultado2, *resultado3;
float *resultado11, *resultado12, *resultado13;
float *resultado21, *resultado22, *resultado23;
float *dev_vector1, *dev_vector2, *dev_vector3;
float *dev_vector11, *dev_vector12, *dev_vector13;
float *dev_vector21, *dev_vector22, *dev_vector23;
//reserva de memoria en el host
resultado1 = (float*)malloc(N * sizeof(float));
resultado2 = (float*)malloc(N * sizeof(float));
resultado3 = (float*)malloc(N * sizeof(float));
//reserva de memoria en el host
resultado11 = (float*)malloc(N * sizeof(float));
resultado12 = (float*)malloc(N * sizeof(float));
resultado13 = (float*)malloc(N * sizeof(float));
//reserva de memoria en el host
resultado21 = (float*)malloc(N * sizeof(float));
resultado22 = (float*)malloc(N * sizeof(float));
resultado23 = (float*)malloc(N * sizeof(float));
cudaError_t error;
//reserva de memoria en el device
error = cudaMalloc((void**)&dev_vector1, N * sizeof(float));
error = cudaMalloc((void**)&dev_vector2, N * sizeof(float));
error = cudaMalloc((void**)&dev_vector3, N * sizeof(float));
error = cudaMalloc((void**)&dev_vector11, N * sizeof(float));
error = cudaMalloc((void**)&dev_vector12, N * sizeof(float));
error = cudaMalloc((void**)&dev_vector13, N * sizeof(float));
error = cudaMalloc((void**)&dev_vector21, N * sizeof(float));
error = cudaMalloc((void**)&dev_vector22, N * sizeof(float));
error = cudaMalloc((void**)&dev_vector23, N * sizeof(float));
if (error != cudaSuccess) {
printf("\n ocurrio un error: %s", cudaGetErrorString(error));
}
//lanzamiento del kernel
//calculamos el numero de bloques necesario para un tamano de bloque fijo
int nBloques = N / BLOCKS;
if (N % BLOCKS != 0) {
nBloques = nBloques + 1;
}
int nBloques2 = 1;
int hilosB = BLOCKS;
int hilosB2 = 1;
// declaracion de eventos para medir el tiempo de ejecucion en la GPU
cudaEvent_t start;
cudaEvent_t stop;
// creacion de eventos
cudaEventCreate(&start);
cudaEventCreate(&stop);
// marca de inicio
cudaEventRecord(start, 0);
// codigo a temporizar en el device
//ejecucion del kernel
Add <<<nBloques, hilosB>>>(dev_vector1, dev_vector2, dev_vector3);
check_CUDA_Error("Error Kernel 1");
Add <<<nBloques2, N>>>(dev_vector11, dev_vector12, dev_vector13);
check_CUDA_Error("Error Kernel 2");
Add <<<N, 1>>>(dev_vector21, dev_vector22, dev_vector23);
check_CUDA_Error("Error Kernel 3");
// marca de final
cudaEventRecord(stop, 0);
// sincronizacion GPU-CPU
cudaEventSynchronize(stop);
// calculo del tiempo en milisegundos
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
//recogida de los datos
cudaMemcpy(resultado1, dev_vector1, N * sizeof(float), cudaMemcpyDeviceToHost);
check_CUDA_Error("Error dev_vector1");
cudaMemcpy(resultado2, dev_vector2, N * sizeof(float), cudaMemcpyDeviceToHost);
check_CUDA_Error("Error dev_vector2");
cudaMemcpy(resultado3, dev_vector3, N * sizeof(float), cudaMemcpyDeviceToHost);
check_CUDA_Error("Error dev_vector3");
cudaMemcpy(resultado11, dev_vector11, N * sizeof(float), cudaMemcpyDeviceToHost);
check_CUDA_Error("Error dev_vector11");
cudaMemcpy(resultado12, dev_vector12, N * sizeof(float), cudaMemcpyDeviceToHost);
check_CUDA_Error("Error dev_vector12");
cudaMemcpy(resultado13, dev_vector13, N * sizeof(float), cudaMemcpyDeviceToHost);
check_CUDA_Error("Error dev_vector13");
cudaMemcpy(resultado21, dev_vector21, N * sizeof(float), cudaMemcpyDeviceToHost);
check_CUDA_Error("Error dev_vector21");
cudaMemcpy(resultado22, dev_vector22, N * sizeof(float), cudaMemcpyDeviceToHost);
check_CUDA_Error("Error dev_vector22");
cudaMemcpy(resultado23, dev_vector23, N * sizeof(float), cudaMemcpyDeviceToHost);
check_CUDA_Error("Error dev_vector23");
//impresion de los datos
printf("\n");
printf("vector de %d elementos\n", N);
printf("Lanzamiento con %d bloques y %d hilos en cada bloque (%d hilos)\n", nBloques, BLOCKS,nBloques*hilosB);
printf(">indice de hilo: \n");
for (int i = 0; i < N; i++) {
printf("%.0f, ", resultado1[i]);
}
printf("\n");
printf(">indice de bloque: \n");
for (int i = 0; i < N; i++) {
printf("%.0f, ", resultado2[i]);
}
printf("\n");
printf(">indice global: \n");
for (int i = 0; i < N; i++) {
printf("%.0f, ", resultado3[i]);
}
//
printf("\n");
printf("\n");
printf("vector de %d elementos\n", N);
printf("Lanzamiento con %d bloques (%d hilos)\n", nBloques2, nBloques*hilosB);
//impresion de los datos
printf(">indice de hilo: \n");
for (int i = 0; i < N; i++) {
printf("%.0f, ", resultado11[i]);
}
printf("\n");
printf(">indice de bloque: \n");
for (int i = 0; i < N; i++) {
printf("%.0f, ", resultado12[i]);
}
printf("\n");
printf(">indice global: \n");
for (int i = 0; i < N; i++) {
printf("%.0f, ", resultado13[i]);
}
//
printf("\n");
printf("\n");
printf("vector de %d elementos\n", N);
printf("Lanzamiento con %d bloques (%d hilos)\n", N, hilosB2);
//impresion de los datos
printf(">indice de hilo: \n");
for (int i = 0; i < N; i++) {
printf("%.0f, ", resultado21[i]);
}
printf("\n");
printf(">indice de bloque: \n");
for (int i = 0; i < N; i++) {
printf("%.0f, ", resultado22[i]);
}
printf("\n");
printf(">indice global: \n");
for (int i = 0; i < N; i++) {
printf("%.0f, ", resultado23[i]);
}
printf("\n");
printf("\n");
// impresion de resultados
printf("> Tiempo de ejecucion: %f ms\n", elapsedTime);
//liberacion de memoria del device
cudaFree(dev_vector1);
cudaFree(dev_vector2);
cudaFree(dev_vector3);
cudaFree(dev_vector11);
cudaFree(dev_vector12);
cudaFree(dev_vector13);
cudaFree(dev_vector21);
cudaFree(dev_vector22);
cudaFree(dev_vector23);
//liberacion de memoria del host
free(resultado1);
free(resultado2);
free(resultado3);
free(resultado11);
free(resultado12);
free(resultado13);
free(resultado21);
free(resultado22);
free(resultado23);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("\n...");
fflush(stdin);
char tecla = getchar();
return 0;
}
|
8,707 | #include "includes.h"
// Coalesced copy, two consecutive elements per thread.
// Each guard is applied per element so that an odd-length array still gets
// its final element copied (the original pair-guard `i+1 < N` silently
// dropped element N-1 when N was odd).
__global__ void coalesced2(float *A, float *C, const int N)
{
    int i = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
    if (i < N)     C[i]     = A[i];
    if (i + 1 < N) C[i + 1] = A[i + 1];
} |
8,708 | /*
* EzTopUpdater.cpp
*
* Created on: 23 янв. 2016 г.
* Author: aleksandr
*/
#include "EzTopUpdater.h"
#include "SmartIndex.h"
/*
 * indx must lie in the range [0, sizeX-1]
 */
__device__
// Functor applied per column index `indx` along the top boundary of the Ez
// field. The first statement computes the new top-row value from interior
// rows and two stored time levels of boundary history (EzTop); the loop then
// ages that history (level 1 <- level 0) and captures the three top-most rows.
// NOTE(review): the coefficient pattern resembles a second-order absorbing
// boundary condition (Mur-type), but that intent cannot be confirmed from
// this file alone.
void EzTopUpdater::operator() (const int indx) {
int m = indx;
// Update top row from interior values and saved history.
Ez(m, sizeY - 1) = coeff[0]*(Ez(m, sizeY - 3) + EzTop(0, 1, m)) +
coeff[1] * (EzTop(0, 0, m) + EzTop(2, 0, m) - Ez(m, sizeY - 2) - EzTop(1, 1, m)) +
coeff[2] * EzTop(1, 0, m) - EzTop(2, 1, m);
// Shift stored history and record the current three top-most rows.
for (int n = 0; n < 3; n++) {
EzTop(n, 1, m) = EzTop(n, 0, m);
EzTop(n, 0, m) = Ez(m, sizeY - 1 - n);
}
}
|
8,709 | #include <cstdio>
#include <cstdlib>
#include <vector>
// One thread per output position i: walk the bucket histogram's running sum
// until it exceeds i, and write the corresponding bucket index to key[i].
// The original placed __syncthreads() inside a loop whose trip count differs
// per thread — undefined behaviour (threads of the block cannot all reach the
// barrier). The barriers are unnecessary here: `bucket` is only read.
// Also adds an `i < n` guard so the kernel tolerates over-sized launches.
__global__ void sort(int *key, int *bucket, int n, int range) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  int j = 0;
  int k = bucket[0];
  while (k <= i) {
    j++;
    k += bucket[j];
  }
  key[i] = j;
}
// Zero-initialise the histogram; launched with one thread per bucket entry.
__global__ void bucket1(int *bucket){
  const int idx = threadIdx.x + blockDim.x * blockIdx.x;
  bucket[idx] = 0;
}
// Histogram the keys: one thread per key, counted with an atomic increment.
__global__ void bucket2(int *key, int *bucket){
  const int idx = threadIdx.x + blockDim.x * blockIdx.x;
  atomicAdd(&bucket[key[idx]], 1);
}
// Bucket-sort driver: fills `key` with random values in [0, range), builds a
// histogram on the GPU, then rewrites `key` in sorted order and prints both.
int main() {
  const int n = 50;     // number of keys
  const int range = 5;  // keys take values 0 .. range-1
  int *key = NULL;
  int *bucket = NULL;
  cudaMallocManaged(&key, n * sizeof(int));
  cudaMallocManaged(&bucket, range * sizeof(int));
  // Generate and echo the unsorted input.
  for (int idx = 0; idx < n; idx++) {
    key[idx] = rand() % range;
    printf("%d ", key[idx]);
  }
  printf("\n");
  bucket1<<<1, range>>>(bucket);          // clear the histogram
  bucket2<<<1, n>>>(key, bucket);         // count key occurrences
  sort<<<1, n>>>(key, bucket, n, range);  // emit keys in sorted order
  cudaDeviceSynchronize();
  // Echo the sorted result.
  for (int idx = 0; idx < n; idx++) {
    printf("%d ", key[idx]);
  }
  printf("\n");
  cudaFree(key);
  cudaFree(bucket);
}
|
8,710 | #include "stdio.h"
#include "time.h"
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
// Defining number of elements in Array
#define N 10000000
// Defining Kernel function for vector addition
// Element-wise vector addition: d_c[i] = d_a[i] + d_b[i] for i < N.
// Uses blockDim.x instead of the original hard-coded 1000, so the kernel
// stays correct if the launch configuration ever changes (the current launch
// uses 1000 threads per block, so behaviour is identical today).
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c)
{
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid < N)
		d_c[tid] = d_a[tid] + d_b[tid];
}
// Host driver: allocates and fills two N-element vectors, adds them on the
// GPU, and reports wall-clock time around the kernel launch.
int main(void)
{
	// Host arrays.
	int *h_a, *h_b, *h_c;
	h_a = (int *)malloc(sizeof(int) * N);
	h_b = (int *)malloc(sizeof(int) * N);
	h_c = (int *)malloc(sizeof(int) * N);
	// Device pointers.
	int *d_a, *d_b, *d_c;
	// Initialise the two input arrays.
	for (int i = 0; i < N; i++)
	{
		h_a[i] = i + 1;
		h_b[i] = i - 1;
	}
	// Device allocations.
	cudaMalloc((void **)&d_a, sizeof(int) * N);
	cudaMalloc((void **)&d_b, sizeof(int) * N);
	cudaMalloc((void **)&d_c, sizeof(int) * N);
	// Copy inputs host -> device.
	cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice);
	// Start timing.
	clock_t start_h = clock();
	// Launch with 10000 blocks of 1000 threads (covers all N elements).
	gpuAdd <<<10000, 1000 >>>(d_a, d_b, d_c);
	// cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize;
	// it blocks until the kernel finishes so the timing below is meaningful.
	cudaDeviceSynchronize();
	// Stop timing.
	clock_t end_h = clock();
	// Copy result device -> host (also a synchronisation point).
	cudaMemcpy(h_c, d_c, N * sizeof(int), cudaMemcpyDeviceToHost);
	printf("Vector addition on GPU \n");
	// Elapsed CPU wall time around the launch.
	double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
	printf("N = %d \n", N);
	printf("Execute time: %f seconds \n", time_h);
	// Print the last ten results as a spot check.
	for (int i = 9999990; i < N; i++)
	{
		printf("The sum of %d element is %d + %d = %d\n", i, h_a[i], h_b[i], h_c[i]);
	}
	// Release device memory.
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	system("pause");
	return 0;
}
|
8,711 | /*
*
* Jason Yik
* jyik@usc.edu
* EE451 Final Project
*
* CUDA implementation of DCSC matrix multiplication.
* Implementation without shared memory.
*
* Run on USC HPC:
* srun -n1 --gres=gpu:1 ./parallel <n> <nnz>
*
* Run with executable in a graphs folder containing n_nnz_x graphs
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define GRID_WIDTH 128
#define BLOCK_WIDTH 16
#define BLOCK_HEIGHT 32
//TODO: possibly increase block width, decrease block height since there will be max 8 nonzeroes per col on avg
typedef struct {
char column;
int *JC;
int *IR;
int *NUM;
int n; // number of columns
int nzc; // non zero columns/rows
int nnz; // number of nonzeroes
} cs_matrix_t;
typedef struct {
char column;
int *JC;
int *CP;
int *IR;
int *NUM;
int n; // number of columns
int nzc; // non zero columns/rows
int nnz; // number of nonzeroes
} dcs_matrix_t;
//uses CUDA unified memory
void cuda_cs(cs_matrix_t *m, const char *file, int n, int nnz, char column, int random_seed) {
m->column = column;
m->n = n;
FILE *fp = fopen(file, "r");
int bufc, bufr;
cudaMallocManaged(&(m->JC), sizeof(int) * (n+1));
cudaMallocManaged(&(m->IR), sizeof(int) * nnz);
cudaMallocManaged(&(m->NUM), sizeof(int) * nnz);
srand(random_seed);
//srand(time(0));
int nzc = 0;
int i = 0;
int current_index = 0;
fscanf(fp, "%d", &bufc);
while(!feof(fp)) {
//if blank columns, fill in JC
while(i < bufc) {
m->JC[i] = current_index;
i++;
}
m->JC[i] = current_index;
nzc++;
while(!feof(fp) && i == bufc) {
fscanf(fp, "%d", &bufr);
m->IR[current_index] = bufr;
m->NUM[current_index] = rand() % 100;
current_index++;
fscanf(fp, "%d", &bufc);
}
i++;
}
//fill in the remainder of JC
while(i <= n) {
//current_index should now be greater than size of IR/NUM
m->JC[i] = current_index;
i++;
}
m->nnz = current_index;
m->nzc = nzc;
fclose(fp);
return;
}
//uses CUDA unified memory
void cuda_dcs(cs_matrix_t *m, dcs_matrix_t *d) {
d->column = m->column;
d->IR = m->IR;
d->NUM = m->NUM;
d->n = m->n;
d->nnz = m->nnz;
d->nzc = m->nzc;
cudaMallocManaged(&(d->JC), sizeof(int) * m->nzc);
cudaMallocManaged(&(d->CP), sizeof(int) * (m->nzc +1));
int current_index = 0;
for(int i = 0; i < m->n; i++) {
if(m->JC[i] == m->JC[i+1]) {
continue;
}
else {
d->JC[current_index] = i;
d->CP[current_index] = m->JC[i];
current_index++;
}
}
d->CP[current_index] = m->nnz;
//invalidate m
cudaFree(m->JC);
return;
}
// Iterative binary search over a sorted int array.
// Returns the index of `target`, or -1 if it is absent.
__device__ int binary_search(int *arr, int len, int target) {
    int lo = 0;
    int hi = len - 1;
    while (lo <= hi) {
        const int mid = lo + (hi - lo) / 2;
        if (arr[mid] == target)
            return mid;
        if (arr[mid] < target)
            lo = mid + 1;
        else
            hi = mid - 1;
    }
    return -1; // not found
}
// DCSC sparse x sparse multiply accumulating into dense C (row-major, n x n).
// Grid layout: each block owns `num_cols_per_block` consecutive non-empty
// columns of B; within a block, threadIdx.x strides over those columns and
// threadIdx.y strides over the nonzeros of each column (both round-robin).
// C is updated with atomicAdd, so blocks/threads may overlap freely on C.
__global__ void device_multiply(dcs_matrix_t A, dcs_matrix_t B, int *C, int num_cols_per_block, int n) {
int block_first = blockIdx.x * num_cols_per_block;
if(block_first > B.nzc) return; // more blocks than nzc
int block_last = block_first + num_cols_per_block; //exclusive
if(block_last > B.nzc) block_last = B.nzc;
//TODO: setup shared memory --> while loop for each thread, once all threads break they can sync
//loop for the columns that this will look at
int x = block_first + threadIdx.x; // index in B.JC this thread col is working on
while(x < block_last) {
int j = B.JC[x];       // actual column index of B (and of C)
int first = B.CP[x];   // start of column x's nonzeros in B.IR/B.NUM
int last = B.CP[x+1];  // one past the end
int curr = first + threadIdx.y; // row index in B.IR this thread is working on
//loop for the nonzero elements that this thread will execute on
while(curr < last) {
//do the multiplication, remember to atomicAdd for C
int brow = B.IR[curr];
int bval = B.NUM[curr];
// Find column `brow` of A among A's stored (non-empty) columns.
int apos = binary_search(A.JC, A.nzc, brow);
if(apos != -1) {
int acurr = A.CP[apos];
int alast = A.CP[apos+1];
int i, aval;
while(acurr != alast) { // iterate over elements in column brow of A
i = A.IR[acurr];
aval = A.NUM[acurr];
// C[i * n + j] += aval * bval;
atomicAdd(C + (i*n + j), aval * bval); // race conditions may occur within this thread row
acurr++;
}
}
curr += blockDim.y; // next non-zero assigned round robin
}
x += blockDim.x; // next column is assigned round robin
}
//note: threads don't have to wait for each other to sync, some can be on different columns than others no problem
}
// Loads two DCSC matrices from graph files, multiplies them on the GPU into
// dense C (device pointer, n x n ints), and prints matrix stats plus the
// elapsed wall time (which includes file parsing and format conversion, not
// just the kernel). Frees all unified-memory arrays before returning.
void parallel_multiply(int *C, int n, int nnz, char *Afile, char *Bfile, int Arseed, int Brseed) {
//start timer
struct timespec start, computation_done;
double time;
if( clock_gettime(CLOCK_REALTIME, &start) == -1) { perror("clock gettime");}
//setup: parse each file into CSC, then compress to DCSC
cs_matrix_t m1;
dcs_matrix_t A;
cuda_cs(&m1, Afile, n, nnz, 1, Arseed);
cuda_dcs(&m1, &A);
cs_matrix_t m2;
dcs_matrix_t B;
cuda_cs(&m2, Bfile, n, nnz, 1, Brseed);
cuda_dcs(&m2, &B);
//call device multiply
dim3 dimGrid(GRID_WIDTH);
dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
// Spread B's non-empty columns evenly over the grid (ceiling division).
double nd = (double)B.nzc / (double)GRID_WIDTH;
int num_cols_per_block = (int) ceil(nd);
device_multiply<<<dimGrid, dimBlock>>>(A, B, C, num_cols_per_block, n);
cudaDeviceSynchronize(); // in order to access unified memory
//stop timer for computation
if( clock_gettime(CLOCK_REALTIME, &computation_done) == -1) { perror("clock gettime");}
//print out time for finishing computation and copying back data
time = (computation_done.tv_sec - start.tv_sec)+ (double)(computation_done.tv_nsec - start.tv_nsec)/1e9;
printf("DCSC A nnz: %d, A nzc: %d, B nnz: %d, B nzc: %d\nExecution Time: %f\n", A.nnz, A.nzc, B.nnz, B.nzc, time);
//free unified memory
cudaFree(A.JC);
cudaFree(A.CP);
cudaFree(A.IR);
cudaFree(A.NUM);
cudaFree(B.JC);
cudaFree(B.CP);
cudaFree(B.IR);
cudaFree(B.NUM);
}
/*
 * Entry point: parses n and nnz, then runs num_iterations matrix products
 * over graph files named "<n>_<nnz>_<i>" in the working directory.
 */
int main(int argc, char **argv) {
if(argc < 3) {
printf("arguments: n nnz\n");
return 1;
}
int n = atoi(argv[1]);
int nnz = atoi(argv[2]);
int num_iterations = 5;
int *C = (int *) malloc (sizeof(int)*n*n);
int *gpu_C;
cudaMalloc((void**)&gpu_C, sizeof(int)*n*n);
/* Larger buffers + snprintf: the original sprintf into char[20] could
 * overflow for large n/nnz values embedded in the file name. */
char Afile[64];
char Bfile[64];
//number of iterations of the program
for(int it = 0; it < num_iterations; it++) {
//initialize C to zero on the host, then push it to the device
for(int i = 0; i < n*n; i++) {
C[i] = 0;
}
cudaMemcpy(gpu_C, C, sizeof(int)*n*n, cudaMemcpyHostToDevice);
snprintf(Afile, sizeof(Afile), "%d_%d_%d", n, nnz, it);
snprintf(Bfile, sizeof(Bfile), "%d_%d_%d", n, nnz, (it+1) % num_iterations);
//execute multiplication
parallel_multiply(gpu_C, n, nnz, Afile, Bfile, 1, 1);
//verify that C is correct here - deleted for sake of execution time
}
cudaFree(gpu_C);
free(C);
return 0;
}
|
8,712 | #include "includes.h"
//#define __OUTPUT_PIX__
#define BLOCK_SIZE 32
__constant__ __device__ float lTable_const[1064];
__constant__ __device__ float mr_const[3];
__constant__ __device__ float mg_const[3];
__constant__ __device__ float mb_const[3];
// Bilinear image resampling kernel (3-channel planar output), resampling
// first along y into per-channel temporaries dev_C*_tmp, then along x into
// the output planes. One thread per destination pixel (x_pos, y_pos);
// xas/xbs/xwts and yas/ybs/ywts are precomputed source indices, destination
// indices, and weights for the x and y passes.
// NOTE(review): several lines below look defective — flagged inline. This
// kernel is documented as-is rather than rewritten; confirm against the
// original piotr-toolbox-style imResample code it appears derived from.
__global__ void lin2lin_resmpl_messy_gpu_kernel(float *dev_in_img, float *dev_out_img, float *dev_C0_tmp, float *dev_C1_tmp, float *dev_C2_tmp, int org_wd, int org_ht, int dst_wd, int dst_ht, int n_channels, float r, int hn, int wn, int xbd0, int xbd1, int ybd0, int ybd1, int *xas_const, int *xbs_const, float *xwts_const, int *yas_const, int *ybs_const, float *ywts_const)
{
unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x);
unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y);
if ((x_pos < dst_wd) && (y_pos < dst_ht)) {
int xa, ya, yb;
float wt, wt1;
// Row pointers into the three input planes (A*) and output planes (B*).
float *A00, *A01, *A02, *A03, *B00;
float *A10, *A11, *A12, *A13, *B10;
float *A20, *A21, *A22, *A23, *B20;
float *A0 = dev_in_img + 0;
float *B0 = dev_out_img + (0 * dst_ht * dst_wd);
float *A1 = dev_in_img + 1;
float *B1 = dev_out_img + (1 * dst_ht * dst_wd);
float *A2 = dev_in_img + 2;
float *B2 = dev_out_img + (2 * dst_ht * dst_wd);
int y1 = 0;
// Locate this thread's position y1 in the y-resampling tables.
if (org_ht > dst_ht) {
// Downscaling: advance y1 past groups of table entries sharing one
// destination row.
// NOTE(review): `yb` is read in the while-condition below before it is
// ever assigned (its first assignment is further down, from
// ybs_const[y1]) — this reads an uninitialized variable.
int m = 1;
for (int iter = 0; iter < y_pos; iter++) {
while (y1 + m < hn && yb == ybs_const[y1 + m])
m++;
y1 += m;
}
wt = ywts_const[y1];
wt1 = 1 - wt;
} else {
// Upscaling: table entries map 1:1 to destination rows.
y1 = y_pos;
wt = ywts_const[y1];
wt1 = 1 - wt;
}
if (y_pos == 0)
y1 = 0;
ya = yas_const[y1];
// Four consecutive source rows per channel, starting at source row ya.
A00 = A0 + (ya * org_wd * n_channels);
A01 = A00 + (org_wd * n_channels);
A02 = A01 + (org_wd * n_channels);
A03 = A02 + (org_wd * n_channels);
A10 = A1 + (ya * org_wd * n_channels);
// NOTE(review): A11..A13 are derived from A00..A02 (channel 0) instead of
// A10, and A21..A23 likewise from A00..A02 instead of A20 — this looks
// like a copy-paste bug that makes channels 1 and 2 read channel-0 rows.
A11 = A00 + (org_wd * n_channels);
A12 = A01 + (org_wd * n_channels);
A13 = A02 + (org_wd * n_channels);
A20 = A2 + (ya * org_wd * n_channels);
A21 = A00 + (org_wd * n_channels);
A22 = A01 + (org_wd * n_channels);
A23 = A02 + (org_wd * n_channels);
yb = ybs_const[y1];
// Destination row pointers for the three output planes.
B00 = B0 + (yb * dst_wd);
B10 = B1 + (yb * dst_wd);
B20 = B2 + (yb * dst_wd);
int x = 0;
// NOTE(review): this guard compares column index against image width in a
// direction that only admits x_pos beyond the source width; the inverse
// (x_pos < org_wd) seems intended for the y-resampling pass. Confirm.
if (org_wd < x_pos) {
// resample along y direction
if (org_ht > dst_ht) {
// Downscale: accumulate up to 4 weighted source rows (m entries map
// to this destination row).
int m = 1;
while ((y1 + m < hn) && (yb == ybs_const[y1 + m]))
m++;
if (m == 1) {
dev_C0_tmp[x_pos] = A00[x_pos] * ywts_const[y1];
dev_C1_tmp[x_pos] = A10[x_pos] * ywts_const[y1];
dev_C2_tmp[x_pos] = A20[x_pos] * ywts_const[y1];
} else if (m == 2) {
dev_C0_tmp[x_pos] = (A00[x_pos] * ywts_const[y1 + 0]) +
(A01[x_pos] * ywts_const[y1 + 1]);
dev_C1_tmp[x_pos] = (A10[x_pos] * ywts_const[y1 + 0]) +
(A11[x_pos] * ywts_const[y1 + 1]);
dev_C2_tmp[x_pos] = (A20[x_pos] * ywts_const[y1 + 0]) +
(A21[x_pos] * ywts_const[y1 + 1]);
} else if (m == 3) {
dev_C0_tmp[x_pos] = (A00[x_pos] * ywts_const[y1 + 0]) +
(A01[x_pos] * ywts_const[y1 + 1]) +
(A02[x_pos] * ywts_const[y1 + 2]);
dev_C1_tmp[x_pos] = (A10[x_pos] * ywts_const[y1 + 0]) +
(A11[x_pos] * ywts_const[y1 + 1]) +
(A12[x_pos] * ywts_const[y1 + 2]);
dev_C2_tmp[x_pos] = (A20[x_pos] * ywts_const[y1 + 0]) +
(A21[x_pos] * ywts_const[y1 + 1]) +
(A22[x_pos] * ywts_const[y1 + 2]);
} else if (m >= 4) {
dev_C0_tmp[x_pos] = (A00[x_pos] * ywts_const[y1 + 0]) +
(A01[x_pos] * ywts_const[y1 + 1]) +
(A02[x_pos] * ywts_const[y1 + 2]) +
(A03[x_pos] * ywts_const[y1 + 3]);
dev_C1_tmp[x_pos] = (A10[x_pos] * ywts_const[y1 + 0]) +
(A11[x_pos] * ywts_const[y1 + 1]) +
(A12[x_pos] * ywts_const[y1 + 2]) +
(A13[x_pos] * ywts_const[y1 + 3]);
dev_C2_tmp[x_pos] = (A20[x_pos] * ywts_const[y1 + 0]) +
(A21[x_pos] * ywts_const[y1 + 1]) +
(A22[x_pos] * ywts_const[y1 + 2]) +
(A23[x_pos] * ywts_const[y1 + 3]);
}
// Remaining rows beyond the first four.
// NOTE(review): the second statement reassigns A11 again where A21
// would be expected (channel 2 never advances) — likely copy-paste.
for (int y0 = 4; y0 < m; y0++) {
A01 = A00 + (y0 * org_wd);
A11 = A10 + (y0 * org_wd);
A11 = A10 + (y0 * org_wd);
wt1 = ywts_const[y1 + y0];
dev_C0_tmp[x_pos] = dev_C0_tmp[x_pos] + (A01[x_pos] * wt1);
dev_C1_tmp[x_pos] = dev_C1_tmp[x_pos] + (A11[x_pos] * wt1);
dev_C2_tmp[x_pos] = dev_C2_tmp[x_pos] + (A21[x_pos] * wt1);
}
} else {
// Upscale: boundary rows copy a single source row; interior rows blend
// two rows with weights wt / wt1.
bool yBd = y_pos < ybd0 || y_pos >= dst_ht - ybd1;
if (yBd) {
dev_C0_tmp[x_pos] = A00[x_pos];
dev_C1_tmp[x_pos] = A10[x_pos];
dev_C2_tmp[x_pos] = A20[x_pos];
} else {
dev_C0_tmp[x_pos] = (A00[x_pos] * wt) + (A01[x_pos] * wt1);
dev_C1_tmp[x_pos] = (A10[x_pos] * wt) + (A11[x_pos] * wt1);
dev_C2_tmp[x_pos] = (A20[x_pos] * wt) + (A21[x_pos] * wt1);
}
}
}
/* ensure that all threads have calculated the values for C until this point */
__syncthreads();
// resample along x direction (B -> C)
if (x_pos < dst_wd) {
if (org_wd > dst_wd) {
// Downscale in x: xbd0 gives how many taps (2/3/4) each output column
// blends; beyond 4 taps, fall back to the generic gather loop.
if (xbd0 == 2) {
xa = xas_const[x_pos * 4];
B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) +
(dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]);
B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) +
(dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]);
B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) +
(dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]);
} else if (xbd0 == 3) {
xa = xas_const[x_pos * 4];
B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) +
(dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) +
(dev_C0_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]);
B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) +
(dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) +
(dev_C1_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]);
B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) +
(dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) +
(dev_C2_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]);
} else if (xbd0 == 4) {
xa = xas_const[x_pos * 4];
B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) +
(dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) +
(dev_C0_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) +
(dev_C0_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]);
B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) +
(dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) +
(dev_C1_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) +
(dev_C1_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]);
B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) +
(dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) +
(dev_C2_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) +
(dev_C2_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]);
} else if (xbd0 > 4) {
// NOTE(review): every thread with x_pos < dst_wd runs this full
// accumulation loop with +=, so output columns are accumulated
// once per thread — confirm this branch is ever exercised.
for(x = 0; x < wn; x++) {
B00[xbs_const[x]] += dev_C0_tmp[xas_const[x]] * xwts_const[x];
B10[xbs_const[x]] += dev_C1_tmp[xas_const[x]] * xwts_const[x];
B20[xbs_const[x]] += dev_C2_tmp[xas_const[x]] * xwts_const[x];
}
}
} else {
// Upscale in x: left border, interior (two-tap blend with weight r),
// and right border. Each thread redundantly writes the whole row.
for (x = 0; x < xbd0; x++) {
B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x];
B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x];
B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x];
}
for (; x < dst_wd - xbd1; x++) {
B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x] + dev_C0_tmp[xas_const[x] + 1] * (r - xwts_const[x]);
B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x] + dev_C1_tmp[xas_const[x] + 1] * (r - xwts_const[x]);
B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x] + dev_C2_tmp[xas_const[x] + 1] * (r - xwts_const[x]);
}
for (; x < dst_wd; x++) {
B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x];
B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x];
B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x];
}
}
}
__syncthreads();
}
} |
8,713 | #include <bits/stdc++.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
#define gpu_copy(x, y) thrust::copy((x).begin(), (x).end(), (y).begin())
#define gpu_copy_to(x, y, pos) thrust::copy((x).begin(), (x).end(), (y).begin() + (pos))
#define def_dvec(t) thrust::device_vector<t>
using namespace std;
const int BLOCK_SIZE = 256;
const int VEC_SIZE = 12248;
// Empty warm-up kernel: launched once before the timed run so CUDA context
// creation / module load cost is not included in the measurement.
__global__ void init(){}
// Sums 12 consecutive entries of `input`, starting at offset t and wrapping
// around modulo sz.
__device__ float dothings(int t, int sz, float *input){
    float acc = 0;
    for (int k = 0; k < 12; ++k)
        acc += input[(k + t) % sz];
    return acc;
}
// One thread per "instance": copies its VEC_SIZE-long slice of `input` into
// a per-thread array, then accumulates N_step window sums into output[g_id].
// NOTE(review): local_data is VEC_SIZE (=12248) floats per thread, far beyond
// register capacity, so it will be spilled to local (off-chip) memory; the
// copy also assumes `input` holds at least VEC_SIZE * N_inst floats.
__global__ void process(int N_step, int N_inst, float *input, float *output){
int g_id = blockIdx.x * blockDim.x + threadIdx.x;
if(g_id >= N_inst) return;
float local_data[VEC_SIZE];
float ans = 0.;
// Stage this instance's slice into thread-local storage.
for(int i=0;i<VEC_SIZE;++i) local_data[i] = input[VEC_SIZE * g_id + i];
for(int t=0;t<N_step;++t){
ans += dothings(t, VEC_SIZE, local_data);
}
output[g_id] = ans;
return;
}
// Driver: generates random input, runs the `process` kernel over num_inst
// instances, times it with CUDA events, and prints a sample of the output.
int main(int argc, char *argv[]){
    srand(0);
    int num_inst = 1024, num_step = 1024;
    if(argc > 1) num_step = stoi(argv[1]);
    /* For measuring the time */
    cudaEvent_t start, stop;
    float cuda_time;
    cudaEventCreate(&start); // creating the event 1
    cudaEventCreate(&stop);  // creating the event 2
    // Bug fix: the kernel reads input[VEC_SIZE*g_id + i] for g_id < num_inst,
    // so the buffers must be sized by num_inst, not num_step (the original
    // read out of bounds whenever num_step < num_inst).
    vector<float> hin(VEC_SIZE * num_inst), hout(num_inst);
    def_dvec(float) din(VEC_SIZE * num_inst), dout(num_inst);
    generate(hin.begin(), hin.end(), [](){return float(rand())/RAND_MAX;});
    int n_block = (num_inst + BLOCK_SIZE - 1)/BLOCK_SIZE;
    init<<<n_block,BLOCK_SIZE>>>();   // warm-up launch, excluded from timing
    gpu_copy(hin, din);
    cudaEventRecord(start, 0);
    process<<<n_block, BLOCK_SIZE>>>(num_step, num_inst, to_ptr(din), to_ptr(dout));
    cudaEventRecord(stop, 0); // Stop time measuring
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cuda_time, start, stop); // Saving the time measured
    cout<<"Time Usage for running the kernel is: "<<cuda_time/1000<<"s"<<endl;
    gpu_copy(dout, hout);
    cout<<"Showing the answer:"<<endl;
    for(int i=0;i<num_inst;i+=num_inst/10) cout<<hout[i]<<' ';
    cout<<endl;
    return 0;
} |
8,714 | #include "includes.h"
// Element-wise vector addition: C[i] = A[i] + B[i] for all i < N.
// One thread per element; the guard handles grids larger than N.
__global__ void VecAdd(float* A, float* B, float* C, int N) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
} |
8,715 | #include "includes.h"
// Maps uniform random values from [0, 1] to [-1, 1] in place.
// Fixes two issues in the original: (1) a __syncthreads() inside the
// divergent `index < total_size` branch — illegal when only some threads of
// a block take the branch, and unnecessary here since no shared memory is
// used; (2) double literals (2.0, 1.0) forcing a double round-trip in what
// is otherwise pure float arithmetic.
__global__ void scale_random(float *random, size_t total_size){
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index < total_size){
        random[index] = random[index] * 2.0f - 1.0f;
    }
} |
8,716 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
typedef float MatrixVal;
typedef struct matrix {
MatrixVal *values;
unsigned int rows, cols;
} Matrix;
typedef struct input {
Matrix *A, *B;
} Input;
// Writes `value` into the (row, col) cell of a row-major matrix.
void setMatrixPosition(Matrix *matrix, unsigned int row, unsigned int col, MatrixVal value) {
    matrix->values[row * matrix->cols + col] = value;
}
// Returns the value stored at the (row, col) cell of a row-major matrix.
MatrixVal getMatrixPosition(Matrix *matrix, unsigned int row, unsigned int col) {
    return matrix->values[row * matrix->cols + col];
}
// Allocates backing storage for a rows x cols matrix and records its
// dimensions. (Despite the name, this allocates fresh storage; it does not
// resize an existing buffer.)
void setMatrixSize(Matrix *matrix, unsigned int rows, unsigned int cols) {
    matrix->rows = rows;
    matrix->cols = cols;
    matrix->values = (MatrixVal *) malloc(rows * cols * sizeof(MatrixVal));
}
// Allocates an uninitialised Matrix header; call setMatrixSize() before use.
Matrix *newMatrix() {
    return (Matrix *) malloc(sizeof(Matrix));
}
// Releases both the element storage and the header of a matrix created via
// newMatrix()/setMatrixSize().
void deleteMatrix(Matrix *matrix) {
    free(matrix->values);
    free(matrix);
}
// Reads a matrix from `src` in the format "rows cols" followed by rows*cols
// whitespace-separated float entries, and returns a newly allocated Matrix.
// NOTE(review): fscanf return values are unchecked — malformed input leaves
// entries (or even the dimensions) uninitialised. Caller owns the result.
Matrix *readMatrixFrom(FILE *src) {
unsigned int row, col, rows, cols;
MatrixVal value;
Matrix *matrix = newMatrix();
fscanf(src, "%u %u", &rows, &cols);
setMatrixSize(matrix, rows, cols);
for (row = 0; row < rows; row++) {
for (col = 0; col < cols; col++) {
fscanf(src, "%f", &value);
setMatrixPosition(matrix, row, col, value);
}
}
return matrix;
}
// Frees both matrices held by an Input pair.
void deleteInput(Input input) {
    deleteMatrix(input.A);
    deleteMatrix(input.B);
}
/*
 * Reads two matrices from the named files.
 * Exits with a message if either file cannot be opened — the original
 * passed a NULL FILE* straight into readMatrixFrom on a missing file.
 */
Input readMatricesFromFiles(char *fileName1, char *fileName2) {
    Input input;
    FILE *file1 = fopen(fileName1, "r");
    if (file1 == NULL) {
        printf("Error: cannot open %s\n", fileName1);
        exit(1);
    }
    input.A = readMatrixFrom(file1);
    fclose(file1);
    FILE *file2 = fopen(fileName2, "r");
    if (file2 == NULL) {
        printf("Error: cannot open %s\n", fileName2);
        exit(1);
    }
    input.B = readMatrixFrom(file2);
    fclose(file2);
    return input;
}
// Reads matrix A and then matrix B from standard input.
Input readMatricesFromStdin() {
    Input pair;
    pair.A = readMatrixFrom(stdin);
    pair.B = readMatrixFrom(stdin);
    return pair;
}
// Prints command-line usage to stdout (single call, identical output).
void printUsage() {
    printf("Usage: matrix-multiply <cuda|cpu> [file-with-matrix1 file-with-matrix2]\n"
           "\nIf files are not passed, matrices are read from stdin.\n"
           "Input format: n-rows n-cols entries\n"
           "Output format: n-rows n-cols result-entries\n"
           "Output is always to stdout\n");
}
// TODO: multiply input.A by input.B on the GPU and print the result.
// Currently an unimplemented stub.
void processUsingCuda(Input input) {
}
// TODO: multiply input.A by input.B on the CPU and print the result.
// Currently an unimplemented stub.
void processUsingCpu(Input input) {
}
/*
 * Entry point. argv[1] selects the backend ("cuda" or "cpu"); with 4 args
 * the matrices come from the named files, with 2 args from stdin.
 */
int main(int argc, char **argv) {
    Input input;
    if (argc == 2) {
        input = readMatricesFromStdin();
    } else if (argc == 4) {
        input = readMatricesFromFiles(argv[2], argv[3]);
    } else {
        printf("Error: wrong number of arguments: %d\n", argc);
        printUsage();
        return 1;
    }
    if (strcmp(argv[1], "cuda") == 0) {
        processUsingCuda(input);
    } else if (strcmp(argv[1], "cpu") == 0) {
        processUsingCpu(input);
    } else {
        /* Bug fix: the %s conversion previously had no matching argument
         * (undefined behaviour). */
        printf("Error: %s is not a valid form of computation\n", argv[1]);
        printUsage();
        return 2;
    }
    /* Release the matrices read above (previously leaked). */
    deleteInput(input);
    return 0;
}
|
8,717 | #include "includes.h"
// Element-wise vector addition: C[i] = A[i] + B[i] for i < N.
// Fixes the original, which had every in-range thread race on C[0]
// (nondeterministic result) and used `*` despite the function's name.
__global__ void vec_add(int N, int *A, int *B, int *C){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if(i < N) C[i] = A[i] + B[i];
} |
8,718 | #include <math.h>
#include "mesh.cuh"
#include "material.cuh"
#include "constants.cuh"
void material_init(struct material *mat);
void mesh_init(struct mesh *m);
// Convenience initialiser: sets up both the mesh and the material structs
// with their compile-time defaults.
void mesh_material_init(struct mesh *me, struct material *ma) {
mesh_init(me);
material_init(ma);
}
// Populates a mesh struct from compile-time macros (LX, LY, NELX, ... —
// presumably defined in constants.cuh; verify there for their meanings).
void mesh_init(struct mesh *m) {
m->lx = LX;
m->ly = LY;
m->vmax = LX*LY;
m->nelx = NELX;
m->nely = NELY;
m->volfrac = VOLFRAC;
// Element dimensions: domain size divided by element count per axis.
m->ax = (double)LX / (double)NELX;
m->by = (double)LY / (double)NELY;
m->area = m->ax*m->by;
m->penal = PENAL;
m->prho = PRHO;
m->rmin = RMIN;
m->ft = FT;
m->alpha = ALPHA;
m->beta = BETA;
m->ninp = NINP;
m->nout = NOUT;
m->fixed_count = 0;
}
// Populates a material struct from compile-time macros; emin/rhomin are
// small positive floors (1e-6) rather than exact zeros.
void material_init(struct material *mat) {
mat->e0 = E0;
mat->emin = 0.000001;
mat->rho0 = RHO0;
mat->rhomin = 0.000001;
mat->nu = NU;
}
|
8,719 | #include "includes.h"
// GEMM (NT variant): with column-major indexing as used below, computes
// C(m,n) = beta*C(m,n) + alpha * sum_i A(m,i) * B(n,i), i.e. C = beta*C +
// alpha * A * B^T. One thread per output element, 2D grid.
// NOTE(review): there is no bounds check on m or n — the kernel assumes the
// grid exactly covers C; the output dimensions are not passed in, so a guard
// cannot be added without changing the signature. Confirm launch config at
// call sites.
__global__ void mysgemmNT( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
{
float c = 0.0f;
int m = blockIdx.x * blockDim.x + threadIdx.x;
int n = blockIdx.y * blockDim.y + threadIdx.y;
// Dot product of row m of A with row n of B (both stride by leading dim).
for (int i = 0; i < k; ++i) {
float a = A[m + i * lda];
float b = B[n + i * ldb];
c += a * b;
}
C[m+n*ldc] = C[m+n*ldc] * beta + alpha * c;
} |
8,720 | #include "includes.h"
/*
 * Adds a "trampling" contribution to every interior graph node based on its
 * distance from a path. The grid is 2D over the graphW x graphH interior;
 * the buffers carry a 1-cell border, hence the +1 offsets and the
 * (graphW + 2) row pitch. Uses atomicAdd because several kernel launches
 * (or overlapping paths) may accumulate into the same node.
 */
__global__ void NodesApplyTramplingEffectKernel(float* target, float* distanceToPath, int graphW, int graphH, float pathThickness, float tramplingCoefficient)
{
    const int col = 1 + blockIdx.x * blockDim.x + threadIdx.x;
    const int row = 1 + blockIdx.y * blockDim.y + threadIdx.y;
    if (col > graphW || row > graphH)
        return;
    const int idx = col + row * (graphW + 2);
    // Normalised |distance| clamped to [0, 1].
    float w = max(0.0f, min(1.0f, fabsf(distanceToPath[idx] / pathThickness)));
    // Smooth cubic falloff: 1 at the path centre, 0 at the edge.
    w = w * (w * (-4 * w + 6) - 3) + 1;
    atomicAdd(&target[idx], w * tramplingCoefficient);
} |
8,721 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/count.h>
#include <iostream>
char TESTSTRING[] = "how many lines\n are in this\n guy right\n here";
struct is_end
{
__host__ __device__
bool operator()(const char x)
{
return x == '\n';
}
};
// Finds the byte offset of every '\n' in TESTSTRING on the GPU (via Thrust
// count + copy_if over a counting iterator) and prints the offsets.
int main() {
    const int len = sizeof TESTSTRING - 1; // exclude the terminating NUL
    thrust::device_vector<char> d_text(len);
    thrust::copy(TESTSTRING, TESTSTRING + len, d_text.begin());
    // Number of newline characters determines the output size.
    const int n_breaks = thrust::count(d_text.begin(), d_text.end(), '\n');
    thrust::device_vector<int> d_offsets(n_breaks);
    // Keep the index of each position whose character is a newline.
    thrust::copy_if(thrust::make_counting_iterator(0),
                    thrust::make_counting_iterator(len),
                    d_text.begin(), d_offsets.begin(), is_end());
    thrust::host_vector<int> h_offsets = d_offsets;
    for (int i = 0; i < h_offsets.size(); i++) {
        std::cout << h_offsets[i] << std::endl;
    }
}
|
8,722 | #include "includes.h"
// Writes v[tid] = tid for each thread of the (single-block) launch.
// Adds the bounds guard the original lacked: `size` was accepted but never
// used, so launching with more threads than elements wrote out of bounds.
__global__ void fill( int * v, std::size_t size )
{
    auto tid = threadIdx.x;
    if ( tid < size )
        v[ tid ] = tid;
} |
8,723 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Aborts with a diagnostic if a CUDA runtime call failed. Invoked through
// the CheckCudaError macro, which supplies __FILE__/__LINE__.
// `file` is now const char*: the macro passes the __FILE__ string literal,
// and binding a string literal to char* is ill-formed in modern C++.
void _CheckCudaError(cudaError_t ret, const char *file, int line)
{
	if (ret != cudaSuccess) {
		printf("%s - %s (%s:%d)\n", cudaGetErrorName(ret), cudaGetErrorString(ret), file, line);
		exit(EXIT_FAILURE);
	}
}
#define CheckCudaError(call) _CheckCudaError((call), __FILE__, __LINE__)
struct s12 {
int a; int b; int c;
};
struct s16 {
int a; int b; int c; int d;
};
struct s20 {
int a; int b; int c; int d; int e;
};
struct s24 {
int a; int b; int c; int d; int e; int f;
};
struct s28 {
int a; int b; int c; int d; int e; int f; int g;
};
struct s32 {
int a; int b; int c; int d; int e; int f; int g; int h;
};
#define TESTSIZE 1024
// Round-trips the first half of `d` through shared memory into the second
// half, using conflict-free (linear) shared-memory indexing.
// Launch shape: a single block of TESTSIZE threads.
template <typename T>
__global__ void test_kernel(T* d)
{
__shared__ T s[TESTSIZE / 2];
// copy first half of data to SMEM
if (threadIdx.x < TESTSIZE / 2)
s[threadIdx.x] = d[threadIdx.x];
// Barrier: all shared writes must land before any thread reads below.
__syncthreads();
// copy SMEM to second half
if (threadIdx.x >= TESTSIZE / 2)
d[threadIdx.x] = s[threadIdx.x % (TESTSIZE / 2)];
}
// Same round-trip as test_kernel, but with stride-2 shared-memory indexing
// deliberately chosen to provoke bank conflicts (for benchmarking the cost).
// Launch shape: a single block of TESTSIZE threads.
template <typename T>
__global__ void test_kernel_conflict(T* d)
{
__shared__ T s[TESTSIZE];
if (threadIdx.x < TESTSIZE / 2)
s[threadIdx.x * 2] = d[threadIdx.x]; // bank conflict
__syncthreads();
if (threadIdx.x >= TESTSIZE / 2)
d[threadIdx.x] = s[threadIdx.x * 2 % TESTSIZE]; // bank conflict
}
// Runs both kernels for element type T and prints a sanity check: the first
// int of element 0 and of element TESTSIZE-1 after the shared-memory
// round-trip (expected TESTSIZE/2 and 1, per the initialisation below).
// Only the leading 4 bytes of each element are initialised/inspected.
template <typename T>
void test()
{
T h[TESTSIZE];
memset(h, 0, sizeof(h));
// Seed the first half with a descending sequence in each element's first int.
for (int i = 0; i < TESTSIZE / 2; i++)
*(int*)&h[i] = TESTSIZE / 2 - i;
T *d;
CheckCudaError(cudaMalloc(&d, sizeof(h)));
CheckCudaError(cudaMemcpy(d, h, sizeof(h), cudaMemcpyHostToDevice));
test_kernel<<<1, TESTSIZE>>>(d);
CheckCudaError(cudaMemcpy(h, d, sizeof(h), cudaMemcpyDeviceToHost));
printf("first element : %d, last element : %d\n", *(int*)&h[0], *(int*)&h[TESTSIZE - 1]); // expected numbers are: TESTSIZE / 2, 1
CheckCudaError(cudaMemcpy(d, h, sizeof(h), cudaMemcpyHostToDevice));
test_kernel_conflict<<<1, TESTSIZE>>>(d);
CheckCudaError(cudaMemcpy(h, d, sizeof(h), cudaMemcpyDeviceToHost));
printf("first element : %d, last element : %d\n", *(int*)&h[0], *(int*)&h[TESTSIZE - 1]); // expected numbers are: TESTSIZE / 2, 1
CheckCudaError(cudaFree(d));
}
// Runs the shared-memory bank-conflict experiment for element sizes from
// 4 to 32 bytes (every 4-byte step between struct sizes).
int main()
{
test<int>(); // 4B per element
test<long long int>(); // 8B per element
test<struct s12>(); // 12B per element
test<struct s16>(); // 16B per element
test<struct s20>(); // 20B per element
test<struct s24>(); // 24B per element
test<struct s28>(); // 28B per element
test<struct s32>(); // 32B per element
}
|
8,724 | #include <math.h>
#include <stdio.h>
// Array access macros
#define INPUT(i,j) input_grid[(j) + (i)*(N)]
#define TEMP(i,j) temp_grid[(j) + (i)*(N)]
#define WINDOW_SIZE (7)
#define NEIGHBOR_SIZE (3)
#define BLOCK_SIZE (512)
#define FILTER_SIZE ((WINDOW_SIZE) + (NEIGHBOR_SIZE) - 1)
#define FILTER_RADIUS (((FILTER_SIZE) - 1) / 2)
// Non-local-means style filter over an N-column image, staging a halo of
// FILTER_RADIUS row-blocks of the input into shared memory per thread block.
// For each pixel i it compares NEIGHBOR_SIZE^2 patches against every pixel j
// in a WINDOW_SIZE^2 window, weights by exp of the patch distance, and
// writes the normalised weighted average to output_grid[gindex].
// NOTE(review): pix_ix/pix_iy are derived from the *shared-memory* index
// (lindex) using the global row width N, and halo loads are gated on
// blockIdx.x rather than the source row — the index mapping here should be
// re-verified against a CPU reference before trusting the output.
__global__ void nlmSimple(int N, double const *input_grid, double *output_grid, float filtSigma)
{
__shared__ double temp_grid[BLOCK_SIZE * FILTER_SIZE];
// Define global and local indices of current pixel
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
int lindex = threadIdx.x + FILTER_RADIUS * blockDim.x;
int pix_ix, pix_iy, pix_jx, pix_jy;
double neighbor_j,
neighbor_i,
output = 0,
sum_weights = 0;
// Read input elements into shared memory
for (int i = -FILTER_RADIUS; i <= FILTER_RADIUS; i++)
{
if ((int)blockIdx.x + i >= 0 && (int)blockIdx.x + i < N)
{
temp_grid[lindex + i * (int)blockDim.x] = input_grid[gindex + i * (int)blockDim.x];
}
}
// Synchronize (ensure all the data is available)
__syncthreads();
// Recover 2D coordinates of pixel i from the flat shared index.
pix_iy = lindex % N;
pix_ix = (lindex - pix_iy) / N;
if (pix_ix < FILTER_SIZE && pix_iy < N)
{
int window_radius = (WINDOW_SIZE - 1) / 2;
int neighbor_radius = (NEIGHBOR_SIZE - 1) / 2;
// Iterate through window
for (int k = -window_radius; k <= window_radius; k++)
for (int l = -window_radius; l <= window_radius; l++)
{
double weight = 0;
double distance = 0;
pix_jx = pix_ix + k;
pix_jy = pix_iy + l;
// Skip window positions that fall outside the staged region.
if (pix_jx < 0 || pix_jx >= FILTER_SIZE ||
pix_jy < 0 || pix_jy >= N)
continue;
// Iterate through every pix_j neighbors
for (int p = -neighbor_radius; p <= neighbor_radius; p++)
for (int q = -neighbor_radius; q <= neighbor_radius; q++)
{
if (pix_jx + p < 0 || pix_jx + p >= FILTER_SIZE ||
pix_jy + q < 0 || pix_jy + q >= N ||
pix_ix + p < 0 || pix_ix + p >= FILTER_SIZE ||
pix_iy + q < 0 || pix_iy + q >= N)
continue;
// Accumulate squared patch difference between pixels i and j.
neighbor_j = TEMP(pix_jx + p, pix_jy + q);
neighbor_i = TEMP(pix_ix + p, pix_iy + q);
distance += (neighbor_i - neighbor_j) * (neighbor_i - neighbor_j);
}
// Derive weight for pixels i and j
// (fast-math __expf trades accuracy for speed; includes a spatial
// penalty proportional to the squared window offset).
weight = __expf(-(distance / filtSigma +
(k*k + l*l) * (1.0f)/(float)(WINDOW_SIZE* WINDOW_SIZE)));
sum_weights += weight;
// Sum for every pixel in the window
output += TEMP(pix_jx, pix_jy) * weight;
}
// Normalize
sum_weights = (double)(1 / sum_weights);
output *= sum_weights;
// Write output to global memory
output_grid[gindex] = output;
}
}
|
8,725 | #include <stdio.h>
#include "cuda.h"
int main() {
    // Query device 0 and emit its compute capability as one integer
    // (major*10 + minor, e.g. "86" for SM 8.6). No trailing newline so the
    // output can be captured directly by build scripts.
    cudaDeviceProp deviceProperties;
    cudaGetDeviceProperties(&deviceProperties, 0);
    int capability = deviceProperties.major * 10 + deviceProperties.minor;
    printf("%d", capability);
    return 0;
}
|
8,726 | /*
* file name: matrix.cu
*
* CPE810A: Homework 1, matrix * matrix by using shared memory
*
* Yupeng Cao, 10454637
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#define BLOCK_SIZE 16
#define TILE_SIZE 16
/*
*********************************************************************
function name: gpu_matrix_mult
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
*********************************************************************
*/
// Naive matrix multiply: each thread computes one element of C = A * B,
// where A is m x n, B is n x k and C is m x k (all row-major).
__global__ void gpu_matrix_mult(float *a, float *b, float *c, int m, int n, int k)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Threads falling outside the output matrix do nothing.
    if (row >= m || col >= k)
        return;
    float acc = 0;
    for (int t = 0; t < n; ++t)
        acc += a[row * n + t] * b[t * k + col];
    c[row * k + col] = acc;
}
/*
*********************************************************************
function name: shared_matrix_mult
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Using Shared Memory
*********************************************************************
*/
// Tiled matrix multiply using shared memory: C(m x k) = A(m x n) * B(n x k),
// all row-major. Launch with blockDim == (TILE_SIZE, TILE_SIZE); each block
// computes one TILE_SIZE x TILE_SIZE tile of C.
__global__ void shared_matrix_mult(float* A, float* B, float* C, int m, int n, int k)
{
    __shared__ float As[TILE_SIZE][TILE_SIZE];  // staging tile of A
    __shared__ float Bs[TILE_SIZE][TILE_SIZE];  // staging tile of B
    int row = blockIdx.y * blockDim.y + threadIdx.y;  // row of C owned by this thread
    int col = blockIdx.x * blockDim.x + threadIdx.x;  // column of C owned by this thread
    float CValue = 0.0;
    // Walk across the shared dimension n one tile at a time.
    for (int t = 0; t * TILE_SIZE < n; ++t)
    {
        // Load one element of A's tile; zero-pad outside the matrix so the
        // inner product below needs no bounds checks.
        if (row < m && t * TILE_SIZE + threadIdx.x < n)
            As[threadIdx.y][threadIdx.x] = A[row * n + t * TILE_SIZE + threadIdx.x];
        else
            As[threadIdx.y][threadIdx.x] = 0;
        // Load one element of B's tile, likewise zero-padded.
        if (col < k && t * TILE_SIZE + threadIdx.y < n)
            Bs[threadIdx.y][threadIdx.x] = B[(t * TILE_SIZE + threadIdx.y) * k + col];
        else
            Bs[threadIdx.y][threadIdx.x] = 0;
        __syncthreads();  // both tiles fully written before anyone reads
        // Accumulate this tile pair's contribution to the dot product.
        for (int i = 0; i < TILE_SIZE; ++i)
        {
            CValue += As[threadIdx.y][i] * Bs[i][threadIdx.x];
        }
        __syncthreads();  // keep tiles alive until every thread finishes reading
    }
    if (row < m && col < k)
        C[row * k + col] = CValue;
}
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix in CPU,
for validating GPU results
*********************************************************************
*/
// CPU reference implementation of C(m x k) = A(m x n) * B(n x k), row-major.
// Used to validate the GPU kernels.
void cpu_matrix_mult(float *h_a, float *h_b, float *h_result, int m, int n, int k) {
    for (int row = 0; row < m; ++row) {
        for (int col = 0; col < k; ++col) {
            float acc = 0.0f;
            for (int idx = 0; idx < n; ++idx)
                acc += h_a[row * n + idx] * h_b[idx * k + col];
            h_result[row * k + col] = acc;
        }
    }
}
/*
*********************************************************************
function name: printMatrix
description: Print calculation results
for visualize GPU results
Note: if the matrix size larger than 10,
this function will not execute
*********************************************************************
*/
// Pretty-print a row x col matrix for manual inspection of results.
// Deliberately skipped for matrices larger than 10 in either dimension.
void printMatrix(float* result_matrix, int row, int col) {
    if (row > 10 || col > 10) return;
    printf("\n");
    for (int r = 0; r < row; r++) {
        for (int c = 0; c < col; c++)
            printf("%f\t", result_matrix[r*col + c]);
        printf("\n");
    }
    return;
}
/*
*********************************************************************
function name: main
description: test and compare
*********************************************************************
*/
/*
 * Entry point: parse m, n, k; run naive GPU, shared-memory GPU and CPU
 * matrix multiplies; time each; validate the GPU result against the CPU one.
 */
int main(int argc, char *argv[])
{
    // input check
    if ( argc != 4)
    {
        printf("Error input Parameter \n");
        printf("Please input matrix size \n");
        printf("Matrix A: m by n; Matrix B: n by k \n");
        return 0;
    }
    /*
    Matrix A: m * n
    Matrix B: n * k
    */
    int m = atoi(argv[1]);
    int n = atoi(argv[2]);
    int k = atoi(argv[3]);
    srand(1000);  // fixed seed for reproducible inputs
    // allocate pinned host memory; h_cc stores the CPU reference result
    // BUGFIX: buffer sizes were computed with sizeof(int) for float data.
    float *h_a, *h_b, *h_c, *h_cc;
    cudaMallocHost((void **) &h_a, sizeof(float)*m*n);
    cudaMallocHost((void **) &h_b, sizeof(float)*n*k);
    cudaMallocHost((void **) &h_c, sizeof(float)*m*k);
    cudaMallocHost((void **) &h_cc, sizeof(float)*m*k);
    // random initialize matrix A
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            h_a[i * n + j] = rand() % 1024;
        }
    }
    // random initialize matrix B
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            h_b[i * k + j] = rand() % 1024;
        }
    }
    // Allocate memory space on the device
    float *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sizeof(float)*m*n);
    cudaMalloc((void **) &d_b, sizeof(float)*n*k);
    cudaMalloc((void **) &d_c, sizeof(float)*m*k);
    // copy matrix A and B from host to device memory
    cudaMemcpy(d_a, h_a, sizeof(float)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(float)*n*k, cudaMemcpyHostToDevice);
    // ceiling-division grid so the whole output matrix is covered
    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // count the execution time
    float shared_gpu_time_ms, gpu_elapsed_time_ms, cpu_elapsed_time_ms;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // time the GPU version without shared memory
    cudaEventRecord(start, 0);
    gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU without shared memory: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms);
    // time the shared-memory version (overwrites d_c)
    cudaEventRecord(start, 0);
    shared_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&shared_gpu_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU with shared memory: %f ms.\n\n", m, n, n, k, shared_gpu_time_ms);
    // Transfer results from device to host
    cudaMemcpy(h_c, d_c, sizeof(float)*m*k, cudaMemcpyDeviceToHost);
    // time the CPU reference
    cudaEventRecord(start, 0);
    cpu_matrix_mult(h_a, h_b, h_cc, m, n, k);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms);
    // validate results computed by GPU with shared memory
    // BUGFIX: exact float equality almost always fails for large n because
    // CPU and GPU accumulate in different orders; compare with a tolerance.
    int all_ok = 1;
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < k; ++j)
        {
            float ref = h_cc[i*k + j];
            float got = h_c[i*k + j];
            if (fabsf(ref - got) > 1e-3f * fabsf(ref) + 1e-3f)
            {
                all_ok = 0;
            }
        }
    }
    // compute speedup ratio: cpu time / shared_memory time
    if(all_ok)
    {
        printf("all results are correct!, speedup = %f\n", cpu_elapsed_time_ms / shared_gpu_time_ms);
    }
    else
    {
        printf("incorrect results\n");
    }
    printMatrix(h_c, m, k);
    // free memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_cc);
    return 0;
}
|
8,727 | // nnIndex: B*M*K;
// nnCount: B*M;
// input: B*N*C;
// filter: filter_size*C*r;
// output: B*M*(C*r)
// Depthwise 3D point-convolution forward pass.
// Shapes: nnIndex/binIndex B*M*K, nnCount B*M, input B*N*C,
// filter F*C*r, output B*M*(C*r). blockIdx.x strides over batches,
// (blockIdx.y, threadIdx.x) grid-strides over the M*(C*r) outputs.
// NOTE(review): output is accumulated into with +=, so it must be
// zero-initialised by the caller — confirm at the call site.
__global__ void depthwise_conv3d_forward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                         const int* nnCount, const int* binIndex, const float* input,
                                         const float* filter, float* output)
{
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m]; // number of valid neighbors of point m
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                int f = binIndex[i*M*K+m*K+k]; // spatial bin selecting the filter slice
                // average the weighted neighbor features over the neighborhood
                output[i*M*C*r+j] += input[i*N*C+n*C+cin]*filter[f*C*r+cout]/nnSize;
            }
        }
    }
}
// Backward pass w.r.t. the input features: scatter each output gradient
// back to its contributing input point. atomicAdd is required because
// several outputs can touch the same gradInput element.
// `input` is unused here; kept so all backward kernels share a signature.
__global__ void depthwise_input_backward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                         const int* nnCount, const int* binIndex, const float* input,
                                         const float* filter, const float* gradOutput, float* gradInput)
{
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m]; // neighborhood size of point m
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                int f = binIndex[i*M*K+m*K+k]; // filter bin of this neighbor
                // chain rule: dL/dx = dL/dy * w / nnSize
                float derIn = gradOutput[i*M*C*r+j]*filter[f*C*r+cout]/nnSize;
                atomicAdd(&gradInput[i*N*C+n*C+cin],derIn);
            }
        }
    }
}
// Backward pass w.r.t. the filter, accumulated through a shared-memory
// window: each launch handles filter entries [startIdx, startIdx+sharedMemSize).
// Per-block partials are accumulated in dynamic shared memory
// (sharedMemSize floats, passed as the launch's third argument), then
// flushed to global gradFilter with one atomicAdd per slot.
__global__ void depthwise_filter_backward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                          const int* nnCount, const int* binIndex, const float* input,
                                          const float* gradOutput, float* gradFilter, int sharedMemSize,
                                          int startIdx)
{
    extern __shared__ float gradPerBlock[]; // the gradient on each block
    // zero the shared window cooperatively (shared memory is uninitialized)
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        gradPerBlock[i] = 0; // for 1D block
    }
    __syncthreads();
    int endIdx = sharedMemSize+startIdx;
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                int f = binIndex[i*M*K+m*K+k];
                // chain rule: dL/dw = dL/dy * x / nnSize
                float derFilt = gradOutput[i*M*C*r+j]*input[i*N*C+n*C+cin]/nnSize;
                int currIdx = f*C*r+cout;
                if((currIdx>=startIdx) && (currIdx<endIdx)) // within the shared memory
                {
                    atomicAdd(&gradPerBlock[currIdx-startIdx],derFilt);
                }
            }
        }
    }
    __syncthreads();
    // flush the block's partial sums into the global filter gradient
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        atomicAdd(&gradFilter[i+startIdx],gradPerBlock[i]); // for 1D block
    }
}
// nnIndex: B*M*K;
// nnCount: B*M;
// input: B*N*C;
// filter: filter_size*C*r;
// output: B*M*(C*r)
// Fuzzy variant of the depthwise forward pass: each neighbor contributes
// through up to T spatial bins with soft coefficients (binCoeff) instead of
// a single hard bin. binIndex/binCoeff: B*M*K*T; other shapes as in
// depthwise_conv3d_forward. Output is accumulated with += (caller zeroes it).
__global__ void fuzzy_depthwise_conv3d_forward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                               const int* nnCount, const int* binIndex, const float* binCoeff,
                                               const float* input, const float* filter, float* output)
{
    // T is the number of fuzzy bins each neighbor locates in
    const int T = 4;
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                // effective filter weight = coefficient-weighted sum over bins
                float weight = 0;
                for(int idx=0;idx<T;idx++)
                {
                    int f = binIndex[i*M*K*T+m*K*T+k*T+idx];
                    float coeff = binCoeff[i*M*K*T+m*K*T+k*T+idx];
                    if (coeff>0) // non-positive coefficients mark unused bin slots
                    {
                        weight += coeff*filter[f*C*r+cout];
                    }
                }
                output[i*M*C*r+j] += input[i*N*C+n*C+cin]*weight/nnSize;
            }
        }
    }
}
// Fuzzy backward pass w.r.t. the input: rebuild each neighbor's effective
// weight from its T fuzzy bins, then scatter the gradient to the input
// point with atomicAdd (multiple outputs may hit the same element).
__global__ void fuzzy_depthwise_input_backward(int B, int N, int M, int C, int r, int K,
                                               const int* nnIndex, const int* nnCount, const int* binIndex,
                                               const float* binCoeff, const float* input, const float* filter,
                                               const float* gradOutput, float* gradInput)
{
    const int T = 4;  // fuzzy bins per neighbor, as in the forward pass
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                // effective filter weight = coefficient-weighted sum over bins
                float weight = 0;
                for(int idx=0;idx<T;idx++)
                {
                    int f = binIndex[i*M*K*T+m*K*T+k*T+idx];
                    float coeff = binCoeff[i*M*K*T+m*K*T+k*T+idx];
                    if (coeff>0)
                    {
                        weight += coeff*filter[f*C*r+cout];
                    }
                }
                // chain rule: dL/dx = dL/dy * weight / nnSize
                float derIn = gradOutput[i*M*C*r+j]*weight/nnSize;
                atomicAdd(&gradInput[i*N*C+n*C+cin],derIn);
            }
        }
    }
}
// Fuzzy backward pass w.r.t. the filter, windowed through shared memory as
// in depthwise_filter_backward: each launch covers filter entries
// [startIdx, startIdx+sharedMemSize). Each neighbor spreads its gradient
// over up to T fuzzy bins scaled by binCoeff.
__global__ void fuzzy_depthwise_filter_backward(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                                const int* nnCount, const int* binIndex, const float* binCoeff,
                                                const float* input, const float* gradOutput, float* gradFilter,
                                                int sharedMemSize, int startIdx)
{
    const int T = 4;
    extern __shared__ float gradPerBlock[]; // the gradient on each block
    // zero the shared window cooperatively (shared memory is uninitialized)
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        gradPerBlock[i] = 0; // for 1D block
    }
    __syncthreads();
    int endIdx = sharedMemSize+startIdx;
    for(int i=blockIdx.x;i<B;i+=gridDim.x)
    {
        for(int j=blockIdx.y*blockDim.x+threadIdx.x;j<M*(C*r);j+=blockDim.x*gridDim.y)
        {
            int cout = j%(C*r); // output channel ID
            int cin = cout/r; // input channel ID
            int m = j/(C*r); // output point ID
            int nnSize = nnCount[i*M+m];
            for(int k=0;k<nnSize;k++)
            {
                int n = nnIndex[i*M*K+m*K+k]; // input point ID
                // chain rule: dL/dw = dL/dy * x / nnSize, split over fuzzy bins
                float derFilt = gradOutput[i*M*C*r+j]*input[i*N*C+n*C+cin]/nnSize;
                for(int idx=0;idx<T;idx++)
                {
                    int f = binIndex[i*M*K*T+m*K*T+k*T+idx];
                    float coeff = binCoeff[i*M*K*T+m*K*T+k*T+idx];
                    if (coeff>0) // non-positive coefficients mark unused bin slots
                    {
                        int currIdx = f*C*r+cout;
                        if((currIdx>=startIdx) && (currIdx<endIdx)) // within the shared memory
                        {
                            atomicAdd(&gradPerBlock[currIdx-startIdx],coeff*derFilt);
                        }
                    }
                }
            }
        }
    }
    __syncthreads();
    // flush the block's partial sums into the global filter gradient
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        atomicAdd(&gradFilter[i+startIdx],gradPerBlock[i]); // for 1D block
    }
}
// Host wrapper: launch the depthwise forward kernel with one block per
// batch element and 1024 threads per block.
void depthwiseConv3dLauncher(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                             const int* nnCount, const int* binIndex, const float* input,
                             const float* filter, float* output)
{
    const int threadsPerBlock = 1024;
    depthwise_conv3d_forward<<<B, threadsPerBlock>>>(B, N, M, C, r, K, nnIndex, nnCount,
                                                     binIndex, input, filter, output);
}
// Host wrapper for the depthwise backward pass: one launch for the input
// gradient, then a series of filter-gradient launches, each covering a
// shared-memory-sized window of the F*C*r filter entries.
void depthwiseConv3dGradLauncher(int B, int N, int M, int F, int C, int r, int K,
                                 const int* nnIndex, const int* nnCount, const int* binIndex,
                                 const float* input, const float* filter, const float* gradOutput,
                                 float* gradInput, float* gradFilter)
{
    // Titan Xp exposes 49152 bytes of shared memory per block; express the
    // budget as a number of float slots.
    const int maxSlots = int(49152/sizeof(float));
    depthwise_input_backward<<<B,1024>>>(B, N, M, C, r, K, nnIndex, nnCount, binIndex,
                                         input, filter, gradOutput, gradInput);
    const int totalEntries = F * C * r;
    // Full windows first...
    for (int start = 0; start + maxSlots <= totalEntries; start += maxSlots)
    {
        depthwise_filter_backward<<<B,1024,sizeof(float)*maxSlots>>>(
            B, N, M, C, r, K, nnIndex, nnCount, binIndex, input, gradOutput,
            gradFilter, maxSlots, start);
    }
    // ...then the partial window at the tail, if any.
    const int tail = totalEntries % maxSlots;
    if (tail > 0)
    {
        depthwise_filter_backward<<<B,1024,sizeof(float)*tail>>>(
            B, N, M, C, r, K, nnIndex, nnCount, binIndex, input, gradOutput,
            gradFilter, tail, totalEntries - tail);
    }
}
// Host wrapper: launch the fuzzy depthwise forward kernel, one block per
// batch element, 1024 threads per block.
void fuzzyDepthwiseConv3dLauncher(int B, int N, int M, int C, int r, int K, const int* nnIndex,
                                  const int* nnCount, const int* binIndex, const float* binCoeff,
                                  const float* input, const float* filter, float* output)
{
    const int threadsPerBlock = 1024;
    fuzzy_depthwise_conv3d_forward<<<B, threadsPerBlock>>>(B, N, M, C, r, K, nnIndex, nnCount,
                                                           binIndex, binCoeff, input, filter, output);
}
// Host wrapper for the fuzzy backward pass: input gradient in one launch,
// then filter-gradient launches over shared-memory-sized windows.
void fuzzyDepthwiseConv3dGradLauncher(int B, int N, int M, int F, int C, int r, int K, const int* nnIndex,
                                      const int* nnCount, const int* binIndex, const float* binCoeff,
                                      const float* input, const float* filter, const float* gradOutput,
                                      float* gradInput, float* gradFilter)
{
    // Titan Xp shared-memory budget (49152 bytes) expressed in float slots.
    const int maxSlots = int(49152/sizeof(float));
    fuzzy_depthwise_input_backward<<<B,1024>>>(B, N, M, C, r, K, nnIndex, nnCount, binIndex,
                                               binCoeff, input, filter, gradOutput, gradInput);
    const int totalEntries = F * C * r;
    // Full windows first...
    for (int start = 0; start + maxSlots <= totalEntries; start += maxSlots)
    {
        fuzzy_depthwise_filter_backward<<<B,1024,sizeof(float)*maxSlots>>>(
            B, N, M, C, r, K, nnIndex, nnCount, binIndex, binCoeff, input,
            gradOutput, gradFilter, maxSlots, start);
    }
    // ...then the partial window at the tail, if any.
    const int tail = totalEntries % maxSlots;
    if (tail > 0)
    {
        fuzzy_depthwise_filter_backward<<<B,1024,sizeof(float)*tail>>>(
            B, N, M, C, r, K, nnIndex, nnCount, binIndex, binCoeff, input,
            gradOutput, gradFilter, tail, totalEntries - tail);
    }
}
8,728 | #include "includes.h"
// Element-wise MatC = sin(MatA) + sin(MatB) over an nx-by-ny grid stored
// row-major. Expects a 2D launch covering at least nx x ny threads.
__global__ void sumMatrixOnGPU(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
    unsigned int col = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= nx || row >= ny)
        return;
    unsigned int idx = row * nx + col;
    MatC[idx] = sin(MatA[idx]) + sin(MatB[idx]);
}
8,729 | #include <stdlib.h>
#include <stdio.h>
#define NUM_BLOCKS 20
__device__ int **dataptr;
// Per-block pointer
// Allocate the global table of per-block pointers via device-side malloc.
// Exactly one thread of the whole grid (block 0, thread 0) performs it.
__global__ void dynamic_allocmem()
{
    bool isFirstThreadOfGrid = (blockIdx.x == 0 && threadIdx.x == 0);
    if (isFirstThreadOfGrid)
        dataptr = (int**)malloc(NUM_BLOCKS * sizeof(int*));
}
// Per-block allocation: thread 0 of each block allocates one buffer, then
// every thread zeroes its own slot.
__global__ void allocmem()
{
    // Only the first thread in the block does the allocation
    // since we want only one allocation per block.
    // BUGFIX: the original allocated space for a single int, but every
    // thread writes dataptr[blockIdx.x][threadIdx.x] below, which is out
    // of bounds for threadIdx.x > 0. Size the buffer by blockDim.x.
    if (threadIdx.x == 0)
        dataptr[blockIdx.x] = (int*)malloc(blockDim.x * sizeof(int));
    __syncthreads();
    // Check for failure (device malloc may return NULL; the pointer is
    // uniform per block, so this branch does not diverge within a block)
    if (dataptr[blockIdx.x] == NULL)
        return;
    // Zero the data with all threads in parallel
    dataptr[blockIdx.x][threadIdx.x] = 0;
}
// Simple example: store thread ID into each element
// Simple example: each thread bumps its own slot by its thread index.
__global__ void usemem()
{
    int* blockBuf = dataptr[blockIdx.x];
    if (blockBuf == NULL)
        return;
    blockBuf[threadIdx.x] += threadIdx.x;
}
// Print the content of the buffer before freeing it
// Print the final content of each block's buffer, then release it.
__global__ void freemem()
{
    int* ptr = dataptr[blockIdx.x];
    if (ptr != NULL)
        printf("Block %d, Thread %d: final value = %d\n", blockIdx.x, threadIdx.x, ptr[threadIdx.x]);
    // BUGFIX: barrier so thread 0 cannot free the buffer while other
    // threads of the block are still reading ptr[threadIdx.x] above.
    __syncthreads();
    // Only free from one thread!
    if (threadIdx.x == 0)
        free(ptr);
}
int main()
{
    // Raise the device malloc heap to 3 GiB so the in-kernel allocations fit.
    const long long heapBytes = (long long)3 * 1024 * 1024 * 1024;
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, heapBytes);
    dynamic_allocmem<<< NUM_BLOCKS, 10 >>>();  // build the per-block pointer table
    allocmem<<< NUM_BLOCKS, 10 >>>();          // allocate and zero per-block buffers
    usemem<<< NUM_BLOCKS, 10 >>>();            // accumulate thread IDs
    freemem<<< NUM_BLOCKS, 10 >>>();           // print contents and free buffers
    cudaDeviceSynchronize();
    return 0;
}
|
8,730 | #include "includes.h"
// Invert each entry of a (diagonal) mass vector in place. Entries at or
// below 1e-15 are treated as singular and set to zero instead of inverted.
__global__ void invert_mass_matrix(double *values, unsigned int size)
{
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= size)
        return;
    double v = values[idx];
    values[idx] = (v > 1e-15) ? 1. / v : 0.;
}
8,731 | /* Molecular dynamics simulation linear code for binary Lennard-Jones liquid
under NVE ensemble; Author: You-Liang Zhu, Email: youliangzhu@ciac.ac.cn
Copyright: You-Liang Zhu
This code is free: you can redistribute it and/or modify it under the terms
of the GNU General Public License.*/
#include <ctype.h>
#include <cuda_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// catch the error thrown by CUDA
// Abort with a descriptive message if any prior CUDA call left an error
// pending (kernel launches report failures asynchronously).
void checkCUDAError(const char *msg) {
  cudaError_t status = cudaGetLastError();
  if (status != cudaSuccess) {
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(-1);
  }
}
// implement periodic boundary condition
// Wrap a displacement into the minimum-image range [-box_len/2, box_len/2]
// for periodic boundary conditions.
__host__ __device__ float pbc(float x, float box_len) {
  float half_len = box_len * 0.5;
  if (x > half_len)
    return x - box_len;
  if (x < -half_len)
    return x + box_len;
  return x;
}
// random number generator [0.0-1.0)
// Pseudo-random float in [0.0, 1.0] drawn from the C library rand().
float R2S() {
  return (float)rand() / (float)RAND_MAX;
}
// initially generate the position and mass of particles
// Randomly place np particles inside the periodic box, rejecting any
// placement closer than min_dis to an existing particle; assign each a
// random type (1.0 = A, 2.0 = B) in r[i].w and unit mass in v[i].w.
void init(unsigned int np, float4 *r, float4 *v, float3 box, float min_dis) {
  for (unsigned int i = 0; i < np; i++) {
    bool find_pos = false;
    float4 ri;
    while (!find_pos) {
      ri.x = (R2S() - 0.5) * box.x;
      ri.y = (R2S() - 0.5) * box.y;
      ri.z = (R2S() - 0.5) * box.z;
      find_pos = true;
      for (unsigned int j = 0; j < i; j++) {
        float dx = pbc(ri.x - r[j].x, box.x);
        float dy = pbc(ri.y - r[j].y, box.y);
        float dz = pbc(ri.z - r[j].z, box.z);
        // FIX: this local used to be named `r`, shadowing the particle
        // array parameter later in the scope; renamed to `dist`.
        float dist = sqrt(dx * dx + dy * dy + dz * dz);
        // a minimum safe distance to avoid the overlap of LJ particles
        if (dist < min_dis) {
          find_pos = false;
          break;
        }
      }
    }
    // randomly generate the type of particle, 1.0 represent type A and 2.0
    // represent type B
    ri.w = R2S() > 0.5 ? 1.0 : 2.0;
    r[i] = ri;
    v[i].w = 1.0;  // unit mass, stored in the velocity's w component
  }
}
// first step integration of velocity verlet algorithm
// First velocity-Verlet half step: v += 0.5*dt*f/m, then r += dt*v, with
// positions wrapped back into the periodic box. One thread per particle.
extern "C" __global__ void first_integration_kernel(unsigned int np, float dt,
                                                    float3 box, float4 *r,
                                                    float4 *v, float4 *f) {
  // calculate the global index of thread
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < np) {
    float4 ri = r[i];
    float mass = v[i].w;  // mass is stored in the velocity's w component
    // half-kick: advance velocity by half a step using the current force
    v[i].x += 0.5 * dt * f[i].x / mass;
    v[i].y += 0.5 * dt * f[i].y / mass;
    v[i].z += 0.5 * dt * f[i].z / mass;
    // drift: advance position a full step with the updated velocity
    ri.x += dt * v[i].x;
    ri.y += dt * v[i].y;
    ri.z += dt * v[i].z;
    // wrap back into the primary periodic image
    r[i].x = pbc(ri.x, box.x);
    r[i].y = pbc(ri.y, box.y);
    r[i].z = pbc(ri.z, box.z);
  }
}
// Host wrapper for the first velocity-Verlet half step.
void first_integration(unsigned int np, float dt, float3 box, float4 *r,
                       float4 *v, float4 *f, unsigned int nthreads) {
  dim3 block(nthreads, 1, 1);
  dim3 grid((np / nthreads) + 1, 1, 1);  // one extra block covers the remainder
  first_integration_kernel<<<grid, block>>>(np, dt, box, r, v, f);
  // block until the device has completed, then surface any kernel error
  cudaDeviceSynchronize();
  checkCUDAError("Kernel execution");
}
// non-bonded force calculation
// Compute LJ pair forces and per-particle potential for all pairs within
// the cutoff, streaming all particle positions through shared memory one
// block-sized tile at a time. Launch with dynamic shared memory of
// blockDim.x * sizeof(float4). f[i] receives (fx, fy, fz, potential).
extern "C" __global__ void force_calculation_kernel(unsigned int np, float3 box,
                                                    float3 lj1, float3 lj2,
                                                    float4 *r, float4 *f,
                                                    float rcutsq) {
  // declare an shared array: one float4 slot per thread of the block
  extern __shared__ float4 spos[];
  // declare i for global thread index
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  float4 force = make_float4(0.0, 0.0, 0.0, 0.0);  // accumulates fx, fy, fz, epot
  float4 ri = make_float4(0.0, 0.0, 0.0, 0.0);
  // if index is less than the index of particles
  if (i < np)
    ri = r[i];
  // copy data from global memory to shared memory by one block of data at a
  // time
  for (int start = 0; start < np; start += blockDim.x) {
    float4 pos = make_float4(0.0, 0.0, 0.0, 0.0);
    // the thread with ID x will copy the data with global ID
    // (threadIdx.x + start)th to its block's shared memory
    if (start + threadIdx.x < np)
      pos = r[start + threadIdx.x];
    // first barrier: previous tile must be fully consumed before overwrite
    __syncthreads();
    spos[threadIdx.x] = pos;
    // second barrier: tile fully written before anyone reads it
    __syncthreads();
    // end_offset is the biggest offset of last block
    int end_offset = min(blockDim.x, np - start);
    if (i < np) {
      for (unsigned int offset = 0; offset < end_offset; offset++) {
        int j = start + offset;
        /* particles have no interactions with themselves */
        if (i == j)
          continue;
        float4 rj = spos[offset];
        /* calculated the shortest distance between particle i and j */
        float dx = pbc(ri.x - rj.x, box.x);
        float dy = pbc(ri.y - rj.y, box.y);
        float dz = pbc(ri.z - rj.z, box.z);
        // types are 1.0 or 2.0, so the sum selects the pair kind:
        // 2.0 -> AA, 3.0 -> AB/BA, 4.0 -> BB
        float type = ri.w + rj.w;
        float rsq = dx * dx + dy * dy + dz * dz;
        /* compute force and energy if within cutoff */
        if (rsq < rcutsq) {
          // NOTE(review): lj1_ij/lj2_ij remain uninitialized if type is not
          // exactly 2.0, 3.0 or 4.0 — safe only while ri.w/rj.w are 1.0/2.0.
          float lj1_ij, lj2_ij;
          if (type == 2.0) // i=1.0, j=1.0
          {
            lj1_ij = lj1.x;
            lj2_ij = lj2.x;
          } else if (type == 3.0) // i=1.0, j=2.0; or i=2.0, j=1.0
          {
            lj1_ij = lj1.y;
            lj2_ij = lj2.y;
          } else if (type == 4.0) // i=2.0, j=2.0
          {
            lj1_ij = lj1.z;
            lj2_ij = lj2.z;
          }
          // force transform to float is necessary here
          // float calculation is much faster than double
          float r2inv = float(1.0) / rsq;
          float r6inv = r2inv * r2inv * r2inv;
          float ffac = r2inv * r6inv *
                       (float(12.0) * lj1_ij * r6inv - float(6.0) * lj2_ij);
          float epot = r6inv * (lj1_ij * r6inv - lj2_ij);
          force.x += ffac * dx;
          force.y += ffac * dy;
          force.z += ffac * dz;
          force.w += epot;
        }
      }
    }
  }
  if (i < np)
    f[i] = force;
}
// Host wrapper for the pair-force kernel; sizes the dynamic shared memory
// to one float4 per thread.
void force_calculation(unsigned int np, float3 box, float3 lj1, float3 lj2,
                       float4 *r, float4 *f, float rcutsq,
                       unsigned int nthreads) {
  dim3 block(nthreads, 1, 1);
  dim3 grid((np / nthreads) + 1, 1, 1);  // one extra block covers the remainder
  unsigned int shared_bytes = nthreads * sizeof(float4);
  force_calculation_kernel<<<grid, block, shared_bytes>>>(np, box, lj1, lj2, r,
                                                          f, rcutsq);
  cudaDeviceSynchronize();               // wait, then surface any kernel error
  checkCUDAError("Kernel execution");
}
// second step integration of velocity verlet algorithm
// Second velocity-Verlet half step: v += 0.5*dt*f/m with the new forces.
__global__ void second_integration_kernel(unsigned int np, float dt, float4 *v,
                                          float4 *f) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < np) {
    float4 vi = v[idx];
    float4 fi = f[idx];
    float mass = vi.w;  // mass lives in the velocity's w component
    vi.x += 0.5 * dt * fi.x / mass;
    vi.y += 0.5 * dt * fi.y / mass;
    vi.z += 0.5 * dt * fi.z / mass;
    v[idx] = vi;  // w (mass) written back unchanged
  }
}
// Host wrapper for the second velocity-Verlet half step.
void second_integration(unsigned int np, float dt, float4 *v, float4 *f,
                        unsigned int nthreads) {
  dim3 block(nthreads, 1, 1);
  dim3 grid((np / nthreads) + 1, 1, 1);  // one extra block covers the remainder
  second_integration_kernel<<<grid, block>>>(np, dt, v, f);
  cudaDeviceSynchronize();               // wait, then surface any kernel error
  checkCUDAError("Kernel execution");
}
// system information collection for temperature, kinetic energy, potential and
// total energy
// Stage 1 of the thermo reduction: each thread folds two particles'
// kinetic term (m*v^2) and potential (f.w) into shared memory, then a
// tree reduction leaves the block's partial sums in scratch[blockIdx.x].
// Launch with dynamic shared memory of blockDim.x * sizeof(float2);
// each block covers 2*blockDim.x particles.
__global__ void compute_info_threads(unsigned int np, float4 *v, float4 *f,
                                     float2 *scratch) {
  extern __shared__ float2 sdata[];
  int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
  float2 temp = make_float2(0.0, 0.0);
  if (i < np) {
    float4 vi = v[i];
    temp.x = vi.w * (vi.x * vi.x + vi.y * vi.y + vi.z * vi.z);  // m*v^2
    temp.y = f[i].w;  // per-particle potential from the force kernel
  }
  // second particle handled by this thread, blockDim.x further along
  if (i + blockDim.x < np) {
    float4 vi = v[i + blockDim.x];
    temp.x += vi.w * (vi.x * vi.x + vi.y * vi.y + vi.z * vi.z);
    temp.y += f[i + blockDim.x].w;
  }
  sdata[threadIdx.x] = temp;
  __syncthreads();
  // divide and rule: pairwise tree reduction in shared memory
  int offset = blockDim.x >> 1;
  while (offset > 0) {
    if (threadIdx.x < offset) {
      sdata[threadIdx.x].x += sdata[threadIdx.x + offset].x;
      sdata[threadIdx.x].y += sdata[threadIdx.x + offset].y;
    }
    offset >>= 1;
    __syncthreads();
  }
  // thread 0 publishes the block's partial sums
  if (threadIdx.x == 0) {
    scratch[blockIdx.x].x = sdata[0].x;
    scratch[blockIdx.x].y = sdata[0].y;
  }
}
// Stage 2 of the thermo reduction: a single block collapses the per-block
// partial sums in `scratch` into temperature, potential and total energy,
// written to info[0..2]. Loops over scratch in chunks of 2*blockDim.x;
// launch with dynamic shared memory of blockDim.x * sizeof(float2).
__global__ void compute_info_blocks(unsigned int np, float *info,
                                    float2 *scratch, unsigned int nblocks) {
  extern __shared__ float2 sdata[];
  float2 final_sum = make_float2(0.0, 0.0);
  for (int start = 0; start < nblocks; start += blockDim.x * 2) {
    float2 temp = make_float2(0.0, 0.0);
    if (start + threadIdx.x < nblocks) {
      temp.x = scratch[start + threadIdx.x].x;
      temp.y = scratch[start + threadIdx.x].y;
      if (start + threadIdx.x + blockDim.x < nblocks) {
        // BUGFIX: these used plain `=`, overwriting the first element's
        // contribution instead of accumulating the second one.
        temp.x += scratch[start + threadIdx.x + blockDim.x].x;
        temp.y += scratch[start + threadIdx.x + blockDim.x].y;
      }
    }
    sdata[threadIdx.x] = temp;
    __syncthreads();
    // pairwise tree reduction in shared memory
    int offset = blockDim.x >> 1;
    while (offset > 0) {
      if (threadIdx.x < offset) {
        sdata[threadIdx.x].x += sdata[threadIdx.x + offset].x;
        sdata[threadIdx.x].y += sdata[threadIdx.x + offset].y;
      }
      offset >>= 1;
      __syncthreads();
    }
    if (threadIdx.x == 0) {
      final_sum.x += sdata[0].x;
      final_sum.y += sdata[0].y;
    }
  }
  if (threadIdx.x == 0) {
    float ekin = 0.5 * final_sum.x;       // kinetic energy = 0.5 * sum(m*v^2)
    float potential = 0.5 * final_sum.y;  // halve the double-counted pair energy
    unsigned int nfreedom = 3 * np - 3;   // subtract center-of-mass DOF
    float temper = 2.0 * ekin / float(nfreedom);
    float energy = ekin + potential;
    info[0] = temper;
    info[1] = potential;
    info[2] = energy;
  }
}
// Two-stage reduction driver: per-block partials into scratch, then a
// single 512-thread block folds scratch into
// info = {temperature, potential, total energy}.
void compute_info(unsigned int np, float4 *v, float4 *f, float *info,
                  float2 *scratch, unsigned int nthreads,
                  unsigned int nblocks) {
  dim3 grid(nblocks, 1, 1);
  dim3 block(nthreads >> 1, 1, 1);  // each thread folds two particles
  unsigned int shared_bytes = sizeof(float2) * nthreads >> 1;
  compute_info_threads<<<grid, block, shared_bytes>>>(np, v, f, scratch);
  cudaDeviceSynchronize();
  checkCUDAError("kernel execution");
  // FIX: removed a dead loop that computed log2(nblocks) into a local
  // variable that was never read.
  unsigned int final_nthreads = 512;
  shared_bytes = sizeof(float2) * final_nthreads;
  grid = dim3(1, 1, 1);
  block = dim3(final_nthreads, 1, 1);
  compute_info_blocks<<<grid, block, shared_bytes>>>(np, info, scratch,
                                                     nblocks);
  cudaDeviceSynchronize();
  checkCUDAError("kernel execution");
}
// output system information and frame in XYZ formation which can be read by VMD
// Append one frame in XYZ format (readable by VMD) plus thermo data:
// info[0]=temperature, info[1]=potential, info[2]=total energy.
void output(FILE *traj, unsigned int step, float *info, float4 *r,
            unsigned int np) {
  fprintf(traj, "%d\n step=%d temp=%20.8f pot=%20.8f ener=%20.8f\n", np,
          step, info[0], info[1], info[2]);
  for (unsigned int i = 0; i < np; i++) {
    float4 pos = r[i];
    // particle type lives in w: 1.0 is species A, 2.0 is species B
    if (pos.w == 1.0)
      fprintf(traj, "A %20.8f %20.8f %20.8f\n", pos.x, pos.y, pos.z);
    else if (pos.w == 2.0)
      fprintf(traj, "B %20.8f %20.8f %20.8f\n", pos.x, pos.y, pos.z);
  }
}
// main function
// Driver: build a random binary LJ system, run NVE velocity-Verlet MD on
// the GPU, periodically dumping XYZ frames and thermo data, and report
// the total wall-clock time.
int main(int argc, char **argv) {
  // running parameters
  unsigned int np = 2700;    // the number of particles
  unsigned int nsteps = 500; // the number of time steps
  float dt = 0.001;          // integration time step
  float rcut = 3.0;          // the cutoff radius of interactions
  // float temperature = 1.0;// target temperature
  unsigned int nprint = 100; // period for data output
  timeval start;             // start time
  timeval end;               // end time
  // box size in x, y, and z directions
  float3 box = make_float3(15.0, 15.0, 15.0);
  // epsilon.x for type 1.0 and 1.0; epsilon.y for
  // type 1.0 and 2.0; epsilon.z for type 2.0 and 2.0
  float3 epsilon = make_float3(1.0, 0.5, 1.0);
  // sigma.x for type 1.0 and 1.0; sigma.y for
  // type 1.0 and 2.0; sigma.z for type 2.0 and 2.0
  float3 sigma = make_float3(1.0, 1.0, 1.0);
  // the minimum distance between particles for system generation
  float min_dis = sigma.x * 0.9;
  float3 lj1, lj2;
  // precompute LJ prefactors: lj1 = 4*eps*sigma^12, lj2 = 4*eps*sigma^6
  lj1.x = 4.0 * epsilon.x * pow(sigma.x, int(12));
  lj1.y = 4.0 * epsilon.y * pow(sigma.y, int(12));
  lj1.z = 4.0 * epsilon.z * pow(sigma.z, int(12));
  lj2.x = 4.0 * epsilon.x * pow(sigma.x, int(6));
  lj2.y = 4.0 * epsilon.y * pow(sigma.y, int(6));
  lj2.z = 4.0 * epsilon.z * pow(sigma.z, int(6));
  // announce GPU device ID
  cudaSetDevice(0);
  // number of threads per block
  unsigned int nthreads = 64;
  // number of blocks
  unsigned int nblocks = (int)ceil((float)np / (float)nthreads);
  // memory size
  size_t memSize = np * sizeof(float4);
  // host memory allocation
  float4 *r = (float4 *)malloc(memSize); // rx, ry, rz, type(0, 1, 2 ...)
  float4 *v = (float4 *)malloc(memSize); // vx, vy, vz, mass
  float4 *f = (float4 *)malloc(memSize); // fx, fy, fz, potential
  float *info =
      (float *)malloc(16 * sizeof(float)); // temperature, potential, energy ...
  float4 *r_d = NULL;
  float4 *v_d = NULL;
  float4 *f_d = NULL;
  float *info_d = NULL;
  float2 *scratch_d = NULL;
  // memory allocation in GPU memory
  cudaMalloc((void **)&r_d, memSize);
  cudaMalloc((void **)&v_d, memSize);
  cudaMalloc((void **)&f_d, memSize);
  cudaMalloc((void **)&info_d, 16 * sizeof(float));
  // BUGFIX: scratch_d holds one float2 per block but was allocated with
  // sizeof(float), giving only half the required bytes and letting
  // compute_info_threads write past the end of the buffer.
  cudaMalloc((void **)&scratch_d, nblocks * sizeof(float2));
  // trajectory file in XYZ format that can be open by VMD
  FILE *traj = fopen("traj.cu.xyz", "w");
  /* generate system information */
  printf("Starting simulation with %d atoms for %d steps.\n", np, nsteps);
  printf("Generating system.\n");
  // initiate some particles in box
  init(np, r, v, box, min_dis);
  // copy location and velocity into GPU memory
  cudaMemcpy(r_d, r, memSize, cudaMemcpyHostToDevice);
  cudaMemcpy(v_d, v, memSize, cudaMemcpyHostToDevice);
  checkCUDAError("cudaMemcpy");
  // get start time
  gettimeofday(&start, NULL);
  /* main MD loop */
  printf("Running simulation.\n");
  for (unsigned int step = 0; step <= nsteps; step++) // running simulation loop
  {
    /* first integration for velverlet */
    first_integration(np, dt, box, r_d, v_d, f_d, nthreads);
    /* force calculation */
    force_calculation(np, box, lj1, lj2, r_d, f_d, rcut * rcut, nthreads);
    /* compute temperature and potential */
    compute_info(np, v_d, f_d, info_d, scratch_d, nthreads, nblocks);
    /* second integration for velverlet */
    second_integration(np, dt, v_d, f_d, nthreads);
    /* write output frames and system information, if requested */
    if ((step % nprint) == 0) {
      cudaMemcpy(r, r_d, memSize, cudaMemcpyDeviceToHost);
      cudaMemcpy(info, info_d, 16 * sizeof(float), cudaMemcpyDeviceToHost);
      checkCUDAError("cudaMemcpy");
      output(traj, step, info, r, np);
      printf("time step %d \n", step);
    }
  }
  gettimeofday(&end, NULL); // get end time
  long timeusr =
      (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
  printf("time is %ld microseconds\n",
         timeusr); // the spending time on simulation in microseconds
  // free memories and close files
  fclose(traj);
  free(r);
  free(v);
  free(f);
  free(info);
  cudaFree(r_d);
  cudaFree(v_d);
  cudaFree(f_d);
  cudaFree(info_d);
  cudaFree(scratch_d);
  return 0;
}
|
8,732 | #include "includes.h"
// Mark the k nearest neighbours of each column in a dense n x n adjacency
// matrix, symmetrically: both (col, nn) and (nn, col) entries are set to 1.
// nn is laid out with a row stride of n; one thread per column.
__global__ void ComputeAdjacencyMatrix(float* dOut, int* nn, int n, int k)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= n)
        return;
    const int* neighbours = &nn[col * n];
    for (int i = 0; i < k; ++i)
    {
        int row = neighbours[i];
        dOut[col * n + row] = 1.0f;  // directed edge col -> neighbour
        dOut[col + n * row] = 1.0f;  // and its mirror image
    }
}
8,733 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define BLOCK_SIZE 8
#define M 2560
#define K 2560
#define N 2560
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Abort the program with file/line context when a CUDA runtime call fails.
// Invoked through the checkCudaErrors() macro, which supplies __FILE__/__LINE__.
inline void __checkCudaErrors(cudaError_t err, const char *file, const int line )
{
    // Fix: compare against the runtime-API enum cudaSuccess rather than the
    // driver-API CUDA_SUCCESS — `err` is a cudaError_t from the runtime API.
    // (Both constants happen to be 0, but mixing the enums is incorrect.)
    if( cudaSuccess != err) {
        fprintf(stderr,
                "CUDA Driver API error = %04d from file <%s>, line %i.\n",
                err, file, line );
        exit(-1);
    }
}
/*
typedef struct {
int height;
int width;
float *elements;
} Matrix;
*/
// Row-major dense matrix: element (r, c) lives at elements[r * width + c].
struct Matrix {
    int height;      // number of rows
    int width;       // number of columns
    float *elements; // row-major storage, height * width floats (host or device)
};
// Naive GEMM: each thread computes one element C[row][col] = dot(A row, B col).
// Thread mapping: x dimension -> row, y dimension -> column.
// Fix: added a bounds guard so launches whose grid overshoots the matrix
// dimensions no longer read/write out of bounds.
__global__ void MatMulKernel(const Matrix A, const Matrix B, Matrix C) {
    float cValue = 0.;
    int row = threadIdx.x + blockIdx.x * blockDim.x;
    int col = threadIdx.y + blockIdx.y * blockDim.y;
    // Threads outside the output matrix must not touch global memory.
    if (row >= C.height || col >= C.width) return;
    for (int i = 0; i < A.width; ++i) {
        cValue += A.elements[row * A.width + i] * B.elements[i * B.width + col];
    }
    C.elements[row * C.width + col] = cValue;
}
// Multiply A * B into C on the GPU.
// Allocates device copies of all three matrices, uploads the inputs, launches
// the naive kernel, and copies the product back (the blocking cudaMemcpy also
// synchronizes with the kernel). Device buffers are released before returning.
// NOTE(review): the grid uses truncating division, so A.height and B.width are
// assumed to be exact multiples of BLOCK_SIZE — confirm with callers.
void MatMulGPU(const Matrix A, const Matrix B, Matrix C) {
    Matrix d_A, d_B, d_C;
    d_A.width = A.width; d_A.height = A.height;
    d_B.width = B.width; d_B.height = B.height;
    d_C.width = C.width; d_C.height = C.height;
    size_t size_A = A.width * A.height * sizeof(float);
    size_t size_B = B.width * B.height * sizeof(float);
    size_t size_C = C.width * C.height * sizeof(float);
    checkCudaErrors(cudaMalloc(&d_A.elements, size_A));
    checkCudaErrors(cudaMalloc(&d_B.elements, size_B));
    checkCudaErrors(cudaMalloc(&d_C.elements, size_C));
    checkCudaErrors(cudaMemcpy(d_A.elements, A.elements, size_A, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_B.elements, B.elements, size_B, cudaMemcpyHostToDevice));
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // grid.x covers rows (the kernel maps x -> row), grid.y covers columns
    dim3 dimGrid(A.height / dimBlock.x, B.width / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Fix: kernel launches do not return a status; surface launch errors here.
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaMemcpy(C.elements, d_C.elements, size_C, cudaMemcpyDeviceToHost));
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Host reference implementation: C = A * B, all matrices row-major.
void MatMulCPU(const Matrix A, const Matrix B, Matrix C) {
    for (int row = 0; row < C.height; ++row) {
        for (int col = 0; col < C.width; ++col) {
            float acc = 0.;
            for (int k = 0; k < A.width; ++k) {
                acc += A.elements[row * A.width + k] * B.elements[k * B.width + col];
            }
            C.elements[row * C.width + col] = acc;
        }
    }
}
// Driver: builds two constant matrices (A filled with 0.1, B with 0.2),
// multiplies them on the GPU, and releases the host buffers. The product
// values themselves are not inspected (printing is left commented out in
// the original and omitted here).
int main(void)
{
    Matrix h_A, h_B, h_C;

    h_A.height = M;
    h_A.width  = K;
    h_A.elements = (float *) malloc(M * K * sizeof(float));

    h_B.height = K;
    h_B.width  = N;
    h_B.elements = (float *) malloc(K * N * sizeof(float));

    h_C.height = M;
    h_C.width  = N;
    h_C.elements = (float *) malloc(M * N * sizeof(float));

    for (int i = 0; i < M * K; ++i) h_A.elements[i] = 0.1;
    for (int i = 0; i < K * N; ++i) h_B.elements[i] = 0.2;

    MatMulGPU(h_A, h_B, h_C);

    free(h_A.elements);
    free(h_B.elements);
    free(h_C.elements);
    return 0;
}
|
8,734 | #include <stdio.h>
// Enumerate every CUDA device and print its key hardware properties.
int main() {
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    printf("Found %d devices\n", deviceCount);
    for (int device = 0; device < deviceCount; device++) {
        cudaDeviceProp devProp;   // refreshed per device
        cudaGetDeviceProperties(&devProp, device);
        printf("Device %d\n", device);
        printf("Compute capability : %d.%d\n", devProp.major, devProp.minor);
        printf("Name : %s\n", devProp.name);
        printf("Total Global Memory : %zu\n", devProp.totalGlobalMem);
        printf("Shared memory per block : %zu\n", devProp.sharedMemPerBlock);
        printf("Registers per block : %d\n", devProp.regsPerBlock);
        printf("Warp size : %d\n", devProp.warpSize);
        printf("Max threads per block : (%d, %d, %d)\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
        printf("Max block : (%d, %d, %d)\n", devProp.maxGridSize[0], devProp.maxGridSize[1], devProp.maxGridSize[2]);
        printf("Total constant memory : %zu\n", devProp.totalConstMem);
        printf("Multiprocessors count : %d\n", devProp.multiProcessorCount);
    }
    return 0;
}
|
8,735 | /*
*/
#include <stdio.h>
// Each thread cubes one element: d_out[i] = d_in[i]^3.
// Indexing uses threadIdx.x only, so a single block must cover the array.
__global__ void cube(float * d_out, float * d_in){
    int i = threadIdx.x;
    float v = d_in[i];
    d_out[i] = v * v * v;
}
// Entry point is intentionally empty: the cube kernel above is never launched.
int main(int argc, char ** argv) {
}
|
8,736 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define DEFAULT_ROW 16384
#define DEFAULT_COL 16384
// time stamp function in seconds
// Wall-clock timestamp in seconds (microsecond resolution) from gettimeofday.
double getTimeStamp() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec + (double) now.tv_usec / 1000000;
}
// host side matrix addition
// Host reference: element-wise C = A + B over an ny x nx row-major matrix.
void h_addmat(float *A, float *B, float *C, int nx, int ny){
    for (int row = 0; row < ny; row++){
        int base = row * nx;
        for (int col = 0; col < nx; col++){
            C[base + col] = A[base + col] + B[base + col];
        }
    }
}
//host side matrix comparison
// Compare two float buffers element-by-element (exact equality).
// Returns 1 at the first mismatch (printing details under DEBUG),
// or 0 when all noElems entries agree.
int h_compareResult(float *h_C, float *d_C, int noElems){
    for (int i = 0; i < noElems; i++){
        if (h_C[i] != d_C[i]){
#ifdef DEBUG
            printf("the i = %d\n", i);
            printf("the data of CPU is %.6f\n", h_C[i]);
            printf("the data of GPU is %.6f\n", d_C[i]);
#endif
            return 1;
        }
    }
    return 0;
}
// device-side matrix addition
// Device-side matrix addition: one thread per element of an ny x nx
// row-major matrix, C = A + B. Out-of-range threads do nothing.
__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col < nx && row < ny) {
        int idx = row * nx + col;
        C[idx] = A[idx] + B[idx];
    }
}
// device-side matrix addition
// Device-side matrix addition where each in-range thread adds FOUR elements.
// The four elements handled by one thread are spaced mode_number apart in the
// flat array: idx, idx + mode_number, idx + 2*mode_number, idx + 3*mode_number.
// The caller passes ny as (total rows / 4) and mode_number as
// (elements per stream / 4), so the four strided chunks tile the whole slab.
// NOTE(review): no bounds check on `index` itself — correctness relies on the
// caller sizing the grid so every strided index stays in range; verify.
__global__ void f_addmat4( float *A, float *B, float *C, int nx, int ny, int mode_number ){
    // kernel code might look something like this
    // but you may want to pad the matrices and index into them accordingly
    int ix = threadIdx.x + blockIdx.x*blockDim.x ;
    int iy = threadIdx.y + blockIdx.y*blockDim.y ;
    int idx = iy*nx + ix ;   // flat index of this thread's first element
    if( (ix<nx) && (iy<ny) ){
        int i;
        int index;
        for (i = 0; i< 4; i++){
            // compute 4 element in this thread.
            index = idx + i * mode_number;
            //if (index >1000 && i == 3) printf("the addition when idenx = %d in device: %.6f + %.6f = %.6f\n",index,A[idx],B[idx],C[idx]);
            C[index] = A[index] + B[index] ;
        }
    }
    //if (idx == 0) printf("the addition in device: %.6f + %.6f = %.6f\n",A[idx],B[idx],C[idx]);
}
/*
void initData(float* add, int new_nx, int block_x, int nx, int ny){
int row,col;
float a = 5.0;
for (row=0; row< ny; row++){
for (col=0; col< new_nx; col++){
if (row == ny -1){ // last block
if (col%8 < (8 - nx%8)) *(add++) = ((float)rand()/(float)(RAND_MAX)) * a;
else *(add++) = 0;
}
else{
if (col%8 < block_x ) *(add++) = ((float)rand()/(float)(RAND_MAX)) * a;
else *(add++) = 0;
}
}
}
}
// host side matrix addition
void h_addmat(float *A, float *B, float *C, int nx, int ny, int new_nx, int block_x){
float* ia = A, *ib =B, *ic =C;
for (int iy =0; iy<ny; iy++){
for (int ix =0; ix<new_nx; ix++){
if (iy== ny -1){ // last block
if (ix%8 < (8 - nx%8)) *(ic++) = *(ia++) + *(ib++);
else{
ia++;
ib++;
}
}
else{
if (ix%8 < block_x ) *(ic++) = *(ia++) + *(ib++);
else{
ia++;
ib++;
}
}
//if (iy*nx + ix == 67133440) printf("the addition in host: %.6f + %.6f = %.6f\n",ia[ix],ib[ix],ic[ix]);
}
}
}
void removePading(float* h_dC, float* h_temp_dC, int nx, int ny, int new_nx, int block_x){
int row,col;
int count=0;
float *r_padding = h_temp_dC, *r = h_dC;
for (row=0; row< ny; row++){
for (col=0; col< new_nx; col++){
if (row == ny -1){ // last block
if (col%8 < (8 - nx%8)){
r[count] = r_padding[row * new_nx + col];
count ++;
}
}
else{
if (col%8 < block_x ){
r[count] = r_padding[row * new_nx + col];
count ++;
}
}
}
}
}
*/
// Fill noElems floats with pseudo-random values in [0, 5] using rand().
void initData(float* add, int noElems){
    const float a = 5.0;   // scale factor for the [0, 1] rand() fraction
    for (int i = 0; i < noElems; i++){
        add[i] = ((float)rand()/(float)(RAND_MAX)) * a;
    }
}
// Streamed matrix addition benchmark.
// Computes C = A + B for an ny x nx float matrix three ways worth of timing:
// total wall time, setup time, accumulated kernel time, and post-loop drain
// time. Work is split across CUDA streams; each stream asynchronously uploads
// a slab, runs f_addmat4 (4 elements per thread), and downloads the result.
// The GPU result is then compared element-for-element with the host result.
// NOTE: offsets divide byte counts by 4, assuming sizeof(float) == 4.
int main(int argc, char* argv[]){
    if(argc != 3){
        printf("Error: wrong number of argument\n");
        exit(0);
    }
    int nx = atoi(argv[1]);
    int ny = atoi(argv[2]);
    // do the input argument check.
    if(nx<=0 || ny<= 0){
        printf("Error: input arguement can't be zero or negative\n");
        exit(0);
    }
    int noElems = nx * ny;
    int bytes = noElems * sizeof(float);
#ifdef DEBUG
    printf("the input row # is %d\n",nx);
    printf("the input col # is %d\n",ny);
    printf("the noElems is %d\n",noElems);
    printf("the bytes is %d\n",bytes);
#endif
    // according to input dimension and GPU limitation, calculate the minmum ny;
    // (grid.y is capped at 65535, so find the smallest block_y that fits)
    int block_x, block_y, min_blocky = 1;
    while ((ny + min_blocky-1)/min_blocky > 65535){
        min_blocky ++;
    }
    block_y = min_blocky;
    // according to minimum block_y and max of 1024 threads per block, calculate the maximum nx;
    block_x = 1024 / block_y;
    // alloc memeory host-side
    float *h_A;
    float *h_B;
    float *h_dC;
    float *h_hC = (float*) malloc(bytes); // host result
    //pin memeory in host side (pinned pages enable cudaMemcpyAsync overlap)
    cudaHostAlloc((void**)&h_A, bytes, 0);
    cudaHostAlloc((void**)&h_B, bytes, 0);
    cudaHostAlloc((void**)&h_dC, bytes, 0);
    // init matrices with random data
    initData(h_A, noElems);
    initData(h_B, noElems);
    //alloc memeory device-side
    float *d_A, *d_B, *d_C;
    cudaMalloc( &d_A, bytes);
    cudaMalloc( &d_B, bytes);
    cudaMalloc( &d_C, bytes);
    // getting host side result
    h_addmat( h_A, h_B, h_hC, nx, ny) ;
    int i;
    // calculating minimum bytes each Stream should take according to the calculated block_y
    int minimumBytesPerStream = nx * sizeof(float) * 4 * block_y;
    while (minimumBytesPerStream < 4194304*16){ // 4194304 is when 1024(thread) * 2 (blocks/SMS) * 16 (SMS) * 4 (sizeof(Float)) * 2 (Two float number required for addition), we want data transfer is multiple of this number
        minimumBytesPerStream = minimumBytesPerStream * 2;
    }
    // yPerStream is mutiple of 4 so every thread can process 4 different y in one stream
    int yPerStream = minimumBytesPerStream/ nx;
    // calculating bytes each Stream according to the calculated yPerStream
    int bytesPerStream = nx * sizeof(float) * yPerStream;
    // calculating number of Streams according to the calculated bytesPerStream
    int NSTREAMS = bytes/bytesPerStream;
    // if there is data remain where they are not multiple of bytesPerStream
    int remainBytes = bytes%bytesPerStream;
    // initialize the stream array (index 0 unused; streams are 1..NSTREAMS)
    cudaStream_t stream[NSTREAMS+1];
    // input the pre-calculated block size and calculate the grid size
    dim3 block( block_x, block_y ) ; // you will want to configure this
    dim3 grid( (nx + block.x-1)/block.x, (bytesPerStream/(sizeof(float) * nx) + block.y-1)/block.y ) ;
#ifdef DEBUG
    printf("the final bytesPerStream is = %d\n", bytesPerStream);
    printf("the remainBytes is = %d\n", remainBytes);
    printf("the final block size is x = %d and y = %d \n",block_x, block_y);
    printf("the final grid dimension is x = %d and y = %d \n",(nx + block_x-1)/block_x, (yPerStream + block.y-1)/block.y );
#endif
    // initialize the event for calculating accumulate kernel time.
    // NOTE: if we don't need to calculating the accumulate kernel time, the total time is at least 10% faster.
    // But kernel time is important to show.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    double timeStampA = getTimeStamp() ;
    double timeStampB= getTimeStamp() ;
    float milliseconds;
    float AccumulateKernelTime = 0;
    for(i = 1; i <=NSTREAMS; i++ ){
        // create stream
        cudaStreamCreate(&stream[i]);
        //calculating offset (element offset: byte offset / sizeof(float))
        int offset = (i-1) * bytesPerStream/4;
        //Asynch copy data from host to device
        cudaMemcpyAsync(&d_A[offset],&h_A[offset],bytesPerStream, cudaMemcpyHostToDevice, stream[i]);
        cudaMemcpyAsync(&d_B[offset],&h_B[offset],bytesPerStream, cudaMemcpyHostToDevice, stream[i]);
        //record the timestamp before kernel invoke
        cudaEventRecord(start);
        //invoke kernel
        f_addmat4<<<grid, block,0,stream[i]>>>( &d_A[offset], &d_B[offset], &d_C[offset], nx, bytesPerStream/(4* sizeof(float) * nx), bytesPerStream/(4* sizeof(float)) ) ;
        //record the timestamp before kernel invoke
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        // write down the difference
        cudaEventElapsedTime(&milliseconds, start, stop);
        // add this time to accumulated time
        AccumulateKernelTime += milliseconds/1000;
        //Asynch copy data from device back to host
        cudaMemcpyAsync(&h_dC[offset],&d_C[offset],bytesPerStream, cudaMemcpyDeviceToHost,stream[i]);
    }
    // if there is remaining byte, we do the process one more time
    if(remainBytes != 0){
        int remainEle = remainBytes/4;
        cudaStream_t last;
        cudaStreamCreate(&last);
        int offset = NSTREAMS * bytesPerStream/4;
        cudaMemcpyAsync(&d_A[offset],&h_A[offset],remainBytes, cudaMemcpyHostToDevice, last);
        cudaMemcpyAsync(&d_B[offset],&h_B[offset],remainBytes, cudaMemcpyHostToDevice, last);
        dim3 grid( (nx + block.x-1)/block.x, (remainEle/nx + block.y-1)/block.y ) ;
        cudaEventRecord(start);
        f_addmat<<<grid, block,0,last>>>( &d_A[offset], &d_B[offset], &d_C[offset], nx, remainEle/nx ) ;
        cudaEventRecord(stop);
        // NOTE(review): unlike the loop above, there is no
        // cudaEventSynchronize(stop) before reading the elapsed time here,
        // so the measured remainder-kernel time may be invalid — verify.
        cudaEventElapsedTime(&milliseconds, start, stop);
        AccumulateKernelTime += milliseconds/1000;
        cudaMemcpyAsync(&h_dC[offset],&d_C[offset],remainBytes, cudaMemcpyDeviceToHost,last);
        cudaStreamSynchronize(last);
    }
    double timeStampC = getTimeStamp() ;
    //wait for all stream finish the job
    for(i = 1; i <=NSTREAMS; i++ ){
        cudaStreamSynchronize(stream[i]);
    }
    cudaDeviceSynchronize() ;
    //time where device side jobs have been finished
    double timeStampD = getTimeStamp() ;
    // free some Host and GPU resources that are not needed anymore
    cudaFreeHost(h_A);
    cudaFreeHost(h_B);
    cudaFree( d_A ) ; cudaFree( d_B ) ; cudaFree( d_C ) ;
#ifdef DEBUG
    float * ptr;
    int n;
    ptr = h_dC;
    n = 0;
    ptr = ptr + n;
    printf("the data of GPU at index %d before comparison is %.6f\n", n,*(ptr));
#endif
    //h_compareResult compares the result computed by host and result computed by device
    //if any element is not same, the function will return 1, otherwise print out the time
    if (h_compareResult(h_hC,h_dC,noElems) == 1){
        printf("Error: the two results don't match\n");
    }
    else{
        //printf(" %.6f %.6f %.6f %.6f\n",timeStampD - timeStampA,timeStampB - timeStampA, AccumulateKernelTime, timeStampD - timeStampC );
        printf(" %.6f %.6f %.6f %.6f\n",timeStampD - timeStampA,timeStampB - timeStampA, AccumulateKernelTime, timeStampD - timeStampC );
    }
    // free rest Host Side Resources
    cudaFreeHost(h_dC);
    free(h_hC);
    cudaDeviceReset();
}
|
8,737 | #include<stdio.h>
// Each thread prints its full coordinate set: thread index, block index,
// block dimensions, and grid dimensions (debugging aid for launch geometry).
__global__ void print_indexs() {
    printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d, blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d, blockDim.x: %d, blockDim.y: %d, blockDim.z: %d, gridDim.x: %d, gridDim.y: %d, gridDim.z: %d\n",
    threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
// Launch a 2x2x2 grid of 8x8x8 blocks (covering a 16^3 index space) so each
// thread prints its coordinates, then wait for completion and reset the device.
int main() {
    const int dimX = 16;
    const int dimY = 16;
    const int dimZ = 16;
    dim3 block(8, 8, 8);
    dim3 grid(dimX / block.x, dimY / block.y, dimZ / block.z);
    print_indexs<<<grid, block>>>();
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
8,738 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<sys/time.h>
// Abort the process with a readable message when a CUDA call did not succeed.
void safe_call(cudaError_t ret)
{
    if (ret == cudaSuccess)
        return;             // nothing to report
    printf("Error : %s\n", cudaGetErrorString(ret));
    exit(-1);
}
// Populate arr[0..len) with uniform doubles in [0, 1) from drand48().
void fill_vec(double *arr, int len)
{
    for (int i = 0; i < len; i++)
        arr[i] = drand48();
}
// Device: element-wise c = a + b, one element per thread, guarded at len.
__global__ void vecvecadd(double *a, double *b, double *c, int len)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= len)
        return;
    c[idx] = a[idx] + b[idx];
}
/*
 * Driver: allocates host/device vectors, times the device vector addition
 * with CUDA events, and verifies the result element-by-element against the
 * host-side sum. (Exact double comparison is valid here: both sides perform
 * the same single IEEE-754 addition.)
 *
 * Fix: the grid-size round-up condition was inverted. The original added an
 * extra block only when veclen was an exact multiple of blockSize (harmless)
 * and launched too few blocks otherwise, leaving the tail of c uncomputed,
 * which made the verification loop read stale device memory.
 */
int main(int argc, char **argv)
{
    double *h_A, *h_B, *h_C;
    double *d_A, *d_B, *d_C;
    int veclen, i, blockSize, gridSize;
    cudaEvent_t start,stop;
    float diff;
    double time,gflops;
    if(argc!=3)
    {
        printf("Syntax : exec <veclen> <blocksize>\n");
        exit(-1);
    }
    veclen = atoi(argv[1]);
    blockSize = atoi(argv[2]);
    gridSize = veclen/blockSize;
    if(veclen%blockSize!=0)   /* round up only when a partial block remains */
        gridSize += 1;
    safe_call(cudaEventCreate(&start));
    safe_call(cudaEventCreate(&stop));
    h_A = (double *) malloc(veclen*sizeof(double));
    h_B = (double *) malloc(veclen*sizeof(double));
    h_C = (double *) malloc(veclen*sizeof(double));
    if(h_A==NULL || h_B==NULL || h_C==NULL)
    {
        printf("Error : host memory allocation\n");
        exit(-1);
    }
    safe_call(cudaMalloc((void **)&d_A, veclen*sizeof(double)));
    safe_call(cudaMalloc((void **)&d_B, veclen*sizeof(double)));
    safe_call(cudaMalloc((void **)&d_C, veclen*sizeof(double)));
    fill_vec(h_A,veclen);
    fill_vec(h_B,veclen);
    safe_call(cudaMemcpy((void *)d_A, (void *)h_A, veclen*sizeof(double), cudaMemcpyHostToDevice));
    safe_call(cudaMemcpy((void *)d_B, (void *)h_B, veclen*sizeof(double), cudaMemcpyHostToDevice));
    safe_call(cudaEventRecord(start, 0));
    vecvecadd<<<gridSize,blockSize>>>(d_A,d_B,d_C,veclen);
    safe_call(cudaEventRecord(stop, 0));
    safe_call(cudaEventSynchronize(stop));
    safe_call(cudaEventElapsedTime(&diff,start,stop));
    time = diff*1.0e-3;   /* milliseconds -> seconds */
    safe_call(cudaMemcpy((void *)h_C, (void *)d_C, veclen*sizeof(double), cudaMemcpyDeviceToHost));
    for(i=0;i<veclen;i++)
        if(h_C[i]!=(h_A[i]+h_B[i]))
        {
            printf("Error in calculation\n");
            exit(-1);
        }
    safe_call(cudaFree(d_A));
    safe_call(cudaFree(d_B));
    safe_call(cudaFree(d_C));
    free(h_A);
    free(h_B);
    free(h_C);
    gflops=(1.0e-9 * (( 1.0 *veclen )/time));   /* one add per element */
    printf("Success\nTime = %lfs\nGflops = %f\n",time,gflops);
    return 0;
}
|
8,739 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
// Verify that the file at `path` exists and is readable.
// Returns 0 on success; prints a diagnostic and returns -1 otherwise.
int csv_check(char* path)
{
    if (access(path, F_OK) != 0)
    {
        printf("Cannot find the file at %s\n", path);
        return -1;
    }
    if (access(path, R_OK) != 0)
    {
        printf("Cannot read the file at %s\n", path);
        return -1;
    }
    return 0;
}
/* this flie takes three parameters:
* char* file: path to the txt file formatted in csv
* int** output: pointer stores data in the csv file
* int* line: pointer to line count
*/
/* Parse a CSV text file of integer rows (e.g. "a,b,c").
 *
 * Parameters:
 *   file   - path to the CSV file
 *   output - array of row pointers; output[r] is calloc'd here (3 ints each,
 *            caller frees)
 *   line   - out: number of rows parsed
 * Returns 0 on success, -1 when the file is missing or unreadable.
 *
 * Fixes: the line buffer was set to NULL *before* free(), leaking it on every
 * call; fclose() ran even when fopen() had failed (fclose(NULL) is undefined
 * behavior); and rows with more than 3 fields overran the 3-int row buffer.
 * NOTE(review): fgets() with a 10-byte buffer splits longer rows into two —
 * confirm input rows always fit in 9 characters.
 */
int read_csv(char* file, int** output, int *line)
{
    if (csv_check(file))
    {
        // error detected
        printf("RuntimeError: Cannot access the file at %s\n", file);
        return -1;
    }
    *line = 0;
    FILE* f = fopen(file, "r");
    char *tmp = (char *)calloc(10, sizeof(char));
    char *token;
    if (f)
    {
        while (fgets(tmp, 10, f))
        {
            *(output + *line) = (int *)calloc(3, sizeof(int));
            token = strtok(tmp, ",");
            int i = 0;
            while (token != NULL && i < 3)   /* cap at 3 fields per row */
            {
                *(*(output + *line) + i) = atoi(token);
                token = strtok(NULL, ",");
                i++;
            }
            *line = *line + 1;
        }
        fclose(f);   /* only close a stream we actually opened */
    }
    free(tmp);       /* free the buffer itself (was nulled first, leaking it) */
    return 0;
}
/* Parse a CSV text file of integer triples into one flat array:
 * row r, column i lands at output[r * 3 + i]. `*line` receives the row count.
 * Returns 0 on success, -1 when the file is missing or unreadable.
 *
 * Fixes: atoi(NULL) crashed on rows with fewer than 3 fields (strtok returns
 * NULL early); the line buffer was nulled before free(), leaking it; and
 * fclose() ran even when fopen() had failed (fclose(NULL) is undefined).
 * NOTE(review): fgets() with a 10-byte buffer splits longer rows — confirm
 * input rows always fit in 9 characters.
 */
int read_csv_array(char* file, int* output, int *line)
{
    if (csv_check(file))
    {
        printf("RuntimeError: Cannot access the file at %s\n", file);
        return -1;
    }
    *line = 0;
    FILE* f = fopen(file, "r");
    char *tmp = (char *)calloc(10, sizeof(char));
    char *token;
    if (f)
    {
        while (fgets(tmp, 10, f))
        {
            token = strtok(tmp, ",");
            for (int i = 0; i < 3 && token != NULL; i++)   /* stop on short rows */
            {
                *(output + *line * 3 + i) = atoi(token);
                token = strtok(NULL, ",");
            }
            *line = *line + 1;
        }
        fclose(f);   /* only close a stream we actually opened */
    }
    free(tmp);       /* free the real buffer (was nulled first, leaking it) */
    return 0;
}
/* Write `size` integers to `file`, one per line.
 * Returns 0 on success, -1 when the file cannot be opened.
 *
 * Fix: the original called fclose(f) even when fopen() had returned NULL,
 * which is undefined behavior. */
int save(char* file, int* results, int size)
{
    FILE *f = fopen(file, "w");
    if (f == NULL)
    {
        return -1;   /* nothing opened, nothing to close */
    }
    for (int i = 0; i < size; i++)
    {
        fprintf(f, "%d\n", *(results + i));
    }
    fclose(f);
    return 0;
}
// an example of how to use this read_csv and save
/*
int main(){
char *test="../res/input_10000.txt";
int line = 0;
int **output = (int **)calloc(1000000, sizeof(int *));
read_csv(test, output, &line);
for(int i=0; i<line; i++){
printf("line %d: input1: %d, linput2: %d, type: %d\n", i, **(output+i), *(*(output+i)+1), *(*(output+i)+2));
}
printf("Total: %d\n", line);
output = NULL;
free(output);
int result[3] = {1,2,3};
save("./test.txt", result, 3);
return 0;
}
*/
|
8,740 | #include <stdio.h>
#include <time.h>
#include <cuda.h>
// Evaluate a CUDA runtime call; on failure print the error name with
// file/line context and abort. Implemented as a GNU statement expression
// (non-standard extension) so the macro yields the call's cudaError_t value.
#define CHKCUDA(val) ({ \
    cudaError_t v = (val); \
    if (v != cudaSuccess) { \
        const char *ename; \
        ename = cudaGetErrorName(v); \
        fprintf(stderr, "CUDA error %s(%d) at %s:%d\n", \
                ename, v, __FILE__, __LINE__); \
        exit(EXIT_FAILURE); \
    } \
    v;})
// Naive N x N integer matrix multiply: one thread per output element,
// flat 1-D indexing decomposed into (row, col). Surplus threads exit early.
__global__ void kmain(int N, int *A, int *B, int *C) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N * N)
        return;
    int row = idx / N;
    int col = idx % N;
    int acc = 0;
    for (int i = 0; i < N; i++) {
        acc += A[row * N + i] * B[i * N + col];
    }
    C[row * N + col] = acc;
}
/*
 * Benchmark driver: times device allocation, transfers, and the naive N x N
 * integer matrix multiply, then prints the elapsed wall time.
 *
 * Fixes: the block count used truncating division (N*N/512), launching too
 * few blocks and skipping the tail whenever N*N was not a multiple of 512;
 * the host buffers were copied to the device while still uninitialized
 * (indeterminate values); mallocs were unchecked.
 */
int main(int argc, char **argv) {
    int N;
    int *A, *B, *C;
    int blocks;
    int *dA, *dB, *dC;
    int asize;
    clock_t ts;
    N = (argc > 1) ? atoi(argv[1]) : 512;
    asize = N * N * sizeof (int);
    A = (int *)malloc(asize);
    B = (int *)malloc(asize);
    C = (int *)malloc(asize);
    if (A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    /* deterministic inputs (previously left uninitialized) */
    for (int i = 0; i < N * N; i++) {
        A[i] = i % 7;
        B[i] = i % 5;
    }
    printf("=== begin %s where N is %d === \n", argv[0], N);
    ts = clock();
    CHKCUDA(cudaMalloc(&dA, asize));
    CHKCUDA(cudaMalloc(&dB, asize));
    CHKCUDA(cudaMalloc(&dC, asize));
    CHKCUDA(cudaMemcpy(dA, A, asize, cudaMemcpyHostToDevice));
    CHKCUDA(cudaMemcpy(dB, B, asize, cudaMemcpyHostToDevice));
    CHKCUDA(cudaMemcpy(dC, C, asize, cudaMemcpyHostToDevice));
    /* round up so a partial final block covers the tail; the kernel's
     * bounds check discards the surplus threads */
    blocks = (N * N + 511) / 512;
    kmain<<<blocks, 512>>>(N, dA, dB, dC);
    CHKCUDA(cudaDeviceSynchronize());
    CHKCUDA(cudaMemcpy(A, dA, asize, cudaMemcpyDeviceToHost));
    CHKCUDA(cudaMemcpy(B, dB, asize, cudaMemcpyDeviceToHost));
    CHKCUDA(cudaMemcpy(C, dC, asize, cudaMemcpyDeviceToHost));
    CHKCUDA(cudaFree(dA));
    CHKCUDA(cudaFree(dB));
    CHKCUDA(cudaFree(dC));
    printf("=== finished %s in %d ms ===\n",
           argv[0],
           (int)((clock() - ts) / (CLOCKS_PER_SEC/1000)));
    free(A);
    free(B);
    free(C);
    return 0;
}
|
8,741 | #include <stdio.h>
#define SIZE 1024
// Flag even values: flags[i] = 1 when nums[i] is even, else 0 (one thread
// per element, single-block launch).
// Fix: the original test was `nums[i] & 1 == 0`; `==` binds tighter than `&`,
// so it evaluated nums[i] & (1 == 0), i.e. always 0 — no element was ever
// marked. The parenthesized form tests the low bit as intended.
__global__ void markEven(int* nums, int* flags)
{
    if((nums[threadIdx.x] & 1) == 0)
    {
        flags[threadIdx.x] = 1;
    }
    else
    {
        flags[threadIdx.x] = 0;
    }
}
// Exclusive prefix sum of nums into c_nums for a single block:
// c_nums[i] = nums[0] + ... + nums[i-1], with c_nums[0] = 0.
// Requires dynamic shared memory of blockDim.x ints.
// Fixes two defects in the original Hillis-Steele loop: the offset started
// at 2 (skipping the distance-1 pass, producing wrong sums), and each pass
// both read and wrote shared memory with no barrier in between (data race).
__global__ void scanSum(int* nums, int* c_nums)
{
    extern __shared__ int sh_nums[];
    int index = threadIdx.x;
    // shift right by one element to make the scan exclusive
    sh_nums[index] = (index > 0) ? nums[index - 1] : 0;
    __syncthreads();
    for (int offset = 1; offset < blockDim.x; offset <<= 1)
    {
        int addend = 0;
        int left = index - offset;
        if (left >= 0)
        {
            addend = sh_nums[left];
        }
        __syncthreads();        // all reads complete before any write
        sh_nums[index] += addend;
        __syncthreads();        // all writes complete before the next pass
    }
    c_nums[index] = sh_nums[index];
}
// Compaction scatter: threads whose flag is set copy their value into the
// output slot given by the scanned address array.
__global__ void scatterAddress(int* nums, int* flags, int* address, int* filtered)
{
    int tid = threadIdx.x;
    if (flags[tid] != 1)
        return;
    filtered[address[tid]] = nums[tid];
}
// Stream-compaction demo: mark even values of 0..SIZE-1, exclusive-scan the
// flags to get output addresses, scatter the flagged values into a compact
// array, and print it.
int main()
{
    int* nums;
    nums = (int*) malloc(sizeof(int) * SIZE);
    for(int i = 0; i < SIZE; i++)
    {
        nums[i] = i;
    }
    int* d_nums;
    cudaMalloc(&d_nums, sizeof(int) * SIZE);
    int* d_flags;
    cudaMalloc(&d_flags, sizeof(int) * SIZE);
    int* d_address;
    cudaMalloc(&d_address, sizeof(int) * SIZE);
    cudaMemcpy(d_nums, nums, sizeof(int) * SIZE, cudaMemcpyHostToDevice);
    // NOTE: the shared-memory size argument is unused by markEven (it
    // declares no shared memory); only scanSum needs it.
    markEven<<<1, SIZE, sizeof(int) * SIZE>>>(d_nums, d_flags);
    scanSum<<<1, SIZE, sizeof(int) * SIZE>>>(d_flags, d_address);
    int* filter_size;
    filter_size = (int*) malloc(sizeof(int));
    // NOTE(review): with an exclusive scan, address[SIZE-1] excludes the last
    // element's own flag, so the true count is address[SIZE-1] +
    // flags[SIZE-1]; as written the final flagged element may be dropped
    // from the printed output — verify.
    cudaMemcpy(filter_size, &d_address[SIZE - 1], sizeof(int), cudaMemcpyDeviceToHost);
    int* filtered;
    filtered = (int*) malloc(sizeof(int) * filter_size[0]);
    int* d_filtered;
    cudaMalloc(&d_filtered, sizeof(int) * filter_size[0]);
    scatterAddress<<<1, SIZE>>>(d_nums, d_flags, d_address, d_filtered);
    cudaMemcpy(filtered, d_filtered, sizeof(int) * filter_size[0], cudaMemcpyDeviceToHost);
    for(int i = 0; i < filter_size[0]; i++)
    {
        printf("%d \n", filtered[i]);
    }
    // NOTE(review): host allocations and device buffers are never freed.
}
|
8,742 | #include<stdio.h>
//compilar: nvcc hello.cu -o hello
__device__ const char *STR = "Hello world!";
const char STR_LENGHT = 12;
// Each thread prints one character of the device string STR, wrapping the
// thread index at STR_LENGHT so any launch width maps onto the 12 characters.
__global__ void hello(){
    printf("%c\n", STR[threadIdx.x % STR_LENGHT]);
}
// Launch a single block of 12 threads so each prints one character of the
// device string. The 2-D launch configuration below is retained from the
// original source but never used (its launch line was commented out there).
int main(void){
    int num_threads = STR_LENGHT;   // 12 threads, one per character
    int num_blocks = 2;             // retained, unused
    dim3 dimBlock(16,16);           // retained, unused
    dim3 dimGrid(32,32);            // retained, unused
    hello<<<1, num_threads>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
8,743 | #include <stdio.h>
/**
* Function Name:
* distance()
* distance() returns the Euclidean distance between two pixels. This function is executed on CUDA device
*
* @param[in] p1 coordinates of pixel one, p1[0] is for row number, p1[1] is for column number
* @param[in] p2 coordinates of pixel two, p2[0] is for row number, p2[1] is for column number
* @return return distance between p1 and p2
*/
__device__ float distance( int p1[], int p2[] ){
    // Implemented: the stub returned 0.0 despite the documented contract
    // (Euclidean distance between the two pixel coordinates, where [0] is
    // the row and [1] is the column).
    float dRow = (float)(p1[0] - p2[0]);
    float dCol = (float)(p1[1] - p2[1]);
    return sqrtf(dRow * dRow + dCol * dCol);
}
//idk why put this will only run with a global
//variables with d_ are input pointers and the _o is going to be our output array.
//this format will connect our main with the functions
// Stub: computes this thread's pixel coordinates but draws nothing yet.
// Intended to draw a border of edgeWidth pixels into o_pixels — TODO implement.
__global__ void drawEdgeCUDA(int *d_pixels, char **d_header, int *o_pixels, int numRows, int numCols, int edgeWidth ){
    int col = blockIdx.x*blockDim.x + threadIdx.x;  // pixel column for this thread
    int row = blockIdx.y*blockDim.y + threadIdx.y;  // pixel row for this thread
}//end CUDA EDGE
//Both line and circle. Look at your headers and inputs before working.
// Stub: computes this thread's pixel coordinates but draws nothing yet.
// Intended to draw a circle at (centerRow, centerCol) with the given radius
// into o_pixels — TODO implement (the distance() helper above is available).
__global__ void drawCircleCUDA(int *d_pixels, char **d_header, int *o_pixels, int numRows, int numCols, int centerRow, int centerCol, int radius){
    int col = blockIdx.x*blockDim.x + threadIdx.x;  // pixel column for this thread
    int row = blockIdx.y*blockDim.y + threadIdx.y;  // pixel row for this thread
}//end CUDACIRCLE
// Stub: computes this thread's pixel coordinates but draws nothing yet.
// Intended to draw a line from (p1row, p1col) to (p2row, p2col) — TODO
// implement. NOTE(review): `Col` is capitalized inconsistently with the
// sibling kernels' `col`.
__global__ void drawLineCUDA(int *d_pixels, char **d_header, int *o_pixels, int numRows, int numCols, int p1row, int p1col, int p2row, int p2col){
    int Col = blockIdx.x*blockDim.x + threadIdx.x;  // pixel column for this thread
    int row = blockIdx.y*blockDim.y + threadIdx.y;  // pixel row for this thread
}//end CUDA LINE
// BGRA pixel record, decompiled from the original C# type
// (H264Images.ArrayListNewCuda). Field order matches the byte layout read
// from the bitmaps below: blue, green, red, alpha.
struct ArrayListNewCudaPixelData
{
    // Default constructor leaves all channels uninitialized.
    __device__ ArrayListNewCudaPixelData()
    {
    }
    unsigned char blue;
    unsigned char green;
    unsigned char red;
    unsigned char alpha;
};
// H264Images.ArrayListNewCuda
extern "C" __global__ void calGPU( unsigned char* dev_bitmap1, int dev_bitmap1Len0, unsigned char* dev_bitmap2, int dev_bitmap2Len0, unsigned char* dev_result, int dev_resultLen0, int* imageWidth, int imageWidthLen0, int* count, int countLen0, int* possition, int possitionLen0);
// H264Images.ArrayListNewCuda
// Machine-decompiled C# kernel: diff two BGRA bitmaps and emit the changed
// pixels. For each pixel whose R, G, B, or A channel differs by more than 8,
// it appends (i, j) to `possition` and the pixel's B,G,R,A bytes to
// `dev_result`; `count[0]`/`count[1]` track how many bytes/coordinates were
// written. The shared `array` holds the two running write cursors:
// array[0] -> dev_result, array[1] -> possition.
// NOTE(review): the thread indices i and j are computed but immediately
// overwritten by the loops, so EVERY launched thread walks the whole image;
// with more than one thread the shared cursors and output buffers race.
// This appears to assume a <<<1,1>>>-style launch — verify with the caller.
extern "C" __global__ void calGPU( unsigned char* dev_bitmap1, int dev_bitmap1Len0, unsigned char* dev_bitmap2, int dev_bitmap2Len0, unsigned char* dev_result, int dev_resultLen0, int* imageWidth, int imageWidthLen0, int* count, int countLen0, int* possition, int possitionLen0)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    __shared__ int array[2];
    int arrayLen0 = 2;
    array[(0)] = 0;
    array[(1)] = 0;
    // imageWidth[1] = height (rows), imageWidth[0] = width (columns)
    for (i = 0; i < imageWidth[(1)]; i++)
    {
        for (j = 0; j < imageWidth[(0)]; j++)
        {
            int num = (i * imageWidth[(0)] + j) * 4;   // byte offset of pixel (i, j)
            ArrayListNewCudaPixelData pixelData = ArrayListNewCudaPixelData();
            ArrayListNewCudaPixelData pixelData2 = ArrayListNewCudaPixelData();
            pixelData.red = dev_bitmap1[(num + 2)];
            pixelData.green = dev_bitmap1[(num + 1)];
            pixelData.blue = dev_bitmap1[(num)];
            pixelData.alpha = dev_bitmap1[(num + 3)];
            pixelData2.green = dev_bitmap2[(num + 1)];
            pixelData2.red = dev_bitmap2[(num + 2)];
            pixelData2.blue = dev_bitmap2[(num)];
            pixelData2.alpha = dev_bitmap2[(num + 3)];
            // num2..num5 = absolute per-channel differences (R, A, G, B)
            bool flag = pixelData.red > pixelData2.red;
            int num2;
            if (flag)
            {
                num2 = (int)(pixelData.red - pixelData2.red);
            }
            else
            {
                num2 = (int)(pixelData2.red - pixelData.red);
            }
            bool flag2 = pixelData.alpha > pixelData2.alpha;
            int num3;
            if (flag2)
            {
                num3 = (int)(pixelData.alpha - pixelData2.alpha);
            }
            else
            {
                num3 = (int)(pixelData2.alpha - pixelData.alpha);
            }
            bool flag3 = pixelData.green > pixelData2.green;
            int num4;
            if (flag3)
            {
                num4 = (int)(pixelData.green - pixelData2.green);
            }
            else
            {
                num4 = (int)(pixelData2.green - pixelData.green);
            }
            bool flag4 = pixelData.blue > pixelData2.blue;
            int num5;
            if (flag4)
            {
                num5 = (int)(pixelData.blue - pixelData2.blue);
            }
            else
            {
                num5 = (int)(pixelData2.blue - pixelData.blue);
            }
            // pixel "changed" when any channel differs by more than 8
            bool flag5 = num2 > 8 || num3 > 8 || num4 > 8 || num5 > 8;
            if (flag5)
            {
                // post-increment array[1] twice: append i then j to possition
                int* expr_1F9_cp_0 = array;
                int expr_1F9_cp_1 = 1;
                int num6 = expr_1F9_cp_0[(expr_1F9_cp_1)];
                expr_1F9_cp_0[(expr_1F9_cp_1)] = num6 + 1;
                possition[(num6)] = i;
                int* expr_20F_cp_0 = array;
                int expr_20F_cp_1 = 1;
                num6 = expr_20F_cp_0[(expr_20F_cp_1)];
                expr_20F_cp_0[(expr_20F_cp_1)] = num6 + 1;
                possition[(num6)] = j;
                // post-increment array[0] four times: append B, G, R, A bytes
                int* expr_224_cp_0 = array;
                int expr_224_cp_1 = 0;
                num6 = expr_224_cp_0[(expr_224_cp_1)];
                expr_224_cp_0[(expr_224_cp_1)] = num6 + 1;
                dev_result[(num6)] = pixelData2.blue;
                int* expr_23F_cp_0 = array;
                int expr_23F_cp_1 = 0;
                num6 = expr_23F_cp_0[(expr_23F_cp_1)];
                expr_23F_cp_0[(expr_23F_cp_1)] = num6 + 1;
                dev_result[(num6)] = pixelData2.green;
                int* expr_25A_cp_0 = array;
                int expr_25A_cp_1 = 0;
                num6 = expr_25A_cp_0[(expr_25A_cp_1)];
                expr_25A_cp_0[(expr_25A_cp_1)] = num6 + 1;
                dev_result[(num6)] = pixelData2.red;
                int* expr_275_cp_0 = array;
                int expr_275_cp_1 = 0;
                num6 = expr_275_cp_0[(expr_275_cp_1)];
                expr_275_cp_0[(expr_275_cp_1)] = num6 + 1;
                dev_result[(num6)] = pixelData2.alpha;
                // publish the running cursor values to the host-visible counters
                count[(1)] = array[(1)];
                count[(0)] = array[(0)];
            }
        }
    }
}
|
8,745 | #include "includes.h"
// Backprop activation step: scales each error term by (1 - a^2), where a is
// the corresponding activation output (the derivative of tanh expressed via
// its output). The `net` argument is accepted but unused here.
__global__ void cuda_conv2D_deactivate(double* err, const double* net, const double* activation, size_t outputs)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < outputs)
    {
        double a = activation[idx];
        err[idx] *= (1.0 - a * a);
    }
}
// Element-wise out = a + b for n floats.
// Rewritten as a grid-stride loop: the original made EVERY launched thread
// serially write the entire array (correct only by redundancy, and pure
// duplicated work for any launch wider than one thread). This form computes
// each element exactly once per covering thread, remains correct for the
// original <<<1,1>>> launch, and scales to any grid geometry.
__global__ void vector_add(float *out, float *a, float *b, int n) {
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        out[i] = a[i] + b[i];
    }
}
8,747 | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
// zwykła funkcja w C/C++
// Plain CPU reference: add 1.0 to every element of tab[0..N).
void incrementArrayOnHost(double *tab, int N)
{
    for (int idx = 0; idx < N; idx++)
    {
        tab[idx] += 1.0;
    }
}
// funkcja (tzw. kernel) działająca na GPU
// GPU kernel: increment every element of tab by 1.0.
// A 2-D grid is flattened into a 1-D element index; the y tier extends the
// block count past the 65535 per-dimension grid limit used by the caller.
__global__ void incrementArrayOnDevice(double *tab, int N)
{
    int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
    // debugging aid, left disabled:
    // if (idx < N && blockIdx.y > 0)
    //   printf (" %d %d %d %d %d %d \n", idx, gridDim.x, blockDim.x, N, blockIdx.x, threadIdx.x);
    if (idx<N)
        tab[idx] += 1.0;
}
// Increment a 100M-element double array on both CPU and GPU and verify that
// the two results agree. (Comments translated from Polish.)
int main(void)
{
    const int N = 100000000;
    printf("N = %d\n", N);
    double *a_h, *b_h; // pointers to host (CPU) memory
    double *a_d; // pointer to the buffer on the GPU (device)
    // allocate host memory
    a_h = new double [N];
    b_h = new double [N];
    // allocate device memory
    cudaMalloc((void **) &a_d, sizeof(double)*N);
    // initialize the data on the CPU
    for (int i=0; i<N; i++)
    {
        a_h[i] = i + 1.0;
        b_h[i] = 0;
    }
    // transfer the data to the GPU: a_h -> a_d
    cudaMemcpy(a_d, a_h, sizeof(double)*N, cudaMemcpyDefault );
    // do the computation on the CPU
    incrementArrayOnHost(a_h, N);
    // and now try to do the same on the GPU
    dim3 blockSize = 512;
    dim3 gridSize (1,1,1);
    const int max_block_size = 65535;  // per-dimension grid limit assumed here
    int nBlocks = N/blockSize.x + (N%blockSize.x == 0 ? 0 : 1);
    gridSize.y = 1 + nBlocks/max_block_size;
    gridSize.x = (nBlocks > max_block_size) ? max_block_size : nBlocks;
    printf("%d %d\n", gridSize.x, gridSize.y);
    // launch the kernel on the GPU
    incrementArrayOnDevice <<< gridSize, blockSize >>> (a_d, N);
    // copy the result from the GPU back to the CPU
    cudaDeviceSynchronize();
    cudaMemcpy(b_h, a_d, sizeof(double)*N, cudaMemcpyDefault);
    // check the result
    // NOTE(review): the condition limits reporting/assertion to the first
    // 100 elements; mismatches at i >= 100 pass silently — verify intent.
    for (int i=0; i<N; i++)
        if (a_h[i] != b_h[i] && i < 100)
        {
            printf ("i=%d %g %g\n", i, a_h[i], b_h[i]);
            assert(a_h[i] == b_h[i]);
        }
    // clean up
    delete [] a_h;
    delete [] b_h;
    cudaFree(a_d);
    printf("Jeżeli widzisz ten napis, to program działa poprawnie\n");
}
|
8,748 | // Copyright (c) Megvii Inc. All rights reserved.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define THREADS_BLOCK_X 32
#define THREADS_BLOCK_Y 4
#define THREADS_PER_BLOCK THREADS_BLOCK_X * THREADS_BLOCK_Y
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Voxel pooling forward pass: sum-pools per-point feature vectors into a
// (batch, num_voxel_y, num_voxel_x, num_channels) grid.
// Launch layout (see the launcher below): 1-D grid of THREADS_PER_BLOCK-sample
// blocks, 2-D (THREADS_BLOCK_X x THREADS_BLOCK_Y) threads. Phase 1 stages each
// sample's integer voxel coordinates in shared memory and records, for
// in-bounds samples, (batch index, y, x) into pos_memo. Phase 2 re-walks the
// staged samples (rows split over tidy, channels over tidx) and atomically
// accumulates features into the pooled output (z is collapsed/ignored).
__global__ void voxel_pooling_forward_kernel(int batch_size, int num_points, int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z, const int *geom_xyz, const float *input_features,
                                             float *output_features, int *pos_memo) {
    const int bidx = blockIdx.x;
    const int tidx = threadIdx.x;
    const int tidy = threadIdx.y;
    const int sample_dim = THREADS_PER_BLOCK;
    const int idx_in_block = tidy * THREADS_BLOCK_X + tidx;   // flattened thread id
    const int block_sample_idx = bidx * sample_dim;           // first sample of this block
    const int thread_sample_idx = block_sample_idx + idx_in_block;
    const int total_samples = batch_size * num_points;
    __shared__ int geom_xyz_shared[THREADS_PER_BLOCK * 3];
    if (thread_sample_idx < total_samples) {
        const int sample_x = geom_xyz[thread_sample_idx * 3 + 0];
        const int sample_y = geom_xyz[thread_sample_idx * 3 + 1];
        const int sample_z = geom_xyz[thread_sample_idx * 3 + 2];
        geom_xyz_shared[idx_in_block * 3 + 0] = sample_x;
        geom_xyz_shared[idx_in_block * 3 + 1] = sample_y;
        geom_xyz_shared[idx_in_block * 3 + 2] = sample_z;
        if ((sample_x >= 0 && sample_x < num_voxel_x) && (sample_y >= 0 && sample_y < num_voxel_y) && (sample_z >= 0 && sample_z < num_voxel_z)) {
            // record (batch, y, x) for this sample; out-of-range samples
            // leave their pos_memo entry untouched
            pos_memo[thread_sample_idx * 3 + 0] = thread_sample_idx / num_points;
            pos_memo[thread_sample_idx * 3 + 1] = sample_y;
            pos_memo[thread_sample_idx * 3 + 2] = sample_x;
        }
    }
    __syncthreads();   // staged coordinates must be visible block-wide
    for (int i = tidy; i < THREADS_PER_BLOCK && block_sample_idx + i < total_samples; i += THREADS_BLOCK_Y) {
        const int sample_x = geom_xyz_shared[i * 3 + 0];
        const int sample_y = geom_xyz_shared[i * 3 + 1];
        const int sample_z = geom_xyz_shared[i * 3 + 2];
        if (sample_x < 0 || sample_x >= num_voxel_x || sample_y < 0 || sample_y >= num_voxel_y || sample_z < 0 || sample_z >= num_voxel_z) {
            continue;   // skip samples outside the voxel grid
        }
        const int batch_idx = (block_sample_idx + i) / num_points;
        for (int j = tidx; j < num_channels; j += THREADS_BLOCK_X) {
            atomicAdd(&output_features[(batch_idx * num_voxel_y * num_voxel_x + sample_y * num_voxel_x + sample_x) * num_channels + j], input_features[(block_sample_idx + i) * num_channels + j]);
        }
    }
}
// Host-side launcher: one thread per (batch, point) sample, grouped into
// THREADS_BLOCK_X x THREADS_BLOCK_Y blocks on the given stream; aborts the
// process if the launch itself fails.
void voxel_pooling_forward_kernel_launcher(int batch_size, int num_points, int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z, const int *geom_xyz, const float *input_features,
                                           float *output_features, int *pos_memo, cudaStream_t stream) {
  const int total_samples = batch_size * num_points;
  const dim3 grid_dim(DIVUP(total_samples, THREADS_PER_BLOCK));
  const dim3 block_dim(THREADS_BLOCK_X, THREADS_BLOCK_Y);
  voxel_pooling_forward_kernel<<<grid_dim, block_dim, 0, stream>>>(batch_size, num_points, num_channels, num_voxel_x, num_voxel_y, num_voxel_z, geom_xyz, input_features, output_features, pos_memo);
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
|
8,749 | #include <iostream>
#include <time.h>
#include <string.h>
#include <chrono>
using namespace std;
#define Threads 32
//This method return the matrix L and U of a LU factorization
// One elimination step of an in-place LU factorization (no pivoting).
// For the pivot at (selected_row, selected_col): every thread owning an
// element strictly below the pivot row and at/right of the pivot column
// updates U; threads sitting in the pivot column additionally record the
// elimination multiplier into L. The host launches this once per pivot.
__global__ void cuda_LU_factorization(float *U, float *L, int filas, int columnas, int selected_row, int selected_col)
{
// global (column, row) of the element owned by this thread
int col = blockIdx.x*blockDim.x+threadIdx.x;
int fil = blockIdx.y*blockDim.y+threadIdx.y;
if((col < columnas && fil < filas) && (fil > selected_row && col > (selected_col-1)))
{
int index_selected_row = (columnas*selected_row)+col;   // pivot-row element in this column
int index_selected_col = (columnas*fil)+selected_col;   // pivot-column element in this row
int index_kk = (columnas*selected_row)+selected_col;    // the pivot itself
int index = (columnas*fil)+col;                         // element being updated
if(col == selected_col)
{
// elimination multiplier goes into L
L[index] = U[index_selected_col]/U[index_kk];
}
U[index] = U[index]-U[index_selected_row]*U[index_selected_col]/U[index_kk];
//L[index] = U[index]+L[index];
}
}
// Zero-pivot detector: when A[pivot] == 0, the thread at the pivot
// position writes its row index to *dev_per.
// NOTE(review): this only records the pivot row itself — no replacement
// row is searched and no permutation is applied to A or P (P is unused).
// Confirm whether the pivoting path was ever completed; the caller in
// main() does not launch this kernel.
__global__ void cuda_permutationP(float *A, float *P, int filas, int columnas, int selected_row, int selected_col, int *dev_per)
{
int col = blockIdx.x*blockDim.x+threadIdx.x;
int fil = blockIdx.y*blockDim.y+threadIdx.y;
int index_kk = (columnas*selected_row)+selected_col;
if((A[index_kk] == 0 && col == selected_col) && (col < columnas && fil < filas))
{
if(fil == selected_row)
*dev_per = fil;
}
}
// Forward substitution: eliminates the strictly-lower part of L while
// applying the same row operations to the right-hand side B, i.e. solves
// L*y = B in place (B becomes y). selected_row/selected_col arguments are
// overwritten by the internal pivot loop.
// NOTE(review): successive `selected` iterations read values written by
// other threads in the previous iteration with no grid-wide barrier; this
// is only safe if the whole matrix is handled by a single block — confirm
// the launch configuration (main() uses a 4x4 matrix with 32x32 blocks,
// which satisfies this).
__global__ void cuda_solve_Lx(float *L, float *B, int filas, int columnas, int selected_row, int selected_col)
{
int col = blockIdx.x*blockDim.x+threadIdx.x;
int fil = blockIdx.y*blockDim.y+threadIdx.y;
for(int selected=0; selected<filas-1; selected++)
{
selected_row = selected;
selected_col = selected;
if((col < columnas && fil < filas) && (fil > selected_row && col > (selected_col-1)))
{
//Index for Matrix L
int index_selected_row = (columnas*selected_row)+col;
int index_selected_col = (columnas*fil)+selected_col;
int index_kk = (columnas*selected_row)+selected_col;
int index = (columnas*fil)+col;
//Index for Matrix B
if(fil > selected_row && col == selected_col)
{
// pivot-column threads update the right-hand side
int indexB = fil;
B[indexB] =B [indexB]-B[col]*L[index_selected_col]/L[index_kk];
}
L[index] = L[index]-L[index_selected_row]*L[index_selected_col]/L[index_kk];
}
}
}
/*__global__ void cuda_solve_Lx(float *L, float *B, int filas, int columnas, int selected_row, int selected_col)
{
int col = blockIdx.x*blockDim.x+threadIdx.x;
int fil = blockIdx.y*blockDim.y+threadIdx.y;
if((col < columnas && fil < filas) && (fil > selected_row && col > (selected_col-1)))
{
//Index for Matrix L
int index_selected_row = (columnas*selected_row)+col;
int index_selected_col = (columnas*fil)+selected_col;
int index_kk = (columnas*selected_row)+selected_col;
int index = (columnas*fil)+col;
//Index for Matrix B
if(fil > selected_row && col == selected_col)
{
int indexB = fil;
B[indexB] =B [indexB]-B[col]*L[index_selected_col]/L[index_kk];
}
L[index] = L[index]-L[index_selected_row]*L[index_selected_col]/L[index_kk];
}
}*/
// Back substitution: eliminates the strictly-upper part of U while
// applying the same row operations to B, i.e. solves U*x = y in place
// (B becomes x, up to the final diagonal scaling done by cuda_reduce_U).
// Mirrors cuda_solve_Lx but walks pivots from the last row upward.
// NOTE(review): same single-block assumption as cuda_solve_Lx — the
// per-`selected` iterations have no grid-wide barrier between them.
__global__ void cuda_solve_Ux(float *U, float *B, int filas, int columnas, int selected_row, int selected_col)
{
int col = blockIdx.x*blockDim.x+threadIdx.x;
int fil = blockIdx.y*blockDim.y+threadIdx.y;
for(int selected=filas-1; selected>=0; selected--)
{
selected_row = selected;
selected_col = selected;
if((col < columnas && fil < filas) && (fil < selected_row && col < (selected_col+1)))
{
int index_selected_row = (columnas*selected_row)+col;
int index_selected_col = (columnas*fil)+selected_col;
int index_kk = (columnas*selected_row)+selected_col;
int index = (columnas*fil)+col;
if(fil<selected_row && col == selected_col)
{
// pivot-column threads update the right-hand side
int indexB = fil;
//B[indexB] = B[col];
//B[indexB] = B[col];
B[indexB] = B[indexB]-B[col]*U[index_selected_col]/U[index_kk];;
}
U[index] = U[index]-U[index_selected_row]*U[index_selected_col]/U[index_kk];
//L[index] = U[index]+L[index];
}
}
}
/*__global__ void cuda_solve_Ux(float *U, float *B, int filas, int columnas, int selected_row, int selected_col)
{
int col = blockIdx.x*blockDim.x+threadIdx.x;
int fil = blockIdx.y*blockDim.y+threadIdx.y;
if((col < columnas && fil < filas) && (fil < selected_row && col < (selected_col+1)))
{
int index_selected_row = (columnas*selected_row)+col;
int index_selected_col = (columnas*fil)+selected_col;
int index_kk = (columnas*selected_row)+selected_col;
int index = (columnas*fil)+col;
if(fil<selected_row && col == selected_col)
{
int indexB = fil;
//B[indexB] = B[col];
//B[indexB] = B[col];
B[indexB] = B[indexB]-B[col]*U[index_selected_col]/U[index_kk];;
}
U[index] = U[index]-U[index_selected_row]*U[index_selected_col]/U[index_kk];
//L[index] = U[index]+L[index];
}
}*/
// Final diagonal normalization: scale each row's right-hand-side entry by
// the diagonal pivot and set the diagonal of U to 1. Only the in-bounds
// threads sitting exactly on the diagonal do any work.
__global__ void cuda_reduce_U(float *U, float *B, int filas, int columnas)
{
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= filas || c >= columnas || r != c)
        return;
    int diag = (columnas * r) + c;
    B[r] = B[r] / U[diag];
    U[diag] = U[diag] / U[diag];  // 1 (or NaN for a zero pivot, as before)
}
// Solves a random 4x4 linear system A*x = b on the GPU via in-place LU
// factorization, forward/back substitution, and diagonal normalization,
// then prints U, L and the solution vector.
int main()
{
    srand(time(NULL));
    int filas = 4;
    int columnas = 4;
    int N = filas;
    float *L = (float *)malloc(N * N * sizeof(float));
    float *xB = (float *)malloc(1 * N * sizeof(float));
    float *a = (float *)malloc(N * N * sizeof(float));
    float *P = (float *)malloc(N * N * sizeof(float));
    // random right-hand side in [1, 5]
    for(int i=0; i<N; i++)
    {
        xB[i] = rand()%5+1;
    }
    // L starts as the identity matrix
    for (int i=0; i<N; i++) {
        L[i] = 0.0f;
        for (int j=0; j<N; j++)
            if (i == j)
            {
                L[i * N + j] = 1.0f;
            }
    }
    // random matrix A in [1, 10], echoed to stdout
    float *dev_U;
    float *dev_L;
    float *dev_B;
    for(int i=0; i<filas; i++)
    {
        for(int j=0; j<columnas; j++)
        {
            a[i*N+j] = rand()%10+1;
            cout << a[i*N+j] << " - ";
        }
        cout << endl;
    }
    cudaMalloc((void**) &dev_U, filas*columnas*sizeof(float));
    cudaMalloc((void**) &dev_L, filas*columnas*sizeof(float));
    cudaMalloc((void**) &dev_B, filas*1*sizeof(float));
    cudaMemcpy(dev_U, a, filas*columnas*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_L, L, filas*columnas*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_B, xB, filas*1*sizeof(float), cudaMemcpyHostToDevice);
    dim3 dimThreadsBloque(Threads, Threads);
    // enough blocks in x and y to cover the matrix
    float BFloat = (float) columnas / (float) Threads;
    int B = (int) ceil(BFloat);
    dim3 dimBloques(B, B);
    auto t11 = std::chrono::high_resolution_clock::now();
    // LU factorization: one launch per pivot
    for(int selected=0; selected<filas-1; selected++)
    {
        cuda_LU_factorization<<<dimBloques, dimThreadsBloque>>>(dev_U, dev_L, filas, columnas, selected, selected);
    }
    // forward substitution (L*y = b), then back substitution (U*x = y)
    cuda_solve_Lx<<<dimBloques, dimThreadsBloque>>>(dev_L, dev_B, filas, columnas, 0, 0);
    cuda_solve_Ux<<<dimBloques, dimThreadsBloque>>>(dev_U, dev_B, filas, columnas, 0, 0);
    // normalize U's diagonal and scale B accordingly
    cuda_reduce_U<<<dimBloques, dimThreadsBloque>>>(dev_U, dev_B, filas, columnas);
    // FIX: kernel launches are asynchronous; without this sync the timing
    // below measured only the launch overhead, not the GPU work.
    cudaDeviceSynchronize();
    auto t12 = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(t12 - t11).count();
    cout << "Time to gauss elimination: " << duration << endl;
    cudaMemcpy(a, dev_U, filas*columnas*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(L, dev_L, filas*columnas*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(xB, dev_B, filas*1*sizeof(float), cudaMemcpyDeviceToHost);
    // FIX: release every allocation (dev_B and all four host buffers were
    // previously leaked).
    cudaFree(dev_U);
    cudaFree(dev_L);
    cudaFree(dev_B);
    cout << "print U: " << endl;
    for(int i=0; i<filas; i++)
    {
        for(int j=0; j<columnas; j++)
        {
            cout << a[i*N+j] << " - ";
        }
        cout << endl;
    }
    cout << "print L: " << endl;
    for(int i=0; i<filas; i++)
    {
        for(int j=0; j<columnas; j++)
        {
            cout << L[i*N+j] << " - ";
        }
        cout << endl;
    }
    cout << "print B: " << endl;
    for(int i=0; i<filas; i++)
    {
        cout << xB[i] << endl;
    }
    free(L);
    free(xB);
    free(a);
    free(P);
    return 0;
}
8,750 | #include "includes.h"
// Per-block partial histogram of one byte ("digit") of the float keys.
// Each block walks its [istart, iend) slice of `a` in RBIGBLK-sized
// chunks; per chunk it builds a RNDVALS-bin count in shared memory and
// writes the bins to `bi` at a chunk-dependent offset (presumably
// blockDim.x == RNDVALS, since one thread clears/stores each bin —
// confirm against the launcher).
// NOTE(review): the inner bound `min(iend, i+tid+RBIGBLK)` depends on
// tid, so higher-numbered threads scan a slightly longer range than a
// plain `i + RBIGBLK` chunk bound would — verify this is intentional.
__global__ void __radixcounts(float *a, int n, int digit, unsigned int *bi) {
__shared__ unsigned int ic[RNDVALS];
// this block's slice, computed in 64-bit to avoid overflow for large n
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int tid = threadIdx.x;
int bibase = RNDVALS * (blockIdx.x + istart / RBIGBLK);
for (int i = istart; i < iend; i += RBIGBLK) {
__syncthreads();
ic[threadIdx.x] = 0;
__syncthreads();
for (int j = i + tid; j < min(iend, i+tid+RBIGBLK); j += RNTHREADS) {
float v = a[j];
unsigned char *cv = (unsigned char *)&v;   // reinterpret the key's bytes
atomicInc(&ic[cv[digit]], 65536*32767);    // count the selected byte value
}
__syncthreads();
bi[bibase + threadIdx.x] = ic[threadIdx.x];
bibase += RNDVALS;
}
}
extern "C"
// DBSCAN-style neighborhood pass over `len` 2-D points.
// Phase 1 fills the len x len adjacency matrix `neighbors` (1 where the
// squared distance is <= minEps^2; only the upper triangle is computed
// and then mirrored). Phase 2 counts each point's neighbors (the point
// itself counted via the initial 1) and increments vis[] for core points
// with >= minPts neighbors.
// NOTE(review): `neighbors` is assumed to arrive zeroed — non-neighbor
// entries are never written. Also, __syncthreads() only synchronizes one
// block, while phase 2 reads adjacency entries written by other blocks
// in phase 1; with more than one block this is a race — confirm the
// launch uses a single block or split the phases into two kernels.
__global__ void cudaGetNeighbors(float* xs, float* ys, int* vis, int len, int* neighbors, double minEps, int minPts) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int src;
unsigned int dest;
unsigned int point_id = tid;
unsigned int neighborscnt;
// grid-stride loop over all (src, dest) pairs
while (point_id < len * len) {
src = point_id / len;
dest = point_id % len;
float dist;
if (src <= dest) {
float srcX = xs[src];
float destX = xs[dest];
float srcY = ys[src];
float destY = ys[dest];
float xRes = srcX - destX;
float yRes = srcY - destY;
dist = xRes * xRes + yRes * yRes;   // squared distance (no sqrt needed)
if (dist <= minEps * minEps) {
neighbors[point_id] = 1;
}
neighbors[dest * len + src] = neighbors[point_id];   // mirror across diagonal
}
point_id += blockDim.x * gridDim.x;
}
__syncthreads();
// phase 2: count neighbors per point and mark core points
point_id = tid;
while (point_id < len) {
neighborscnt = 1;
src = point_id * len;
for (int i = 0; i < len; i++) {
if (point_id != i) {
if (neighbors[src + i]) {
neighborscnt++;
}
}
}
if (neighborscnt >= minPts) {
vis[point_id]++;
}
point_id += blockDim.x * gridDim.x;
}
}
|
8,752 | #include "includes.h"
// SGD weight update for a linear layer: W -= lr * (dZ . A^T) / batch_size,
// where A (the layer input) is read as if transposed. One thread computes
// one element of W (dZ_y_dim rows x A_y_dim columns).
__global__ void linearLayerUpdateWeights( float* dZ, float* A, float* W, int dZ_x_dim, int dZ_y_dim, int A_x_dim, int A_y_dim, float learning_rate) {
    const int wCol = blockIdx.x * blockDim.x + threadIdx.x;
    const int wRow = blockIdx.y * blockDim.y + threadIdx.y;
    const int wCols = A_y_dim;    // W x-dimension
    const int wRows = dZ_y_dim;   // W y-dimension
    if (wRow >= wRows || wCol >= wCols) {
        return;
    }
    // dot product over the batch dimension
    float grad = 0.0f;
    for (int k = 0; k < dZ_x_dim; ++k) {
        grad += dZ[wRow * dZ_x_dim + k] * A[wCol * A_x_dim + k];
    }
    // average the gradient over the batch before applying the step
    W[wRow * wCols + wCol] = W[wRow * wCols + wCol] - learning_rate * (grad / A_x_dim);
}
8,753 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 20000
#define GRID_D1 20
#define GRID_D2 2
#define BLOCK_D1 512
#define BLOCK_D2 1
#define BLOCK_D3 1
// this is the kernel function called for each thread
// we use the CUDA variables {threadIdx, blockIdx, blockDim, gridDim} to determine a unique ID for each thread
// Demo kernel: each thread computes its globally unique index from the
// 2-D grid / 3-D block coordinates and prints a greeting. Only the first
// (< 2000) and last (> 19000) index ranges print, because the device-side
// printf buffer is limited.
__global__ void hello(void)
{
// id of the block
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
// size of each block (within grid of blocks)
int blocksize = blockDim.x * blockDim.y * blockDim.z;
// id of thread in a given block
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
// assign overall id/index of the thread
int idx = myblock * blocksize + subthread;
if(idx < 2000 || idx > 19000) {
// print buffer from within the kernel is limited so only print for first and last chunks of thread
if (idx < N){
printf("Hello World! My block index is (%d, %d) [Grid dims=(%d,%d)], 3D-thread index within blocks=(%d,%d,%d) => \
thread index=%d\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y, threadIdx.x, threadIdx.y, threadIdx.z, idx);
} else {
// threads beyond N exist only because the grid overshoots the data size
printf("Hello world! My block index is (%d,%d) [Grid dims=(%d,%d)], 3D-thread index within block=(%d,%d,%d) => \
thread index=%d [### this thread would not be used for N=%d ###]\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y,
threadIdx.x, threadIdx.y, threadIdx.z, idx, N);
}
}
}
// Launches the hello kernel over the fixed grid/block geometry declared
// above, then synchronizes so the device printf buffer is flushed before
// the process exits.
int main(int argc, char **argv)
{
    // grid/block geometry from the compile-time constants
    const dim3 blockSize(BLOCK_D1, BLOCK_D2, BLOCK_D3);
    const dim3 gridSize(GRID_D1, GRID_D2, 1);
    const int nthreads = BLOCK_D1 * BLOCK_D2 * BLOCK_D3 * GRID_D1 * GRID_D2;
    if (nthreads >= N) {
        printf("Launching %d threads (N=%d)\n", nthreads, N);
    } else {
        printf("\n================ NOT ENOUGH THREADS TO COVER N=%d ===================\n\n",N);
    }
    // launch, then block until the device is done (also surfaces any
    // asynchronous kernel errors)
    hello<<<gridSize, blockSize>>>();
    const cudaError_t cudaerr = cudaDeviceSynchronize();
    if (cudaerr) {
        printf("kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(cudaerr));
    } else {
        printf("kernel launch success!\n");
    }
    printf("That's all!\n");
    return 0;
}
|
8,754 |
#include <stdio.h>
#include <cuda.h>
#define N 32
// Writes id*id into each slot of an N*N array, where id is flattened from
// the 2-D thread index.
// NOTE(review): the flattening uses blockDim.y (threadIdx.x * blockDim.y
// + threadIdx.y), which is a valid 1:1 mapping for the square N x N block
// launched by main, but is transposed relative to the usual x-fastest
// convention — confirm intent before reusing with non-square blocks.
__global__ void square( unsigned *h_matrix ){
unsigned id = threadIdx.x * blockDim.y + threadIdx.y;
h_matrix[id] = id * id;
}
// Fills an N x N matrix with squared indices on the GPU, copies it back
// and prints it.
// FIX: the original named the device buffer `h_matrix` (host prefix) and
// the host buffer `matrix`, and leaked both allocations.
int main() {
    dim3 block(N, N);
    // host and device buffers
    unsigned *h_matrix, *d_matrix;
    h_matrix = (unsigned *)malloc(N * N * sizeof(unsigned));
    cudaMalloc(&d_matrix, N * N * sizeof(unsigned));
    // compute the squares on the device
    square<<<1, block>>>(d_matrix);
    // copy the result back (cudaMemcpy also synchronizes with the kernel)
    cudaMemcpy( h_matrix, d_matrix, N * N * sizeof(unsigned), cudaMemcpyDeviceToHost );
    // print out the matrix
    for( int i = 0; i < N; i++ ){
        for( int j = 0; j < N; j++ ){
            printf( "%d\t", h_matrix[ i * N + j ] );
        }
        printf( "\n" );
    }
    // FIX: release both allocations (previously leaked)
    cudaFree(d_matrix);
    free(h_matrix);
    return 0;
}
|
8,755 | //fail
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
#define N 2//64
// Each thread zeroes its own slot of p (indexed by thread id within the
// single block).
__global__ void foo(int* p) {
  const unsigned tid = threadIdx.x;
  p[tid] = 0;
}
|
8,756 | #include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <time.h>
using namespace std;
#define MRows 1600
#define MCols 1600
#define NRows 1600
#define NCols 1500
#define PRows 1600
#define PCols 1500
#define H 10
#define W 10
#define TILE_WIDTH 32
// Mulitiplicacion de matrices en paralelo con TILES
// Tiled matrix multiply P = M * N using shared-memory tiles.
// FIX: the shared tiles were declared `int`, silently truncating every
// float staged from M and N (the all-1.0 test data in main masked this).
// FIX: the tile loop now rounds up, so the tail tile is processed even
// when the shared dimension is not a multiple of TILE_WIDTH (the guards
// below already zero-pad out-of-range elements).
__global__ void MultTiled(float *M, float *N, float *P) {
  // shared-memory tiles for both operands
  __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
  __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
  int bx = blockIdx.x;
  int by = blockIdx.y;
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int Row = by * TILE_WIDTH + ty;
  int Col = bx * TILE_WIDTH + tx;
  float PValue = 0.0f;
  for (int p = 0; p < (MCols + TILE_WIDTH - 1) / TILE_WIDTH; p++) {
    // stage one tile of M, zero-padded outside the matrix
    if (Row < MRows && (p * TILE_WIDTH + tx) < MCols)
      ds_M[ty][tx] = M[Row * MCols + (p * TILE_WIDTH + tx)];
    else
      ds_M[ty][tx] = 0.0f;
    // stage one tile of N, zero-padded outside the matrix
    if (Col < NCols && (p * TILE_WIDTH + ty) < MCols)
      ds_N[ty][tx] = N[(p * TILE_WIDTH + ty) * NCols + Col];
    else
      ds_N[ty][tx] = 0.0f;
    __syncthreads();
    // multiply the staged tiles
    for (int n = 0; n < TILE_WIDTH; ++n)
      PValue += ds_M[ty][n] * ds_N[n][tx];
    // barrier before the next iteration overwrites the tiles
    __syncthreads();
  }
  // write the accumulated dot product
  if (Row < PRows && Col < PCols)
    P[(Row * PCols) + Col] = PValue;
}
// Multiplicacion de matrices en paralelo
// Naive parallel matrix multiply: one thread computes one element of the
// PRows x PCols result, reading a full row of d_a and a full column of
// d_b from global memory.
__global__ void multMatCUDA(float *d_a, float *d_b, float *d_c) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < PRows && col < PCols) {
float result = 0;
// dot product over the shared dimension
for (int k = 0; k < MCols; k++) {
result += d_a[row * MCols + k] * d_b[k * NCols + col];
}
d_c[row * PCols + col] = result;
}
}
// Multiplicacion de matrices secuencialmente
// Sequential CPU reference: h_c = h_a (MRows x MCols) * h_b (NRows x NCols),
// all row-major.
void multMat(float *h_a, float *h_b, float *h_c){
    for (int row = 0; row < PRows; ++row) {
        for (int col = 0; col < PCols; ++col) {
            float acc = 0;
            for (int k = 0; k < MCols; ++k)
                acc += h_a[row * MCols + k] * h_b[k * NCols + col];
            h_c[row * PCols + col] = acc;
        }
    }
}
// Compara si dos matrices son iguales
// Exact element-wise comparison of two PRows x PCols row-major matrices.
bool compareTo(float *h_c,float *h_result){
    const int total = PRows * PCols;
    for (int idx = 0; idx < total; ++idx) {
        if (h_c[idx] != h_result[idx])
            return false;
    }
    return true;
}
// Imprime los valores de una matriz
// Dumps an R x C row-major matrix to stdout, one row per line,
// space-separated.
void printMatrix(float *result, int R, int C) {
    for (int r = 0; r < R; ++r) {
        for (int c = 0; c < C; ++c)
            cout << result[r * C + c] << " ";
        cout << endl;
    }
}
// Benchmarks the sequential CPU multiply against the tiled GPU multiply
// on all-ones input matrices and verifies the results match.
int main() {
clock_t start, end;
double gpu_time_used, tiles_time_used, cpu_time_used;
float blockSize = 32;
float *h_a, *h_b, *h_c, *h_result, *M, *N, *P, *t_result;
// Allocate host memory
h_a = (float *)malloc(sizeof(float) * MRows * MCols);
h_b = (float *)malloc(sizeof(float) * NRows * NCols);
h_c = (float *)malloc(sizeof(float) * PRows * PCols);
h_result = (float *)malloc(sizeof(float) * PRows * PCols);
t_result = (float *)malloc(sizeof(float) * PRows * PCols);
// Initialize the first matrix (all ones)
for (int i = 0; i < MRows; i++) {
for (int j = 0; j < MCols; j++) {
h_a[i * MCols + j] = 1.0;
}
}
// Initialize the second matrix (all ones)
for (int i = 0; i < NRows; i++) {
for (int j = 0; j < NCols; j++) {
h_b[i * NCols + j] = 1.0;
}
}
// Run the sequential CPU reference multiply
start = clock();
multMat(h_a, h_b, h_c);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);
// Allocate device memory
cudaMalloc(&M, sizeof(float) * MRows * MCols);
cudaMalloc(&N, sizeof(float) * NRows * NCols);
cudaMalloc(&P, sizeof(float) * PRows * PCols);
// Copy inputs host -> device
cudaMemcpy(M, h_a, MRows * MCols * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(N, h_b, NRows * NCols * sizeof(float), cudaMemcpyHostToDevice);
// Define grid/block dimensions (enough blocks to cover the result)
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(ceil(PCols / float(blockSize)), ceil(PRows / float(blockSize)),
1);
// Naive parallel multiply (kept for reference, disabled)
// start = clock();
// multMatCUDA<<<dimGrid, dimBlock>>>(M, N, P);
// cudaMemcpy(h_result, P, PRows * PCols * sizeof(float),
// cudaMemcpyDeviceToHost);
// end = clock();
// gpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
// printf("Tiempo invertido GPU = %lf s\n", gpu_time_used);
// Run the tiled GPU multiply
start = clock();
MultTiled<<<dimGrid, dimBlock>>>(M, N, P);
cudaDeviceSynchronize();
cudaMemcpy(t_result, P, PRows * PCols * sizeof(float),
cudaMemcpyDeviceToHost);
end = clock();
tiles_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
printf("Tiempo invertido GPU con Tiles = %lf s\n", tiles_time_used);
// Check the GPU result against the CPU reference
// printMatrix(h_c, PRows, PCols);
// printMatrix(t_result, PRows, PCols);
if (compareTo(h_c, t_result)) {
printf("Matrices Iguales");
} else {
printf("Matrices Diferentes");
}
// Free device and host memory
cudaFree(M);
cudaFree(N);
cudaFree(P);
free(h_a);
free(h_b);
free(h_c);
free(h_result);
free(t_result);
return 0;
}
8,757 | #include <stdio.h>
#include <assert.h>
#include <math_constants.h>
#include <stdint.h>
#include <unistd.h>
// Census-transform stereo matching cost. Grid maps to image pixels
// (blockIdx.x = x, blockIdx.y = y); threadIdx.x enumerates disparities
// (d = -threadIdx.x, i.e. leftward shifts). For each (pixel, disparity)
// the cost counts, over a (2*wnd_half+1)^2 window and all channels, how
// often the intensity orderings around p in x0 and around the shifted
// p+d in x1 disagree; window samples falling outside the image count as
// mismatches. Disparities pushing p out of the image get the fixed cost
// bnd_const. Output is the flat per-(pixel,disparity) cost volume.
__global__ void census(float *x0, float *x1, float *output, int size, int num_channels, int size2, int size3, int wnd_half, float bnd_const)
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int id = blockId * blockDim.x + threadIdx.x;
if (id < size) {
int x = blockIdx.x;
int y = blockIdx.y;
int d = -threadIdx.x;
float dist;
if (0 <= x + d && x + d < size3) {
dist = 0;
for (int i = 0; i < num_channels; i++) {
int ind_p = (i * size2 + y) * size3 + x;   // center pixel in channel i
for (int yy = y - wnd_half; yy <= y + wnd_half; yy++) {
for (int xx = x - wnd_half; xx <= x + wnd_half; xx++) {
if (0 <= xx && xx < size3 && 0 <= xx + d && xx + d < size3 && 0 <= yy && yy < size2) {
int ind_q = (i * size2 + yy) * size3 + xx;
// penalize when the binary orderings differ between the two images
if ((x0[ind_q] < x0[ind_p]) != (x1[ind_q + d] < x1[ind_p + d])) {
dist++;
}
} else {
// out-of-bounds window samples count as mismatches
dist++;
}
}
}
}
dist /= num_channels;
} else {
dist = bnd_const;
}
output[id] = dist;
}
}
// Sum-of-absolute-differences matching cost over a window, single channel.
// Same grid layout as census (blockIdx = pixel, threadIdx.x = disparity
// magnitude); unlike census, out-of-bounds window samples contribute
// nothing, and the sum is not normalized by the window size.
__global__ void sad_color(float *x0, float *x1, float *output, int size, int size2, int size3, int wnd_half, float bnd_const)
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int id = blockId * blockDim.x + threadIdx.x;
int num_channels = 1;
if (id < size) {
int x = blockIdx.x;
int y = blockIdx.y;
int d = -threadIdx.x;
float dist;
if (0 <= x + d && x + d < size3) {
dist = 0;
for (int i = 0; i < num_channels; i++) {
int ind_p = (i * size2 + y) * size3 + x;   // center pixel
for (int yy = y - wnd_half; yy <= y + wnd_half; yy++) {
for (int xx = x - wnd_half; xx <= x + wnd_half; xx++) {
if (0 <= xx && xx < size3 && 0 <= xx + d && xx + d < size3 && 0 <= yy && yy < size2) {
int ind_q = (i * size2 + yy) * size3 + xx;
// note: compares the fixed center of x0 against the window of x1
dist += abs(x0[ind_p] - x1[ind_q + d]);
}
}
}
}
} else {
// disparity shifts the pixel out of the image: fixed boundary cost
dist = bnd_const;
}
output[id] = dist;
}
}
// Element-wise linear combination: output = alpha*inp0 + beta*inp1.
// One thread per element, 2-D grid flattened to a linear id.
// FIX: removed the trailing __syncthreads(); the kernel uses no shared
// memory and no thread reads another thread's output, so the barrier
// served no purpose.
__global__ void linear_comb(float *inp0, float *inp1, float *output, int size, float alpha, float beta)
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int id = blockId * blockDim.x + threadIdx.x;
if (id < size) {
output[id] = alpha * inp0[id] + beta * inp1[id];
}
}
// Left-right consistency check on two disparity maps. For each pixel of
// d0 the label written to `outlier` is:
//   0 = match     (|d0(p) - d1(p - d0(p))| < 1.1)
//   1 = occlusion (no disparity is consistent)
//   2 = mismatch  (some other disparity d would be consistent with d1)
// Pixels whose disparity points left of the image edge are marked 1.
__global__ void outlier_detection(float *d0, float *d1, float *outlier, int size, int dim3, int disp_max)
{
int id = blockIdx.y * gridDim.x + blockIdx.x;
if (id < size) {
int x = id % dim3;
int d0i = d0[id];   // truncate the disparity to an integer offset
if (x - d0i < 0) {
//assert(0);
outlier[id] = 1;
} else if (abs(d0[id] - d1[id - d0i]) < 1.1) {
outlier[id] = 0; /* match */
} else {
outlier[id] = 1; /* occlusion */
// if any other disparity is consistent, reclassify as mismatch
for (int d = 0; d < disp_max; d++) {
if (x - d >= 0 && abs(d - d1[id - d]) < 1.1) {
outlier[id] = 2; /* mismatch */
break;
}
}
}
}
}
// In-place ascending selection sort of x[0..n); intended for tiny arrays
// (the callers here pass at most 16 elements).
__device__ void sort(float *x, int n)
{
for (int i = 0; i + 1 < n; i++) {
// find the smallest remaining element ...
int best = i;
for (int j = i + 1; j < n; j++) {
if (x[j] < x[best]) {
best = j;
}
}
// ... and swap it into position i
float tmp = x[best];
x[best] = x[i];
x[i] = tmp;
}
}
// Fills pixels labeled "mismatch" (outlier == 2) with the median of the
// nearest non-mismatch disparities found by marching from the pixel in 16
// fixed directions; all other pixels pass through unchanged.
__global__ void interpolate_mismatch(float *d0, float *outlier, float *out, int size, int dim2, int dim3)
{
// 16 unit step vectors (dx, dy pairs) covering the compass directions
const float dir[] = {
0 , 1,
-0.5, 1,
-1 , 1,
-1 , 0.5,
-1 , 0,
-1 , -0.5,
-1 , -1,
-0.5, -1,
0 , -1,
0.5 , -1,
1 , -1,
1 , -0.5,
1 , 0,
1 , 0.5,
1 , 1,
0.5 , 1
};
int id = blockIdx.y * gridDim.x + blockIdx.x;
if (id < size) {
if (outlier[id] != 2) {
// not a mismatch: copy through
out[id] = d0[id];
return;
}
float vals[16];
int vals_size = 0;
int x = id % dim3;
int y = id / dim3;
for (int d = 0; d < 16; d++) {
float dx = dir[2 * d];
float dy = dir[2 * d + 1];
float xx = x;
float yy = y;
int xx_i = round(xx);
int yy_i = round(yy);
// march until we leave the image or hit a non-mismatch pixel
while (0 <= yy_i && yy_i < dim2 && 0 <= xx_i && xx_i < dim3 && outlier[yy_i * dim3 + xx_i] == 2) {
xx += dx;
yy += dy;
xx_i = round(xx);
yy_i = round(yy);
}
int ind = yy_i * dim3 + xx_i;
if (0 <= yy_i && yy_i < dim2 && 0 <= xx_i && xx_i < dim3) {
assert(outlier[ind] != 2);
vals[vals_size++] = d0[ind];
}
}
assert(vals_size > 0);
// median of the collected candidates
sort(vals, vals_size);
out[id] = vals[vals_size / 2];
}
}
// Fills pixels labeled "occlusion" (outlier == 1) with the disparity of
// the nearest non-outlier pixel scanning left along the row; if none is
// found to the left, scans right instead; if the whole row fails, the
// pixel keeps its own value. Non-occluded pixels pass through.
__global__ void interpolate_occlusion(float *d0, float *outlier, float *out, int size, int dim3)
{
int id = blockIdx.y * gridDim.x + blockIdx.x;
if (id < size) {
if (outlier[id] != 1) {
out[id] = d0[id];
return;
}
int x = id % dim3;
// scan left for the nearest valid pixel
int dx = 0;
while (x + dx >= 0 && outlier[id + dx] != 0) {
dx--;
}
if (x + dx < 0) {
// nothing to the left: scan right
dx = 0;
while (x + dx < dim3 && outlier[id + dx] != 0) {
dx++;
}
}
if (x + dx < dim3) {
out[id] = d0[id + dx];
} else {
out[id] = d0[id];
}
}
}
// Recursive (IIR) horizontal filter, in place: a left-to-right pass then a
// right-to-left pass along each image row, blending each pixel with its
// predecessor by the per-pixel weight omega. One block per row
// (blockIdx.x), one thread per channel (threadIdx.x); the scans are
// inherently sequential along the row.
__global__ void dtransform_lr(
float* output, float* weight,
const int height, const int width, const int channels)
{
//id_e = (y * width + x) * channels + z;
//id_w = y * w + x
int ind = 0;
int ind_prev = 0;
float omega = 0.0;
int i_w = 0;
// forward (left-to-right) pass
for (i_w = 1; i_w < width; i_w++)
{
ind = (blockIdx.x * width + i_w) * channels + threadIdx.x;
ind_prev = (blockIdx.x * width + i_w - 1) * channels + threadIdx.x;
omega = weight[blockIdx.x * width + i_w];
output[ind] = (1.0 - omega) * output[ind] + omega * output[ind_prev];
}
// backward (right-to-left) pass
for (i_w = width-2; i_w >= 0; i_w--)
{
ind = (blockIdx.x * width + i_w) * channels + threadIdx.x;
ind_prev = (blockIdx.x * width + i_w + 1) * channels + threadIdx.x;
omega = weight[blockIdx.x * width + i_w];
output[ind] = (1.0 - omega) * output[ind] + omega * output[ind_prev];
}
}
// Vertical counterpart of dtransform_lr: a top-to-bottom then
// bottom-to-top recursive pass along each image column, blending each
// pixel with its predecessor by the per-pixel weight omega. One block per
// column (blockIdx.x), one thread per channel (threadIdx.x).
__global__ void dtransform_ud(
float* output, float* weight,
const int height, const int width, const int channels)
{
//id_e = (y * width + x) * channels + z;
//id_w = y * w + x
int ind = 0;
int ind_prev = 0;
float omega = 0.0;
int i_h = 0;
// downward (top-to-bottom) pass
for (i_h = 1; i_h < height; i_h++)
{
ind = (i_h * width + blockIdx.x) * channels + threadIdx.x;
ind_prev = ((i_h-1) * width + blockIdx.x) * channels + threadIdx.x;
omega = weight[i_h * width + blockIdx.x];
output[ind] = (1.0 - omega) * output[ind] + omega * output[ind_prev];
}
// upward (bottom-to-top) pass
for (i_h = height-2; i_h >= 0; i_h--)
{
ind = (i_h * width + blockIdx.x) * channels + threadIdx.x;
ind_prev = ((i_h+1) * width + blockIdx.x) * channels + threadIdx.x;
omega = weight[i_h * width + blockIdx.x];
output[ind] = (1.0 - omega) * output[ind] + omega * output[ind_prev];
}
}
|
8,758 | // CUDA runtime
#include <cuda_runtime.h>
#include"device_launch_parameters.h"
#include <stdio.h>
// Per-ray histogram reduction: one block per ray id collapses that ray's
// D per-depth histograms of B bins each into a single B-bin histogram,
// then adds it into the shared global output visHist.
// FIX: the original accumulated into an uninitialized stack array, used
// stride d*D (inconsistent with the id*D*B base unless D == B), and never
// wrote any result out — the host then read back an untouched buffer.
// NOTE(review): a row-major [K][D][B] layout is assumed here; confirm
// against the host-side packing of rawHistogramRays.
__global__ void cudaKernel(float* visHist, int* rawHistogramRay, float* devOtf, int K, int B, int D)
{
    int id = blockIdx.x;
    float localVisHist[128];      // per-thread scratch; requires B <= 128
    if (B > 128) {
        return;
    }
    // zero the accumulator before summing
    for (int b = 0; b < B; b++) {
        localVisHist[b] = 0.0f;
    }
    int base = id * D * B;
    // collapse the depth dimension
    for (int d = 0; d < D; d++) {
        for (int b = 0; b < B; b++) {
            localVisHist[b] += rawHistogramRay[base + d * B + b];
        }
    }
    // publish: atomics keep the cross-block accumulation into the shared
    // B-bin output correct (one thread per block runs this kernel)
    for (int b = 0; b < B; b++) {
        atomicAdd(&visHist[b], localVisHist[b]);
    }
}
// Host driver: copies the raw per-ray histograms and the OTF to the
// device, runs cudaKernel with one block per ray, and copies the B-bin
// visibility histogram back into visHist. Diagnostics go to "output.txt".
// Returns 0.0 on success, 1 on any CUDA failure.
// FIX: the success path previously fell off the end of a double-returning
// function (undefined behavior), the log FILE* was never closed, and the
// device output buffer was never zeroed before the kernel accumulated
// into it.
extern "C" double runCudaKernel( float* visHist, int K, int D, int B, float* otf, int* rawHistogramRays )
{
    FILE* fp = fopen( "output.txt", "w" );
    float* devVisHist = 0;
    float* devOtf = 0;
    int* devRawHistogramRay = 0;
    cudaError_t cudaStatus;
    if (fp == NULL) {
        fp = stderr;  // FIX: fall back so the fprintf(fp, ...) calls below are safe
    }
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus= cudaSetDevice(0);
    if(cudaStatus != cudaSuccess) {
        fprintf(fp,"cudaSetDevice failed! Do you havea CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers.
    cudaStatus= cudaMalloc((void**)&devVisHist, B * sizeof(float));
    if(cudaStatus != cudaSuccess) {
        fprintf(fp,"cudaMalloc failed!");
        goto Error;
    }
    // FIX: the kernel accumulates into devVisHist, so it must start zeroed.
    cudaStatus= cudaMemset(devVisHist, 0, B * sizeof(float));
    if(cudaStatus != cudaSuccess) {
        fprintf(fp,"cudaMemset failed!");
        goto Error;
    }
    cudaStatus= cudaMalloc((void**)&devOtf, B * sizeof(float));
    if(cudaStatus != cudaSuccess) {
        fprintf(fp,"cudaMalloc failed!");
        goto Error;
    }
    cudaStatus= cudaMalloc((void**)&devRawHistogramRay, K * B * D * sizeof(int));
    if(cudaStatus != cudaSuccess) {
        fprintf(fp,"cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus= cudaMemcpy( devRawHistogramRay, rawHistogramRays, K * B * D * sizeof(int), cudaMemcpyHostToDevice);
    if(cudaStatus != cudaSuccess) {
        fprintf(fp,"cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus= cudaMemcpy(devOtf, otf, B * sizeof(float), cudaMemcpyHostToDevice);
    if(cudaStatus != cudaSuccess) {
        fprintf(fp,"cudaMemcpy failed!");
        goto Error;
    }
    // Launch one block per ray.
    fprintf( fp, "before kernel\n " );fflush( fp );
    cudaKernel<<<K,1>>>(devVisHist, devRawHistogramRay, devOtf, K, B, D);
    fprintf( fp, "after kernel\n " );fflush( fp );
    // Check for any errors launching the kernel.
    cudaStatus= cudaGetLastError();
    if(cudaStatus != cudaSuccess) {
        fprintf(fp,"addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // Wait for the kernel to finish and surface any execution errors.
    cudaStatus= cudaDeviceSynchronize();
    if(cudaStatus != cudaSuccess) {
        fprintf(fp,"cudaDeviceSynchronize returned error code %d after launchingaddKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus= cudaMemcpy(visHist, devVisHist, B * sizeof(float), cudaMemcpyDeviceToHost);
    if(cudaStatus != cudaSuccess) {
        fprintf(fp,"cudaMemcpy failed!");
        goto Error;
    }
    // Log the result histogram.
    fprintf( fp, "before output\n " );fflush( fp );
    for( int i=0; i<B; i++ ){
        fprintf(fp, "%d : %f \n", i, visHist[i]);
    }
    fprintf( fp, "after output\n " );fflush( fp );
Error:
    cudaFree(devRawHistogramRay);
    cudaFree(devVisHist);
    cudaFree(devOtf);
    // FIX: close the log file (previously leaked every call).
    if (fp != stderr) {
        fclose(fp);
    }
    if(cudaStatus != cudaSuccess) {
        fprintf(stderr,"addWithCuda failed!");
        return 1;
    }
    return 0.0;  // FIX: the success path previously returned nothing
}
8,759 | #include "includes.h"
// Backward pass of a spatial LogSoftMax: for each spatial location,
// gradInput = gradOutput - exp(output) * sum_over_features(gradOutput).
// The idx remap makes consecutive threads own consecutive spatial
// positions while striding over the feature dimension by spatial_size.
__global__ void cunn_SpatialLogSoftMax_updateGradInput_kernel(float *gradInput, float *output, float *gradOutput, int feature_size, int spatial_size, int data_size)
{
int idx = (threadIdx.x + blockDim.x*blockIdx.x);
// remap the linear thread id to the first feature of one spatial position
idx = (idx/spatial_size)*feature_size + idx % spatial_size;
if (idx < data_size) {
int next_idx = idx + feature_size;
float gradSum = 0.0;
// Compute the sum of gradients
for(int i = idx; i < next_idx; i += spatial_size){
gradSum += gradOutput[i];
}
// Compute the new gradient
for(int i = idx; i < next_idx; i += spatial_size){
gradInput[i] = gradOutput[i] - __expf(output[i])*gradSum;
}
}
}
8,760 | #include <stdio.h>
#include <stdlib.h>
#define NUMBER 100
// Adds each valid thread's index to its own slot of the NUMBER-element
// array.
// FIX: the guard was `tx > NUMBER`, which let tx == NUMBER through and
// wrote one element past the end of the array.
// FIX: removed the trailing __syncthreads(); with threads returning early
// above, the barrier was divergent (undefined behavior), and no
// cross-thread data exchange needed it anyway.
__global__ void demo(int *arr){
    int tx = threadIdx.x;
    if (tx >= NUMBER) {
        return;
    }
    arr[tx] += tx;
}
// Allocates a device scratch buffer, runs the demo kernel over it, and
// frees it; the contents are never copied back — this program only
// exercises the launch path.
int main(int argc , char **argv){
    int * arr;
    cudaError_t err;
    err=cudaMalloc((void**)&arr,NUMBER*sizeof(int));
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    dim3 dimGrid(1,1);
    dim3 dimBlock(512,1);
    demo<<<dimGrid,dimBlock>>>(arr);
    // FIX: kernel launches are asynchronous and previously went entirely
    // unchecked; surface both launch-config and execution errors.
    err=cudaGetLastError();
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    err=cudaDeviceSynchronize();
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    err=cudaFree(arr);
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    return 0;
}
|
8,761 | #include "includes.h"
// Final scatter step of one 4-bit radix sort pass. Each thread loads one
// uint2 (two packed keys/values) into shared memory; each block also
// stages its 16 digit offsets (global `offsets` laid out digit-major, and
// per-block `blockOffsets`). Every element is then written to its
// globally sorted position: global digit offset + local rank within the
// block. Presumably blockDim.x == 256 (the shared arrays are sized 256
// and each block handles 2*blockDim.x elements) — confirm the launcher.
__global__ void reorderData(uint startbit, uint *outKeys, uint *outValues, uint2 *keys, uint2 *values, uint *blockOffsets, uint *offsets, uint *sizes, uint totalBlocks)
{
uint GROUP_SIZE = blockDim.x;
__shared__ uint2 sKeys2[256];
__shared__ uint2 sValues2[256];
__shared__ uint sOffsets[16];
__shared__ uint sBlockOffsets[16];
// view the packed uint2 arrays as flat uint arrays of 2*GROUP_SIZE entries
uint* sKeys1 = (uint*) sKeys2;
uint* sValues1 = (uint*) sValues2;
uint blockId = blockIdx.x;
uint i = blockId * blockDim.x + threadIdx.x;
sKeys2[threadIdx.x] = keys[i];
sValues2[threadIdx.x] = values[i];
// first 16 threads stage this block's digit offsets
if(threadIdx.x < 16)
{
sOffsets[threadIdx.x] = offsets[threadIdx.x * totalBlocks +
blockId];
sBlockOffsets[threadIdx.x] = blockOffsets[blockId * 16 + threadIdx.x];
}
__syncthreads();
// scatter the first element owned by this thread
uint radix = (sKeys1[threadIdx.x] >> startbit) & 0xF;
uint globalOffset = sOffsets[radix] + threadIdx.x - sBlockOffsets[radix];
outKeys[globalOffset] = sKeys1[threadIdx.x];
outValues[globalOffset] = sValues1[threadIdx.x];
// scatter the second element (upper half of the packed pairs)
radix = (sKeys1[threadIdx.x + GROUP_SIZE] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + threadIdx.x + GROUP_SIZE -
sBlockOffsets[radix];
outKeys[globalOffset] = sKeys1[threadIdx.x + GROUP_SIZE];
outValues[globalOffset] = sValues1[threadIdx.x + GROUP_SIZE];
}
8,762 | #include "includes.h"
// Element-wise device-to-device copy: one element per thread, threads past
// `size` do nothing.
__global__ void simple_copy_kernel(int size, float *src, float *dst)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size)
        return;
    dst[i] = src[i];
}
8,763 | #include <cuda.h>
#define KERNEL_SIZE 3
#define BLOCK_SIZE 512
typedef signed int pixel_channel;
typedef unsigned long resolution;
__constant__ pixel_channel kernel_cuda[KERNEL_SIZE * KERNEL_SIZE];
pixel_channel kernel_host[KERNEL_SIZE * KERNEL_SIZE];
// 3x3 convolution with the constant-memory kernel, one thread per interior
// column, looping down the image rows. The three rows of the current window
// are staged in shared memory with a one-pixel halo on each side.
// NOTE(review): the accumulation reads sharedMemory[j][threadIdx.x+1] for
// every i, i.e. only the centre column of the 3x3 window is used -- probably
// threadIdx.x+i was intended, but it matches new_convolution below, so it is
// left unchanged pending confirmation.
__global__ void Pixel_Shared_Convolution(pixel_channel *channel_cuda, pixel_channel *rezult_cuda, resolution width, resolution lineQuantity)
{
    __shared__ pixel_channel sharedMemory [3][BLOCK_SIZE + 2];
    for(resolution line = 1; line < lineQuantity; line++)
    {
        resolution temp = blockIdx.x * BLOCK_SIZE + threadIdx.x + 1;
        // Stage the three rows this line needs.
        sharedMemory [0][threadIdx.x+1] = channel_cuda[temp + width * (line - 1)];
        sharedMemory [1][threadIdx.x+1] = channel_cuda[temp + width * line];
        sharedMemory [2][threadIdx.x+1] = channel_cuda[temp + width * (line + 1)];
        // First and last thread of the block fetch the halo columns.
        if(threadIdx.x == 0)
        {
            temp--;
            sharedMemory [0][0] = channel_cuda[temp + width * (line-1)];
            sharedMemory [1][0] = channel_cuda[temp + width * line];
            sharedMemory [2][0] = channel_cuda[temp + width * (line+1)];
        }
        if(threadIdx.x == (BLOCK_SIZE-1))
        {
            temp += 2;
            sharedMemory [0][BLOCK_SIZE] = channel_cuda[temp + width * (line - 1)];
            sharedMemory [1][BLOCK_SIZE] = channel_cuda[temp + width * line + 2];
            sharedMemory [2][BLOCK_SIZE] = channel_cuda[temp + width * (line + 1)];
        }
        __syncthreads();
        pixel_channel Sum = 0;
        for (int i = 0; i < KERNEL_SIZE; i++)
            for (int j = 0; j < KERNEL_SIZE; j++)
                Sum += sharedMemory[j][threadIdx.x+1] * kernel_cuda[i * 3 + j];
        // Clamp to the valid 8-bit pixel range.
        if (Sum < 0)
            Sum = 0;
        if (Sum > 255)
            Sum = 255;
        rezult_cuda[blockIdx.x * BLOCK_SIZE + threadIdx.x + width * line + 1] = Sum;
        // BUG FIX: barrier before the next iteration refills sharedMemory.
        // Without it, fast threads overwrite the tile while slower threads
        // are still reading the previous row's data (shared-memory race).
        __syncthreads();
    }
    return;
}
// Host wrapper: uploads one image channel, runs the shared-memory 3x3
// convolution on the device, and copies the result back into *channel.
// `kernel` is the 3x3 filter, copied into constant memory.
extern "C" __host__ void Shared_Memory_Convolution(pixel_channel **channel, resolution width, resolution height, pixel_channel kernel[3][3])
{
    pixel_channel *channel_cuda, *rezult_cuda;
    resolution size = width * height;
    // Flatten the 3x3 filter for the constant-memory upload.
    for(int i = 0; i < 3; i++)
        for(int j = 0; j < 3; j++)
            kernel_host[i * 3 + j] = kernel[i][j];
    // Ceil-division: enough blocks to cover the interior columns.
    resolution block_count = 0;
    if(((width - 2)%BLOCK_SIZE) == 0)
        block_count = (width - 2)/BLOCK_SIZE;
    else
        block_count = (width - 2)/BLOCK_SIZE + 1;
    dim3 gridSize = dim3(block_count, 1, 1);
    dim3 blockSize = dim3(BLOCK_SIZE, 1, 1);
    // +256 elements of slack so halo loads near the image edge stay in bounds.
    cudaMalloc((void **)& rezult_cuda, (size + 256) * sizeof(pixel_channel));
    cudaMalloc((void **)& channel_cuda, (size + 256) * sizeof(pixel_channel));
    cudaMemcpy(channel_cuda, *channel, size * sizeof(pixel_channel), cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(kernel_cuda, kernel_host, 9 * sizeof(pixel_channel), 0, cudaMemcpyHostToDevice);
    Pixel_Shared_Convolution<<<gridSize, blockSize>>>(channel_cuda, rezult_cuda, width, height - 2);
    // Blocking copy: implicitly synchronizes with the kernel above.
    cudaMemcpy(*channel, rezult_cuda, size * sizeof(pixel_channel), cudaMemcpyDeviceToHost);
    cudaFree(rezult_cuda);
    cudaFree(channel_cuda);
    // BUG FIX: the original called cudaDeviceReset() here. Resetting the
    // device from a reusable helper destroys the caller's entire CUDA
    // context -- every allocation, stream, and handle the process still owns.
    return;
}
// Normalised 3x3 convolution (result divided by the kernel-coefficient sum),
// one thread per interior column, iterating down the image rows with a
// shared-memory staging tile (one-pixel halo per side).
// NOTE(review): like Pixel_Shared_Convolution above, the accumulation reads
// only sharedMemory[j][threadIdx.x+1] (the centre column) for every i --
// probably threadIdx.x+i was intended; left unchanged pending confirmation.
__global__ void new_convolution(int *channel_cuda, int *rezult_cuda, int width, int height, int size, int lineQuantity)
{
    __shared__ int sharedMemory [3][BLOCK_SIZE+2];
    for(int line=1; line<lineQuantity; line++)
    {
        int temp = blockIdx.x * BLOCK_SIZE + threadIdx.x + 1;
        // Stage the three rows this line's window needs.
        sharedMemory [0][threadIdx.x+1] = channel_cuda[temp + width * (line-1)];
        sharedMemory [1][threadIdx.x+1] = channel_cuda[temp + width * line];
        sharedMemory [2][threadIdx.x+1] = channel_cuda[temp + width * (line+1)];
        // Edge threads fetch the halo columns.
        if(threadIdx.x == 0)
        {
            temp--;
            sharedMemory [0][0] = channel_cuda[temp + width * (line-1)];
            sharedMemory [1][0] = channel_cuda[temp + width * line];
            sharedMemory [2][0] = channel_cuda[temp + width * (line+1)];
        }
        if(threadIdx.x == (BLOCK_SIZE-1))
        {
            temp += 2;
            sharedMemory [0][BLOCK_SIZE] = channel_cuda[temp + width * (line-1)];
            sharedMemory [1][BLOCK_SIZE] = channel_cuda[temp + width * line + 2];
            sharedMemory [2][BLOCK_SIZE] = channel_cuda[temp + width * (line+1)];
        }
        __syncthreads();
        int rSum = 0, kSum = 0, kernelVal, r;
        for (int i = 0; i < KERNEL_SIZE; i++)
        {
            for (int j = 0; j < KERNEL_SIZE; j++)
            {
                r = sharedMemory[j][threadIdx.x+1];
                kernelVal = kernel_cuda[i*3+j]; // fetch the kernel coefficient
                rSum += r * kernelVal;
                kSum += kernelVal;
            }
        }
        if (kSum <= 0) kSum = 1;  // avoid division by zero / sign flip
        // Normalise and clamp to the valid 8-bit pixel range.
        rSum /= kSum;
        if (rSum < 0) rSum = 0;
        if (rSum > 255) rSum = 255;
        // Write into the result image.
        rezult_cuda[blockIdx.x * BLOCK_SIZE + threadIdx.x + width * line + 1] = rSum;
        // BUG FIX: barrier before the next iteration overwrites sharedMemory;
        // without it fast threads refill the tile while slower threads are
        // still reading it (shared-memory data race).
        __syncthreads();
    }
}
// Host wrapper: sharpens the three colour planes of `image` concurrently,
// one CUDA stream per plane, using a hard-coded 3x3 sharpen kernel held in
// constant memory. Results are written back into image[0..2].
extern "C" __host__ void asyncConvolution(int **image, int width, int height)
{
#define STREAM_QUANTITY 3
    int **channel_cuda; channel_cuda = (int**)malloc(3*sizeof(int*));
    int **rezult_cuda; rezult_cuda = (int**)malloc(3*sizeof(int*));
    int size = width * height; // image size in pixels
    // Sharpen kernel:
    kernel_host[0] = -1;
    kernel_host[1] = -1;
    kernel_host[2] = -1;
    kernel_host[3] = -1; // -1 -1 -1
    kernel_host[4] = 9;  // -1  9 -1
    kernel_host[5] = -1; // -1 -1 -1
    kernel_host[6] = -1;
    kernel_host[7] = -1;
    kernel_host[8] = -1;
    // Pin the host planes so cudaMemcpyAsync can overlap with compute.
    cudaHostRegister(image[0], (width * height + 256) * sizeof(int), cudaHostRegisterMapped);
    cudaHostRegister(image[1], (width * height + 256) * sizeof(int), cudaHostRegisterMapped);
    cudaHostRegister(image[2], (width * height + 256) * sizeof(int), cudaHostRegisterMapped);
    cudaMalloc((void **)& rezult_cuda[0], (width * height + 256) * sizeof(int));
    cudaMalloc((void **)& rezult_cuda[1], (width * height + 256) * sizeof(int));
    cudaMalloc((void **)& rezult_cuda[2], (width * height + 256) * sizeof(int));
    cudaMalloc((void **)& channel_cuda[0], (width * height + 256) * sizeof(int));
    cudaMalloc((void **)& channel_cuda[1], (width * height + 256) * sizeof(int));
    cudaMalloc((void **)& channel_cuda[2], (width * height + 256) * sizeof(int));
    // Upload the filter to constant memory.
    cudaMemcpyToSymbol(kernel_cuda,kernel_host,9*sizeof(int),0,cudaMemcpyHostToDevice);
    dim3 gridSize = dim3((width - 2)/BLOCK_SIZE + 1, 1, 1);
    dim3 blockSize = dim3(BLOCK_SIZE, 1, 1);
    cudaStream_t stream[STREAM_QUANTITY];
    for(int i=0;i<STREAM_QUANTITY;i++)
    {
        cudaStreamCreate(&stream[i]);
    }
    // Per-stream pipeline: H2D copy, convolution, D2H copy.
    for(int i=0;i<STREAM_QUANTITY;i++)
    {
        cudaMemcpyAsync(channel_cuda[i], image[i] ,width*height*sizeof(int),cudaMemcpyHostToDevice,stream[i]);
    }
    for(int i=0;i<STREAM_QUANTITY;i++)
    {
        new_convolution<<<gridSize,blockSize,0,stream[i]>>>(channel_cuda[i], rezult_cuda[i], width,height,size,height-2);
    }
    for(int i=0;i<STREAM_QUANTITY;i++)
        cudaMemcpyAsync(image[i], rezult_cuda[i], width*height*sizeof(int),cudaMemcpyDeviceToHost,stream[i]);
    // BUG FIX: wait for each stream's device-to-host copy before tearing
    // anything down -- the original destroyed the streams and returned while
    // the async copies into image[] could still be in flight.
    for(int i=0;i<STREAM_QUANTITY;i++)
        cudaStreamSynchronize(stream[i]);
    for(int i=0;i<STREAM_QUANTITY;i++)
        cudaStreamDestroy(stream[i]);
    // Unpin the host planes (the original leaked these registrations).
    cudaHostUnregister(image[0]);
    cudaHostUnregister(image[1]);
    cudaHostUnregister(image[2]);
    // Free device buffers and the host-side pointer tables (also leaked).
    for(int i=0;i<STREAM_QUANTITY;i++)
    {
        cudaFree(rezult_cuda[i]);
        cudaFree(channel_cuda[i]);
    }
    free(channel_cuda);
    free(rezult_cuda);
    // cudaDeviceReset() removed: resetting the device from a helper destroys
    // the caller's entire CUDA context.
    return;
}
|
8,764 | #include <stdio.h>
// Sequential first-fit bin packing executed on the device.
// bins is a 2 x length matrix: row 0 holds each bin's remaining capacity,
// row 1 the amount packed into it. Intended for a single-thread launch --
// every launched thread runs the identical full loop, so extra threads only
// duplicate (and race on) the work.
__global__ void binPacking (float *bins, float *items, float cap, int length)
{
    int x = 0;
    for(int i = 0; i < length; i++)
    {
        x = 0;
        if(items[i] > cap)
        {
            printf ("Element %f je veci od kapaciteta spremnika koji je %f. PREKID!\n", items[i], cap);
            break;
        }
        // Advance to the first bin with enough remaining capacity, without
        // running past the end of the bin array -- the original while loop
        // had no bound and could index out of bounds when no bin fit.
        while(x < length && bins[0 * length + x] < items[i])
        {
            x += 1;
        }
        if(x == length)
        {
            // No existing bin can take this item; skip it rather than
            // corrupt memory past the array.
            continue;
        }
        bins[0 * length + x] -= items[i];
        bins[1 * length + x] += items[i];
    }
}
|
8,765 | #include "includes.h"
// Extracts one column of the row-major matrix A (Acount elements, Acols
// columns) into out0, one thread per row. The column index is read from
// colId[0]; empty_par1/empty_par2/out0count/out0cols are unused placeholders.
__global__ void Matrix_getCol_FloatPointer_naive(const float * A , int Acount, int Acols, const float * colId , int empty_par1, int empty_par2, float * out0 , int out0count, int out0cols)
{
    int row = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
    int rows = Acount / Acols;
    if (row < rows)
    {
        int col = (int)colId[0];
        out0[row] = A[row*Acols + col];
    }
}
8,766 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits>
using namespace std;
#define NPP_MAXABS_32F ( 3.402823466e+38f )
#define NPP_MINABS_32F ( 1.175494351e-38f )
#define NPP_MAXABS_64F ( 1.7976931348623158e+308 )
#define NPP_MINABS_64F ( 2.2250738585072014e-308 )
#define THREADS 5
#define BLOCKS 10
typedef double apple;
typedef float orange;
// Experimental kernel exercising reads/writes through float and typedef'd
// (orange == float) pointers.
// NOTE(review): `thread < NPP_MAXABS_32F` compares a small int against
// FLT_MAX, so it is always true and the else branch is dead code -- confirm
// whether that was intentional.
// NOTE(review): main() launches this with 10 blocks; thread 4 of every block
// performs the same unsynchronized read-modify-write of *dev_b and *dev_c,
// so those final values are racy across blocks.
__global__ void testFunction(float *dev_a, float *dev_b, orange *dev_c)
{
int thread = threadIdx.x;
if(thread < THREADS)
{
// Always taken (see note above): fill a[thread] with FLT_MAX.
if(thread < NPP_MAXABS_32F)
dev_a[thread] = NPP_MAXABS_32F;
else
dev_a[thread] = NPP_MINABS_32F;
if (thread == 4)
{
// a[4] <- b; b <- 2*a[4]; a[3] <- c; c <- 3*a[3].
dev_a[thread] = *dev_b;
*dev_b = dev_a[thread]*2;
dev_a[thread-1] = *dev_c;
*dev_c = dev_a[thread-1]*3;
}
}
}
// Driver for testFunction: uploads a small float array plus two scalars,
// runs the kernel, and prints results alongside numeric-limit constants.
// NOTE(review): the <<<BLOCKS, THREADS>>> launch runs 10 blocks over the
// same buffers; b and c undergo racy read-modify-writes (see kernel note),
// so the printed b/c values are not deterministic -- confirm intent.
int main()
{
printf("\nMAIN START\n");
float a[THREADS] = { 1, 2, 3, 4, 5 };
printf("BEFORE LOOP\n");
for(int i = 0; i<THREADS; i++)
{
printf("a[%d] = %.2f; ", i, a[i]);
}
printf("AFTER LOOP\n");
// Upload the array and the two scalars to the device.
float *dev_a;
cudaMalloc((void**)&dev_a, THREADS*sizeof(float));
cudaMemcpy(dev_a, a, THREADS*sizeof(float), cudaMemcpyHostToDevice);
float b = 25;
float *dev_b;
cudaMalloc((void**)&dev_b, sizeof(float));
cudaMemcpy(dev_b, &b, sizeof(float), cudaMemcpyHostToDevice);
orange c = 77;
orange *dev_c;
cudaMalloc((void**)&dev_c, sizeof(orange));
cudaMemcpy(dev_c, &c, sizeof(orange), cudaMemcpyHostToDevice);
testFunction<<<BLOCKS, THREADS>>>(dev_a, dev_b, dev_c);
// Blocking copies: these implicitly synchronize with the kernel.
cudaMemcpy(a, dev_a, THREADS*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&b, dev_b, sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&c, dev_c, sizeof(orange), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
printf("\nAFTER CUDA FREE\n");
for(int i = 0; i<THREADS; i++)
{
printf("a[%d] = %.2f; ", i, a[i]);
}
printf("\nEND\n");
printf("b = %.2f; \n", b);
printf("c = %.2f; \n", c);
// Host-side typedef and numeric-limit sanity prints.
orange d = 12.3;
apple e = 23.4;
printf("d = %.2f\ne = %.2f\n", d, e);
printf("MAX: %.2f\n", NPP_MAXABS_32F);
printf("MAX FLOAT: %.2f\n", numeric_limits<float>::max());
printf("MAX DOUBLE: %.2f\n", numeric_limits<double>::max());
return 0;
}
|
8,767 | #include "includes.h"
// Copies one module's image row from `data` into its destination slot in
// `target`, scaled by beta. Grid mapping: blockIdx.x = source module,
// blockIdx.y = channel; threads stride across the images dimension.
__global__ void kWriteRows(float* data, float* target, int num_images, int num_modules, int num_modules_batch, int module_id_offset, float beta) {
    int channel = blockIdx.y;
    int srcModule = blockIdx.x;
    int dstModule = module_id_offset + srcModule;
    // Base pointers for this (module, channel) row in source and destination.
    const float* src = data + num_images * (srcModule + channel * num_modules_batch);
    float* dst = target + num_images * (dstModule + channel * num_modules);
    for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
        dst[im] = beta * src[im];
    }
}
8,768 | #include <cuda_runtime_api.h>
// FIXME(20160123): commentng out for cuda 7.0.
//#include <cuda_fp16.h>
#include <stdint.h>
// In-place scale: dst[i] *= alpha for i in [0, dim). One thread per element.
__global__ void vector_scale_f32_kernel(
    float *dst,
    int dim,
    float alpha)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= dim) return;
  dst[i] = alpha * dst[i];
}
// Host wrapper: launches enough 1024-thread blocks to cover dim on `stream`.
extern "C" void array_cuda_vector_scale_f32(
    float *dst,
    int dim,
    float alpha,
    cudaStream_t stream)
{
  int threads = 1024;
  int blocks = (dim + threads - 1) / threads;
  vector_scale_f32_kernel<<<blocks, threads, 0, stream>>>(dst, dim, alpha);
}
// In-place elementwise exponential: xs[i] = expf(xs[i]).
__global__ void vector_exp_f32_kernel(
    float *xs,
    int dim)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= dim) return;
  xs[i] = expf(xs[i]);
}
// Host wrapper: covers dim with 1024-thread blocks on `stream`.
extern "C" void array_cuda_vector_exp_f32(
    float *xs,
    int dim,
    cudaStream_t stream)
{
  int threads = 1024;
  int blocks = (dim + threads - 1) / threads;
  vector_exp_f32_kernel<<<blocks, threads, 0, stream>>>(xs, dim);
}
// Scaled copy: dst[i] = alpha * src[i].
__global__ void vector_set_f32_kernel(
    const float *src,
    int dim,
    float alpha,
    float *dst)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= dim) return;
  dst[i] = alpha * src[i];
}
// Host wrapper: covers dim with 1024-thread blocks on `stream`.
extern "C" void array_cuda_vector_set_f32(
    const float *src,
    int dim,
    float alpha,
    float *dst,
    cudaStream_t stream)
{
  int threads = 1024;
  int blocks = (dim + threads - 1) / threads;
  vector_set_f32_kernel<<<blocks, threads, 0, stream>>>(src, dim, alpha, dst);
}
// Scaled accumulate (axpby): dst[i] = alpha * src[i] + beta * dst[i].
__global__ void vector_add_f32_kernel(
    const float *src,
    int dim,
    float alpha,
    float beta,
    float *dst)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= dim) return;
  dst[i] = alpha * src[i] + beta * dst[i];
}
// Host wrapper: covers dim with 1024-thread blocks on `stream`.
extern "C" void array_cuda_vector_add_f32(
    const float *src,
    int dim,
    float alpha,
    float beta,
    float *dst,
    cudaStream_t stream)
{
  int threads = 1024;
  int blocks = (dim + threads - 1) / threads;
  vector_add_f32_kernel<<<blocks, threads, 0, stream>>>(src, dim, alpha, beta, dst);
}
// Online (exponential moving) average: dst[i] += alpha * (src[i] - dst[i]).
__global__ void vector_avg_online_f32_kernel(
    const float *src,
    int dim,
    float alpha,
    float *dst)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= dim) return;
  float cur = dst[i];
  dst[i] = cur + alpha * (src[i] - cur);
}
// Host wrapper: covers dim with 1024-thread blocks on `stream`.
extern "C" void array_cuda_vector_avg_online_f32(
    const float *src,
    int dim,
    float alpha,
    float *dst,
    cudaStream_t stream)
{
  int threads = 1024;
  int blocks = (dim + threads - 1) / threads;
  vector_avg_online_f32_kernel<<<blocks, threads, 0, stream>>>(src, dim, alpha, dst);
}
// Elementwise in-place product: ys[i] *= xs[i].
__global__ void vector_elemwise_mult_f32_kernel(
    const float *xs,
    int len,
    float *ys)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= len) return;
  ys[i] = xs[i] * ys[i];
}
// Host wrapper: covers len with 1024-thread blocks on `stream`.
extern "C" void array_cuda_vector_elemwise_mult_f32(
    const float *xs,
    int len,
    float *ys,
    cudaStream_t stream)
{
  int threads = 1024;
  int blocks = (len + threads - 1) / threads;
  vector_elemwise_mult_f32_kernel<<<blocks, threads, 0, stream>>>(xs, len, ys);
}
// Elementwise in-place quotient: ys[i] /= xs[i] (no zero check, like cuBLAS-style raw ops).
__global__ void vector_elemwise_div_f32_kernel(
    const float *xs,
    int len,
    float *ys)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= len) return;
  ys[i] = ys[i] / xs[i];
}
// Host wrapper: covers len with 1024-thread blocks on `stream`.
extern "C" void array_cuda_vector_elemwise_div_f32(
    const float *xs,
    int len,
    float *ys,
    cudaStream_t stream)
{
  int threads = 1024;
  int blocks = (len + threads - 1) / threads;
  vector_elemwise_div_f32_kernel<<<blocks, threads, 0, stream>>>(xs, len, ys);
}
|
8,769 | #include <stdio.h>
#include <pthread.h>
#include <sys/time.h>
int cpu_cores;
long long num_tosses;
int gpu_cores;
// Work descriptor for one CPU Monte-Carlo worker thread.
struct toss_t {
int id;                  // worker index; also used as the RNG seed
pthread_t pid;           // pthread handle for join
long long num_tosses;    // tosses assigned to this worker
long long in_circle;     // result: tosses landing inside the unit circle
};
// Linear congruential generator using glibc-style constants; advances *seed
// in place and returns the new 32-bit state. Usable on host and device.
inline __device__ __host__ unsigned my_rand(unsigned *seed) {
  unsigned next = 1103515245u * (*seed) + 12345u;
  *seed = next;
  return next;
}
const unsigned MY_RAND_MAX = ~0u;  // full 32-bit range of my_rand
// Wall-clock time in milliseconds since the Unix epoch.
long long Timer() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return now.tv_sec * 1000LL + now.tv_usec / 1000;
}
// pthread entry point: throws me->num_tosses random darts into the unit
// square and counts hits inside the quarter circle; result in me->in_circle.
void *cpu_toss(void *args) {
  toss_t *me = (toss_t *) args;
  const long long tosses = me->num_tosses;
  unsigned seed = me->id;
  long long hits = 0;
  for (long long t = 0; t < tosses; t++) {
    float px = my_rand(&seed) / (float) MY_RAND_MAX;
    float py = my_rand(&seed) / (float) MY_RAND_MAX;
    if (px * px + py * py <= 1.0f) hits += 1;
  }
  me->in_circle = hits;
  return me;
}
// Kernel: splits num_tosses across all launched threads (the first threads
// absorb the remainder) and writes each thread's in-circle count to
// result[tid]. The host sums the partial counts.
__global__ void gpu_toss(long long num_tosses, long long *result) {
  int total_threads = blockDim.x * gridDim.x;
  int tid = blockDim.x * blockIdx.x + threadIdx.x;
  long long mine = num_tosses / total_threads;
  // Distribute the leftover tosses, one each to the lowest-id threads.
  if (tid < num_tosses - mine * total_threads) {
    mine += 1;
  }
  unsigned seed = tid;
  long long hits = 0;
  for (long long t = 0; t < mine; t++) {
    float px = my_rand(&seed) / (float) MY_RAND_MAX;
    float py = my_rand(&seed) / (float) MY_RAND_MAX;
    if (px * px + py * py <= 1.0f) hits += 1;
  }
  result[tid] = hits;
}
// CLI: ./pi <threads> <tosses> [<blocks>]. With a block count, runs the
// Monte-Carlo pi estimate on the GPU; otherwise on <threads> pthreads.
int main(int argc, char *argv[]) {
    if (argc < 3) {
        fprintf(stderr, "usage: ./pi <threads> <tosses> [<blocks>]\n");
        return 1;
    }
    cpu_cores = atoi(argv[1]);
    num_tosses = atoll(argv[2]);
    gpu_cores = 0;
    if (argc == 4) gpu_cores = atoi(argv[3]);
    if (cpu_cores <= 0 || gpu_cores < 0) {
        fprintf(stderr, "thread count must not be negative\n");
        return 1;
    }
    long long answer = 0;
    // BUG FIX: Timer() returns milliseconds since the epoch as a 64-bit
    // value; the original stored it in an int, truncating the timestamp.
    long long t1;
    if (gpu_cores > 0) { // GPU CUDA parallel
        int total_cores = cpu_cores * gpu_cores;
        long long *gpu_answer;
        cudaMalloc(&gpu_answer, sizeof(long long) * total_cores);
        t1 = Timer();
        gpu_toss<<<cpu_cores, gpu_cores>>>(num_tosses, gpu_answer);
        long long *arr = new long long[total_cores];
        // Blocking copy: synchronizes with the kernel before the host reads.
        cudaMemcpy(arr, gpu_answer, sizeof(long long) * total_cores, cudaMemcpyDeviceToHost);
        for (int i = 0; i < total_cores; i++)
            answer += arr[i];
        delete[] arr;
        cudaFree(gpu_answer);   // was leaked in the original
    }
    else { // CPU parallel
        toss_t *jobs = new toss_t[cpu_cores];
        long long remain = num_tosses;
        t1 = Timer();
        for (int i = 0; i < cpu_cores; i++) {
            jobs[i].id = i;
            // Spread tosses evenly, shrinking the remainder as we go.
            long long use = remain / (cpu_cores - i);
            jobs[i].num_tosses = use;
            remain -= use;
            jobs[i].in_circle = 0;
            pthread_create(&jobs[i].pid, NULL, cpu_toss, &jobs[i]);
        }
        for (int i = 0; i < cpu_cores; i++) {
            pthread_join(jobs[i].pid, NULL);
            answer += jobs[i].in_circle;
        }
        delete[] jobs;
    }
    // Quarter-circle hit ratio * 4 estimates pi.
    double pi = 4.0 * answer / num_tosses;
    long long t2 = Timer();
    printf("Pi: %f\n", pi);
    printf("use time: %fs\n", float(t2-t1)/1000);
    return 0;
}
|
8,770 | #include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
static const unsigned int NUM_AGENTS = 1 << 14;   // agents in the simulation
//static const unsigned int NUM_AGENTS = 2000000;
static const unsigned int NUM_FIRMS = 1 << 6;     // firms agents can join
//static const unsigned int NUM_FIRMS = 10000;
static const unsigned int NUM_ITER = 1 << 0;      // inner iterations per kernel launch
//static const unsigned int NUM_ITER = 10000;
static const float Q = 0.5;      // base probability of moving firms
static const float BIAS = 0.5;   // bonus probability when the target firm is larger
static const float P = Q + BIAS;
static const int THREADS_PER_BLOCK = 1 << 10;
// BUG FIX: the original wrote ceil(NUM_AGENTS / THREADS_PER_BLOCK), but
// integer division truncates BEFORE ceil() sees the value, so the ceil was a
// no-op and under-counted blocks whenever the sizes did not divide evenly.
// Use integer ceiling division instead.
static const int NUM_BLOCKS = (NUM_AGENTS + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
unsigned int *firms;       // current firm sizes (managed memory)
unsigned int *firms_tmp;   // running tallies updated by kernels (managed)
unsigned int *agents;      // firm id per agent (managed)
curandState *states;       // per-agent device RNG state (managed)
// Per-agent setup: seeds this thread's RNG, assigns the agent a random
// starting firm, and tallies firm membership into firms_tmp atomically.
__global__
void init(unsigned int *firms, unsigned int *firms_tmp,
unsigned int *agents, curandState *states, unsigned long seed)
{
    unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= NUM_AGENTS) return;
    curandState rng;
    curand_init(seed, (unsigned long long)tid, 0, &rng);
    // Uniform random initial firm for this agent.
    unsigned int firm = curand(&rng) % NUM_FIRMS;
    agents[tid] = firm;
    // Tally agents per firm (many threads may pick the same firm).
    atomicAdd(&firms_tmp[firm], 1);
    // Persist the RNG state for later kernels.
    states[tid] = rng;
}
// One simulation step per agent, repeated N times: pick a random firm and
// move there with probability P if it is larger than the current firm, Q
// otherwise. Size deltas are tallied per-block in shared memory and flushed
// to the global firms_tmp counters each iteration.
// NOTE(review): the __syncthreads() calls sit inside the idx guard; that is
// only safe because NUM_AGENTS is an exact multiple of the block size, so
// every thread in a block takes the same branch -- confirm if sizes change.
// NOTE(review): firms[] is only refreshed by the host between launches, so
// within a launch all iterations compare against stale firm sizes.
__global__
void move(unsigned int *firms, unsigned int *firms_tmp,
unsigned int *agents, curandState *states, unsigned int N)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ int s_firms[NUM_FIRMS];
if (idx < NUM_AGENTS) {
curandState state = states[idx];
unsigned int firm = agents[idx];
unsigned int firmSize = firms[firm];
unsigned int newFirm;
unsigned int newFirmSize;
float p;
for (int i = 0; i < N; i++) {
// Reset this block's local tally (strided over the firm array).
for (int j = threadIdx.x; j < NUM_FIRMS; j += blockDim.x) {
s_firms[j] = 0;
}
__syncthreads();
// Candidate firm, chosen uniformly at random.
newFirm = curand(&state) % NUM_FIRMS;
newFirmSize = firms[newFirm];
// Biased move probability: P when the candidate is larger, else Q.
p = newFirmSize > firmSize ? P : Q;
if (curand_uniform(&state) < p) { // if moving
// Record -1 at the old firm and +1 at the new firm locally.
atomicSub(&s_firms[firm], 1);
firm = newFirm;
firmSize = newFirmSize;
atomicAdd(&s_firms[firm], 1);
}
__syncthreads();
// Flush the block's signed deltas into the global tallies
// (unsigned wraparound makes adding a negative int behave as a
// subtraction).
for (int j = threadIdx.x; j < NUM_FIRMS; j += blockDim.x) {
atomicAdd(&firms_tmp[j], s_firms[j]);
}
__syncthreads();
}
// Persist the agent's final firm and RNG state.
agents[idx] = firm;
states[idx] = state;
}
}
/*
__global__
void stats(unsigned int *transactionPrice, unsigned int numTrades, unsigned int price) {
}
*/
// Driver: initializes agents/firms in unified memory, runs 100000 rounds of
// the move kernel, and prints firm sizes plus the total agent count.
int main()
{
    int sum = 0;
    // Fixed seed for reproducible runs (swap in time(NULL) for varied runs).
    unsigned long int seed = 1572534477;
    size_t firmSize = NUM_FIRMS*sizeof(unsigned int);   // bytes per firm-size array
    size_t agentSize = NUM_AGENTS*sizeof(unsigned int); // bytes for the agent->firm map
    size_t stateSize = NUM_AGENTS*sizeof(curandState);  // bytes for per-agent RNG states
    printf("Seed: %lu, Agents: %u, Firms: %u, Blocks: %i, Threads per block: %i, Threads: %i, Iterations: %u\n", seed, NUM_AGENTS, NUM_FIRMS, NUM_BLOCKS, THREADS_PER_BLOCK, NUM_BLOCKS * THREADS_PER_BLOCK, NUM_ITER);
    // Unified memory lets the host read tallies without explicit copies.
    // TODO implement error handling on these allocations.
    cudaMallocManaged(&firms, firmSize);
    cudaMallocManaged(&firms_tmp, firmSize);
    cudaMallocManaged(&agents, agentSize);
    cudaMallocManaged(&states, stateSize);
    init<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(firms, firms_tmp, agents, states, seed);
    cudaDeviceSynchronize();
    // Publish the initial tallies as the firm sizes the next launch reads.
    for (int i = 0; i < NUM_FIRMS; i++) {
        firms[i] = firms_tmp[i];
    }
    for (int i = 0; i < NUM_FIRMS; i++) {
        printf("%5u", firms[i]);
    }
    printf("\n");
    sum = 0;
    for (int i = 0; i < NUM_FIRMS; i++) {
        sum += firms[i];
    }
    // Outer loop: run a kernel round, then refresh the published firm sizes.
    for (int i = 0; i < 100000 ; i++) {
        move<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(firms, firms_tmp, agents, states, NUM_ITER);
        cudaDeviceSynchronize();
        for (int j = 0; j < NUM_FIRMS; j++) {
            firms[j] = firms_tmp[j];
        }
    }
    for (int i = 0; i < NUM_FIRMS; i++) {
        printf("%5u", firms[i]);
    }
    printf("\n");
    printf("\n");
    sum = 0;
    for (int i = 0; i < NUM_FIRMS; i++) {
        sum += firms[i];
    }
    printf("Agent Count: %d\n", sum);
    // Free all managed allocations (firms_tmp was leaked in the original).
    cudaFree(firms);
    cudaFree(firms_tmp);
    cudaFree(agents);
    cudaFree(states);
    return EXIT_SUCCESS;
}
|
8,771 | #include <emmintrin.h>
#include <sys/time.h>
#include <stdio.h>
int N = 64000000;
int doPrint = 0;
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// HELPER CODE TO INITIALIZE, PRINT AND TIME
struct timeval start, end;
// Fills a[0..N) with the squares of pseudo-random digits in [0, 9].
void initialize(float *a, int N) {
  for (int i = 0; i < N; ++i) {
    int digit = rand() % 10;
    a[i] = pow(digit, 2);
  }
}
// Prints the array when the global doPrint flag is set; otherwise a no-op.
void print(float* a, int N) {
  if (!doPrint) return;
  for (int i = 0; i < N; ++i)
    printf("%f ", a[i]);
  printf("\n");
}
// Records the start timestamp into the global `start`.
void starttime() {
  gettimeofday(&start, 0);
}
// Prints the milliseconds elapsed since starttime(), labelled with c.
void endtime(const char* c) {
  gettimeofday(&end, 0);
  double ms = (end.tv_sec - start.tv_sec) * 1000.0 + (end.tv_usec - start.tv_usec) / 1000.0;
  printf("%s: %f ms\n", c, ms);
}
// Opens one labelled test run: banner, fill the array, optional dump, and
// start the timer.
void init(float* a, int N, const char* c) {
  printf("***************** %s **********************\n", c);
  initialize(a, N);
  print(a, N);
  starttime();
}
// Closes a test run: stop/report the timer, optional dump, closing banner.
void finish(float* a, int N, const char* c) {
  endtime(c);
  print(a, N);
  printf("***************************************************\n");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Normal C function to square root values
// CPU reference: square-roots each element of a in place.
void normal(float* a, int N)
{
  for (int i = 0; i < N; ++i)
    a[i] = sqrt(a[i]);
}
// GPU function to square root values
// One thread per element: a[i] = sqrt(a[i]); threads past N do nothing.
__global__ void gpu_sqrt(float* a, int N) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= N) return;
  a[i] = sqrt(a[i]);
}
// Copies `a` to the GPU, square-roots it there, and copies it back.
void gpu(float* a, int N) {
  int numThreads = 1024;        // threads per block (max on most GPUs)
  int numCores = N / 1024 + 1;  // enough blocks to cover N elements
  float* gpuA;
  cudaMalloc(&gpuA, N*sizeof(float));                             // allocate on the GPU
  cudaMemcpy(gpuA, a, N*sizeof(float), cudaMemcpyHostToDevice);   // copy CPU -> GPU
  gpu_sqrt<<<numCores, numThreads>>>(gpuA, N);                    // run the kernel
  cudaMemcpy(a, gpuA, N*sizeof(float), cudaMemcpyDeviceToHost);   // copy GPU -> CPU
  // BUG FIX: cudaFree takes the device pointer itself. The original passed
  // &gpuA (the address of the host-side pointer variable), which fails with
  // cudaErrorInvalidValue and leaks the device allocation.
  cudaFree(gpuA);
}
// Benchmarks the sequential sqrt loop against the GPU version on N floats.
int main()
{
  // 16-byte (128-bit) aligned allocation, as SSE code would require.
  float* a;
  posix_memalign((void**)&a, 16, N * sizeof(float));
  // Test 1: Sequential For Loop
  init(a, N, "Normal");
  normal(a, N);
  finish(a, N, "Normal");
  // Test 2: GPU
  init(a, N, "GPU");
  gpu(a, N);
  finish(a, N, "GPU");
  // posix_memalign memory is released with free(); the original leaked it.
  free(a);
  return 0;
}
|
8,772 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Computes each thread's flat global id for a 2-D grid of 1-D blocks and
// prints the element of `input` it indexes.
__global__ void unique_gid_calc_2d(int * input){
    int local = threadIdx.x;
    int blockOffset = blockIdx.x * blockDim.x;          // offset within the grid row
    int rowOffset = blockDim.x * gridDim.x * blockIdx.y; // offset of the grid row
    int gid = local + rowOffset + blockOffset;
    printf("blockIdx.x = %d, threadIdx=%d, gid=%d, value=%d\n",
           blockIdx.x, threadIdx.x, gid, input[gid]);
}
// Uploads a 16-element array and launches a 2x2 grid of 4-thread blocks so
// each of the 16 threads prints its unique global id and value.
int main(int argc, char ** argv) {
    const int count = 16;
    const int bytes = sizeof(int) * count;
    int h_data[] = {23,9,4,53,65,12,1,33, 22, 43,56,4,76,81,94,32};
    for (int i = 0; i < count; i++){
        printf("%d ", h_data[i]);
    }
    printf("\n \n");
    int *d_data;
    cudaMalloc((void**)&d_data, bytes);
    cudaMemcpy(d_data, h_data, bytes, cudaMemcpyHostToDevice);
    dim3 block(4);
    dim3 grid(2,2);
    unique_gid_calc_2d <<< grid, block >>> (d_data);
    cudaDeviceSynchronize();  // wait so device printf output is flushed
    cudaDeviceReset();        // releases d_data along with the context
    return 0;
}
|
8,773 | #include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
// Counts elements where cudaRes differs from res by more than 0.3.
// The tolerance is deliberately loose: repeated float addition happens in a
// different order on the GPU, so small drift is expected.
int checkResults(float*res, float* cudaRes,int length)
{
    const float tol = .3f;
    int mismatches = 0;
    for (int i = 0; i < length; i++) {
        if (fabs(cudaRes[i] - res[i]) > tol)
            mismatches++;
    }
    return mismatches;
}
// Reads nElements floats from fp into arr, rewinding the stream when EOF is
// hit so a short file wraps around. 5 is subtracted from every value to make
// the data roughly zero-mean (large running sums would lose float precision).
void initializeArray(FILE* fp,float* arr, int nElements)
{
    for (int i = 0; i < nElements; i++) {
        if (fscanf(fp, "%f", &arr[i]) == EOF)
            rewind(fp);
        arr[i] -= 5;
    }
}
// Sequential inclusive prefix sum: out[i] = in[0] + ... + in[i].
void inclusiveScan_SEQ(float *in, float *out,int length) {
    float running = 0.f;
    for (int i = 0; i < length; i++) {
        running += in[i];
        out[i] = running;
    }
}
// Reads N floats from problem1.inp, runs a Thrust inclusive scan on the GPU
// (timed with CUDA events), checks it against a sequential CPU scan, and
// prints N, the final prefix sum, and the elapsed time.
int main(int argc, char* argv[]) {
    if(argc!=2){
        printf("Usage %s N\n",argv[0]);
        return 1;
    }
    int N = atoi(argv[1]);
    FILE *fp = fopen("problem1.inp","r");
    // BUG FIX: the original passed fp to initializeArray unconditionally,
    // crashing on a NULL stream when the input file is missing.
    if (fp == NULL) {
        printf("Cannot open problem1.inp\n");
        return 1;
    }
    int size = N * sizeof(float);
    float *in = (float *)malloc(size);
    float *out = (float *)malloc(size);
    float *cuda_out = (float *)malloc(size);
    float time = 0.f;
    cudaEvent_t startEvent_inc, stopEvent_inc;
    cudaEventCreate(&startEvent_inc);
    cudaEventCreate(&stopEvent_inc);
    initializeArray(fp,in, N);
    fclose(fp);   // input fully consumed; was left open in the original
    thrust::host_vector<float> H(N);
    for (int i = 0; i < N; i++)
    {
        H[i] = in[i];
    }
    cudaEventRecord(startEvent_inc,0); // start timing the inclusive scan
    thrust::device_vector<float> D = H;
    thrust::inclusive_scan(D.begin(), D.end(), D.begin());
    thrust::copy(D.begin(), D.end(), cuda_out);
    cudaEventRecord(stopEvent_inc,0); // stop timing
    cudaEventSynchronize(stopEvent_inc);
    cudaEventElapsedTime(&time, startEvent_inc, stopEvent_inc);
    inclusiveScan_SEQ(in, out, N);
    int nDiffs = checkResults(out, cuda_out,N);
    if(nDiffs)printf("Test Failed\n"); // This should never print
    printf("%d\n%f\n%f\n",N,cuda_out[N-1],time);
    // Release events and host buffers (all leaked in the original).
    cudaEventDestroy(startEvent_inc);
    cudaEventDestroy(stopEvent_inc);
    free(in);
    free(out);
    free(cuda_out);
    return(0);
}
8,774 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
//#include <opencv2/opencv.hpp>
// Each thread sums floor(10000 * sin^2(i)) modulo 10000 over its strided
// share of [gid, *stop), prints its partial (debug), and stores it in
// arr[gid] for the host to combine.
__global__ void mykernel(int *arr, int *stop){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;  // unique global thread id
    int stride = gridDim.x * blockDim.x;              // total threads in the grid
    int partial = 0;
    for (int i = gid; i < *stop; i += stride){
        double s = sin(i*1.0);
        double s2 = s*s;
        int z = (int)(s2*10000.0);
        partial = (partial + z) % 10000;
    }
    printf(" %d ", partial);
    arr[gid] = partial;
}
// CLI: <stop>. Computes a 4-digit "PIN" by summing floor(10000*sin^2(i))
// for i in [0, stop) mod 10000, distributed over a small GPU grid.
int main(int argc, char *argv[]){
    // BUG FIX: the original commented out its argc assertion, leaving a
    // null-pointer dereference of argv[1] when no argument is given.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <stop>\n", argv[0]);
        return 1;
    }
    int stop = (int)atol(argv[1]);
    assert(stop >= 1.0);
    printf("Hello World!\n");
    int blocks = 4;
    int threads = 5;
    int result = 0;
    int *arr;
    int arrsize;
    // One result slot per thread, but never more slots than work items.
    if(blocks*threads < stop){
        arrsize = blocks*threads;
    }else{
        arrsize = stop;
    }
    arr = (int *)malloc(sizeof(int)*arrsize);           // host result buffer
    int *devarr;
    int *devstop;
    cudaMalloc((int**) &devarr , sizeof(int)*arrsize);  // device result buffer
    cudaMalloc((int**) &devstop , sizeof(int));         // device copy of stop
    cudaMemcpy(devarr, arr, sizeof(int)*arrsize, cudaMemcpyHostToDevice);
    cudaMemcpy(devstop, &stop, sizeof(int), cudaMemcpyHostToDevice);
    mykernel<<<blocks,threads>>>(devarr, devstop);
    // Blocking copy synchronizes with the kernel before the host reads.
    cudaMemcpy(arr, devarr, sizeof(int)*arrsize, cudaMemcpyDeviceToHost);
    printf("arrsize: %d\n", arrsize);
    for(int i = 0; i<arrsize; i++){
        result = (result + arr[i]) % 10000;
    }
    printf("PIN is: %d\n",result);
    free(arr);
    cudaFree(devarr);
    cudaFree(devstop);
    return 0;
}
extern "C" // keep the symbol name unmangled for FFI callers
{
    // Standard normal pdf evaluated element-wise: y[i] = phi(x[i]).
    // Assumes a 2-D grid of 1-D blocks for the flat index computation.
    __global__ void normal_pdf_gpu(const double *x, double *y, unsigned int n)
    {
        unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
        if (i >= n) return;
        double v = x[i];
        y[i] = exp(-0.5 * v * v) * rsqrt(2.0 * M_PI);
    }
}
|
8,776 |
/*
Use CUDA functions to calculate block size
*/
#include <iostream>
#include <stdlib.h>
#include <cmath>
#include <string>
#include <ctime>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
// Position: a 2-D point with host/device compound-assignment operators.
struct Position {
    float x, y;
    // "(x,y)" string form; host-only (std::string is not device-usable).
    std::string toString() {
        return "(" + std::to_string(x) + "," + std::to_string(y) + ")";
    }
    // Component-wise translation.
    __device__ __host__ void operator+=(const Position& a) {
        x += a.x;
        y += a.y;
    }
    // Component-wise assignment.
    __device__ __host__ void operator=(const Position& a) {
        x = a.x;
        y = a.y;
    }
};
// Particle struct has current location, best location and velocity
struct Particle {
Position best_position;     // best position this particle has visited
Position current_position;  // where the particle is now
Position velocity;          // displacement applied each iteration
float best_value;           // objective value at best_position
};
const unsigned int N = 5000;           // number of particles in the swarm
const unsigned int ITERATIONS = 1000;  // PSO iterations to run
const float SEARCH_MIN = -1000.0f;     // search-space lower bound (per axis)
const float SEARCH_MAX = 1000.0f;      // search-space upper bound (per axis)
const float w = 0.9f;                  // inertia weight
const float c_ind = 1.0f;              // cognitive (personal-best) coefficient
const float c_team = 2.0f;             // social (team-best) coefficient
// Returns a uniformly-distributed float in [low, high] using rand().
float randomFloat(float low, float high) {
    float frac = rand() / static_cast<float>(RAND_MAX);
    return low + frac * (high - low);
}
// Objective to minimise: f(p) = x^2 + y^2 (global minimum at the origin).
__device__ __host__ float calcValue(Position p) {
    float result = pow(p.x, 2) + pow(p.y, 2);
    return result;
}
// Initialize state for random numbers: seeds one curandState per thread.
// BUG FIX / generalization: the original passed `state` (always element 0)
// to curand_init, so every thread of a wide launch raced on state[0]. Using
// &state[idx] is behavior-identical for the existing <<<1,1>>> launch and
// correct for wider ones. NOTE: callers must then allocate one curandState
// per launched thread (main() currently allocates exactly one).
__global__ void init_kernel(curandState *state, long seed) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(seed, idx, 0, &state[idx]);
}
// Single-thread kernel: linear scan for the minimum best_value over all N
// particles; writes the winning value and index to global memory.
__global__ void updateTeamBestIndex(Particle *d_particles, float *d_team_best_value, int *d_team_best_index, int N) {
    float best = d_particles[0].best_value;
    int bestIdx = 0;
    for (int i = 1; i < N; i++) {
        if (d_particles[i].best_value < best) {
            best = d_particles[i].best_value;
            bestIdx = i;
        }
    }
    *d_team_best_value = best;
    *d_team_best_index = bestIdx;
}
// Calculate velocity for a particle
// __device__ void updateParticleVelocity(Particle &p, Position team_best_position, float w, float c_ind, float c_team, curandState *state) {
// float r_ind = curand_uniform(state);
// float r_team = curand_uniform(state);
// p.velocity.x = w * p.velocity.x +
// r_ind * c_ind * (p.best_position.x - p.current_position.x) +
// r_team * c_team * (team_best_position.x - p.current_position.x);
// p.velocity.y = w * p.velocity.y +
// r_ind * c_ind * (p.best_position.y - p.current_position.y) +
// r_team * c_team * (team_best_position.y - p.current_position.y);
// }
// Update velocity for all particles
// Updates every particle's velocity with the standard PSO rule:
// v = w*v + r_ind*c_ind*(personal_best - x) + r_team*c_team*(team_best - x).
// NOTE(review): all threads share the single curandState at state[0], so the
// curand_uniform calls are racy read-modify-writes -- draws are correlated
// and nondeterministic across threads. Per-thread states (plus a matching
// allocation in main) would fix this; left unchanged here.
__global__ void updateVelocity(Particle* d_particles, int *d_team_best_index, float w, float c_ind, float c_team, int N, curandState *state) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
// updateParticleVelocity(d_particles[idx], d_particles[*d_team_best_index].best_position, w, c_ind, c_team, state);
float r_ind = curand_uniform(state);
float r_team = curand_uniform(state);
d_particles[idx].velocity.x = w * d_particles[idx].velocity.x +
r_ind * c_ind * (d_particles[idx].best_position.x - d_particles[idx].current_position.x) +
r_team * c_team * (d_particles[*d_team_best_index].best_position.x - d_particles[idx].current_position.x);
d_particles[idx].velocity.y = w * d_particles[idx].velocity.y +
r_ind * c_ind * (d_particles[idx].best_position.y - d_particles[idx].current_position.y) +
r_team * c_team * (d_particles[*d_team_best_index].best_position.y - d_particles[idx].current_position.y);
}
}
// Moves each particle by its velocity and refreshes its personal best when
// the new position improves on it. One thread per particle.
__global__ void updatePosition(Particle *d_particles, int N) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N) return;
    Particle &p = d_particles[i];
    p.current_position += p.velocity;
    float value = calcValue(p.current_position);
    if (value < p.best_value) {
        p.best_value = value;
        p.best_position = p.current_position;
    }
}
// PSO driver: random-initializes N particles on the host, iterates the
// velocity/position/team-best kernels, and prints the best result found.
int main(void) {
    long start = std::clock();     // CPU-tick timer for the whole run
    std::srand(std::time(NULL));   // host RNG seed
    // Device RNG state (a single state shared by all kernels -- see the
    // note on updateVelocity).
    curandState *state;
    cudaMalloc(&state, sizeof(curandState));
    init_kernel<<<1,1>>>(state, clock());
    // Initialize particles with random positions/velocities in the search box.
    Particle* h_particles = new Particle[N];
    Particle* d_particles; // for the gpu
    for (int i = 0; i < N; i++) {
        h_particles[i].current_position.x = randomFloat(SEARCH_MIN, SEARCH_MAX);
        h_particles[i].current_position.y = randomFloat(SEARCH_MIN, SEARCH_MAX);
        h_particles[i].best_position.x = h_particles[i].current_position.x;
        h_particles[i].best_position.y = h_particles[i].current_position.y;
        h_particles[i].best_value = calcValue(h_particles[i].best_position);
        h_particles[i].velocity.x = randomFloat(SEARCH_MIN, SEARCH_MAX);
        h_particles[i].velocity.y = randomFloat(SEARCH_MIN, SEARCH_MAX);
    }
    // Upload the swarm.
    size_t particleSize = sizeof(Particle) * N;
    cudaMalloc((void **)&d_particles, particleSize);
    cudaMemcpy(d_particles, h_particles, particleSize, cudaMemcpyHostToDevice);
    // Device scalars for the team-best search.
    int *d_team_best_index;
    float *d_team_best_value;
    cudaMalloc((void **)&d_team_best_index, sizeof(int));
    cudaMalloc((void **)&d_team_best_value, sizeof(float));
    updateTeamBestIndex<<<1,1>>>(d_particles, d_team_best_value, d_team_best_index, N);
    // Launch configuration: ceil-div so every particle gets a thread.
    int blockSize = 1024;
    int gridSize = (N + blockSize - 1) / blockSize;
    for (int i = 0; i < ITERATIONS; i++) {
        updateVelocity<<<gridSize, blockSize>>>(d_particles, d_team_best_index, w, c_ind, c_team, N, state);
        updatePosition<<<gridSize, blockSize>>>(d_particles, N);
        updateTeamBestIndex<<<1,1>>>(d_particles, d_team_best_value, d_team_best_index, N);
    }
    // Copy the winner's index and the full swarm back to the host.
    int team_best_index;
    cudaMemcpy(&team_best_index, d_team_best_index, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_particles, d_particles, particleSize, cudaMemcpyDeviceToHost);
    long stop = std::clock();
    long elapsed = (stop - start) * 1000 / CLOCKS_PER_SEC;
    std::cout << "Ending Best: " << std::endl;
    std::cout << "Team best value: " << h_particles[team_best_index].best_value << std::endl;
    std::cout << "Team best position: " << h_particles[team_best_index].best_position.toString() << std::endl;
    std::cout << "Run time: " << elapsed << "ms" << std::endl;
    // Release host and device memory (h_particles was leaked in the original).
    delete[] h_particles;
    cudaFree(d_particles);
    cudaFree(d_team_best_index);
    cudaFree(d_team_best_value);
    cudaFree(state);
    return 0;
}
8,777 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <ctype.h>
#include <cuda.h>
// MB conversion and GB/s bandwidth helpers for the timing report
#define DATAMB(bytes) (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))
typedef unsigned char uch;  // one pixel channel (B, G, or R byte)
typedef unsigned long ul;
typedef unsigned int ui;
uch *TheImg, *CopyImg; // Where images are stored in CPU
uch *GPUImg, *GPUCopyImg, *GPUResult; // Where images are stored in GPU
// BMP geometry captured from the 54-byte file header by ReadBMPlin()
struct ImgProp{
    int Hpixels;         // image width in pixels
    int Vpixels;         // image height in pixels
    uch HeaderInfo[54];  // raw BMP header, re-emitted verbatim on write
    ul Hbytes;           // bytes per row, padded to a multiple of 4 (BMP format)
} ip;
// Shorthand accessors for the single global image's properties
#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX (IPH*IPV)
// Kernel that flips the given image vertically
// each thread only flips a single pixel (R,G,B)
// Launch layout: gridDim.x = Vpixels * ceil(Hpixels/ThrPerBlk) blocks, so
// MYbid/BlkPerRow recovers the row a block works on; partial blocks at the
// end of a row are trimmed by the MYcol bounds check.
__global__
void Vflip(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui Vpixels)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    ui BlkPerRow = (Hpixels + ThrPerBlk - 1) / ThrPerBlk; // ceil
    ui RowBytes = (Hpixels * 3 + 3) & (~3);  // BMP rows are padded to 4-byte multiples
    ui MYrow = MYbid / BlkPerRow;
    ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    if (MYcol >= Hpixels) return; // col out of range
    ui MYmirrorrow = Vpixels - 1 - MYrow;  // destination row, mirrored top-to-bottom
    ui MYsrcOffset = MYrow * RowBytes;
    ui MYdstOffset = MYmirrorrow * RowBytes;
    ui MYsrcIndex = MYsrcOffset + 3 * MYcol;
    ui MYdstIndex = MYdstOffset + 3 * MYcol;
    // swap pixels RGB @MYcol , @MYmirrorcol
    ImgDst[MYdstIndex] = ImgSrc[MYsrcIndex];
    ImgDst[MYdstIndex + 1] = ImgSrc[MYsrcIndex + 1];
    ImgDst[MYdstIndex + 2] = ImgSrc[MYsrcIndex + 2];
}
// Horizontally mirrors a 24-bit BMP image: each thread copies one RGB pixel
// from column c to column (Hpixels-1-c) of the same row. Rows are assumed
// 4-byte aligned per the BMP format; the grid supplies ceil(Hpixels/blockDim.x)
// blocks per row, and the col bounds check trims the partial block at row end.
__global__
void Hflip(uch *ImgDst, uch *ImgSrc, ui Hpixels)
{
    const ui threadsPerBlock = blockDim.x;
    const ui globalThread = threadsPerBlock * blockIdx.x + threadIdx.x;
    const ui blocksPerRow = (Hpixels + threadsPerBlock - 1) / threadsPerBlock; // ceil
    const ui rowBytes = (Hpixels * 3 + 3) & (~3); // 4-byte BMP row padding
    const ui row = blockIdx.x / blocksPerRow;
    const ui col = globalThread - row * blocksPerRow * threadsPerBlock;
    if (col >= Hpixels) return; // partial block at the end of a row
    const ui mirrorCol = Hpixels - 1 - col;
    const ui rowStart = row * rowBytes;
    const uch *srcPix = ImgSrc + rowStart + 3 * col;
    uch *dstPix = ImgDst + rowStart + 3 * mirrorCol;
    // copy the three channels of this pixel to the mirrored column
    dstPix[0] = srcPix[0];
    dstPix[1] = srcPix[1];
    dstPix[2] = srcPix[2];
}
// Kernel that copies an image from one part of the
// GPU memory (ImgSrc) to another (ImgDst); one thread per byte, FS bytes total.
// Fix: the original guard was (MYgtid > FS), which let the thread with
// MYgtid == FS read and write one byte past the end of the allocation
// (valid indices are 0..FS-1).
__global__
void PixCopy(uch *ImgDst, uch *ImgSrc, ui FS)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    if (MYgtid >= FS) return; // outside the allocated memory
    ImgDst[MYgtid] = ImgSrc[MYgtid];
}
/*
// helper function that wraps CUDA API calls, reports any error and exits
void chkCUDAErr(cudaError_t error_id)
{
if (error_id != CUDA_SUCCESS)
{
printf("CUDA ERROR :::%s\n", cudaGetErrorString(error_id));
exit(EXIT_FAILURE);
}
}
*/
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocate memory to store the 1D image and return its pointer
// (NULL if the pixel buffer cannot be allocated). Also fills the global
// 'ip' with the geometry parsed from the 54-byte BMP header.
// Fixes: the header fread is now checked (a truncated file previously parsed
// garbage geometry), and the FILE* is closed on the allocation-failure path
// (previously leaked).
uch *ReadBMPlin(char* fn)
{
    static uch *Img;
    FILE* f = fopen(fn, "rb");
    if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); }
    uch HeaderInfo[54];
    // read the 54-byte header; bail out if the file is too short to be a BMP
    if (fread(HeaderInfo, sizeof(uch), 54, f) != 54){
        printf("\n\n%s is not a valid BMP file\n\n", fn);
        fclose(f);
        exit(EXIT_FAILURE);
    }
    // extract image height and width from header
    int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
    int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
    int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes; // 4-byte row padding
    //save header for re-use
    memcpy(ip.HeaderInfo, HeaderInfo,54);
    printf("\n Input File name: %17s (%u x %u) File Size=%u", fn,
           ip.Hpixels, ip.Vpixels, IMAGESIZE);
    // allocate memory to store the main image (1 Dimensional array)
    Img = (uch *)malloc(IMAGESIZE);
    if (Img == NULL) { fclose(f); return Img; } // Cannot allocate memory; don't leak the FILE*
    // read the image from disk
    fread(Img, sizeof(uch), IMAGESIZE, f);
    fclose(f);
    return Img;
}
// Write the 1D linear-memory stored image into file: the saved 54-byte BMP
// header from the global 'ip' goes first, then the IMAGESIZE pixel bytes.
void WriteBMPlin(uch *Img, char* fn)
{
    FILE* f = fopen(fn, "wb");
    if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); }
    fwrite(ip.HeaderInfo, sizeof(uch), 54, f); // header first
    fwrite(Img, sizeof(uch), IMAGESIZE, f);    // then the pixel data
    printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
    fclose(f);
}
// Command-line driver: parses args (input file, output file, flip mode,
// threads/block), reads the BMP, runs the selected kernel on device 0,
// writes the result, and reports transfer/kernel timings via CUDA events.
int main(int argc, char **argv)
{
    char Flip = 'H';
    float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime; // GPU code run times
    cudaError_t cudaStatus, cudaStatus2;
    cudaEvent_t time1, time2, time3, time4;
    char InputFileName[255], OutputFileName[255], ProgName[255];
    ui BlkPerRow, ThrPerBlk=256, NumBlocks, GPUDataTransfer;
    cudaDeviceProp GPUprop;
    ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100];
    strcpy(ProgName, "imflipG");
    // Cases fall through intentionally: 5 also sets ThrPerBlk, 4 also sets Flip.
    switch (argc){
    case 5: ThrPerBlk=atoi(argv[4]);
    case 4: Flip = toupper(argv[3][0]);
    case 3: strcpy(InputFileName, argv[1]);
        strcpy(OutputFileName, argv[2]);
        break;
    default: printf("\n\nUsage: %s InputFilename OutputFilename [V/H/C/T] [ThrPerBlk]", ProgName);
        printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName);
        printf("\n\nExample: %s Astronaut.bmp Output.bmp H", ProgName);
        printf("\n\nExample: %s Astronaut.bmp Output.bmp V 128",ProgName);
        printf("\n\nH=horizontal flip, V=vertical flip, T=Transpose, C=copy image\n\n");
        exit(EXIT_FAILURE);
    }
    if ((Flip != 'V') && (Flip != 'H') && (Flip != 'C') && (Flip != 'T')) {
        printf("Invalid flip option '%c'. Must be 'V','H', 'T', or 'C'... \n", Flip);
        exit(EXIT_FAILURE);
    }
    if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) {
        printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk);
        exit(EXIT_FAILURE);
    }
    // Create CPU memory to store the input and output images
    TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated
    if (TheImg == NULL){
        printf("Cannot allocate memory for the input image...\n");
        exit(EXIT_FAILURE);
    }
    CopyImg = (uch *)malloc(IMAGESIZE);
    if (CopyImg == NULL){
        free(TheImg);
        printf("Cannot allocate memory for the input image...\n");
        exit(EXIT_FAILURE);
    }
    // Choose which GPU to run on, change this on a multi-GPU system.
    int NumGPUs = 0;
    cudaGetDeviceCount(&NumGPUs);
    if (NumGPUs == 0){
        printf("\nNo CUDA Device is available\n");
        exit(EXIT_FAILURE);
    }
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        exit(EXIT_FAILURE);
    }
    cudaGetDeviceProperties(&GPUprop, 0);
    // Summarize the device's grid limits as a human-readable "NK"/"NM" string
    SupportedKBlocks = (ui)GPUprop.maxGridSize[0] * (ui)GPUprop.maxGridSize[1] * (ui)GPUprop.maxGridSize[2] / 1024;
    SupportedMBlocks = SupportedKBlocks / 1024;
    sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks >= 5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks >= 5) ? 'M' : 'K');
    MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock;
    // Events bracket: [time1] H2D copy [time2] kernel(s) [time3] D2H copy [time4]
    cudaEventCreate(&time1);
    cudaEventCreate(&time2);
    cudaEventCreate(&time3);
    cudaEventCreate(&time4);
    cudaEventRecord(time1, 0); // Time stamp at the start of the GPU transfer
    // Allocate GPU buffer for the input and output images
    cudaStatus = cudaMalloc((void**)&GPUImg, IMAGESIZE);
    cudaStatus2 = cudaMalloc((void**)&GPUCopyImg, IMAGESIZE);
    if ((cudaStatus != cudaSuccess) || (cudaStatus2 != cudaSuccess)){
        fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory");
        exit(EXIT_FAILURE);
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy  CPU to GPU  failed!");
        exit(EXIT_FAILURE);
    }
    cudaEventRecord(time2, 0); // Time stamp after the CPU --> GPU tfr is done
    //dim3 dimBlock(ThrPerBlk);
    //dim3 dimGrid(ip.Hpixels*BlkPerRow);
    BlkPerRow = (IPH + ThrPerBlk -1 ) / ThrPerBlk;  // ceil-div blocks per image row
    NumBlocks = IPV*BlkPerRow;
    // 'T' (transpose) is implemented as H flip followed by V flip, i.e. 180-degree rotation.
    switch (Flip){
    case 'H': Hflip <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH);
        GPUResult = GPUCopyImg;
        GPUDataTransfer = 2*IMAGESIZE;
        break;
    case 'V': Vflip <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH, IPV);
        GPUResult = GPUCopyImg;
        GPUDataTransfer = 2*IMAGESIZE;
        break;
    case 'T': Hflip <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH);
        Vflip <<< NumBlocks, ThrPerBlk >>> (GPUImg, GPUCopyImg, IPH, IPV);
        GPUResult = GPUImg;
        GPUDataTransfer = 4*IMAGESIZE;
        break;
    case 'C': NumBlocks = (IMAGESIZE+ThrPerBlk-1) / ThrPerBlk;  // one thread per byte
        PixCopy <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IMAGESIZE);
        GPUResult = GPUCopyImg;
        GPUDataTransfer = 2*IMAGESIZE;
        break;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus);
        exit(EXIT_FAILURE);
    }
    cudaEventRecord(time3, 0);
    // Copy output (results) from GPU buffer to host (CPU) memory.
    cudaStatus = cudaMemcpy(CopyImg, GPUResult, IMAGESIZE, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy GPU to CPU  failed!");
        exit(EXIT_FAILURE);
    }
    cudaEventRecord(time4, 0);
    cudaEventSynchronize(time1);
    cudaEventSynchronize(time2);
    cudaEventSynchronize(time3);
    cudaEventSynchronize(time4);
    cudaEventElapsedTime(&totalTime, time1, time4);
    cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
    cudaEventElapsedTime(&kernelExecutionTime, time2, time3);
    cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
    cudaStatus = cudaDeviceSynchronize();
    //checkError(cudaGetLastError());	// screen for errors in kernel launches
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!");
        free(TheImg);
        free(CopyImg);
        exit(EXIT_FAILURE);
    }
    WriteBMPlin(CopyImg, OutputFileName); // Write the flipped image back to disk
    printf("\n\n--------------------------------------------------------------------------\n");
    printf("%s    ComputeCapab=%d.%d  [max %s blocks; %d thr/blk] \n",
           GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk);
    printf("--------------------------------------------------------------------------\n");
    printf("%s %s %s %c %u  [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName,
           Flip, ThrPerBlk, NumBlocks, BlkPerRow);
    printf("--------------------------------------------------------------------------\n");
    printf("CPU->GPU Transfer   =%7.2f ms  ...  %4d MB  ...  %6.2f GB/s\n", tfrCPUtoGPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrCPUtoGPU));
    printf("Kernel Execution    =%7.2f ms  ...  %4d MB  ...  %6.2f GB/s\n", kernelExecutionTime, DATAMB(GPUDataTransfer), DATABW(GPUDataTransfer, kernelExecutionTime));
    printf("GPU->CPU Transfer   =%7.2f ms  ...  %4d MB  ...  %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU));
    printf("--------------------------------------------------------------------------\n");
    printf("Total time elapsed  =%7.2f ms       %4d MB  ...  %6.2f GB/s\n", totalTime, DATAMB((2 * IMAGESIZE + GPUDataTransfer)), DATABW((2 * IMAGESIZE + GPUDataTransfer), totalTime));
    printf("--------------------------------------------------------------------------\n\n");
    // Deallocate CPU, GPU memory and destroy events.
    cudaFree(GPUImg);
    cudaFree(GPUCopyImg);
    cudaEventDestroy(time1);
    cudaEventDestroy(time2);
    cudaEventDestroy(time3);
    cudaEventDestroy(time4);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        free(TheImg);
        free(CopyImg);
        exit(EXIT_FAILURE);
    }
    free(TheImg);
    free(CopyImg);
    return(EXIT_SUCCESS);
}
|
8,778 | #include "includes.h"
// Element-wise vector add, c = a + b: block b handles a stride of 'size'
// elements and thread t within it adds element (b*size + t). No bounds guard,
// so the launch must exactly cover gridDim.x * size elements.
__global__ void add(int *a, int *b, int *c,int size) {
    int i = size * blockIdx.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
8,779 | #include <stdio.h>
#include <math.h>
#define N (2048*2048)
#define THREAD_PER_BLOCK 512
// Reverses 'in' into 'out': out[i] = in[size - 1 - i], one thread per element.
// Fix: added a bounds guard so launches whose grid overshoots 'size'
// (size not a multiple of the block size) no longer read/write out of range.
__global__ void reverse(int * in, int * out, int size) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < size)
        out[index] = in[size - index - 1];
}
// Fills an N-element array with 0..N-1, reverses it on the GPU, and verifies
// the result on the host. N is an exact multiple of THREAD_PER_BLOCK, so the
// launch covers the array with no remainder.
int main()
{
    int * in, * out;
    int * d_in, * d_out;
    int size = N * sizeof(int);
    int i;
    cudaMalloc((void**)&d_in, size);
    cudaMalloc((void**)&d_out, size);
    in = (int *)malloc(size);
    out = (int *)malloc(size);
    for(i = 0; i<N; ++i)
    {
        in[i] = i;
    }
    cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
    reverse<<< N/THREAD_PER_BLOCK, THREAD_PER_BLOCK >>>(d_in, d_out, N);
    // blocking memcpy also synchronizes with the kernel above
    cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
    for(i=0; i<N; ++i)
    {
        if(out[i] != in[N-i-1]) {
            printf("error\n");
            break;
        }
    }
    if(i == N){  // loop ran to completion: every element matched
        printf("correct\n");
    }
    free(in); free(out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
8,780 | #include "includes.h"
// Brute-force nearest-neighbor matching by dot product: one thread per point
// of d_pts1 (M1W threads per block) scans all NPTS descriptors in d_pts2 and
// records the largest NDIM-dimensional dot product and its index.
// M1W / NPTS / NDIM are compile-time constants from includes.h.
// NOTE(review): index stays -1 when no score exceeds 0 — callers must handle it.
__global__ void Match1(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
    int p1 = threadIdx.x + M1W*blockIdx.x;
    float max_score = 0.0f;
    int index = -1;
    for (int p2=0;p2<NPTS;p2++) {
        float score = 0.0f;
        for (int d=0;d<NDIM;d++)
            score += d_pts1[p1*NDIM + d]*d_pts2[p2*NDIM + d];
        if (score>max_score) {
            max_score = score;
            index = p2;
        }
    }
    d_score[p1] = max_score;
    d_index[p1] = index;
}
8,781 |
#include <cuda.h>
#include <stdio.h>
// Minimal sanity-check program: prints a greeting and exits successfully.
int main( int argc, char ** argv )
{
    (void)argc;  // unused
    (void)argv;  // unused
    printf("Hello CUDA world\n");
    return 0;
}
|
8,782 | #include <iostream>
#include <math.h>
// Tree reduction within each block using shared memory; block b writes its
// partial sum to g_odata[b]. Requires blockDim.x to be a power of two and the
// launch to exactly cover the input (there is no bounds guard on i).
// Dynamic shared memory: blockDim.x * sizeof(int), supplied at launch.
__global__ void reduce1(int *g_idata, int *g_odata) {
    extern __shared__ int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();  // all loads visible before any thread reads sdata
    for (unsigned int s = blockDim.x / 2; s>0; s>>=1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();  // barrier is outside the divergent if — all threads reach it
    }
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Sums 2^28 ints (all 2s) with a one-level GPU block reduction, then finishes
// the per-block partial sums on the host. Expected result: 2 * 2^28 = 2^29
// (fits in a 32-bit int).
// Fix: the output buffer only needs one int per block (numBlocks elements) —
// the original allocated and zero-initialized a full N-element (1 GB) array.
int main(void)
{
    int N = 1<<28;
    int blockSize = 128;
    int numBlocks = (N + blockSize - 1) / blockSize;
    int smemSize = blockSize * sizeof(int);
    int *input, *output;
    cudaMallocManaged(&input, N * sizeof(int));
    cudaMallocManaged(&output, numBlocks * sizeof(int));
    for (int i = 0; i < N; i++) {
        input[i] = 2;
    }
    for (int i = 0; i < numBlocks; i++) {
        output[i] = 0;
    }
    reduce1<<<numBlocks, blockSize, smemSize>>>(input, output);
    cudaDeviceSynchronize();  // kernel is async; wait before the host reads 'output'
    int final_result = 0;
    for (int i = 0; i < numBlocks; i++) {
        final_result += output[i];
    }
    std::cout << "final result = " << final_result << "\n";
    // Free memory
    cudaFree(input);
    cudaFree(output);
    return 0;
}
|
8,783 | #include "includes.h"
// Brent-Kung style inclusive scan (prefix sum) over one block's chunk of
// 2*BLOCK_SIZE elements; per-block totals go to aux (when non-NULL) for a
// later cross-block fix-up pass. Static shared memory: 2*BLOCK_SIZE floats.
// Assumes blockDim.x == BLOCK_SIZE (each thread loads two elements).
// NOTE(review): elements past 'len' are never stored into XY, so a partial
// final block scans uninitialized shared memory above the valid region; those
// lanes are not written back, but aux[bx] reads XY[2*blockDim.x-1] and may
// pick up garbage for a partial block — confirm callers pad the input.
__global__ void scan(float *input, float *output, float *aux, int len) {
    //@@declaring shared memeory of size 2*inputSize
    __shared__ float XY[2 * BLOCK_SIZE];
    //@@X-axis block id
    int bx = blockIdx.x;
    //@@X-axis thread id
    int tx = threadIdx.x;
    int i = 2 * bx * blockDim.x + tx;
    //@@ loading data from global memory to shared memory stage 1
    if (i<len)
        XY[tx] = input[i];
    //@@ loading data from global memory to shared memory stage 2
    if (i + blockDim.x<len)
        XY[tx + blockDim.x] = input[i + blockDim.x];
    //@@making sure that all threads in a block are done with loading data from global memory to shared memory
    //@@before proceeding to the calculations phase
    __syncthreads();
    // Up-sweep (reduction) phase: build partial sums at stride-spaced indices
    for (unsigned int stride = 1; stride <= BLOCK_SIZE; stride *= 2){
        //@@making sure that all threads in a block are done with previous step before starting the next
        __syncthreads();
        int index = (tx + 1)*stride * 2 - 1;
        if (index < 2 * BLOCK_SIZE)
            XY[index] += XY[index - stride];
    }
    // Down-sweep phase: distribute the partial sums back down the tree
    for (int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2) {
        //@@making sure that all threads in a block are done with previous step before starting the next
        __syncthreads();
        int index = (tx + 1)*stride * 2 - 1;
        if (index + stride < 2 * BLOCK_SIZE)
            XY[index + stride] += XY[index];
    }
    //@@making sure that all threads in a block are done with previous step before starting the next
    __syncthreads();
    if (i < len)
        output[i] = XY[tx];
    if (i + blockDim.x < len)
        output[i + blockDim.x] = XY[tx + blockDim.x];
    //@@storing the block sum to the aux array
    if (aux != NULL && tx == 0)
        aux[bx] = XY[2 * blockDim.x - 1];
}
8,784 | #include "includes.h"
// Left-multiplies the 3x4 upper part of a 4x4 affine transform by the inverse
// view matrix: currentTransform[0..11] = invViewMatrix(3x4) * currentTransform(4x4).
// Every thread computes and writes the same 12 values, so this is presumably
// meant for a <<<1,1>>> launch — TODO confirm against the caller. With more
// than one thread, later writers race with readers of d_currentTransform.
__global__ void d_addToCurrentTransform(float* d_currentTransform, float* d_invViewMatrix) {
    float result[12] = {0.f};
    for (int i = 0; i < 3; ++i) {
        for (int j = 0; j < 4; ++j) {
            for (int k = 0; k < 4; ++k) {
                result[i * 4 + j] += d_invViewMatrix[i * 4 + k] * d_currentTransform[k * 4 + j];
            }
        }
    }
    for (int i = 0; i < 12; ++i) { // The last row of currentTransform remains (0,0,0,1)
        d_currentTransform[i] = result[i];
    }
}
// Local deviation filter (extern "C" for external linkage). For each pixel,
// averages a kernel_nWidth x kernel_nHeight neighborhood — reflecting offsets
// that fall outside the image back inside — then accumulates squared
// deviations from that mean.
// Fix: the boundary guard now uses >= ; the original (row > dataH || col > dataW)
// let row == dataH / col == dataW threads through, indexing one row/column out
// of bounds.
// NOTE(review): 'inf = 1 / ave' divides by zero (ave is 0 here). Integer
// division by zero does not trap on the GPU but yields an unspecified value,
// so the 'inf == tmp_dev' comparisons are unreliable — confirm the intent.
// NOTE(review): d_Result receives the LAST squared deviation (tmp_dev), not
// the accumulated 'dev' — the sqrt/variance line is commented out; confirm.
extern "C"
__global__ void stdfilt(int *d_Data, int *d_Result, int dataW, int dataH, int kernel_nWidth, int kernel_nHeight)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int ave = 0, ave_cnt = 0, dev = 0, dev_cnt = 0, tmp_dev = 0;
    int inf = 1 / ave;   // see NOTE above: unspecified value
    int kernel_mi = 0, kernel_mj = 0;
    int offset_x = 0, offset_y = 0;
    int mj = 0, mi = 0;
    if (row >= dataH || col >= dataW)
        return;
    kernel_mi = kernel_nHeight / 2;  // half-height of the filter window
    kernel_mj = kernel_nWidth / 2;   // half-width of the filter window
    // Pass 1: mean of the (border-reflected) neighborhood
    for (mi = kernel_mi*-1; mi <= kernel_mi; mi += 1) {
        for (mj = kernel_mj*-1; mj <= kernel_mj; mj += 1) {
            offset_y = row + mi;
            offset_x = col + mj;
            if (offset_y < 0 || offset_y >= dataH)  // reflect the vertical offset back inside
                offset_y = row + (mi*-1);
            if (offset_x < 0 || offset_x >= dataW)  // reflect the horizontal offset back inside
                offset_x = col + (mj*-1);
            ave += d_Data[(offset_y*dataW) + offset_x];
            ave_cnt++;
        }
    }
    ave /= ave_cnt;
    // Pass 2: squared deviations from the neighborhood mean
    for (mi = kernel_mi*-1; mi <= kernel_mi; mi += 1) {
        for (mj = kernel_mj*-1; mj <= kernel_mj; mj += 1) {
            offset_y = row + mi;
            offset_x = col + mj;
            if (offset_y < 0 || offset_y >= dataH)
                offset_y = row + (mi*-1);
            if (offset_x < 0 || offset_x >= dataW)
                offset_x = col + (mj*-1);
            tmp_dev = (d_Data[(offset_y*dataW) + offset_x] - ave)*(d_Data[(offset_y*dataW) + offset_x] - ave);
            if (inf == tmp_dev)
                tmp_dev = 0;
            dev += tmp_dev;
            dev_cnt += 1;
        }
    }
    // tmp_dev = sqrt(dev/ (dev_cnt == kernel_nHeight*kernel_nWidth ? dev_cnt -= 1 : dev_cnt)); // rounding
    if (inf != tmp_dev)
        d_Result[row*dataW + col] = tmp_dev;
}
8,786 | // nvcc -o Cuda_pi Cuda_pi.cu
// Run Unix: ./Cuda_pi
#include <stdio.h>
#include <math.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#define BLOCKSPERGRID 512
#define NUMTHREADS 8192
#define ITERATIONS 16e09
// Each thread accumulates its slice of the alternating Leibniz series for pi
// (paired +4/(2i+1) and -4/(2i+3) terms) into piTotal[index].
// NOTE(review): the final reduction by thread 0 over ALL totalThreads entries
// needs a GRID-wide barrier, but __syncthreads() only synchronizes one block —
// partial sums from other blocks may not yet be written when thread 0 reads
// them. This is a cross-block race; the reduction belongs on the host or in a
// second kernel. Results may still often look right on small grids.
__global__ void calculatePi(double *piTotal, long int iterations, int totalThreads) {
    long int initIteration, endIteration;
    long int i = 0;
    double piPartial = 0.0;
    int index = (blockDim.x * blockIdx.x) + threadIdx.x;
    // [initIteration, endIteration) is this thread's contiguous slice
    initIteration = (iterations / totalThreads) * index;
    endIteration = initIteration + (iterations / totalThreads) - 1;
    i = initIteration;
    do {
        piPartial = piPartial + (double)(4.0 / ((i * 2) + 1));
        i++;
        piPartial = piPartial - (double)(4.0 / ((i * 2) + 1));
        i++;
    } while(i < endIteration);
    piTotal[index] = piPartial;
    __syncthreads();  // block-level barrier only — does NOT synchronize the grid
    if(index == 0) {
        for(i = 1; i < totalThreads; i++) {
            piTotal[0] = piTotal[0] + piTotal[i];
        }
    }
}
// Host driver: zero-fills a per-thread partial-sum array, launches calculatePi
// over BLOCKSPERGRID blocks (NUMTHREADS total threads), and prints piTotal[0],
// which the kernel's thread 0 filled with the grid-wide sum.
// NOTE(review): the error messages below say "vector C" — copy-paste residue
// from a vectorAdd sample; they do not describe the actual operations.
int main() {
    int blocksPerGrid, threadsPerBlock, i, size;
    long int iterations;
    int totalThreads;
    double *h_pitotal, *d_pitotal;
    blocksPerGrid = BLOCKSPERGRID;
    cudaError_t err = cudaSuccess;
    size = sizeof(double)*NUMTHREADS;
    h_pitotal = (double *)malloc(size);
    if (h_pitotal == NULL){
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    for(i = 0; i < NUMTHREADS; i++) {
        h_pitotal[i] = 0.0;
    }
    err = cudaMalloc((void **)&d_pitotal, size);
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_pitotal, h_pitotal, sizeof(double)*NUMTHREADS, cudaMemcpyHostToDevice);
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Launch the kernel
    threadsPerBlock = NUMTHREADS/blocksPerGrid;
    totalThreads = blocksPerGrid * threadsPerBlock;
    iterations = ITERATIONS;
    printf("CUDA kernel launch with %d blocks of %d threads Total: %i ", blocksPerGrid, threadsPerBlock, totalThreads );
    calculatePi<<<blocksPerGrid, threadsPerBlock>>>(d_pitotal, iterations, totalThreads);
    err = cudaGetLastError();
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // blocking memcpy also synchronizes with the kernel above
    err = cudaMemcpy(h_pitotal, d_pitotal, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_pitotal);
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Calculated pi: %.12f", *h_pitotal);
    // Free host memory
    free(h_pitotal);
    err = cudaDeviceReset();
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return 0;
}
8,787 | #include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/copy.h>
// Affine functor y = 2x + 1, applied element-wise by thrust::transform.
// Improvements: operator() is now const (Thrust may invoke the functor
// through a const reference) and __host__ __device__ so the same functor
// also works with Thrust's host backends and in host-side tests.
struct function_object
{
    __host__ __device__ float operator () (const float & x) const
    {
        return 2.0 * x + 1.0;
    }
};
// Demonstrates the host->device->host round trip with Thrust:
// copies three floats to the device, applies function_object (y = 2x + 1)
// element-wise, copies the results back, and prints them.
int main()
{
    /* 1) allocate */
    thrust::host_vector < float > host_vec(3);
    thrust::device_vector < float > device_input(3);
    thrust::device_vector < float > device_output(3);
    /* 2) initialize */
    host_vec[0] = 1.1;
    host_vec[1] = 3.3;
    host_vec[2] = 2.2;
    /* 3) copy host to device */
    thrust::copy(host_vec.begin(), host_vec.end(), device_input.begin());
    /* 4) transform device_input to device_output */
    thrust::transform(device_input.begin(), device_input.end(), device_output.begin(), function_object());
    /* 5) copy device to host (overwrites the original input values) */
    thrust::copy(device_output.begin(), device_output.end(), host_vec.begin());
    std::cout << host_vec[0] << std::endl;
    std::cout << host_vec[1] << std::endl;
    std::cout << host_vec[2] << std::endl;
    return 0;
}
|
8,788 | /*************************************************************************
> File Name: 05_0304.cu
> Author: dong xu
> Mail: gwmxyd@163.com
> Created Time: 2016年03月30日 星期三 13时37分15秒
************************************************************************/
#include <stdio.h>
#include <cuda_runtime.h>
const int gridSize=7,blockSize=1;
cudaError_t addWithCuda(int *sum);
// Demo kernel for observing atomicAdd ordering: thread i computes tsum = 10^i,
// busy-waits roughly tsum iterations so threads arrive staggered, then
// atomically adds its index i to *sum and prints the running total.
// NOTE(review): the empty delay loop has no side effects and may be removed
// entirely by the compiler — the staggering is not guaranteed.
__global__ void addKernel(int *sum)
{
    int i = blockIdx.x*blockDim.x +threadIdx.x;
    int j=0;
    int tsum = 1;
    //atomicAdd(sum,i);
    //tsum = *tsum + i;
    //atomicCAS(sum,tsum,*sum);
    for(j=0;j<i;j++)
        tsum *=10;  // tsum = 10^i
    for(j=0;j<tsum;j++);  // busy-wait delay (see NOTE above)
    atomicAdd(sum,i);
    printf("thread %d:tsum=%d,sum=%d\n",i,tsum,*sum);
}
// Launches the atomic-add demo via addWithCuda and prints the final sum.
// With gridSize=7 one-thread blocks, thread indices 0..6 (sum 21) are added
// to the initial value 1, so the expected final output is 22.
// Fix: cudaThreadExit() has been deprecated since CUDA 4.0; its documented
// replacement is cudaDeviceReset().
int main()
{
    int sum = 1;
    cudaError_t cudaStatus;
    int num = 0;
    cudaDeviceProp prop;
    cudaStatus = cudaGetDeviceCount(&num);
    for(int i = 0;i<num;i++)
    {
        cudaGetDeviceProperties(&prop,i);  // queried but unused; kept for parity
    }
    cudaStatus = addWithCuda(&sum);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    printf("Final sum=%d\n",sum);
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Copies *sum to the device, runs addKernel<<<gridSize,blockSize>>>, and
// copies the result back into *sum. Returns the first CUDA error encountered;
// device memory is always released through the Error: path.
// Fixes: cudaThreadSynchronize() is deprecated — replaced with
// cudaDeviceSynchronize() — and its status is now actually checked (the
// original discarded it), so kernel-time errors are reported directly instead
// of surfacing as a confusing memcpy failure.
cudaError_t addWithCuda(int *sum)
{
    int *dev_sum = 0;
    cudaError_t cudaStatus;
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_sum, sizeof(int));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_sum,sum,sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    printf("addKernel<<<%d,%d>>>(%d)\n",gridSize,blockSize,*sum);
    addKernel<<<gridSize,blockSize>>>(dev_sum);
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceSynchronize failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(sum,dev_sum,sizeof(int),cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dev_sum);
    return cudaStatus;
}
|
8,789 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <algorithm>
using namespace std;
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <sys/time.h>
#include <unistd.h>
// Current wall-clock time in seconds (microsecond resolution) as a double.
double getTimeStamp() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    double wholeSeconds = (double)tv.tv_sec;
    double fraction = (double)tv.tv_usec / 1000000;
    return wholeSeconds + fraction;
}
#define NUM_THREADS 1024
int features = 1024;
int sampels = 10000;
int classes = 10;
float ** training_x1; //3500 * 784
float ** training_x; //3500 * 784
float ** training_y; //3500 * 1
float ** testing_x; //145 * 784
float ** testing_y; //145 * 1
float ** label_onehot; //3500 * 10
// Parses a line of space/comma separated numbers into res, in order.
// Note: strtok modifies buff in place, and res must be large enough to hold
// every token on the line.
void getData(float * res, char buff[])
{
    int n = 0;
    for (char *tok = strtok(buff, " ,"); tok != NULL; tok = strtok(NULL, " ,"))
    {
        res[n] = atof(tok);
        n++;
    }
}
// Reads up to x_dim rows of a CSV file into mat; each line is parsed into
// mat[i] (y_dim values) by getData.
// Fixes: the strdup'd line copy is now freed (it leaked on every iteration),
// reading stops after x_dim rows (an oversized file previously wrote past
// mat), the FILE* is closed (it was never closed), and the unused 'num'
// variable is gone.
void readCSV(char* file, float** mat, int x_dim, int y_dim)
{
    FILE* stream = fopen(file, "r");
    int size_per_pic = y_dim * 30;  // generous line buffer: ~30 chars per value
    char line[size_per_pic];
    if (stream == NULL) {
        perror ("Error opening file");
        return;
    }
    int i = 0;
    while (i < x_dim && fgets(line, size_per_pic, stream))
    {
        char* tmp = strdup(line);   // getData/strtok mutate the buffer
        getData(mat[i], tmp);
        free(tmp);
        i++;
    }
    fclose(stream);
}
// Allocates the global host matrices as arrays of row pointers:
//   training_x1 / training_x : 10000 x 1024
//   training_y               : 10000 x 1
//   label_onehot             : 10000 x 10
//   testing_x                :  2000 x 1024
//   testing_y                :  2000 x 1
void malloc_host(void){
    const int nTrain = 10000, nTest = 2000, nFeat = 1024, nCls = 10;
    training_x1  = (float**)malloc(sizeof(float*) * nTrain);
    training_x   = (float**)malloc(sizeof(float*) * nTrain);
    training_y   = (float**)malloc(sizeof(float*) * nTrain);
    label_onehot = (float**)malloc(sizeof(float*) * nTrain);
    for (int i = 0; i < nTrain; i++){
        training_x1[i]  = (float*)malloc(sizeof(float) * nFeat);
        training_x[i]   = (float*)malloc(sizeof(float) * nFeat);
        training_y[i]   = (float*)malloc(sizeof(float) * 1);
        label_onehot[i] = (float*)malloc(sizeof(float) * nCls);
    }
    testing_x = (float **)malloc(sizeof(float*) * nTest);
    testing_y = (float **)malloc(sizeof(float*) * nTest);
    for (int i = 0; i < nTest; i++){
        testing_x[i] = (float*)malloc(sizeof(float) * nFeat);
        testing_y[i] = (float*)malloc(sizeof(float) * 1);
    }
}
// Dense matrix multiply: result(M x S) = a(M x N) * b(N x S), all row-major.
// One thread per output element; the 2D grid/block is flattened into threadId.
__global__ void Mult_GPU( float *a, float *b, float *result, const int M, const int N, const int S) // M should be batch size
{
    int threadId = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
                 + blockIdx.x * blockDim.x + threadIdx.x;
    if (threadId < M * S)
    {
        int row = threadId / S;
        int column = threadId % S;
        float temp=0;//reduce global mem access number
        result[threadId] = 0;
        for (int i = 0; i < N; i++)
        {
            //result[threadId] += a[row * N + i] * b[i * S + column];
            temp += a[row * N + i] * b[i * S + column];
        }
        result[threadId]=temp;
    }
}
// Softmax denominator: for each sample tid, writes sum_i exp(predict[tid,i])
// into sum[tid]. One thread per sample; the 2D grid/block is flattened.
__global__ void softmax_sum( float *predict, float *sum, const int label_size, const int data_size ){
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= data_size) return;
    const float *row = predict + tid * label_size;
    float acc = 0;  // accumulate locally to avoid repeated global stores
    for (int i = 0; i < label_size; i++)
        acc += exp(row[i]);
    sum[tid] = acc;
}
// Row maximum: for each sample tid, writes the largest logit of its row into
// max[tid] (used to stabilize softmax). One thread per sample.
// Fix: the original reset max[tid] to the row's FIRST element on every loop
// iteration, so it effectively compared only the first and last entries and
// did not compute the maximum. This version keeps a running maximum in a
// register and writes it once.
__global__ void max( float *predict, float *max, const int label_size, const int data_size ){
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < data_size){
        float best = predict[tid * label_size];
        for(int i = 1; i < label_size; i++){
            if (best < predict[tid * label_size + i])
                best = predict[tid * label_size + i];
        }
        max[tid] = best;
    }
}
// Subtracts the per-sample maximum from every logit in place (softmax
// stabilization). One thread per sample; 2D grid/block flattened.
__global__ void normalize(float *predict, float *max, const int label_size, const int data_size){
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= data_size) return;
    float rowMax = max[tid];
    float *row = predict + tid * label_size;
    for (int i = 0; i < label_size; i++)
        row[i] -= rowMax;
}
// Softmax: softmax_value[tid,i] = exp(predict[tid,i]) / sum[tid], where
// sum[tid] is the precomputed denominator. One thread per sample.
__global__ void softmax( float *softmax_value, float *predict, float *sum,const int label_size, const int data_size ){
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= data_size) return;
    float denom = sum[tid];
    int base = tid * label_size;
    for (int i = 0; i < label_size; i++)
        softmax_value[base + i] = exp(predict[base + i]) / denom;
}
// Softmax-cross-entropy gradient w.r.t. the logits:
// dz[tid,i] = softmax[tid,i] - onehot_label[tid,i]. One thread per sample.
__global__ void dz(float *softmax_value, float *label, float *dz, const int label_size, const int data_size){
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= data_size) return;
    int base = tid * label_size;
    for (int i = 0; i < label_size; i++)
        dz[base + i] = softmax_value[base + i] - label[base + i];
}
// Weight gradient: grad(weight_size x label_size) +=
//   train_data^T(weight_size x data_size) * dz(data_size x label_size).
// One thread per weight row (tid indexes a feature). Note this ACCUMULATES
// into the existing contents of grad, so initialize_grad must run first
// each batch.
__global__ void grad(float *train_data, float *dz, float *grad, const int label_size, const int data_size, const int weight_size){
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < weight_size){
        for(int i = 0; i < label_size; i++){
            float temp = grad[tid * label_size + i];  // register accumulator
            for(int j = 0; j < data_size; j++){
                // grad[tid * label_size + i] += train_data[j * weight_size + tid] * dz[j * label_size + i];
                temp += train_data[j * weight_size + tid] * dz[j * label_size + i];
            }
            grad[tid * label_size + i] = temp;
        }
    }
}
// SGD step: scales each accumulated gradient entry by 1/100 — presumably the
// mini-batch size used for averaging; TODO confirm against the training loop —
// then subtracts learning_rate * grad from the weights. Note that grad is
// modified in place by the division. One thread per weight row.
__global__ void weight_update(float *weight, float *grad, const int label_size, const int weight_size, const float learning_rate){
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < weight_size){
        for(int i = 0; i < label_size; i++){
            grad[tid * label_size + i] /= 100;
            weight[tid * label_size + i] -= (learning_rate * grad[tid * label_size + i]);
        }
    }
}
// Zeroes the per-sample logit-gradient buffer dz. One thread per sample.
__global__ void initialize_dz(float *dz, const int label_size, const int data_size){
    int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
            + blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= data_size) return;
    float *row = dz + tid * label_size;
    for (int i = 0; i < label_size; i++)
        row[i] = 0;
}
// Zeroes the gradient buffer (weight_size x label_size): one thread clears
// one weight row of label_size entries.
__global__ void initialize_grad(float *grad, const int label_size, const int weight_size){
    const int w = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
                + blockIdx.x * blockDim.x + threadIdx.x;
    if (w >= weight_size) return;
    for (int c = 0; c < label_size; ++c) {
        grad[w * label_size + c] = 0;
    }
}
// Resets the per-sample accumulators: sum[row] = 0 and the whole prediction
// row predict[row][0..label_size) = 0.  One thread per sample.
__global__ void initialize(float *sum, float *predict, const int data_size, const int label_size){
    const int row = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x
                  + blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= data_size) return;
    sum[row] = 0;
    for (int c = 0; c < label_size; ++c) {
        predict[row * label_size + c] = 0;
    }
}
/*
 * Returns a pseudo-random integer uniformly drawn from [l, u] (inclusive).
 *
 * Fixes vs. the original:
 *  - srand() was re-seeded with time(NULL) on EVERY call, so all calls made
 *    within the same second returned the same value (which made the caller's
 *    shuffle nearly a no-op); the PRNG is now seeded exactly once.
 *  - rand() == RAND_MAX could previously yield u + 1; dividing by
 *    RAND_MAX + 1 and clamping keeps the result inside [l, u].
 */
int randint(int l,int u)
{
    static int seeded = 0;          // one-time seeding guard
    if (!seeded) {
        srand((unsigned)time(NULL));
        seeded = 1;
    }
    int temp = l + (int)((1.0 * rand() / ((double)RAND_MAX + 1.0)) * (u - l + 1));
    if (temp > u) temp = u;         // defensive clamp
    return temp;
}
// Fisher–Yates-style shuffle of 10000 paired rows: data is treated as a
// row-major 10000 x 1024 matrix and label as 10000 x 10, and row i of both
// arrays is swapped with a uniformly chosen row in [i, len).  Sizes are
// hard-coded to this project's training set — TODO confirm before reuse.
// NOTE(review): shuffle quality depends on randint()'s seeding behaviour;
// if it reseeds from time() on each call, indices within one second repeat.
void random_shuffle(float *data, float *label){
int len = 10000;
for (int i = 0 ; i < len; i++) {
int rand = randint(i, len - 1);
// swap
for(int j = 0; j < 1024; j++){
//swap(data[i][j], arr[rand][j]);
swap(data[i * 1024 + j], data[rand * 1024 + j]);
}
for(int k = 0; k < 10; k++){
//swap(data[i][j], arr[rand][j]);
swap(label[i * 10 + k], label[rand * 10 + k]);
}
}
}
/*
 * Transposes a row-major batch_size x weight_size matrix: data1[i][j] is
 * written to data2[j][i] (data2 becomes weight_size x batch_size).
 *
 * Improvements vs. the original:
 *  - the unused label_size local was removed;
 *  - the hard-coded dimensions are now trailing parameters whose defaults
 *    (200 x 1024) preserve the original behaviour for existing callers.
 */
void data_transpose(float *data1, float *data2, int batch_size = 200, int weight_size = 1024){
    for (int i = 0; i < batch_size; i++){
        for (int j = 0; j < weight_size; j++){
            data2[j * batch_size + i] = data1[i * weight_size + j];
        }
    }
}
/*
 * Copies batch number `index` out of the full training set: data1 receives
 * batch_size x weight_size features and label1 the matching
 * batch_size x label_size labels, both starting at row index * batch_size
 * of data2 / label2.
 *
 * The dimensions, formerly hard-coded locals, are now trailing parameters
 * whose defaults (200 / 1024 / 10) preserve the original behaviour.
 */
void devide_data(float *data1, float *data2, float *label1, float *label2, int index,
                 int batch_size = 200, int weight_size = 1024, int label_size = 10){
    const int row0 = index * batch_size;   // first sample of this batch
    for (int i = 0; i < batch_size; i++){
        for (int j = 0; j < weight_size; j++){
            data1[i * weight_size + j] = data2[(row0 + i) * weight_size + j];
        }
    }
    for (int i = 0; i < batch_size; i++){
        for (int j = 0; j < label_size; j++){
            label1[i * label_size + j] = label2[(row0 + i) * label_size + j];
        }
    }
}
// Entry point: trains a one-layer softmax (logistic-regression) classifier
// on 10000 x 1024 inputs with 10 classes, splitting every 200-sample batch
// across two CUDA streams (100 samples each) so the two halves can run
// concurrently.  Relies on file-level helpers/globals defined elsewhere in
// this file: malloc_host, readCSV, training_x/y, testing_x/y, training_x1,
// label_onehot, getTimeStamp, Mult_GPU, max, normalize, softmax_sum.
int main(){
// Stream
cudaDeviceProp prop;
int deviceID;
cudaGetDevice(&deviceID);
cudaGetDeviceProperties(&prop, deviceID);
// Bail out early if the device cannot overlap copies with kernels.
if (!prop.deviceOverlap)
{
printf("No device will handle overlaps. so no speed up from stream.\n");
return 0;
}
// Create one CUDA stream per half-batch.
cudaStream_t stream1;
cudaStreamCreate(&stream1);
cudaStream_t stream2;
cudaStreamCreate(&stream2);
// malloc_host();
malloc_host();
readCSV("training_x.csv", training_x, 10000,1024);
readCSV("training_y.csv", training_y, 1024, 1);
readCSV("testing_x.csv", testing_x, 2000, 1024);
readCSV("testing_y.csv", testing_y, 2000, 1);
readCSV("training_x.csv", training_x1, 10000,1024);
float learning_rate = 0.1;
int iter = 1;
int batch_size = 200;
int epochs = 50;
int data_size = 10000;
int label_size = 10;
int weight_size = 1024;
// Byte sizes of the various host/device buffers.
int train_data_bytes = 10000 * 1024 * sizeof(float);
int batch_data_bytes = 200 * 1024 * sizeof(float);
int weight_bytes = 1024 * 10 * sizeof(float);
int predict_bytes = 10000 * 10 * sizeof(float);
int batch_predict_bytes = 200 * 10 * sizeof(float);
float *h_train_data = (float *) malloc( train_data_bytes ) ;
float *h_train_data_T = (float *) malloc( train_data_bytes ) ;
// NOTE(review): h_batch_data holds only half a batch (batch_data_bytes / 2),
// but devide_data() below writes a full batch_size * weight_size block into
// it, and stream2 later reads from h_batch_data + 100 * 1024 — this looks
// like a heap overflow; confirm the intended size is batch_data_bytes.
float *h_batch_data = (float *) malloc( batch_data_bytes / 2) ;
float *h_batch_data_T = (float *) malloc( batch_data_bytes ) ;
float *h_label_onehot = (float *) malloc( predict_bytes ) ;
float *h_batch_label = (float *) malloc( batch_predict_bytes ) ;
float *h_weight = (float *) malloc( weight_bytes ) ;
float *h_predict = (float *) malloc( batch_predict_bytes / 2 ) ;
float *h_max = (float *) malloc( 100 * sizeof(float) ) ;
float *h_sum = (float *) malloc( 100 * sizeof(float) ) ;
float *h_softmax = (float *) malloc( batch_predict_bytes ) ;
float *h_dz = (float *) malloc( batch_predict_bytes ) ;
float *h_grad = (float *) malloc( weight_bytes ) ;
////////////////////// Initialize //////////////////////
////////////////////// One Hot //////////////////////
// Column-major copy of the training data (features x samples).
for(int i = 0; i < data_size; i++){
for(int j = 0; j < weight_size; j++){
h_train_data_T[j * 10000 + i] = training_x[i][j];
}
}
// One-hot encode the labels (classes are stored 1-based in training_y).
for(int i = 0; i < data_size; i++){
label_onehot[i][(int(training_y[i][0] - 1))] = 1;
}
for(int i = 0; i < data_size; i++){
for(int j = 0; j < label_size; j++){
h_label_onehot[i * label_size + j] = label_onehot[i][j];
}
}
// Row-major copy of the training data (samples x features).
for(int i = 0; i < data_size; i++){
for(int j = 0; j < weight_size; j++){
h_train_data[i * weight_size + j] = training_x[i][j];
}
}
// All weights start at 1.
for(int i = 0; i < weight_size; i++){
for(int j = 0; j < label_size; j++){
h_weight[i * label_size + j] = 1 ;
}
}
//////////////////// Initialize //////////////////////
///////////////////////////////// GPU_SIDE ///////////////////////////////////
float *d_train_data,* d_weight, *d_predict1, *d_predict_sum1, *d_sum1, *d_max1, *d_softmax_value1, *d_dz1, *d_grad1;
float *d_predict2, *d_predict_sum2, *d_sum2, *d_max2, *d_softmax_value2, *d_dz2, *d_grad2;
float *d_batch_data1, *d_batch_label1, *d_batch_data2, *d_batch_label2;
// NOTE(review): cudaGetErrorString(...) only converts the status code to a
// string, which is discarded here — none of these wrapped calls actually
// checks for or reports errors.
cudaGetErrorString(cudaMalloc( (void **) &d_train_data, train_data_bytes )) ;
cudaGetErrorString(cudaMalloc( (void **) &d_weight, weight_bytes)) ;
cudaGetErrorString(cudaMalloc( (void **) &d_batch_data1, batch_data_bytes / 2 )) ;
cudaGetErrorString(cudaMalloc( (void **) &d_batch_label1, batch_predict_bytes / 2 )) ;
cudaGetErrorString(cudaMalloc( (void **) &d_batch_data2, batch_data_bytes / 2 )) ;
cudaGetErrorString(cudaMalloc( (void **) &d_batch_label2, batch_predict_bytes / 2 )) ;
cudaGetErrorString(cudaMalloc( (void **) &d_predict1, batch_predict_bytes / 2)) ;
cudaGetErrorString(cudaMalloc( (void **) &d_predict2, batch_predict_bytes / 2)) ;
cudaGetErrorString(cudaMalloc( (void **) &d_sum1, 100 * sizeof(float))) ;
cudaGetErrorString(cudaMalloc( (void **) &d_softmax_value1, batch_predict_bytes / 2)) ;
cudaGetErrorString(cudaMalloc( (void **) &d_dz1, batch_predict_bytes / 2)) ;
cudaGetErrorString(cudaMalloc( (void **) &d_grad1, weight_bytes)) ;
cudaGetErrorString(cudaMalloc( (void **) &d_max1, 100 * sizeof(float))) ;
cudaGetErrorString(cudaMalloc( (void **) &d_sum2, 100 * sizeof(float))) ;
cudaGetErrorString(cudaMalloc( (void **) &d_softmax_value2, batch_predict_bytes / 2)) ;
cudaGetErrorString(cudaMalloc( (void **) &d_dz2, batch_predict_bytes / 2)) ;
cudaGetErrorString(cudaMalloc( (void **) &d_grad2, weight_bytes)) ;
cudaGetErrorString(cudaMalloc( (void **) &d_max2, 100 * sizeof(float))) ;
// //Configure blockDim
// Rebalance the 2D block shape so neither grid dimension exceeds the
// 65535-block limit of compute capability < 3.0 grids.
int bdx = 32, bdy = 32;
while(data_size > bdx * 65535)
{
bdx = bdx * 2;
bdy = bdy / 2;
}
while(weight_size > bdy * 65535)
{
bdy = bdy * 2;
bdx = bdx / 2;
}
dim3 blockDim( bdx,bdy ) ; // you will want to configure this
dim3 gridDim( (int)((data_size + blockDim.x-1)/blockDim.x), (int)((weight_size + blockDim.y-1)/blockDim.y) ) ;
//////////////////////////////// invoke Kernel (Logistic Regression) ////////////////////////////////
double timeStamp1 = getTimeStamp() ;
for(int train = 0; train < 1; train++){
//////////////////////Random shuffle data/////////////////////////////
random_shuffle(h_train_data, h_label_onehot);
// NOTE(review): `epoch` is really a batch index — 50 batches x 200
// samples covers the 10000-sample set exactly once per `train` pass.
for(int epoch = 0; epoch < epochs; epoch++){
////////////////////// Transfer data ////////////////////////////
devide_data(h_batch_data, h_train_data, h_batch_label, h_label_onehot, epoch);
cudaGetErrorString(cudaMemcpyAsync( d_weight, h_weight, weight_bytes, cudaMemcpyHostToDevice)) ;
cudaGetErrorString(cudaDeviceSynchronize());
// NOTE(review): these host buffers come from malloc(); cudaMemcpyAsync
// needs pinned memory (cudaHostAlloc) to actually overlap with compute.
cudaGetErrorString(cudaMemcpyAsync( d_batch_data1, h_batch_data, batch_data_bytes / 2, cudaMemcpyHostToDevice, stream1 )) ;
cudaGetErrorString(cudaMemcpyAsync( d_batch_label1, h_batch_label, batch_predict_bytes / 2, cudaMemcpyHostToDevice, stream1 )) ;
cudaGetErrorString(cudaMemcpyAsync( d_batch_data2 , h_batch_data + 100 * 1024, batch_data_bytes / 2, cudaMemcpyHostToDevice, stream2 )) ;
cudaGetErrorString(cudaMemcpyAsync( d_batch_label2, h_batch_label + 100 * 10, batch_predict_bytes / 2, cudaMemcpyHostToDevice, stream2 )) ;
////////////////////// Computation ///////////////////////////
//Initialize
initialize<<<gridDim, blockDim, 0, stream1>>>(d_sum1, d_predict1, batch_size / 2 , label_size);
initialize_dz<<<gridDim, blockDim, 0, stream1>>>(d_dz1, label_size, batch_size / 2);
initialize_grad<<<gridDim, blockDim, 0, stream1>>>(d_grad1, label_size, weight_size);
initialize<<<gridDim, blockDim, 0, stream2>>>(d_sum2, d_predict2, batch_size / 2, label_size);
initialize_dz<<<gridDim, blockDim, 0, stream2>>>(d_dz2, label_size, batch_size / 2);
initialize_grad<<<gridDim, blockDim, 0, stream2>>>(d_grad2, label_size, weight_size);
// //DOT
// Forward pass: predict = batch_data . weight, then stabilize by
// subtracting the per-sample max before exponentiation.
Mult_GPU<<<gridDim, blockDim, 0, stream1>>>( d_batch_data1, d_weight, d_predict1, batch_size / 2, weight_size, label_size) ;
Mult_GPU<<<gridDim, blockDim, 0, stream2>>>( d_batch_data2, d_weight, d_predict2, batch_size / 2, weight_size, label_size) ;
max<<<gridDim, blockDim, 0, stream1>>>( d_predict1, d_max1, label_size, batch_size / 2 );
max<<<gridDim, blockDim, 0, stream2>>>( d_predict2, d_max2, label_size, batch_size / 2 );
normalize<<<gridDim, blockDim, 0, stream1>>>(d_predict1, d_max1, label_size, batch_size / 2 );
normalize<<<gridDim, blockDim, 0, stream2>>>(d_predict2, d_max2, label_size, batch_size / 2 );
// // Softmax
softmax_sum<<<gridDim, blockDim, 0, stream1>>>( d_predict1, d_sum1, label_size, batch_size / 2 );
softmax<<<gridDim, blockDim, 0, stream1>>>( d_softmax_value1, d_predict1, d_sum1, label_size, batch_size / 2 );
softmax_sum<<<gridDim, blockDim, 0, stream2>>>( d_predict2, d_sum2, label_size, batch_size / 2 );
softmax<<<gridDim, blockDim, 0, stream2>>>( d_softmax_value2, d_predict2, d_sum2, label_size, batch_size / 2 );
// // Weight Update
dz<<<gridDim, blockDim, 0, stream1>>>(d_softmax_value1, d_batch_label1, d_dz1, label_size, batch_size / 2 );
grad<<<gridDim, blockDim, 0, stream1>>>(d_batch_data1, d_dz1, d_grad1, label_size, batch_size / 2 , weight_size);
dz<<<gridDim, blockDim, 0, stream2>>>(d_softmax_value2, d_batch_label2, d_dz2, label_size, batch_size / 2 );
grad<<<gridDim, blockDim, 0, stream2>>>(d_batch_data2, d_dz2, d_grad2, label_size, batch_size / 2 , weight_size);
// Wait for the streams to finish
// cudaStreamSynchronize(stream1);
// cudaStreamSynchronize(stream2);
// NOTE(review): both streams update d_weight concurrently without
// synchronization here — confirm the updates are meant to interleave.
weight_update<<<gridDim, blockDim, 0, stream1>>>(d_weight, d_grad1, label_size, weight_size, learning_rate);
weight_update<<<gridDim, blockDim, 0, stream2>>>(d_weight, d_grad2, label_size, weight_size, learning_rate);
cudaGetErrorString(cudaDeviceSynchronize());
}
}
cudaStreamSynchronize(stream1);
cudaStreamSynchronize(stream2);
// double timeStamp2 = getTimeStamp() ;
// NOTE(review): async copy on the default stream followed immediately by a
// host read of h_weight — with pageable memory this happens to behave
// synchronously, but an explicit cudaDeviceSynchronize() would be safer.
cudaGetErrorString(cudaMemcpyAsync( h_weight, d_weight, weight_bytes, cudaMemcpyDeviceToHost)) ;
for(int i = 0; i < weight_size; i++){
for(int j = 0; j < label_size; j++){
printf("h_weight: %f\n", h_weight[i * label_size + j]);
}
}
// cudaStreamDestroy(stream1);
// cudaStreamDestroy(stream2);
// ///////////////////////////// Test Case /////////////////////////////////////
// cudaGetErrorString(cudaMemcpyAsync( h_sum, d_sum1, 100 * sizeof(float), cudaMemcpyDeviceToHost)) ;
// for(int i = 0; i < 100; i++){
// printf("sum : %f\n", h_sum[i]);
// }
// cudaGetErrorString(cudaMemcpyAsync( h_predict, d_softmax_value2, batch_predict_bytes / 2, cudaMemcpyDeviceToHost)) ;
// for(int i = 0; i < 100; i++){
// for(int j = 0; j < label_size; j++){
// printf(" h_predict: %f\n", h_predict[i * label_size + j]);
// }
// }
// cudaGetErrorString(cudaMemcpyAsync( h_batch_data, d_batch_data2, batch_data_bytes / 2, cudaMemcpyDeviceToHost)) ;
// for(int i = 0; i < 100; i++){
// for(int j = 0; j < weight_size; j++){
// printf(" h_train_data: %f\n", h_batch_data[i * weight_size + j]);
// }
// }
// float *h_label = (float *) malloc( batch_predict_bytes / 2 ) ;
// cudaGetErrorString(cudaMemcpyAsync( h_label, d_batch_label1, batch_predict_bytes / 2, cudaMemcpyDeviceToHost)) ;
// for(int i = 0; i < batch_size / 2; i++){
// for(int j = 0; j < label_size; j++){
// printf("label %f\n", h_label[i * label_size + j]);
// }
// }
//////////////////////////////// Data Transfer Success ////////////////////////////////
// cudaGetErrorString(cudaMemcpyAsync( h_weight, d_weight, weight_bytes, cudaMemcpyDeviceToHost)) ;
// for(int i = 0; i < weight_size; i++){
// for(int j = 0; j < label_size; j++){
// printf("h_weight: %f\n", h_weight[i * label_size + j]);
// }
// }
// printf("%.6f\n", timeStamp2-timeStamp1);
// // Test case
// for(int i = 0; i < data_size; i++){
// for(int j = 0; j < weight_size; j++){
// h_train_data[i * weight_size + j] = training_x1[i][j];
// //printf(" h_train_data: %f\n", h_train_data[i * label_size + j]);
// }
// }
// float *h_test_predict = (float *) malloc( predict_bytes ) ;
// float *d_test_predict, *d_test_max;
// cudaGetErrorString(cudaMalloc( (void **) &d_test_predict, predict_bytes)) ;
// cudaGetErrorString(cudaMalloc( (void **) &d_test_max, 10000 * sizeof(float))) ;
// cudaGetErrorString(cudaMemcpy( d_train_data, h_train_data, train_data_bytes, cudaMemcpyHostToDevice )) ;
// cudaGetErrorString(cudaMemcpy( d_weight, h_weight, weight_bytes, cudaMemcpyHostToDevice )) ;
// Mult_GPU<<<gridDim, blockDim>>>( d_train_data, d_weight, d_test_predict, data_size, weight_size, label_size) ;
// cudaGetErrorString(cudaDeviceSynchronize());
// // max<<<gridDim, blockDim>>>( d_test_predict, d_test_max, label_size, data_size );
// // cudaGetErrorString(cudaDeviceSynchronize());
// // normalize<<<gridDim, blockDim>>>(d_test_predict, d_test_max, label_size, data_size);
// // cudaGetErrorString(cudaDeviceSynchronize());
// cudaGetErrorString(cudaMemcpy( h_test_predict, d_test_predict, predict_bytes, cudaMemcpyDeviceToHost )) ;
// for(int i = 0; i < 10000; i++){
// for(int j = 0; j < 10; j++){
// if(i == 0 )printf("h_predict: %f\n", h_predict[i * label_size + j]);
// }
// }
///////////////////////// Error ///////////////////////////////
// float total_error = 0;
// for(int i = 0; i < batch_size; i++){
// for(int j = 0; j < label_size; j++){
// total_error -= label_onehot[i][j] * log(h_softmax[i * label_size + j]) ;
// }
// }
// printf("error: %f\n", total_error );
}
8,790 | #define X_BLOCK 1024
#define PITCH 4096
// Machine-generated row-convolution kernel rewritten for readability: an
// 11-tap symmetric filter applied along each 4096-element row with
// clamp-to-edge indexing.  The huge unsigned constants in the generated
// expression (4294967291 == (unsigned)-5, etc.) were just negative tap
// offsets wrapped mod 2^32.  Arithmetic stays in double precision because
// the generated coefficient literals were doubles, so results match the
// original expression tree.
extern "C"
__global__ void DoRows(float *constant,float *input0,float *result0){
    const double w[11] = {3.5482936e-2, 5.850147e-2, 8.63096e-2, 0.113945305,
                          0.13461047, 0.14230047, 0.13461047, 0.113945305,
                          8.63096e-2, 5.850147e-2, 3.5482936e-2};
    const int x = threadIdx.x + blockIdx.x * X_BLOCK;   // column within the row
    const int rowBase = blockIdx.y * PITCH;             // start of this row
    double acc = 0.0;
    for (int t = -5; t <= 5; ++t) {
        int xi = x + t;
        if (xi < 0) xi = 0;                             // clamp to row edges
        if (xi > 4095) xi = 4095;
        acc += input0[rowBase + xi] * w[t + 5];
    }
    result0[rowBase + x] = acc;
}
#undef X_BLOCK
#undef PITCH
#define X_BLOCK 1024
#define PITCH 4096
// Machine-generated column-convolution kernel rewritten for readability:
// the same 11-tap symmetric filter as DoRows, applied down columns of an
// 8192-row image (row stride 4096), clamping the row index to [0, 8191].
// The generated index expression's "(blockIdx.y*PITCH) ... - blockIdx.y*4096"
// terms cancel (PITCH == 4096), leaving plain clamped-row addressing.
// Arithmetic stays in double (the generated literals were doubles) so
// results match the original expression tree.
extern "C"
__global__ void DoCols(float *constant,float *input1,float *result0){
    const double w[11] = {3.5482936e-2, 5.850147e-2, 8.63096e-2, 0.113945305,
                          0.13461047, 0.14230047, 0.13461047, 0.113945305,
                          8.63096e-2, 5.850147e-2, 3.5482936e-2};
    const int x = threadIdx.x + blockIdx.x * X_BLOCK;   // column index
    const int y = blockIdx.y;                           // row index
    double acc = 0.0;
    for (int t = -5; t <= 5; ++t) {
        int yi = y + t;
        if (yi < 0) yi = 0;                             // clamp to image edges
        if (yi > 8191) yi = 8191;
        acc += input1[yi * 4096 + x] * w[t + 5];
    }
    result0[y * PITCH + x] = acc;
}
#undef X_BLOCK
#undef PITCH
|
8,791 | #include <stdio.h>
#define N (65536*2048)
#define TPB 512
// Elementwise vector add: dc[i] = da[i] + db[i], one thread per element.
// NOTE(review): there is no bounds guard — correctness relies on the launch
// covering exactly N elements (N == 65536*2048 is an exact multiple of
// TPB == 512 in this program).
__global__ void mykernel(int *da, int *db, int *dc) {
int index=threadIdx.x+blockIdx.x*blockDim.x;
dc[index] = da[index]+db[index];
}
/*
 * Allocates three N-element int vectors, fills a[i] = i and b[i] = i + 1,
 * adds them on the GPU with one thread per element and copies the result
 * back to the host.
 *
 * Fix: the host arrays are created with new[] but were released with
 * free(), which is undefined behaviour; they are now released with
 * delete[].
 */
int main(void)
{
    int *a = new int [N];
    int *b = new int [N];
    int *c = new int [N];
    int *da, *db, *dc;
    int size = sizeof(int);
    for (int i=0; i < N; i++) {
        a[i]=i;
        b[i]=i+1;
    }
    cudaMalloc((void **)&da, size*N);
    cudaMalloc((void **)&db, size*N);
    cudaMalloc((void **)&dc, size*N);
    cudaMemcpy(da, a, size*N, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, size*N, cudaMemcpyHostToDevice);
    int M = N/TPB;               // N is an exact multiple of TPB
    mykernel<<<M, TPB>>>(da, db, dc);
    // The blocking copy below also waits for the kernel to finish.
    cudaMemcpy(c, dc, size*N, cudaMemcpyDeviceToHost);
    //printf("hellow World!");
    for (int i=0; i < 10; i++){
        //printf("%d\t", c[i]);
    }
    delete[] a; delete[] b; delete[] c;   // was free(): UB on new[] storage
    cudaFree(da); cudaFree(db); cudaFree(dc);
    return 0;
}
|
8,792 | #include<stdio.h>
#include <bits/stdc++.h>
#include<cuda.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
using namespace std;
// Fills the dense n x n PageRank matrix `graph` (row = destination,
// column = source in the 0-based branch): every entry starts at the
// teleport term (1 - d)/n, then each edge read from the file adds the
// damping factor d to graph[dest][src].  Column normalization happens later
// in manage_adj_matrix.
// Expected remaining file contents: m (edge count), indexing flag
// (0 = 0-based, 1 = 1-based), then m (source, destination) pairs;
// n was already consumed by the caller.
void get_adj_matrix(float* graph, int n, float d, FILE *inputFilePtr ){
if ( inputFilePtr == NULL ) {
printf( "input.txt file failed to open." );
return ;
}
int m, indexing;
fscanf(inputFilePtr, "%d", &m);
fscanf(inputFilePtr, "%d", &indexing);
// Teleportation baseline for every (dest, src) pair.
for(int i = 0; i< n ; i++){
for(int j = 0; j< n; ++j){
graph[i* n + j] = (1 - d)/float(n);
}
}
while(m--){
int source, destin;
fscanf(inputFilePtr, "%d", &source);
fscanf(inputFilePtr, "%d", &destin);
if (indexing == 0){
graph[destin* n + source] += d* 1.0 ;
}
else{
// 1-based input: shift both endpoints down to 0-based indices.
graph[(destin - 1)* n + source - 1] += d* 1.0;
}
}
}
// Column-normalizes the adjacency matrix so each column sums to 1 (making
// it column-stochastic for the power method).  One thread owns one column;
// a column whose sum is zero is replaced by the uniform distribution 1/n.
__global__ void manage_adj_matrix(float* gpu_graph, int n){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= n) return;
    float colSum = 0.0;
    for (int row = 0; row < n; ++row) {
        colSum += gpu_graph[row * n + col];
    }
    for (int row = 0; row < n; ++row) {
        if (colSum != 0.0) {
            gpu_graph[row * n + col] /= colSum;
        } else {
            gpu_graph[row * n + col] = (1/(float)n);
        }
    }
}
// Sets every entry of the rank vector to the uniform value 1/n.
__global__ void initialize_rank(float* gpu_r, int n){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        gpu_r[idx] = (1/(float)n);
    }
}
// Snapshots the current rank vector: gpu_r_last = gpu_r (elementwise copy).
__global__ void store_rank(float* gpu_r,float* gpu_r_last, int n){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        gpu_r_last[idx] = gpu_r[idx];
    }
}
// Dense matrix-vector product: gpu_r = gpu_graph * gpu_r_last, one thread
// per output row.  The accumulation order matches the sequential loop.
__global__ void matmul(float* gpu_graph, float* gpu_r, float* gpu_r_last, int n){
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n) return;
    float acc = 0.0;
    for (int col = 0; col < n; ++col) {
        acc += gpu_r_last[col] * gpu_graph[row * n + col];
    }
    gpu_r[row] = acc;
}
// Stores |r_last - r| elementwise into r_last so the host can reduce it
// into an L1 convergence measure.
// Fix: the original called abs() on a float; with <bits/stdc++.h> and
// `using namespace std` in play, overload resolution can pick the integer
// abs(int) and silently truncate sub-1 differences to zero.  fabsf() is the
// unambiguous single-precision absolute value on both host and device.
__global__ void rank_diff(float* gpu_r,float* gpu_r_last, int n){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if(id < n){
        gpu_r_last[id] = fabsf(gpu_r_last[id] - gpu_r[id]);
    }
}
// Builds (rank value, 1-based node id) pairs on the device so the host can
// sort nodes by rank afterwards.
// NOTE(review): this assigns std::pair members inside device code; it works
// here because only the POD members are touched, but a plain struct would
// be the safer idiom — confirm this compiles on the target toolchain.
__global__ void init_pair_array(pair<float, int>* gpu_r_nodes, float * gpu_r, int n){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < n){
gpu_r_nodes[id].first = gpu_r[id];
gpu_r_nodes[id].second = id + 1;  // node ids are reported 1-based
}
}
/*
 * Runs the power method on the GPU: r <- G * r until the L1 change of the
 * rank vector falls below eps or max_iter iterations elapse, then copies
 * the final rank vector into r.
 *
 * Fix: the original leaked gpu_graph, gpu_r, gpu_r_last and the host
 * r_last buffer on every exit path; all four are now released before
 * returning.  The early `return` on convergence became a `break` so a
 * single copy/cleanup tail handles both exits identically.
 */
void power_method(float *graph, float *r, int n, int nblocks, int BLOCKSIZE, int max_iter = 1000, float eps = 0.000001 ){
    float* r_last = (float*) malloc(n * sizeof(float));   // host copy of per-entry |delta|
    float* gpu_graph;
    cudaMalloc(&gpu_graph, sizeof(float)*n*n);
    cudaMemcpy(gpu_graph, graph, sizeof(float)*n*n, cudaMemcpyHostToDevice);
    float* gpu_r;
    cudaMalloc(&gpu_r, sizeof(float)*n);
    float* gpu_r_last;
    cudaMalloc(&gpu_r_last, sizeof(float)*n);
    initialize_rank<<<nblocks, BLOCKSIZE>>>(gpu_r, n);    // r = 1/n everywhere
    cudaDeviceSynchronize();
    while(max_iter--){
        store_rank<<<nblocks, BLOCKSIZE>>>(gpu_r, gpu_r_last, n);        // r_last = r
        cudaDeviceSynchronize();
        matmul<<<nblocks, BLOCKSIZE>>>(gpu_graph, gpu_r, gpu_r_last, n); // r = G * r_last
        cudaDeviceSynchronize();
        rank_diff<<<nblocks, BLOCKSIZE>>>(gpu_r, gpu_r_last, n);         // r_last = |r_last - r|
        cudaDeviceSynchronize();
        cudaMemcpy(r_last, gpu_r_last, n* sizeof(float), cudaMemcpyDeviceToHost);
        // Host-side L1 norm of the change; small value => converged.
        float result = thrust::reduce( r_last, r_last + n);
        if(result < eps){
            break;
        }
    }
    cudaMemcpy(r, gpu_r, n* sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(gpu_graph);
    cudaFree(gpu_r);
    cudaFree(gpu_r_last);
    free(r_last);
}
/*
 * Prints the `count` highest-ranked nodes.  Builds (rank, 1-based node id)
 * pairs on the device, copies them back, sorts ascending on the host and
 * walks the tail of the sorted array.
 *
 * Fix: the original leaked r_nodes, gpu_r_nodes and gpu_r; all three are
 * now released before returning.
 */
void top_nodes(float* r, int n, int nblocks, int BLOCKSIZE, int count = 10){
    pair<float, int> *r_nodes = (pair<float, int> *) malloc ( n * sizeof (pair<float, int>) );
    pair<float, int> *gpu_r_nodes;
    cudaMalloc(&gpu_r_nodes, n * sizeof (pair<float, int>));
    float* gpu_r;
    cudaMalloc(&gpu_r, sizeof(float)*n);
    cudaMemcpy(gpu_r, r, sizeof(float)*n, cudaMemcpyHostToDevice);
    init_pair_array<<<nblocks, BLOCKSIZE>>>(gpu_r_nodes, gpu_r, n);
    // This blocking copy also waits for the kernel above to finish.
    cudaMemcpy(r_nodes, gpu_r_nodes, n * sizeof (pair<float, int>), cudaMemcpyDeviceToHost);
    thrust::sort(thrust::host, r_nodes, r_nodes + n);   // ascending by rank value
    int rank =1;
    while(rank <= count){
        printf("Rank %d Node is %d\n", rank, r_nodes[n - rank].second);
        rank++;
    }
    cudaFree(gpu_r_nodes);
    cudaFree(gpu_r);
    free(r_nodes);
}
// Entry point: reads a graph description, runs GPU PageRank (power method),
// prints the top-ranked nodes and the elapsed clock ticks.
// Usage: ./a.out <input-file> <blocksize>
int main(int argc, char** argv){
clock_t start, end;
FILE *inputFilePtr;
char * inputfile = argv[1];
char * bsize = argv[2];
int BLOCKSIZE = atoi(bsize);
inputFilePtr = fopen(inputfile, "r");
// NOTE(review): the fopen result is only checked later inside
// get_adj_matrix; the fscanf below would already dereference NULL on a
// missing file.  The file is also never fclose()d, and graph/r are never
// freed (the process exits right after, so it is benign here).
int n;
fscanf(inputFilePtr, "%d", &n);
int nblocks = ceil(float(n) / BLOCKSIZE);
float* graph = (float*)malloc(n*n*sizeof(float));
float* r = (float*) malloc(n * sizeof(float));
float d = 0.85;   // damping factor
get_adj_matrix(graph, n, d, inputFilePtr);
float* gpu_graph;
cudaMalloc(&gpu_graph, sizeof(float)*n*n);
cudaMemcpy(gpu_graph, graph, sizeof(float)*n*n, cudaMemcpyHostToDevice);
start = clock();
manage_adj_matrix<<<nblocks, BLOCKSIZE>>>(gpu_graph, n);
// Copy the normalized matrix back; power_method re-uploads it to the
// device (a round trip that could be avoided by passing gpu_graph).
cudaMemcpy(graph, gpu_graph, sizeof(float)*n*n, cudaMemcpyDeviceToHost);
power_method(graph, r, n, nblocks, BLOCKSIZE );
top_nodes(r, n, nblocks, BLOCKSIZE);
end = clock();
// NOTE(review): clock() reports CPU time in clock ticks; divide by
// CLOCKS_PER_SEC for seconds.
printf("Time taken :%f for parallel implementation with %d nodes.\n", float(end - start), n);
return 0;
}
8,793 | // (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences
// This is supplement to the paper:
// L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs".
// e-mail: barash @ itp.ac.ru (remove space)
#include<stdio.h>
#define gm19_CUDA_CALL(x) do { if((x) != cudaSuccess) { printf("Error: %s at %s:%d\n",cudaGetErrorString(cudaGetLastError()),__FILE__,__LINE__); exit(1);}} while(0)
#define gm19_BLOCKS 512
#define gm19_THREADS 128
#define gm19_ARRAY_SECTIONS (gm19_BLOCKS*gm19_THREADS/32)
#define gm19_g 524287
#define gm19_halfg 262144
#define gm19_k 15
#define gm19_q 28
#define gm19_qg 14680036
// Constants for the SSE path, 16-byte aligned so movaps can load them:
// four lanes each of 2*g (1048574), g-1 (524286) and g (524287),
// where g = 524287 = 2^19 - 1.
unsigned gm19_PPPP[12] __attribute__ ((aligned(16))) =
{1048574,1048574,1048574,1048574,524286,524286,524286,524286,524287,524287,524287,524287};
// Generator state: 32 independent streams, each keeping the current value
// xN[i] and the previous value xP[i] of the order-2 recurrence.
typedef struct{
unsigned xN[32] __attribute__ ((aligned(16))),
xP[32] __attribute__ ((aligned(16)));
} gm19_state;
// The SSE code uses the identical layout, so the SSE state is an alias.
typedef gm19_state gm19_sse_state;
// Host-only SSE2 implementation of one full generator step for all 32 lag
// pairs: the state is processed in eight 4-lane xmm chunks (two passes of
// four), and after each pass the 16 per-lane output bits (extracted from
// bit 18 of the new values via psrld $18 + pack + pmovmskb) are gathered
// into one half of the returned 32-bit word — the vector counterpart of
// gm19_generate_().
// NOTE(review): each ~25-instruction stanza appears to evaluate the same
// recurrence as gm19_CNext() (15*xN - 28*xP mod g, g = 2^19 - 1) using
// shift-free adds followed by a fold-carry reduction (psrld $19 / pand g)
// and a conditional subtract of g — verified only by inspection against
// the scalar path and the gm19_PPPP constants.
__host__ unsigned int gm19_sse_generate_(gm19_sse_state* state){
unsigned output1; unsigned output2 __attribute__ ((unused));
asm volatile("movaps (%4),%%xmm7\n" \
"movaps 32(%4),%%xmm6\n" \
"movaps (%2),%%xmm0\n" \
"movaps (%3),%%xmm1\n" \
"movaps %%xmm0,(%3)\n" \
"movaps %%xmm0,%%xmm2\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm7,%%xmm0\n" \
"psubd %%xmm1,%%xmm0\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm0,%%xmm0\n" \
"paddd %%xmm0,%%xmm0\n" \
"paddd %%xmm0,%%xmm0\n" \
"paddd %%xmm0,%%xmm0\n" \
"psubd %%xmm2,%%xmm0\n" \
"paddd %%xmm1,%%xmm0\n" \
"movaps %%xmm0,%%xmm1\n" \
"psrld $19,%%xmm1\n" \
"pand %%xmm6,%%xmm0\n" \
"paddd %%xmm1,%%xmm0\n" \
"movaps %%xmm0,%%xmm1\n" \
"pcmpgtd 16(%4),%%xmm1\n" \
"pand %%xmm6,%%xmm1\n" \
"psubd %%xmm1,%%xmm0\n" \
"movaps %%xmm0,(%2)\n" \
"movaps 16(%2),%%xmm3\n" \
"movaps 16(%3),%%xmm1\n" \
"movaps %%xmm3,16(%3)\n" \
"movaps %%xmm3,%%xmm2\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm7,%%xmm3\n" \
"psubd %%xmm1,%%xmm3\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm3,%%xmm3\n" \
"paddd %%xmm3,%%xmm3\n" \
"paddd %%xmm3,%%xmm3\n" \
"paddd %%xmm3,%%xmm3\n" \
"psubd %%xmm2,%%xmm3\n" \
"paddd %%xmm1,%%xmm3\n" \
"movaps %%xmm3,%%xmm1\n" \
"psrld $19,%%xmm1\n" \
"pand %%xmm6,%%xmm3\n" \
"paddd %%xmm1,%%xmm3\n" \
"movaps %%xmm3,%%xmm1\n" \
"pcmpgtd 16(%4),%%xmm1\n" \
"pand %%xmm6,%%xmm1\n" \
"psubd %%xmm1,%%xmm3\n" \
"movaps %%xmm3,16(%2)\n" \
"movaps 32(%2),%%xmm4\n" \
"movaps 32(%3),%%xmm1\n" \
"movaps %%xmm4,32(%3)\n" \
"movaps %%xmm4,%%xmm2\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm7,%%xmm4\n" \
"psubd %%xmm1,%%xmm4\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm4,%%xmm4\n" \
"paddd %%xmm4,%%xmm4\n" \
"paddd %%xmm4,%%xmm4\n" \
"paddd %%xmm4,%%xmm4\n" \
"psubd %%xmm2,%%xmm4\n" \
"paddd %%xmm1,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"psrld $19,%%xmm1\n" \
"pand %%xmm6,%%xmm4\n" \
"paddd %%xmm1,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"pcmpgtd 16(%4),%%xmm1\n" \
"pand %%xmm6,%%xmm1\n" \
"psubd %%xmm1,%%xmm4\n" \
"movaps %%xmm4,32(%2)\n" \
"movaps 48(%2),%%xmm5\n" \
"movaps 48(%3),%%xmm1\n" \
"movaps %%xmm5,48(%3)\n" \
"movaps %%xmm5,%%xmm2\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm7,%%xmm5\n" \
"psubd %%xmm1,%%xmm5\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm5,%%xmm5\n" \
"paddd %%xmm5,%%xmm5\n" \
"paddd %%xmm5,%%xmm5\n" \
"paddd %%xmm5,%%xmm5\n" \
"psubd %%xmm2,%%xmm5\n" \
"paddd %%xmm1,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"psrld $19,%%xmm1\n" \
"pand %%xmm6,%%xmm5\n" \
"paddd %%xmm1,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"pcmpgtd 16(%4),%%xmm1\n" \
"pand %%xmm6,%%xmm1\n" \
"psubd %%xmm1,%%xmm5\n" \
"movaps %%xmm5,48(%2)\n" \
"psrld $18,%%xmm0\n" \
"psrld $18,%%xmm3\n" \
"psrld $18,%%xmm4\n" \
"psrld $18,%%xmm5\n" \
"packssdw %%xmm3,%%xmm0\n" \
"packssdw %%xmm5,%%xmm4\n" \
"packsswb %%xmm4,%%xmm0\n" \
"psllw $7,%%xmm0\n" \
"pmovmskb %%xmm0,%0\n" \
"movaps 64(%2),%%xmm0\n" \
"movaps 64(%3),%%xmm1\n" \
"movaps %%xmm0,64(%3)\n" \
"movaps %%xmm0,%%xmm2\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm7,%%xmm0\n" \
"psubd %%xmm1,%%xmm0\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm0,%%xmm0\n" \
"paddd %%xmm0,%%xmm0\n" \
"paddd %%xmm0,%%xmm0\n" \
"paddd %%xmm0,%%xmm0\n" \
"psubd %%xmm2,%%xmm0\n" \
"paddd %%xmm1,%%xmm0\n" \
"movaps %%xmm0,%%xmm1\n" \
"psrld $19,%%xmm1\n" \
"pand %%xmm6,%%xmm0\n" \
"paddd %%xmm1,%%xmm0\n" \
"movaps %%xmm0,%%xmm1\n" \
"pcmpgtd 16(%4),%%xmm1\n" \
"pand %%xmm6,%%xmm1\n" \
"psubd %%xmm1,%%xmm0\n" \
"movaps %%xmm0,64(%2)\n" \
"movaps 80(%2),%%xmm3\n" \
"movaps 80(%3),%%xmm1\n" \
"movaps %%xmm3,80(%3)\n" \
"movaps %%xmm3,%%xmm2\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm7,%%xmm3\n" \
"psubd %%xmm1,%%xmm3\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm3,%%xmm3\n" \
"paddd %%xmm3,%%xmm3\n" \
"paddd %%xmm3,%%xmm3\n" \
"paddd %%xmm3,%%xmm3\n" \
"psubd %%xmm2,%%xmm3\n" \
"paddd %%xmm1,%%xmm3\n" \
"movaps %%xmm3,%%xmm1\n" \
"psrld $19,%%xmm1\n" \
"pand %%xmm6,%%xmm3\n" \
"paddd %%xmm1,%%xmm3\n" \
"movaps %%xmm3,%%xmm1\n" \
"pcmpgtd 16(%4),%%xmm1\n" \
"pand %%xmm6,%%xmm1\n" \
"psubd %%xmm1,%%xmm3\n" \
"movaps %%xmm3,80(%2)\n" \
"movaps 96(%2),%%xmm4\n" \
"movaps 96(%3),%%xmm1\n" \
"movaps %%xmm4,96(%3)\n" \
"movaps %%xmm4,%%xmm2\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm7,%%xmm4\n" \
"psubd %%xmm1,%%xmm4\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm4,%%xmm4\n" \
"paddd %%xmm4,%%xmm4\n" \
"paddd %%xmm4,%%xmm4\n" \
"paddd %%xmm4,%%xmm4\n" \
"psubd %%xmm2,%%xmm4\n" \
"paddd %%xmm1,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"psrld $19,%%xmm1\n" \
"pand %%xmm6,%%xmm4\n" \
"paddd %%xmm1,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"pcmpgtd 16(%4),%%xmm1\n" \
"pand %%xmm6,%%xmm1\n" \
"psubd %%xmm1,%%xmm4\n" \
"movaps %%xmm4,96(%2)\n" \
"movaps 112(%2),%%xmm5\n" \
"movaps 112(%3),%%xmm1\n" \
"movaps %%xmm5,112(%3)\n" \
"movaps %%xmm5,%%xmm2\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm7,%%xmm5\n" \
"psubd %%xmm1,%%xmm5\n" \
"paddd %%xmm1,%%xmm1\n" \
"paddd %%xmm5,%%xmm5\n" \
"paddd %%xmm5,%%xmm5\n" \
"paddd %%xmm5,%%xmm5\n" \
"paddd %%xmm5,%%xmm5\n" \
"psubd %%xmm2,%%xmm5\n" \
"paddd %%xmm1,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"psrld $19,%%xmm1\n" \
"pand %%xmm6,%%xmm5\n" \
"paddd %%xmm1,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"pcmpgtd 16(%4),%%xmm1\n" \
"pand %%xmm6,%%xmm1\n" \
"psubd %%xmm1,%%xmm5\n" \
"movaps %%xmm5,112(%2)\n" \
"psrld $18,%%xmm0\n" \
"psrld $18,%%xmm3\n" \
"psrld $18,%%xmm4\n" \
"psrld $18,%%xmm5\n" \
"packssdw %%xmm3,%%xmm0\n" \
"packssdw %%xmm5,%%xmm4\n" \
"packsswb %%xmm4,%%xmm0\n" \
"psllw $7,%%xmm0\n" \
"pmovmskb %%xmm0,%1\n" \
"shll $16,%1\n" \
"addl %1,%0\n" \
"":"=&r"(output1),"=&r"(output2):"r"(state->xN),"r"(state->xP),"r"(gm19_PPPP));
return output1;
}
// Copies all 32 lag pairs from the scalar state into the SSE state (the
// two layouts are identical, so this is a straight field-by-field copy).
__device__ __host__ void gm19_get_sse_state_(gm19_state* state,gm19_sse_state* sse_state){
    for (int i = 0; i < 32; ++i) {
        sse_state->xN[i] = state->xN[i];
        sse_state->xP[i] = state->xP[i];
    }
}
// One step of the recurrence x_{n+1} = (k*x_n - q*x_{n-1}) mod g with
// k = 15, q = 28, g = 524287.  gm19_qg = q*g is added first so the unsigned
// subtraction cannot wrap for in-range N and P.
__device__ __host__ unsigned gm19_CNext(unsigned N,unsigned P){
return (gm19_qg+gm19_k*N-gm19_q*P)%gm19_g;
}
// Same recurrence step with arbitrary coefficients myk, myq, computed in
// 64-bit so the products cannot overflow; used by the skip-ahead machinery,
// where the coefficients themselves grow up to g-1.
__device__ __host__ unsigned gm19_CNext2(unsigned N,unsigned P,unsigned myk,unsigned myq){
unsigned long long curr1,curr2,curr3;
curr1=(unsigned long long)myk*(unsigned long long)N;
curr2=(unsigned long long)myq*(unsigned long long)P;
// myq*g is added so the subtraction stays non-negative before the mod.
curr3=((unsigned long long)myq*(unsigned long long)gm19_g+curr1-curr2)%gm19_g;
return curr3;
}
// Advances the pair (x0, x1) by 2^n steps and returns x_{2^n}.  Works by
// coefficient doubling: if (k_i, q_i) are the stride-2^i coefficients with
// x_{m+2^i} = k_i*x_m - q_i*x_{m-2^i} (mod g), then
//   x_{2^(i+1)} = k_i*x_{2^i} - q_i*x_0,
//   k_{i+1} = k_i^2 - 2*q_i (mod g),  q_{i+1} = q_i^2 (mod g).
__device__ __host__ unsigned gm19_GetNextN(unsigned x0,unsigned x1,unsigned n){ // returns x_{2^n}
unsigned myk=gm19_k,myq=gm19_q,i,x=x1;
for(i=0;i<n;i++){
x=gm19_CNext2(x,x0,myk,myq);
myk=gm19_CNext2(myk,2,myk,myq);  // k <- k*k - 2*q (mod g)
myq=gm19_CNext2(myq,0,myq,0);    // q <- q*q (mod g)
}
return x;
}
// Returns x_N for an arbitrary 64-bit N via binary decomposition: for each
// set bit i of N, both elements of the running pair (xp, xn) are advanced
// by 2^i steps using gm19_GetNextN.
__device__ __host__ unsigned gm19_GetNextAny(unsigned x0,unsigned x1,unsigned long long N){ // returns x_N
unsigned long long i; unsigned xp=x0,xn=x1,xpnew,xnnew,shift=0;
i=N; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
xpnew=gm19_GetNextN(xp,xn,shift);
// xn is advanced from the next element of the sequence so the pair
// stays adjacent after the jump.
xnnew=gm19_GetNextN(xn,gm19_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
return xp;
}
/* Advances every one of the 32 independent orbits by "offset" steps. */
__device__ __host__ void gm19_skipahead_(gm19_state* state, unsigned long long offset){
for(unsigned j=0;j<32;j++){
unsigned newP = gm19_GetNextAny(state->xP[j], state->xN[j], offset);
unsigned newN = gm19_GetNextAny(state->xP[j], state->xN[j], offset+1);
state->xP[j] = newP;
state->xN[j] = newN;
}
}
/* Seeds all 32 orbits from one fixed pair: each orbit starts 8382841959
   steps further along the master sequence than the previous one. */
__device__ __host__ void gm19_init_(gm19_state* state){
unsigned prev = 514932, curr = 127293;
for(unsigned j=0;j<32;j++){
unsigned p  = gm19_GetNextAny(prev, curr, 8382841959ULL);
unsigned nx = gm19_GetNextAny(prev, curr, 8382841960ULL);
state->xP[j] = p;
state->xN[j] = nx;
prev = p; curr = nx;
}
}
/* Initializes the state, then jumps to the start of the requested sequence.
   0 <= SequenceNumber < 1000; each sequence is at most ~6*10^6 values long. */
__device__ __host__ void gm19_init_sequence_(gm19_state* state,unsigned SequenceNumber){
gm19_init_(state);
gm19_skipahead_(state, 6927047ULL * (unsigned long long)SequenceNumber);
}
/* Advances every orbit one step and packs the 32 threshold bits
   (x >= gm19_halfg) into one output word; orbit i maps to bit i. */
__device__ __host__ unsigned int gm19_generate_(gm19_state* state){
unsigned word = 0;
for(int i=0;i<32;i++){
unsigned next = gm19_CNext(state->xN[i], state->xP[i]);
state->xP[i] = state->xN[i];
state->xN[i] = next;
if(next >= gm19_halfg) word |= (1u << i);
}
return word;
}
/* Advances every orbit one step, packs the 32 threshold bits into a word
   (same loop as gm19_generate_) and scales it into [0, 1).
   Fix: the scale constant now carries an 'f' suffix -- the old double
   literal promoted the product to double and truncated it back to float
   on every call, which is pure overhead on the device. */
__device__ __host__ float gm19_generate_uniform_float_(gm19_state* state){
int i; unsigned temp,sum=0,bit=1;
for(i=0;i<32;i++){
temp=gm19_CNext(state->xN[i],state->xP[i]);
state->xP[i]=state->xN[i]; state->xN[i]=temp;
sum += ((temp<gm19_halfg)?0:bit); bit*=2;
}
return ((float) sum) * 2.3283064365386963e-10f; // 2^-32
}
/* Dumps the 32 (xN, xP) lag pairs, each reduced mod gm19_g, as two
   comma-separated lists on stdout. */
__host__ void gm19_print_state_(gm19_state* state){
printf("Generator State:\nxN={");
for(int i=0;i<32;i++) printf((i<31) ? "%u," : "%u}\nxP={", state->xN[i]%gm19_g);
for(int i=0;i<32;i++) printf((i<31) ? "%u," : "%u}\n\n",  state->xP[i]%gm19_g);
}
/* Same dump as gm19_print_state_, but for the SSE-layout state. */
__host__ void gm19_print_sse_state_(gm19_sse_state* state){
printf("Generator State:\nxN={");
for(int i=0;i<32;i++) printf((i<31) ? "%u," : "%u}\nxP={", state->xN[i]%gm19_g);
for(int i=0;i<32;i++) printf((i<31) ? "%u," : "%u}\n\n",  state->xP[i]%gm19_g);
}
/* Generates (*length) 32-bit values per group of 32 threads.  Each thread
   owns one orbit (threadIdx.x % 32); threads 32k..32k+31 cooperate on
   output section k.  Requires blockDim.x == gm19_THREADS (multiple of 32).
   Fix: a barrier was added at the bottom of the loop.  Without it a thread
   could overwrite a[] for the next iteration while lane 0 of its group is
   still reading a[threadIdx.x+16] -- a data race under the independent
   thread scheduling of Volta and later GPUs.  The loop trip count is
   uniform across the block (*length is shared), so the barrier is safe. */
__global__ void gm19_kernel_generate_array(gm19_state* state, unsigned int* out, long* length) {
unsigned temp,sum,i,orbit,seqNum; long offset;
__shared__ unsigned xP[gm19_THREADS]; // one generator per s=32 threads, i.e. one orbit
__shared__ unsigned xN[gm19_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gm19_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 32;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gm19_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
xN[threadIdx.x]=gm19_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array
temp = gm19_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = (temp < gm19_halfg ? 0 : (1<<orbit) );
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=sum; }
__syncthreads(); // fix: keep a[] stable until every group's lane 0 has read it
}
}
/* Copies the state to the device and fills dev_out (device memory) with
   random words.  dev_out must hold at least
   ceil(length/gm19_ARRAY_SECTIONS)*gm19_ARRAY_SECTIONS values.
   Fix: cudaDeviceSynchronize() before freeing -- it surfaces asynchronous
   kernel-execution errors through gm19_CUDA_CALL and guarantees the kernel
   is done with dev_state/dev_length before they are released. */
__host__ void gm19_generate_gpu_array_(gm19_state* state, unsigned int* dev_out, long length){
long mylength = length/gm19_ARRAY_SECTIONS;
gm19_state* dev_state;
long* dev_length;
if((mylength*gm19_ARRAY_SECTIONS)<length) mylength++;
gm19_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gm19_state)));
gm19_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gm19_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gm19_state),cudaMemcpyHostToDevice));
gm19_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gm19_kernel_generate_array<<<gm19_BLOCKS,gm19_THREADS>>>(dev_state,dev_out,dev_length);
gm19_CUDA_CALL(cudaGetLastError());
gm19_CUDA_CALL(cudaDeviceSynchronize());
gm19_CUDA_CALL(cudaFree(dev_state)); gm19_CUDA_CALL(cudaFree(dev_length));
}
/* Float variant of gm19_kernel_generate_array: same cooperative scheme,
   output scaled into [0, 1).  Requires blockDim.x == gm19_THREADS.
   Fixes: (1) end-of-loop barrier added -- without it a[] could be
   overwritten for the next iteration while lane 0 still reads it (race
   under independent thread scheduling); (2) the scale constant now has an
   'f' suffix so the multiply stays in single precision. */
__global__ void gm19_kernel_generate_array_float(gm19_state* state, float* out, long* length) {
unsigned temp,sum,i,orbit,seqNum; long offset;
__shared__ unsigned xP[gm19_THREADS]; // one generator per s=32 threads, i.e. one orbit
__shared__ unsigned xN[gm19_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gm19_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 32;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gm19_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
xN[threadIdx.x]=gm19_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array
temp = gm19_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = (temp < gm19_halfg ? 0 : (1<<orbit) );
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=((float)sum) * 2.3283064365386963e-10f; } // 2^-32
__syncthreads(); // fix: keep a[] stable until every group's lane 0 has read it
}
}
/* Float variant of gm19_generate_gpu_array_: fills dev_out with uniforms
   in [0, 1).  Fix: cudaDeviceSynchronize() before freeing -- surfaces
   asynchronous execution errors and keeps dev_state/dev_length alive
   until the kernel has finished with them. */
__host__ void gm19_generate_gpu_array_float_(gm19_state* state, float* dev_out, long length){
long mylength = length/gm19_ARRAY_SECTIONS;
gm19_state* dev_state;
long* dev_length;
if((mylength*gm19_ARRAY_SECTIONS)<length) mylength++;
gm19_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gm19_state)));
gm19_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gm19_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gm19_state),cudaMemcpyHostToDevice));
gm19_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gm19_kernel_generate_array_float<<<gm19_BLOCKS,gm19_THREADS>>>(dev_state,dev_out,dev_length);
gm19_CUDA_CALL(cudaGetLastError());
gm19_CUDA_CALL(cudaDeviceSynchronize());
gm19_CUDA_CALL(cudaFree(dev_state)); gm19_CUDA_CALL(cudaFree(dev_length));
}
/* Double variant of gm19_kernel_generate_array: same cooperative scheme,
   output scaled into [0, 1) in double precision (constant intentionally a
   double literal here).  Requires blockDim.x == gm19_THREADS.
   Fix: end-of-loop barrier added -- without it a[] could be overwritten
   for the next iteration while lane 0 still reads it (race under
   independent thread scheduling on Volta+). */
__global__ void gm19_kernel_generate_array_double(gm19_state* state, double* out, long* length) {
unsigned temp,i,orbit,seqNum,sum; long offset;
__shared__ unsigned xP[gm19_THREADS]; // one generator per s=32 threads, i.e. one orbit
__shared__ unsigned xN[gm19_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gm19_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 32;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gm19_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
xN[threadIdx.x]=gm19_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array
temp = gm19_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = (temp < gm19_halfg ? 0 : (1<<orbit) );
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=((double)sum) * 2.3283064365386963e-10; } // 2^-32
__syncthreads(); // fix: keep a[] stable until every group's lane 0 has read it
}
}
/* Double variant of gm19_generate_gpu_array_.  Fix: cudaDeviceSynchronize()
   before freeing -- surfaces asynchronous execution errors and keeps
   dev_state/dev_length alive until the kernel has finished with them. */
__host__ void gm19_generate_gpu_array_double_(gm19_state* state, double* dev_out, long length){
long mylength = length/gm19_ARRAY_SECTIONS;
gm19_state* dev_state;
long* dev_length;
if((mylength*gm19_ARRAY_SECTIONS)<length) mylength++;
gm19_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gm19_state)));
gm19_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gm19_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gm19_state),cudaMemcpyHostToDevice));
gm19_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gm19_kernel_generate_array_double<<<gm19_BLOCKS,gm19_THREADS>>>(dev_state,dev_out,dev_length);
gm19_CUDA_CALL(cudaGetLastError());
gm19_CUDA_CALL(cudaDeviceSynchronize());
gm19_CUDA_CALL(cudaFree(dev_state)); gm19_CUDA_CALL(cudaFree(dev_length));
}
/* Host-memory variant: allocates a device buffer, generates
   ceil(length/gm19_ARRAY_SECTIONS)*gm19_ARRAY_SECTIONS values and copies
   the first "length" of them back into out.  The blocking device-to-host
   copy also synchronizes with the kernel before anything is freed. */
__host__ void gm19_generate_array_(gm19_state* state, unsigned int* out, long length){
long sectionLen = length/gm19_ARRAY_SECTIONS;
gm19_state* d_state;
unsigned int* d_out;
long* d_len;
if(sectionLen*gm19_ARRAY_SECTIONS < length) sectionLen++;
gm19_CUDA_CALL(cudaMalloc((void**)&d_state,sizeof(gm19_state)));
gm19_CUDA_CALL(cudaMalloc((void**)&d_out,sectionLen*gm19_ARRAY_SECTIONS*sizeof(unsigned int)));
gm19_CUDA_CALL(cudaMalloc((void**)&d_len,sizeof(long)));
gm19_CUDA_CALL(cudaMemcpy(d_state,state,sizeof(gm19_state),cudaMemcpyHostToDevice));
gm19_CUDA_CALL(cudaMemcpy(d_len,&sectionLen,sizeof(long),cudaMemcpyHostToDevice));
gm19_kernel_generate_array<<<gm19_BLOCKS,gm19_THREADS>>>(d_state,d_out,d_len);
gm19_CUDA_CALL(cudaGetLastError());
gm19_CUDA_CALL(cudaMemcpy(out,d_out,length*sizeof(unsigned int),cudaMemcpyDeviceToHost));
gm19_CUDA_CALL(cudaFree(d_state));
gm19_CUDA_CALL(cudaFree(d_out));
gm19_CUDA_CALL(cudaFree(d_len));
}
|
8,794 | #include "gmv.cuh"
#include "stdio.h"
//Calculates c=alpha*A*x+beta*c, for matrix A (dimensions nxm), vectors c,x and scalars alpha, beta
// Computes c = alpha*A*x + beta*c for an n x m matrix A (element (i,j) at
// A[i*stride_col_a + j*stride_row_a]), vector x (stride_x) and vector c
// (stride_c).  One thread per output row; blockDim.x must equal TILE_SIZE.
// Fixes: (1) the original placed __syncthreads() inside "if (col < n)",
// which is undefined behavior (possible deadlock) whenever n is not a
// multiple of the block size -- barriers are now at block-uniform points;
// (2) the x tile is now loaded cooperatively (one element per thread)
// instead of every thread racing to write the whole buffer.
__global__
void k_gmv_f32(int n, int m, float alpha, float* A, int stride_row_a, int stride_col_a,float* x, int stride_x, float beta, float* c, int stride_c){
    const int TILE_SIZE = 64;               // must equal blockDim.x
    int tx  = threadIdx.x;
    int col = blockIdx.x * TILE_SIZE + tx;  // output row handled by this thread
    int q   = m / TILE_SIZE;
    int rem = m % TILE_SIZE;
    __shared__ float buf[TILE_SIZE];        // current tile of x
    float* ptrA = A + col * stride_col_a;   // not dereferenced unless col < n
    float* ptrX = x;
    float sum = 0.0f;
    for (int i = 0; i < q; i++){
        buf[tx] = ptrX[tx * stride_x];      // cooperative tile load
        __syncthreads();                    // tile visible to all threads
        if (col < n){
            #pragma unroll
            for (int j = 0; j < TILE_SIZE; j++){
                sum += buf[j] * ptrA[j * stride_row_a];
            }
        }
        __syncthreads();                    // protect buf before next overwrite
        ptrA += TILE_SIZE * stride_row_a;
        ptrX += TILE_SIZE * stride_x;
    }
    if (rem > 0){
        if (tx < rem) buf[tx] = ptrX[tx * stride_x];
        __syncthreads();
        if (col < n){
            for (int j = 0; j < rem; j++){
                sum += buf[j] * ptrA[j * stride_row_a];
            }
        }
    }
    if (col < n){
        c[col * stride_c] = beta * c[col * stride_c] + alpha * sum;
    }
}
// Launches k_gmv_f32 on device-resident buffers.
// Fix: the grid size is computed with integer ceiling division; the old
// float-based ceil(n/bsmx) loses precision once n exceeds 2^24.
__host__
void gmv_f32_device(int n, int m, float alpha, float* A_d, int stride_row_a, int stride_col_a, float* x, int stride_x, float beta, float* C_d, int stride_c){
    const int bsmx = 64; // blocksize x; must match TILE_SIZE in k_gmv_f32
    dim3 threadLayout(bsmx, 1, 1);
    dim3 grid((n + bsmx - 1) / bsmx, 1, 1);
    k_gmv_f32<<<grid,threadLayout>>>(n,m,alpha,A_d,stride_row_a,stride_col_a,x,stride_x,beta,C_d,stride_c);
}
// Host wrapper for c = alpha*A*x + beta*c: copies inputs to the device,
// runs the kernel and copies c back.  A is n x m with element (i,j) at
// A_h[i*stride_col_a + j*stride_row_a]; x has m elements at stride_x;
// c has n elements at stride_c (strides assumed non-negative).
// Fixes: buffer sizes are now derived from the strides -- the old code
// copied n*m floats for c (a length-n vector) and only m floats for x even
// when stride_x > 1, reading past the end of the caller's arrays.  Sizes
// are size_t to avoid int overflow for large n*m.
__host__
void gmv_f32(int n, int m, float alpha, float* A_h, int stride_row_a, int stride_col_a, float* x_h, int stride_x, float beta, float* C_h, int stride_c){
    if ((n==0) || (m==0)){
        return;
    }
    float* A_d;
    float* x_d;
    float* C_d;
    // extent = index of last touched element + 1, in elements
    size_t sizeA = sizeof(float) * ((size_t)(n-1)*stride_col_a + (size_t)(m-1)*stride_row_a + 1);
    size_t sizeX = sizeof(float) * ((size_t)(m-1)*stride_x + 1);
    size_t sizeC = sizeof(float) * ((size_t)(n-1)*stride_c + 1);
    cudaMalloc((void**)&A_d, sizeA);
    cudaMalloc((void**)&x_d, sizeX);
    cudaMalloc((void**)&C_d, sizeC);
    cudaError_t copy1=cudaMemcpy((void*) A_d, (void*) A_h, sizeA,cudaMemcpyHostToDevice);
    cudaError_t copy2=cudaMemcpy((void*) x_d, (void*) x_h, sizeX,cudaMemcpyHostToDevice);
    cudaError_t copy3=cudaMemcpy((void*) C_d, (void*) C_h, sizeC,cudaMemcpyHostToDevice);
    if ((copy1==cudaSuccess) && (copy2==cudaSuccess) && (copy3==cudaSuccess)){
        // reuse the device-side launcher instead of duplicating the config
        gmv_f32_device(n,m,alpha,A_d,stride_row_a,stride_col_a,x_d,stride_x,beta,C_d,stride_c);
        cudaMemcpy((void*)C_h,(void*)C_d,sizeC,cudaMemcpyDeviceToHost);
    }
    else{
        printf("Error copying value to device in gmv_f32\n");
    }
    cudaFree(A_d);
    cudaFree(x_d);
    cudaFree(C_d);
}
8,795 | #include<bits/stdc++.h>
using namespace std;
typedef unsigned int uint;
typedef unsigned long long ull;
const int PADDING = 16;
// Device-side rotate-left of a by x bits (callers pass x in 1..31).
__device__ uint LR_dev(uint a, int x){
return (a << x) | (a >> (32 - x));
}
// Brute-force search kernel: each thread builds one candidate final SHA-1
// block whose PADDING bytes encode its 64-bit candidate id as characters
// in the '@'..'O' range (0x40 | nibble), runs the 80-round compression
// starting from the intermediate chaining state (h0..h4) computed on the
// host by get_hash, and publishes its id to *res when the resulting h0 is
// zero.  NOTE(review): an id of 0 cannot be distinguished from "no hit"
// by the host loop -- confirm this case is acceptable.
__global__ void sha1_kernel(ull* res, ull IDX, uint h0, uint h1, uint h2, uint h3, uint h4, uint nbits) {
// candidate id: 8 bits thread, 12 bits blockIdx.x, 12 bits blockIdx.y, rest IDX
ull id = threadIdx.x | (ull)blockIdx.x << 8 | (ull)blockIdx.y << 20 | (ull)IDX << 32, idx = id;
uint w[16];
// pack 4 id nibbles per word as bytes 0x40|nibble (big-endian within the word)
for(int i = 0; i < PADDING/4; i++, idx >>= 16){
w[i] = 0x40404040u | (idx&15) << 24 | (idx>>4&15) << 16 | (idx>>8&15) << 8 | (idx>>12&15);
}
w[PADDING/4] = 0x0a800000; // '\n' terminator followed by the 0x80 SHA-1 pad bit
for(int i = PADDING/4+1; i < 15; i++) w[i] = 0;
w[15] = nbits; // message length in bits, patched in from the host
uint a, b, c, d, e, f, k;
a = h0; b = h1; c = h2; d = h3; e = h4;
// rounds 0-15 use the message words directly
for(int i = 0; i < 16; i++){
f = (b&c)|(~b&d);
k = 0x5A827999;
uint tmp = LR_dev(a, 5) + f + e + k + w[i&15];
e = d; d = c; c = LR_dev(b, 30); b = a; a = tmp;
}
// rounds 16+ expand the schedule in place in the 16-word ring buffer
for(int i = 16; i < 20; i++){
w[i&15] = LR_dev(w[i-3&15]^w[i-8&15]^w[i-14&15]^w[i&15], 1);
f = (b&c)|(~b&d);
k = 0x5A827999;
uint tmp = LR_dev(a, 5) + f + e + k + w[i&15];
e = d; d = c; c = LR_dev(b, 30); b = a; a = tmp;
}
for(int i = 20; i < 40; i++){
w[i&15] = LR_dev(w[i-3&15]^w[i-8&15]^w[i-14&15]^w[i&15], 1);
f = (b^c^d);
k = 0x6ED9EBA1;
uint tmp = LR_dev(a, 5) + f + e + k + w[i&15];
e = d; d = c; c = LR_dev(b, 30); b = a; a = tmp;
}
for(int i = 40; i < 60; i++){
w[i&15] = LR_dev(w[i-3&15]^w[i-8&15]^w[i-14&15]^w[i&15], 1);
f = (b&c)|(b&d)|(c&d);
k = 0x8F1BBCDC;
uint tmp = LR_dev(a, 5) + f + e + k + w[i&15];
e = d; d = c; c = LR_dev(b, 30); b = a; a = tmp;
}
for(int i = 60; i < 80; i++){
w[i&15] = LR_dev(w[i-3&15]^w[i-8&15]^w[i-14&15]^w[i&15], 1);
f = b^c^d;
k = 0xCA62C1D6;
uint tmp = LR_dev(a, 5) + f + e + k + w[i&15];
e = d; d = c; c = LR_dev(b, 30); b = a; a = tmp;
}
h0 = h0 + a;
h1 = h1 + b;
h2 = h2 + c;
h3 = h3 + d;
h4 = h4 + e;
// success criterion: top 32 hash bits all zero (stricter variant commented out)
// if(h0 == 0 && (h1 >> 20) == 0) *res = id;
if(h0 == 0) *res = id;
}
// Plain holder for a SHA-1 chaining state (h0..h4) plus the message bit
// length word that sha1_kernel patches into w[15] of the final block.
struct sha1{
sha1(uint h0, uint h1, uint h2, uint h3, uint h4, uint nbits):
h0(h0), h1(h1), h2(h2), h3(h3), h4(h4),
nbits(nbits){}
uint h0, h1, h2, h3, h4, nbits;
};
// Reads the whole message from stdin and reshapes it for the GPU search:
// pads with spaces to a 64-byte boundary, appends PADDING '@' placeholder
// bytes (overwritten later with the found id), a trailing newline, and a
// decimal length patched into the text starting at offset 7.
string modify_message()
{
string log = "";
char c;
// slurp stdin byte by byte; pop_back() drops the final newline read by scanf
while(scanf("%c", &c) != EOF) log += c; log.pop_back();
while(log.size() % 64 != 0) log += " "; // space-pad to a 64-byte boundary
for(int t = 0; t < PADDING; t++) log += "@"; // placeholders for the id bytes
log += "\n";
// l = count of trailing bytes after the first NUL-ish scan below
int l = log.size()-1;
for(int i = 0; i < log.size() && log[i]; i++) l--;
// NOTE(review): "\x0" is a C string starting with NUL, so operator+= appends
// NOTHING here; if a literal NUL byte was intended this should be += '\0'.
log += "\x0";
int m = 1, p;
while(l >= m*10) m *= 10;
// write the decimal length l into the message starting at offset 7
for(p = 7; m; p++, m /= 10) log[p] = l/m + '0', l %= m;
if(log[p] != '\0'){
printf("message length's digit is changed while modifying\n");
printf("Please add or remove +- 50? letters \n");
exit(-1);
}
return log;
}
// One 64-byte SHA-1 message block, viewable either as 16 32-bit words or
// as raw bytes (get_hash byte-swaps s[] in place to build big-endian v[]).
union block{
uint v[16];
char s[64];
};
// Host-side rotate-left of a by x bits (callers pass x in 1..31).
uint LR(uint a, int x){
return (a << x) | (a >> (32 - x));
}
// Hashes all but the final 64-byte block of log with standard SHA-1 and
// returns the intermediate chaining state plus the total bit length; the
// final block (the '@' padding area) is processed later by sha1_kernel.
// NOTE(review): buf is ~640 KB of stack -- messages near 10000 blocks or a
// constrained stack could overflow; consider heap allocation.
sha1 get_hash(string log){
uint h0, h1, h2, h3, h4;
block buf[10000] = {};
// standard SHA-1 initialization vector
h0 = 0x67452301;
h1 = 0xEFCDAB89;
h2 = 0x98BADCFE;
h3 = 0x10325476;
h4 = 0xC3D2E1F0;
memcpy(buf, log.c_str(), log.size());
int len = log.size(), nbits = len * 8;
buf[0].s[len++] = 0x80; // SHA-1 pad bit after the message text
uint nblock = (len+7)/64 + 1;
buf[nblock-1].v[14] = 0;
buf[nblock-1].v[15] = nbits; // length word (hashed on the GPU, not here)
// deliberately stops one block early: the last block is the kernel's job
for(int t = 0; t+1 < nblock; t++){
block cur = buf[t];
// byte-swap each word to big-endian before processing
for(int i = 0; i < 16; i++){
if(t == nblock-1 && i == 15) continue; // unreachable given the loop bound
swap(cur.s[i*4+0], cur.s[i*4+3]);
swap(cur.s[i*4+1], cur.s[i*4+2]);
}
uint w[16];
for(int i = 0; i < 16; i++) w[i] = cur.v[i];
uint a, b, c, d, e, f, k;
a = h0; b = h1; c = h2; d = h3; e = h4;
// 80 rounds with the schedule expanded in a 16-word ring buffer
for(int i = 0; i < 80; i++){
if(i >= 16) w[i%16] = LR(w[(i-3+16)%16]^w[(i-8+16)%16]^w[(i-14+16)%16]^w[(i-16+16)%16], 1);
if(i <= 19){
f = (b&c)|(~b&d);
k = 0x5A827999;
}
else if(i <= 39){
f = (b^c^d);
k = 0x6ED9EBA1;
}
else if(i <= 59){
f = (b&c)|(b&d)|(c&d);
k = 0x8F1BBCDC;
}
else if(i <= 79){
f = b^c^d;
k = 0xCA62C1D6;
}
uint tmp = LR(a, 5) + f + e + k + w[i%16];
e = d; d = c; c = LR(b, 30); b = a; a = tmp;
}
h0 = h0 + a;
h1 = h1 + b;
h2 = h2 + c;
h3 = h3 + d;
h4 = h4 + e;
}
return sha1(h0, h1, h2, h3, h4, nbits);
}
// Launches search waves until some thread reports a hit in *res, then
// prints the message with the PADDING area rewritten to encode the id.
// Fixes: (1) the device flag is now zeroed with cudaMemset before the
// first read -- previously the loop compared against uninitialized device
// memory and could exit immediately on garbage; (2) res is freed.
// NOTE(review): a genuine hit with id == 0 is indistinguishable from
// "no hit" and would loop forever (pre-existing design).
void run_on_gpu(string log, sha1 hash){
ull *res;
ull res_copy = 0;
dim3 threadsPerBlock(256, 1);
dim3 numBlocks(4096, 4096);
cudaMalloc(&res, sizeof(ull));
cudaMemset(res, 0, sizeof(ull)); // flag must start at 0
for(int i = 0;; i++){
sha1_kernel<<<numBlocks, threadsPerBlock>>>(res, i, hash.h0, hash.h1, hash.h2, hash.h3, hash.h4, hash.nbits);
// blocking copy also synchronizes with the kernel
cudaMemcpy(&res_copy, res, sizeof(ull), cudaMemcpyDeviceToHost);
if(res_copy) break;
}
cudaFree(res);
cout << log.substr(0, (int)log.size() - PADDING-1);
// re-encode the winning id as PADDING characters in the '@'..'O' range
for(int i = 0; i < PADDING; i++){
printf("%c", 64 | res_copy&15);
res_copy /= 16;
}
}
// Read the message, hash its prefix on the host, brute-force the final
// block on the GPU.
int main()
{
string msg = modify_message();
run_on_gpu(msg, get_hash(msg));
}
|
8,796 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "constants.cuh"
#include "matrix_functions.cuh"
#include "sys.cuh"
// Computes invu = inv(s*E - A) in sparse form at s = h_inf_peak_frq, where
// E and A come from the system description g.  The work goes through a
// dense intermediate because matrix_inverse operates on dense storage.
// NOTE(review): malloc results are not checked, and "inv" is sized
// nrow*ncol -- assumes E (and hence sE - A) is square; confirm upstream.
void invforsens2(struct sparse *invu, struct sys *g, double h_inf_peak_frq) {
double *e_a_diff_full, *inv;
struct sparse *e_tmp;
struct sparse *diff;
e_tmp = (struct sparse*)malloc(sizeof(struct sparse));
diff = (struct sparse*)malloc(sizeof(struct sparse));
inv = (double*)malloc(g->e.nrow*g->e.ncol * sizeof(double));
// e_tmp = s*E, same sparsity pattern as E
e_tmp->nnz = g->e.nnz;
e_tmp->nrow = g->e.nrow;
e_tmp->ncol = g->e.ncol;
e_tmp->row = (int*)malloc(e_tmp->nnz * sizeof(int));
e_tmp->col = (int*)malloc(e_tmp->nnz * sizeof(int));
e_tmp->val = (double*)malloc(e_tmp->nnz * sizeof(double));
for (int j = 0; j < g->e.nnz; j++) { //sE
e_tmp->row[j] = g->e.row[j];
e_tmp->col[j] = g->e.col[j];
e_tmp->val[j] = h_inf_peak_frq * g->e.val[j];
}
sparse_diff(e_tmp, &g->a, diff); //sE - A
// e_tmp's arrays are no longer needed once the difference is formed
free(e_tmp->row);
free(e_tmp->col);
free(e_tmp->val);
sparse_to_dense(diff, &e_a_diff_full);
matrix_inverse(e_a_diff_full, inv, g->e.nrow); //inv(sE-A)
dense_to_sparse(inv, g->e.nrow, g->e.ncol, invu);
free(e_a_diff_full);
free(inv);
// presumably sparse_diff allocated diff's arrays -- TODO confirm ownership
free(diff->row);
free(diff->col);
free(diff->val);
free(e_tmp);
free(diff);
}
8,797 | #include "includes.h"
// Sets vec[0..size) to 1.0f, one element per thread; threads beyond the
// range simply do nothing, so any covering launch configuration works.
__global__ void FillOnes( float *vec, int size ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < size ) {
vec[ i ] = 1.0f;
}
}
8,798 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Performs one step of the hillis and steele algorithm for integers
// One Hillis-Steele inclusive-scan step over global memory:
// d_out[i] = d_in[i] + d_in[i-step], where the second term is taken as 0
// when i-step falls before the start of the array.
__global__ void hs_kernel_global(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < ARRAY_SIZE)
{
int partner = (i >= step) ? d_in[i - step] : 0;
d_out[i] = d_in[i] + partner;
}
}
// Performs one step of the hillis and steele algorithm for integers
// Performs one step of the hillis and steele algorithm for integers.
// NOTE(review): sdata is declared but never used -- this kernel is
// currently byte-for-byte equivalent in behavior to hs_kernel_global;
// the shared-memory variant appears unfinished.
__global__ void hs_kernel_shared(int * d_out, int * d_in, int step, const int ARRAY_SIZE)
{
// sdate is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
// setting ID
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking if out-of-bounds
if(myId >= ARRAY_SIZE)
{
return;
}
// setting itself
int myVal = d_in[myId];
// finding the number to add, checking out-of-bounds
int myAdd;
if((myId - step)<0)
{
myAdd = 0;
}
else
{
myAdd = d_in[myId-step];
}
// setting output
d_out[myId] = myVal + myAdd;
}
// Runs the full Hillis-Steele inclusive scan: one kernel launch per step,
// so each launch boundary acts as a grid-wide synchronization point.
// d_in is left untouched (a scratch copy is used); d_out holds the result.
// Fix: grid size uses integer ceiling division -- the old
// "(size / threads) + 1" launched a spare empty block whenever size was an
// exact multiple of num_threads.
void hs_kernel_wrapper(int * d_out, int * d_in, const unsigned int ARRAY_SIZE, const unsigned int ARRAY_BYTES, const unsigned int num_threads)
{
unsigned int num_blocks = (ARRAY_SIZE + num_threads - 1) / num_threads;
int step = 1;
// scratch copy so d_in is never modified
int * d_intermediate;
cudaMalloc((void **) &d_intermediate, ARRAY_BYTES);
cudaMemcpy(d_intermediate, d_in, ARRAY_BYTES, cudaMemcpyDeviceToDevice);
while(step < ARRAY_SIZE) // O(log2(ARRAY_SIZE)) launches total
{
// one step per kernel launch to synchronize across blocks
hs_kernel_global<<<num_blocks, num_threads>>>(d_out, d_intermediate, step, ARRAY_SIZE);
cudaMemcpy(d_intermediate, d_out, ARRAY_BYTES, cudaMemcpyDeviceToDevice);
step <<= 1; // double the step size each iteration
}
cudaFree(d_intermediate);
}
// Benchmarks the Hillis-Steele scan over 2^21 ints, averaging 100 runs.
// Fix: h_in/h_out were 8 MB stack arrays (16 MB total) -- far beyond the
// typical 8 MB stack limit -- and are now heap-allocated.  CUDA events are
// also destroyed before exit.
int main(int argc, char **argv)
{
printf("Hillis and Steele ONLINE... \n");
// defining vars
const unsigned int num_threads = 512;
const unsigned int ARRAY_SIZE = 1<<21;
const unsigned int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
printf("defined vars... \n");
printf("ARRAY_SIZE: %d\n", ARRAY_SIZE);
// host buffers (heap -- too large for the stack)
int *h_in = (int *) malloc(ARRAY_BYTES);
int *h_out = (int *) malloc(ARRAY_BYTES);
if (h_in == NULL || h_out == NULL)
{
fprintf(stderr, "host allocation failed\n");
return 1;
}
for(int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = 3;
h_out[i] = 0;
}
printf("filled array... \n");
// setting device pointers
int * d_in;
int * d_out;
printf("defined device pointers... \n");
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
printf("malloc device pointers... \n");
// transfer input to GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
printf("copy device pointers... \n");
// timing via CUDA events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for (int i = 0; i < 100; i++)
{
hs_kernel_wrapper(d_out, d_in, ARRAY_SIZE, ARRAY_BYTES, num_threads);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
elapsedTime /= 100.0f; // average per run
// back to host
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// spot-check a few entries
for(int i = 400; i<408; i++)
{
printf("index %d: count %d\n", i, h_out[i]);
}
printf("average time elapsed: %f\n", elapsedTime);
// cleanup
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(d_in);
cudaFree(d_out);
free(h_in);
free(h_out);
return 0;
}
|
8,799 | #include <stdio.h>
// Element-wise res = a + b with one element per block (launched <<<N, 1>>>).
__global__ void device_add(int *a, int *b, int *res)
{
int i = blockIdx.x;
res[i] = a[i] + b[i];
}
#define N 8
/* Fills arr[0..n) with values from rand(); seed with srand() beforehand
   for a reproducible sequence. */
void random_ints(int *arr, int n)
{
for (int i = 0; i < n; i++) {
arr[i] = rand();
}
}
/* Prints arr as a comma-separated list ending in a newline.
   Precondition: n >= 1 (arr[n-1] is read unconditionally). */
void print_arr(int *arr, int n)
{
for (int i = 0; i < n - 1; i++)
printf("%i,", arr[i]);
printf("%i\n", arr[n - 1]);
}
// Demo: add two random N-element vectors on the GPU, one block per element.
int main(void)
{
const int size = N * sizeof(int);
// host buffers
int *a = (int*) malloc(size);
int *b = (int*) malloc(size);
int *res = (int*) malloc(size);
random_ints(a, N);
random_ints(b, N);
// device buffers
int *dev_a, *dev_b, *dev_res;
cudaMalloc((void**) &dev_a, size);
cudaMalloc((void**) &dev_b, size);
cudaMalloc((void**) &dev_res, size);
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
device_add<<<N, 1>>>(dev_a, dev_b, dev_res);
// blocking copy also synchronizes with the kernel
cudaMemcpy(res, dev_res, size, cudaMemcpyDeviceToHost);
print_arr(res, N);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_res);
free(a);
free(b);
free(res);
return 0;
}
|
8,800 | //
// Created by root on 2020/11/24.
//
#include "cuda_runtime.h"
#include "cufft.h"
#include "stdio.h"
int nprint = 30;
// Fills *out with N samples of cos(i * pi/20); the buffer is allocated
// here and ownership passes to the caller.
void generate_fake_samples(int N, float **out) {
float *buf = (float *) malloc(sizeof(float) * N);
const double step = M_PI / 20.0;
for (int n = 0; n < N; n++) {
buf[n] = cos(n * step);
}
*out = buf;
}
// Expands N real samples into a newly-allocated cufftComplex array with
// zero imaginary parts; ownership of *complex passes to the caller.
void real_to_complex(float *r, cufftComplex **complex, int N) {
cufftComplex *buf = (cufftComplex *) malloc(sizeof(cufftComplex) * N);
for (int n = 0; n < N; n++) {
buf[n].x = r[n];
buf[n].y = 0;
}
*complex = buf;
}
// nvcc -lcufft CuFFTTest.cu -o CuFFTTest
int main() {
int N = 2048;
float *samples;
cufftHandle plan;
cufftComplex *dComplexSamples, *complexSamples, *complexFreq;
// allocate memory
samples = (float *) malloc(N * sizeof(float ));
complexSamples = (cufftComplex *) malloc(N * sizeof(cufftComplex));
complexFreq = (cufftComplex *) malloc(N * sizeof(cufftComplex));
cudaMalloc(&dComplexSamples, sizeof(cufftComplex) * N);
// generate input
generate_fake_samples(N, &samples);
printf("Initial samples:\n");
for (int i = 0; i < nprint; i++) {
printf("%.2f\t", samples[i]);
}
printf("\n");
real_to_complex(samples, &complexSamples, N);
// create cufft plan with type complex to complex
cufftPlan1d(&plan, N, CUFFT_C2C, 1);
// copy data to device
cudaMemcpy(dComplexSamples, complexSamples, N * sizeof(cufftComplex ), cudaMemcpyHostToDevice);
// execute forward fourier transform
cufftExecC2C(plan, dComplexSamples, dComplexSamples, CUFFT_FORWARD);
// get data from device and print demo data
cudaMemcpy(complexFreq, dComplexSamples, sizeof(cufftComplex) * N, cudaMemcpyDeviceToHost);
printf("Fourier coefficient:\n");
for (int i = 0; i < nprint; i++) {
printf("(%.2f, %.2f)\t", complexFreq[i].x , complexFreq[i].y);
}
printf("\n");
free(samples);
free(complexSamples);
free(complexFreq);
cudaFree(dComplexSamples);
cufftDestroy(plan);
return 0;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.