serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
8,301 | #include <stdio.h>
#include <cuda.h>
// Fill dynamic shared memory with thread ids, then print the odd ones.
// Launch with at least sz ints of dynamic shared memory.
__global__ void dynshared(int sz) {
    extern __shared__ int s[];
    const int tid = threadIdx.x;
    if (tid < sz) {
        s[tid] = tid;
    }
    __syncthreads();
    const bool odd = (tid & 1) != 0;
    if (tid < sz && odd) {
        printf("%d\n", s[tid]);
    }
}
int main() {
    int sz;
    // Validate input: the original launched the kernel with an indeterminate
    // `sz` when scanf failed, and a negative size would wrap the shared-mem
    // request.
    if (scanf("%d", &sz) != 1 || sz < 0) {
        fprintf(stderr, "expected a non-negative integer size\n");
        return 1;
    }
    dynshared<<<1, 32, sz * sizeof(int)>>>(sz);
    // Surface launch/execution errors instead of silently ignoring them.
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
|
8,302 | /*
* INPUT:
* m: total num of points
* n: n dimensions
* k: num of nearest points
* V: point coordinates
* OUTPUT:
* out: k nearest neighbors
*/
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
#define INIT_MAX 10000000
#define TILE_WIDTH 32
#define TILE_DEPTH 128
#define MAX_BLOCK_SIZE 256
#define MAX_PTRNUM_IN_SMEM 1024
void showResult(int m, int k, int *out);
// compute the square of distance of the ith point and jth point
__global__ void computeDist(int m, int n, int *V, int *D)
{
// Tiled computation of all pairwise squared Euclidean distances.
// V is the m x n point matrix (row-major); D receives the m x m result.
// Launched with a ceil(m/TILE_WIDTH) x ceil(m/TILE_WIDTH) grid of
// TILE_WIDTH x TILE_WIDTH blocks (see main), so the py/px loops below run
// exactly once per thread with that configuration.
// NOTE(review): there are no bounds checks on row/col or the n dimension,
// so V and D are accessed out of range unless m is a multiple of TILE_WIDTH
// and n a multiple of TILE_DEPTH -- confirm the inputs guarantee this.
__shared__ int rowVector[TILE_WIDTH][TILE_DEPTH];
__shared__ int colVector[TILE_DEPTH][TILE_WIDTH];
__shared__ int dist[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row;
int col;
int px;
int py;
for(py=ty; py<TILE_WIDTH; py+=blockDim.y)
{
for(px=tx; px<TILE_WIDTH; px+=blockDim.x)
{
row = by*TILE_WIDTH+py;
col = bx*TILE_WIDTH+px;
dist[py][px] = 0;
__syncthreads();
// March along the n dimension one TILE_DEPTH-wide slab at a time.
for(int i=0; i<(int)(ceil((float)n/TILE_DEPTH)); i++)
{
// Stage this slab of the row point and of the column point in shared memory.
for(int j=tx; j<TILE_DEPTH; j+=blockDim.x)
{
rowVector[py][j] = V[row*n+i*TILE_DEPTH+j];
}
for(int j=ty; j<TILE_DEPTH; j+=blockDim.y)
{
colVector[j][px] = V[col*n+i*TILE_DEPTH+j];
}
__syncthreads();
// Accumulate the squared differences contributed by this slab.
for(int j=0; j<TILE_DEPTH; j++)
{
dist[py][px] += (rowVector[py][j]-colVector[j][px])*(rowVector[py][j]-colVector[j][px]);
}
__syncthreads();
}
D[row*m+col] = dist[py][px];
}
}
}
extern __shared__ int SMem[];
//find the min value and index in the count^th loop
__device__ int findMin(int m, int k, int count, int *D, int *out)
{
// Returns the global index of the count-th nearest neighbour of point
// blockIdx.x: the minimum entry of row i of the distance matrix D,
// excluding the point itself and the `count` neighbours already recorded in
// out[i*k .. i*k+count-1]. The dynamic shared buffer SMem is used as two
// parallel arrays of `indexBase` ints: SMem[0..indexBase-1] holds candidate
// distances, SMem[indexBase..2*indexBase-1] the matching global indices.
// After each reduction every thread reads the same SMem[0]/SMem[indexBase],
// so all threads return the same index.
int i = blockIdx.x;
int tid = threadIdx.x;
int s = blockDim.x/2;
int resultValue = INIT_MAX;
int resultIndex = INIT_MAX;
// The row is scanned in windows of at most MAX_PTRNUM_IN_SMEM candidates.
int indexBase = (m<MAX_PTRNUM_IN_SMEM)? m: MAX_PTRNUM_IN_SMEM;
for(int num=0; num<m; num+=MAX_PTRNUM_IN_SMEM)
{
// Load this window of distances; the point's own slot is masked out so a
// point is never its own neighbour.
for(int j=tid; j<indexBase; j+=blockDim.x)
{
if(j+num == i)
{
SMem[j] = INIT_MAX;
}
else
{
SMem[j] = D[i*m+num+j];
}
//index
SMem[indexBase+j] = j+num;
// NOTE(review): this barrier is inside a loop whose trip count differs per
// thread when indexBase is not a multiple of blockDim.x; a divergent
// __syncthreads() is undefined behaviour -- verify the launch parameters
// always keep the trip counts uniform.
__syncthreads();
}
// Mask out the neighbours already selected in previous rounds.
if(tid < count)
{
if(out[i*k+tid]-num>=0 && out[i*k+tid]-num<indexBase)
{
SMem[ out[i*k+tid]-num ] = INIT_MAX;
}
// NOTE(review): __syncthreads() inside the divergent `tid < count` branch
// is undefined; the unconditional barrier just below should be enough.
__syncthreads();
}
__syncthreads();
// Tree reduction down to 64 surviving candidates; on ties the smaller
// global index wins.
// for(s=indexBase/2; s>0; s>>=1)
for(s=indexBase/2; s>32; s>>=1)
{
for(int j=tid; j<indexBase; j+=blockDim.x)
{
if(j < s)
{
if(SMem[j] == SMem[j+s])
{
if(SMem[indexBase+j] > SMem[indexBase+j+s])
{
SMem[indexBase+j] = SMem[indexBase+j+s];
}
}
else if(SMem[j] > SMem[j+s])
{
SMem[j] = SMem[j+s];
SMem[indexBase+j] = SMem[indexBase+j+s];
}
}
__syncthreads();
}
}
/* (removed dead code that was commented out here in the original: fully
unrolled reduction steps for indexBase >= 1024 / 512 / 256 / 128,
superseded by the loop above) */
// Final warp finishes the 64 -> 1 reduction without barriers.
// NOTE(review): this relies on implicit warp-synchronous execution over
// non-volatile shared memory; on Volta and later (independent thread
// scheduling) it needs __syncwarp() between steps to be safe.
if(tid < 32)
{
/* (removed dead code that was commented out here in the original: an
equivalent '#pragma unroll 5' loop form of the six steps below) */
if(SMem[tid] == SMem[tid+32])
{
if(SMem[indexBase+tid] > SMem[indexBase+tid+32])
{
SMem[indexBase+tid] = SMem[indexBase+tid+32];
}
}
else if(SMem[tid] > SMem[tid+32])
{
SMem[tid] = SMem[tid+32];
SMem[indexBase+tid] = SMem[indexBase+tid+32];
}
if(SMem[tid] == SMem[tid+16])
{
if(SMem[indexBase+tid] > SMem[indexBase+tid+16])
{
SMem[indexBase+tid] = SMem[indexBase+tid+16];
}
}
else if(SMem[tid] > SMem[tid+16])
{
SMem[tid] = SMem[tid+16];
SMem[indexBase+tid] = SMem[indexBase+tid+16];
}
if(SMem[tid] == SMem[tid+8])
{
if(SMem[indexBase+tid] > SMem[indexBase+tid+8])
{
SMem[indexBase+tid] = SMem[indexBase+tid+8];
}
}
else if(SMem[tid] > SMem[tid+8])
{
SMem[tid] = SMem[tid+8];
SMem[indexBase+tid] = SMem[indexBase+tid+8];
}
if(SMem[tid] == SMem[tid+4])
{
if(SMem[indexBase+tid] > SMem[indexBase+tid+4])
{
SMem[indexBase+tid] = SMem[indexBase+tid+4];
}
}
else if(SMem[tid] > SMem[tid+4])
{
SMem[tid] = SMem[tid+4];
SMem[indexBase+tid] = SMem[indexBase+tid+4];
}
if(SMem[tid] == SMem[tid+2])
{
if(SMem[indexBase+tid] > SMem[indexBase+tid+2])
{
SMem[indexBase+tid] = SMem[indexBase+tid+2];
}
}
else if(SMem[tid] > SMem[tid+2])
{
SMem[tid] = SMem[tid+2];
SMem[indexBase+tid] = SMem[indexBase+tid+2];
}
if(SMem[tid] == SMem[tid+1])
{
if(SMem[indexBase+tid] > SMem[indexBase+tid+1])
{
SMem[indexBase+tid] = SMem[indexBase+tid+1];
}
}
else if(SMem[tid] > SMem[tid+1])
{
SMem[tid] = SMem[tid+1];
SMem[indexBase+tid] = SMem[indexBase+tid+1];
}
}
__syncthreads();
// Fold this window's winner into the running best across all windows.
if(resultValue == SMem[0])
{
if(resultIndex > SMem[indexBase])
{
resultIndex = SMem[indexBase];
}
}
else if (resultValue > SMem[0])
{
resultValue = SMem[0];
resultIndex = SMem[indexBase];
}
__syncthreads();
}
return resultIndex;
}
// compute the k nearest neighbors
// One block per query point: repeatedly extract the next-nearest neighbour.
// Every thread of the block cooperates inside findMin() and reads the same
// winner, so the redundant writes to out[] below all store the same value.
__global__ void knn(int m, int k, int *V, int *D, int *out)
{
    const int query = blockIdx.x;
    __syncthreads();
    for (int nth = 0; nth < k; ++nth)
    {
        const int nearest = findMin(m, k, nth, D, out);
        out[query * k + nth] = nearest;
        __syncthreads();
    }
}
// Print the k neighbour indices of each of the m points, one point per line.
void showResult(int m, int k, int *out)
{
    for (int i = 0; i < m; ++i)
    {
        const int *row = out + i * k;
        for (int j = 0; j < k; ++j)
        {
            printf("%d ", row[j]);
            if (j == k - 1)
                printf("\n");
        }
    }
}
int main(int argc, char *argv[])
{
// Driver: reads one or more (m, n, k) datasets from the input file, builds
// the m x m squared-distance matrix on the GPU, extracts the k nearest
// neighbours of every point, prints them and a rough timing per dataset.
int m,n,k;
int i;
int *V, *out; //host copies
int *d_V, *d_out; //device copies
int *D; // device copy of the m x m distance matrix (despite the host-style name)
FILE *fp;
if(argc != 2)
{
printf("Usage: knn <inputfile>\n");
exit(1);
}
if((fp = fopen(argv[1], "r")) == NULL)
{
printf("Error open input file!\n");
exit(1);
}
// One loop iteration per dataset in the file.
while(fscanf(fp, "%d %d %d", &m, &n, &k) != EOF)
{
V = (int *) malloc(m*n*sizeof(int));
out = (int *) malloc(m*k*sizeof(int));
// NOTE(review): fscanf results are unchecked; malformed input leaves V
// partially uninitialized.
for(i=0; i<m*n; i++)
{
fscanf(fp, "%d", &V[i]);
}
// compute the execution time
cudaEvent_t start, stop;
// create event
cudaEventCreate(&start);
cudaEventCreate(&stop);
// record event (the timed span includes allocation and copies, not just kernels)
cudaEventRecord(start);
// allocate space for devices copies
cudaMalloc((void **)&d_V, m*n*sizeof(int));
cudaMalloc((void **)&d_out, m*k*sizeof(int));
cudaMalloc((void **)&D, m*m*sizeof(int));
// copy host values to devices copies
cudaMemcpy(d_V, V, m*n*sizeof(int), cudaMemcpyHostToDevice);
int gridDimX = (int)(ceil((float)m/TILE_WIDTH));
int gridDimY = (int)(ceil((float)m/TILE_WIDTH));
dim3 grid(gridDimX, gridDimY);
dim3 block(TILE_WIDTH, TILE_WIDTH);
// launch distance kernel on GPU
computeDist<<<grid, block>>>(m, n, d_V, D);
cudaDeviceSynchronize();
// One block per point; the dynamic shared buffer holds a distance array
// plus an index array (hence the factor of 2) -- see findMin().
int threadNum = (m<MAX_BLOCK_SIZE)? m: MAX_BLOCK_SIZE;
int ptrNumInSMEM = (m<MAX_PTRNUM_IN_SMEM)? m: MAX_PTRNUM_IN_SMEM;
knn<<<m, threadNum, 2*ptrNumInSMEM*sizeof(int)>>>(m, k, d_V, D, d_out);
// copy result back to host (cudaMemcpy synchronizes with the kernel)
cudaMemcpy(out, d_out, m*k*sizeof(int), cudaMemcpyDeviceToHost);
// cleanup
cudaFree(d_V);
cudaFree(d_out);
cudaFree(D);
// record event and synchronize
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time;
// get event elapsed time
cudaEventElapsedTime(&time, start, stop);
showResult(m, k, out);
if(m == 1024) {
printf("SMALL:");
} else if(m == 4096) {
printf("MIDDLE:");
} else if(m == 16384) {
printf("LARGE:");
}
printf("%f\n", time);
free(V);
free(out);
}
fclose(fp);
return 0;
}
|
8,303 | //pass
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is racy.
//
//It uses memcpy and copies too many bytes.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
typedef struct {
short x;
short y;
char z;
} s_t; //< sizeof(s_t) == 6
__global__ void k(s_t *in, s_t *out) {
// Deliberately faulty (see the header comments): copies 12 bytes -- two whole
// 6-byte s_t elements -- per thread, so thread t also overwrites element t+1,
// racing with the neighbouring thread and overrunning the final element.
// Kept as-is: this file is a verification-tool test case expected to be racy.
memcpy(&out[threadIdx.x], &in[threadIdx.x], 12); //< copy two elements
}
|
8,304 | #include "includes.h"
#pragma once
/*
// if the video card's capabilities are enough for all the maps (WORKS)
*/
// Copy 32 elements per thread from the width-pitched `result` layout into the
// mx-pitched `one` layout. The original body was this loop manually unrolled
// 32 times; the compiler unrolls the constant-trip loop identically.
// NOTE(review): consecutive threads start one element apart while each covers
// 32 consecutive indices, so their windows overlap -- confirm the intended
// launch geometry.
__global__ void MapAdd32(int* one, const int* result, unsigned int mx, unsigned int width)
{
    unsigned int ppp = blockIdx.x * blockDim.x * 32 + threadIdx.x;
#pragma unroll
    for (int rep = 0; rep < 32; ++rep)
    {
        const unsigned int rix = ppp % width;
        const unsigned int riy = (ppp / mx) + ((ppp % mx) / width);
        const unsigned int xxx = riy * width + rix; // source index
        const unsigned int ddx = riy * mx + rix;    // destination index
        one[ddx] = result[xxx];
        ++ppp;
    }
}
8,305 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand.h"
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <cstdio>
// Add 10 to every element of d (length `wielkosc`), using a fully general
// 3D-block / 2D-grid flat index.
__global__ void addTen(float* d, int wielkosc){
    const int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
    const int localId = threadIdx.x
                      + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    const int blockId = blockIdx.x + gridDim.x * blockIdx.y;
    const int globalId = blockId * threadsPerBlock + localId;
    if (globalId < wielkosc)
        d[globalId] += 10.0f;
}
int main()
{
    // Generate `wielkosc` uniform floats on the GPU, add 10 to each, print them.
    curandGenerator_t generator;
    curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_MTGP32);
    curandSetPseudoRandomGeneratorSeed(generator, time(0));
    const int wielkosc = 123456;
    const int rozmiar = sizeof(float) * wielkosc;
    // static storage: ~483 KB is too large to place safely on the stack.
    static float tablica[wielkosc];
    float *d = NULL;
    if (cudaMalloc(&d, rozmiar) != cudaSuccess) {
        printf("cudaMalloc failed\n");
        return 1;
    }
    curandGenerateUniform(generator, d, wielkosc);
    dim3 block(8, 8, 8); // 512 threads per block
    dim3 grid(16, 16);   // 256 blocks -> 131072 threads >= wielkosc
    addTen<<<grid, block>>>(d, wielkosc);
    // cudaMemcpy synchronizes with the preceding kernel launch.
    cudaMemcpy(tablica, d, rozmiar, cudaMemcpyDeviceToHost);
    cudaFree(d);
    // Destroy the generator (leaked in the original).
    curandDestroyGenerator(generator);
    for (int i = 0; i < wielkosc; i++)
    {
        printf("\n %f", tablica[i]);
    }
    return 0;
}
8,306 | #include "points.cuh"
#include <iostream>
// Default: wraps no storage.
__host__ __device__ Points::Points() : m_x(NULL), m_y(NULL){}
// Wrap existing x/y coordinate arrays (the pointers are stored, not copied).
__host__ __device__ Points::Points(float* x, float* y): m_x(x), m_y(y){}
// Fetch point #idx as a float2 (no bounds check).
__host__ __device__ float2 Points::get_point(int idx) const{
return make_float2(m_x[idx], m_y[idx]);
}
// Debug helper: copy the first idx+1 device-resident coordinates to the host
// and print point #idx. (Still a hack, but no longer leaks the host buffers.)
__host__ void Points::print_point_d_2_h(int idx) const{
    const size_t bytes = sizeof(float) * (idx + 1);
    float* host_mx = (float*) malloc(bytes);
    float* host_my = (float*) malloc(bytes);
    if (host_mx == NULL || host_my == NULL) {
        // free(NULL) is a no-op, so this is safe whichever one failed.
        free(host_mx);
        free(host_my);
        std::cout << "print_point_d_2_h: out of host memory" << std::endl;
        return;
    }
    cudaMemcpy(host_mx, m_x, bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(host_my, m_y, bytes, cudaMemcpyDeviceToHost);
    std::cout << "x: " << host_mx[idx] << " y:" << host_my[idx] << std::endl;
    // FIX: the original never freed these buffers.
    free(host_mx);
    free(host_my);
}
// Store point p at slot idx (no bounds checking).
__host__ __device__ void Points::set_point(int idx, const float2& p){
    m_y[idx] = p.y;
    m_x[idx] = p.x;
}
// Repoint this wrapper at new coordinate storage.
__host__ __device__ void Points::set(float* x , float* y){
    m_x = x;
    m_y = y;
}
|
8,307 | /* compile with: nvcc -O3 hw1.cu -o hw1 */
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h>
///////////////////////////////////////////////// DO NOT CHANGE ///////////////////////////////////////
#define IMG_HEIGHT 256
#define IMG_WIDTH 256
//#define N_IMAGES 10000
#define N_IMAGES 500
#define NUM_THREADS 256
typedef unsigned char uchar;
#define CUDA_CHECK(f) do { \
cudaError_t e = f; \
if (e != cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(1); \
} \
} while (0)
#define SQR(a) ((a) * (a))
// Histogram-equalize one IMG_WIDTH x IMG_HEIGHT grayscale image (CPU reference).
void process_image(uchar *img_in, uchar *img_out) {
    const int n_pixels = IMG_WIDTH * IMG_HEIGHT;
    // 1. Intensity histogram.
    int histogram[256] = { 0 };
    for (int i = 0; i < n_pixels; i++)
        histogram[img_in[i]]++;
    // 2. Cumulative distribution function.
    int cdf[256] = { 0 };
    int running = 0;
    for (int v = 0; v < 256; v++) {
        running += histogram[v];
        cdf[v] = running;
    }
    // 3. Smallest non-zero CDF value (the CDF is non-decreasing).
    int cdf_min = 0;
    for (int v = 0; v < 256; v++) {
        if (cdf[v] != 0) {
            cdf_min = cdf[v];
            break;
        }
    }
    // 4. Build the equalization map.
    uchar map[256] = { 0 };
    for (int v = 0; v < 256; v++) {
        int mapped = (float)(cdf[v] - cdf_min) / (n_pixels - cdf_min) * 255;
        map[v] = (uchar)mapped;
    }
    // 5. Apply it.
    for (int i = 0; i < n_pixels; i++)
        img_out[i] = map[img_in[i]];
}
// Current wall-clock time in milliseconds (gettimeofday resolution).
double static inline get_time_msec(void) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return 1e+3 * tv.tv_sec + 1e-3 * tv.tv_usec;
}
// Sum of squared per-pixel differences across all images of the two arrays.
long long int distance_sqr_between_image_arrays(uchar *img_arr1, uchar *img_arr2) {
    long long int total = 0;
    const long long int n = (long long int)N_IMAGES * IMG_WIDTH * IMG_HEIGHT;
    for (long long int i = 0; i < n; i++) {
        const int diff = img_arr1[i] - img_arr2[i];
        total += (long long int)diff * diff;
    }
    return total;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Parallel tree reduction: returns the minimum of arr[0..arr_size-1].
// Destroys arr. arr_size should be a power of two, and every thread of the
// block must reach this call (it contains __syncthreads()).
__device__ int arr_min(int arr[], int arr_size) {
    const int tid = threadIdx.x;
    for (int half = arr_size / 2; half >= 1; half /= 2) {
        for (int i = tid; i < half; i += blockDim.x) {
            // BUG FIX: the original compared arr[tid + i] (i.e. arr[2*tid] on
            // the first pass) against arr[i], which is not a valid pairing.
            // Each element must be paired with its partner half a stride away.
            if (arr[i + half] < arr[i])
                arr[i] = arr[i + half];
        }
        __syncthreads();
    }
    return arr[0];
}
// this function implements the Kogge-Stone scan algorithm
// Kogge-Stone inclusive prefix sum over arr[0..arr_size-1], in place.
// Every thread of the block must reach this call. `histogram` is unused but
// kept for signature compatibility with existing call sites.
__device__ void prefix_sum(int arr[], int arr_size, int histogram[]) {
    const int tid = threadIdx.x;
    for (int stride = 1; stride < blockDim.x; stride *= 2) {
        // BUG FIX: the original skipped out-of-range threads with `continue`,
        // which bypasses __syncthreads() -- undefined behaviour whenever
        // blockDim.x > arr_size. Predicate the work instead of the barrier.
        const bool active = (tid < arr_size) && (tid >= stride);
        int inc = 0;
        if (active)
            inc = arr[tid - stride];
        __syncthreads();
        if (active)
            arr[tid] += inc;
        __syncthreads();
    }
}
// One thread per intensity level: turn the CDF into the equalization map.
// Assumes blockDim.x covers all 256 levels (threadIdx.x indexes directly).
__device__ void mapCalc(int map[], int min, int cdf[]) {
    const int level = threadIdx.x;
    const double scale = (double)(cdf[level] - min) / (IMG_WIDTH * IMG_HEIGHT - min);
    map[level] = (int)(scale * 255);
}
__global__ void process_image_kernel(uchar *in, uchar *out) {
// One block equalizes one image: blockIdx.x selects the image, and the 256
// threads cooperate on the 256-bin histogram -> CDF -> map pipeline.
// Assumes blockDim.x == 256 (one thread per intensity level).
__shared__ int l_histogram[256];
__shared__ int l_cdf[256];
__shared__ int map[256];
int tid = threadIdx.x;
int bid = blockIdx.x;
int tbsize = blockDim.x;
// zero histogram (one bin per thread)
l_histogram[tid] = 0;
__syncthreads();
for(int i = tid; i < IMG_WIDTH * IMG_HEIGHT; i += tbsize)
atomicAdd(&l_histogram[in[(IMG_WIDTH * IMG_HEIGHT)*bid + i]], 1);
__syncthreads();
// prepare the cdf array in advance (arr_min below destroys l_histogram)
l_cdf[tid] = l_histogram[tid];
__syncthreads();
prefix_sum(l_cdf, 256, l_histogram);
__syncthreads();
// NOTE(review): the CPU reference (process_image) uses the smallest
// NON-ZERO CDF value, but this takes the minimum of the raw histogram,
// which can be 0 -- confirm this is intended, as it can make the GPU
// result diverge from the baseline.
int min = arr_min(l_histogram, 256);
__syncthreads();
mapCalc(map, min, l_cdf);
__syncthreads();
// Apply the map to every pixel of this block's image.
for(int i = tid; i < IMG_WIDTH * IMG_HEIGHT; i += tbsize) {
out[(IMG_WIDTH * IMG_HEIGHT)*bid + i] =
map[in[(IMG_WIDTH * IMG_HEIGHT)*bid + i]];
}
__syncthreads();
return ; //TODO
}
int main() {
///////////////////////////////////////////////// DO NOT CHANGE ///////////////////////////////////////
uchar *images_in;
uchar *images_out_cpu; //output of CPU computation. In CPU memory.
uchar *images_out_gpu_serial; //output of GPU task serial computation. In CPU memory.
uchar *images_out_gpu_bulk; //output of GPU bulk computation. In CPU memory.
CUDA_CHECK( cudaHostAlloc(&images_in, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) );
CUDA_CHECK( cudaHostAlloc(&images_out_cpu, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) );
CUDA_CHECK( cudaHostAlloc(&images_out_gpu_serial, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) );
CUDA_CHECK( cudaHostAlloc(&images_out_gpu_bulk, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) );
/* instead of loading real images, we'll load the arrays with random data */
srand(0);
for (long long int i = 0; i < N_IMAGES * IMG_WIDTH * IMG_HEIGHT; i++) {
images_in[i] = rand() % 256;
}
double t_start, t_finish;
// CPU computation. For reference. Do not change
printf("\n=== CPU ===\n");
t_start = get_time_msec();
for (int i = 0; i < N_IMAGES; i++) {
uchar *img_in = &images_in[i * IMG_WIDTH * IMG_HEIGHT];
uchar *img_out = &images_out_cpu[i * IMG_WIDTH * IMG_HEIGHT];
process_image(img_in, img_out);
}
t_finish = get_time_msec();
printf("total time %f [msec]\n", t_finish - t_start);
long long int distance_sqr;
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Device buffers; reused for both the task-serial and the bulk phases below.
uchar *image_in;
uchar *image_out;
// GPU task serial computation
printf("\n=== GPU Task Serial ===\n"); //Do not change
// Single-image device buffers: images are processed one at a time.
CUDA_CHECK( cudaMalloc((void **)&image_in, IMG_HEIGHT * IMG_WIDTH) );
CUDA_CHECK( cudaMalloc((void **)&image_out, IMG_HEIGHT * IMG_WIDTH) );
t_start = get_time_msec(); //Do not change
// Per image: copy in, equalize with one 256-thread block, copy out.
for (int i=0; i < N_IMAGES; i++) {
// Copying src image from the input images
CUDA_CHECK(cudaMemcpy(image_in, &images_in[i * IMG_WIDTH*IMG_HEIGHT], IMG_WIDTH*IMG_HEIGHT, cudaMemcpyDefault));
process_image_kernel <<< 1, NUM_THREADS >>> (image_in, image_out);
CUDA_CHECK(cudaDeviceSynchronize());
CUDA_CHECK(cudaMemcpy(&images_out_gpu_serial[i * IMG_HEIGHT*IMG_WIDTH], image_out, IMG_WIDTH*IMG_HEIGHT, cudaMemcpyDefault));
}
cudaFree(image_in);
cudaFree(image_out);
t_finish = get_time_msec(); //Do not change
distance_sqr = distance_sqr_between_image_arrays(images_out_cpu, images_out_gpu_serial); // Do not change
printf("total time %f [msec] distance from baseline %lld (should be zero)\n", t_finish - t_start, distance_sqr); //Do not change
// GPU bulk
printf("\n=== GPU Bulk ===\n"); //Do not change
// Whole-dataset device buffers for the single bulk launch.
CUDA_CHECK( cudaMalloc((void **)&image_in, N_IMAGES * IMG_HEIGHT * IMG_WIDTH) );
CUDA_CHECK( cudaMalloc((void **)&image_out, N_IMAGES * IMG_HEIGHT * IMG_WIDTH) );
// copy all input images from images_in to the GPU memory allocated above
t_start = get_time_msec(); //Do not change
CUDA_CHECK(cudaMemcpy(image_in, images_in, N_IMAGES*IMG_WIDTH*IMG_HEIGHT, cudaMemcpyDefault));
// One threadblock per image, each working independently.
process_image_kernel <<< N_IMAGES, NUM_THREADS >>> (image_in, image_out);
CUDA_CHECK(cudaDeviceSynchronize());
// copy output images from GPU memory back to images_out_gpu_bulk
CUDA_CHECK(cudaMemcpy(images_out_gpu_bulk, image_out, N_IMAGES*IMG_WIDTH*IMG_HEIGHT, cudaMemcpyDefault));
t_finish = get_time_msec(); //Do not change
cudaFree(image_in);
cudaFree(image_out);
distance_sqr = distance_sqr_between_image_arrays(images_out_cpu, images_out_gpu_bulk); // Do not change
printf("total time %f [msec] distance from baseline %lld (should be zero)\n", t_finish - t_start, distance_sqr); //Do not change
return 0;
}
|
8,308 | #include <cuda_fp16.h>
#include <iostream>
#include <cmath>
#include <cfloat>
#include <math.h>
#include <algorithm>
using namespace std;
// nvcc -ccbin clang++-3.8 cufp16_test.cu
// NOTE: didn't get any error like building ArrayFire
int main()
{
    // isinf: NaN is not an infinity (prints 0); INFINITY is (prints 1).
    const bool nan_is_inf = isinf(NAN);
    const bool inf_is_inf = isinf(INFINITY);
    cout << nan_is_inf << endl; // 0
    cout << inf_is_inf << endl; // 1
}
|
8,309 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define ASCIIMIN 32
#define ASCIIMAX 126
#define CHARS_PER_THREADS 256
#define THREADS_PER_BLOCK 256
// Per-block shared histogram of printable-ASCII counts, flushed into the
// global counters at the end. Each "line" is a CHARS_PER_THREADS-sized chunk
// of dev_chars handled by one thread.
void __global__ kernel(int nLines, char* dev_chars, int nChars, int* dev_counts, int nCounts) {
    const unsigned int tidb = threadIdx.x;
    const unsigned int ti = blockIdx.x*blockDim.x + tidb;
    __shared__ int shared_counts[ASCIIMAX - ASCIIMIN + 1];
    // Zero EVERY bin, cooperatively. (BUG FIX: the original zeroed only
    // ASCIIMAX - ASCIIMIN - 1 bins, leaving the last two uninitialized.)
    for (int i = tidb; i < ASCIIMAX - ASCIIMIN + 1; i += blockDim.x) {
        shared_counts[i] = 0;
    }
    __syncthreads();
    if (ti < nLines) {
        // BUG FIX: the last chunk may be partial -- stop at nChars instead of
        // reading past the end of dev_chars.
        const int base = CHARS_PER_THREADS * (int)ti;
        for (int i = 0; i < CHARS_PER_THREADS && base + i < nChars; ++i) {
            const int ascii = (int)dev_chars[base + i];
            atomicAdd(&shared_counts[ascii - ASCIIMIN], 1);
        }
    }
    __syncthreads();
    // Thread 0 folds the block-local histogram into the global one.
    if (tidb == 0) {
        for (int i = 0; i < nCounts; ++i) {
            atomicAdd(&dev_counts[i], shared_counts[i]);
        }
    }
}
// True (1) iff *c is a printable ASCII character (codes 32..126 inclusive).
int isValid(char* c){
    const int code = (int)*c;
    return (code >= 32) && (code <= 126);
}
int main(int argc, char** argv){
    clock_t t1, t2;
    printf("Initialisation...\n");
    t1 = clock();
    //Declarations
    FILE* inputFile = NULL;
    FILE* outputFile = NULL;
    char* inputFileName = NULL;
    char* outputFileName = NULL;
    int nChars = 0;
    char* chars;
    int nCounts = ASCIIMAX - ASCIIMIN + 1;
    int* counts;
    char* dev_chars;
    int* dev_counts;
    int opt;
    //Get command line options
    while ((opt = getopt (argc, argv, "i:o:")) != -1) {
        switch(opt) {
            case 'i':
                inputFileName = optarg;
                break;
            case 'o':
                outputFileName = optarg;
                break;
        }
    }
    //Count the valid chars in inputFile.
    //BUG FIX: getc() must be stored in an int. With a plain char the EOF
    //comparison is unreliable: never true if char is unsigned (infinite
    //loop), or spuriously true on a legitimate 0xFF byte if signed.
    inputFile = fopen(inputFileName,"r");
    if (!inputFile) return 1;
    nChars = 0;
    for (int ic = getc(inputFile); ic != EOF; ic = getc(inputFile)){
        char c = (char)ic;
        if(isValid(&c)) ++nChars;
    }
    fclose(inputFile);
    //Allocate memory (check both host buffers before using them)
    counts = (int*) malloc(nCounts * sizeof(int));
    chars = (char*) malloc(nChars * sizeof(char));
    if(chars == NULL || counts == NULL) {
        printf("Input file too large!\n");
        return 1;
    }
    printf("%d chars processed\n", nChars);
    cudaMalloc( (void**)&dev_chars, nChars * sizeof(char));
    cudaMalloc( (void**)&dev_counts, nCounts * sizeof(int));
    //Filling chars array (second pass; uppercase is folded to lowercase)
    inputFile = fopen(inputFileName,"r");
    if (!inputFile) return 1;
    int i = 0;
    for (int ic = getc(inputFile); ic != EOF; ic = getc(inputFile)){
        char c = (char)ic;
        if(isValid(&c)){
            if (c>=65 && c<=90) chars[i] = (char)(c + 32);
            else chars[i] = c;
            ++i;
        }
    }
    fclose(inputFile);
    t1 = clock() - t1;
    printf("Process...\n");
    t2 = clock();
    //Initialize counter array
    for (int j = 0; j < nCounts; ++j){
        counts[j] = 0;
    }
    cudaMemcpy(dev_chars, chars, nChars * sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_counts, counts, nCounts * sizeof(int), cudaMemcpyHostToDevice);
    //NOTE: this CPU count is overwritten by the device results copied back
    //below; it is kept only as a reference computation.
    for (int j = 0; j < nChars; ++j){
        int ascii = (int)chars[j];
        ++counts[ascii - ASCIIMIN];
    }
    int nLines = (nChars + CHARS_PER_THREADS - 1) / CHARS_PER_THREADS;
    kernel<<<(nLines + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(nLines, dev_chars, nChars, dev_counts, nCounts);
    //cudaMemcpy synchronizes with the kernel before reading the totals back.
    cudaMemcpy(counts, dev_counts, nCounts * sizeof(int), cudaMemcpyDeviceToHost);
    t2 = clock() - t2;
    //Write in outputFile, skipping the A-Z slots (folded into lowercase above)
    outputFile = fopen(outputFileName, "w+");
    if (!outputFile) return 1;
    for (int j = 0; j < 127 - ASCIIMIN; ++j){
        if (j + ASCIIMIN < 65 || j + ASCIIMIN > 90) {
            fprintf(outputFile, "%c:%d\n", (char)(j + ASCIIMIN), (int)counts[j]);
        }
    }
    fclose(outputFile);
    //Return memory
    cudaFree(dev_chars);
    cudaFree(dev_counts);
    free(chars);
    free(counts);
    printf("Timings:\nInitialisation: %f\nProcess: %f\n", (float)1000 * t1/CLOCKS_PER_SEC, (float)1000 * t2/CLOCKS_PER_SEC);
    return 0;
}
|
8,310 | //#ifndef _MATRIXMUL_KERNEL_H_
//#define _MATRIXMUL_KERNEL_H_
extern "C"
/* Signature:
*/
__global__ void
pass_kernel(
    int arg1, int arg2, int arg3, int arg4, int arg5, int arg6, int arg7, int arg8, int arg9,
    int* bargs,
    int* barg1)
{
    // Each of the first nine threads scales "its" scalar argument by 10 and
    // stores it into the matching output slot (replacing the original
    // switch with a per-thread indexed table); every thread also echoes
    // arg9 into *barg1.
    const int lane = threadIdx.x;
    const int scaled[9] = {
        arg1 * 10, arg2 * 10, arg3 * 10,
        arg4 * 10, arg5 * 10, arg6 * 10,
        arg7 * 10, arg8 * 10, arg9 * 10,
    };
    if (lane < 9)
        bargs[lane] = scaled[lane];
    *barg1 = arg9;
}
|
8,311 | #include<cuda.h>
#include<math.h>
#include<iostream>
// One thread fills one row of the column-major outputs:
//   lag_matrix (rows x cols): p lagged X1 columns, p lagged X2 columns, bias;
//   lag_x1 (rows x (p+1)):    p lagged X1 columns, bias;
//   y_label (rows):           X1 shifted forward by p.
__global__ void lag_matrix_kernel(const double* X1, const double* X2, double* lag_matrix, double* lag_x1,
double* y_label, double bias, int rows, int cols, int p){
    const int row = threadIdx.x + blockIdx.x * blockDim.x;
    if (row >= rows)
        return;
    // Columns 0..p-1 come from X1, columns p..2p-1 from X2.
    for (int c = 0; c < p; ++c)
        lag_matrix[row + c * rows] = X1[row + c];
    for (int c = 0; c < p; ++c)
        lag_matrix[row + (c + p) * rows] = X2[row + c];
    lag_matrix[row + (cols - 1) * rows] = bias; // bias column
    // X1-only lag matrix plus its bias column.
    for (int c = 0; c < p; ++c)
        lag_x1[row + c * rows] = X1[row + c];
    lag_x1[row + p * rows] = bias;
    // Target: the value p steps ahead.
    y_label[row] = X1[row + p];
}
//
// Build (on the GPU) the lagged design matrix, the X1-only lag matrix and the
// label vector, then copy all three back into the caller's host buffers and
// print the kernel time.
// x1/x2: host input series of length n; bias: value for the bias column;
// rows/cols: output matrix shape; p: lag order.
void gen_lag_matrix(const double* x1, const double* x2, double* lag_matrix, double* lag_x1, double* y_label,
double bias, int n, int p, int rows, int cols, int threads_per_block){
    // One thread per output row.
    int num_blocks = (rows + threads_per_block - 1) / threads_per_block;
    double * d_x1 = NULL;
    double * d_x2 = NULL;
    // BUG FIX: these three pointers were initialized with `new double[...]`
    // buffers that cudaMallocManaged immediately overwrote, leaking the host
    // allocations. Start them as NULL instead.
    double * d_matrix_lag = NULL;
    double * d_x1_lag = NULL;
    double * d_label = NULL;
    cudaMallocManaged((void**)&d_x1, sizeof(double)*n);
    cudaMallocManaged((void**)&d_x2, sizeof(double)*n);
    cudaMallocManaged((void**)&d_matrix_lag, sizeof(double)*(rows*cols));
    cudaMallocManaged((void**)&d_x1_lag, sizeof(double)*(rows*(p+1)));
    cudaMallocManaged((void**)&d_label, sizeof(double)*rows);
    cudaMemcpy(d_x1, x1, sizeof(double)*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_x2, x2, sizeof(double)*n, cudaMemcpyHostToDevice);
    // timing
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    lag_matrix_kernel<<<num_blocks,threads_per_block>>>(d_x1, d_x2, d_matrix_lag, d_x1_lag, d_label, bias, rows, cols, p);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // Get the elapsed time in milliseconds
    float ms;
    cudaEventElapsedTime(&ms, start, stop);
    // bring the results back to the host
    cudaMemcpy(lag_matrix, d_matrix_lag, (rows*cols)*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(lag_x1, d_x1_lag, (rows*(p+1))*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(y_label, d_label, rows*sizeof(double), cudaMemcpyDeviceToHost);
    // FIX: destroy the timing events (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x1);
    cudaFree(d_x2);
    cudaFree(d_matrix_lag);
    cudaFree(d_x1_lag);
    cudaFree(d_label);
    std::cout << "Lagged matrix time: " << ms << " ms" << std::endl;
}
|
8,312 | #include "includes.h"
#define iceil(num, den) (num + den - 1) / den
#define ARRAY_SIZE 20 //must be an even number; this number/2 = number of points //sets random array and constant mem size
//#define BIN 100 //divides the grid into square bins to vote on. perfect square value
#define NUM_LINES 4 //top X voted lines. Picks first X Largest from top left to bottom right of grid space.
/*GRID evaluated for bin voting
* Must always be a square grid with origin at center
*/
#define dimension 5
#define LXBOUND (-1*dimension) //lowest X
#define RXBOUND (dimension) //highest X
#define LYBOUND (-1*dimension) //lowest Y
#define UYBOUND (dimension) //highest Y
////////////////////////////////
#define INCREMENT 1 //precision, length of 1 side of the square(bin)
//The (abs)difference between between two sides is the length of the grid. Length/Increment determines how many bins
#define column (((RXBOUND - LXBOUND) / INCREMENT) * ((RXBOUND - LXBOUND) / INCREMENT)) / ((RXBOUND + UYBOUND) / INCREMENT)
__constant__ int d_coordarray[ARRAY_SIZE];//Place coordinates in constant memory
//show grid with votes. Becomes unuseful when bins > 20x20
__global__ void kernelHough(int size, int* d_binarray) {
/*
take a piece of the array. discretize into y=mx+b format per point. check all points and increment all bins touched
at the end recombine all shared memory to a global bin tally. Take the most significant X numbers as lines.
discretized from point(1,1) ==(m,n)==> (-1,1)
check each bin for count and sum them to a global array in sync
NUM of coordinates will check all bins for their own equation and increment appropriately
*/
// One thread per point: reads its (x, y) pair from constant memory and votes
// in d_binarray for every grid cell that its dual line (slope -x, intercept
// y) passes through. Bins are visited column-by-column (x outer, y inner),
// matching the linear `counter` index.
// NOTE(review): `thread` indexes d_coordarray without any check against
// `size`/ARRAY_SIZE -- confirm the launch always uses exactly ARRAY_SIZE/2
// threads, otherwise this reads out of bounds. The `size` parameter itself
// is unused here.
// Number from 0 through arraysize / 2
const int thread = 2 * (blockDim.x * blockIdx.x + threadIdx.x);
// Slope in discretized space = -x
const float slope = -1.0 * d_coordarray[thread];
// Intercept in discretized space = y
const float intercept = d_coordarray[thread + 1];
int counter = 0;//keeps current array index being checked
//loop through entire graph
for (float x = LXBOUND; x < RXBOUND; x += INCREMENT) {
const float xMin = x;
const float xMax = x + INCREMENT;
for (float y = UYBOUND; y > LYBOUND; y -= INCREMENT) {
const float yMin = y - INCREMENT;
const float yMax = y;
//calculates possible y range associated with the known x range
const float lower_range = slope * xMin + intercept;
const float upper_range = slope * xMax + intercept;
//if the possible y ranges corresponding to the x values exist within the actual y range increment bin
if ((lower_range <= yMax && lower_range >= yMin) || (upper_range <= yMax && upper_range >= yMin))
atomicAdd(&d_binarray[counter], 1);//increment bin, protected from race condition
counter++;
}
}
}
8,313 | /*
* file name: convolution2D.cu
*
* CPE810A: Homework 3: Convolution
*
* Yupeng Cao, 10454637
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <ctime>
// Define constant memory for kernel storage on Device
#define KERNEL_RADIUS 128
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__constant__ float d_Kernel[KERNEL_W];
// Define Tile Size
#define TILE_W 16 // active cell width
#define TILE_H 16 // active cell height
#define TILE_SIZE (TILE_W + KERNEL_RADIUS * 2) * (TILE_W + KERNEL_RADIUS * 2)
#define UNROLL_INNER
clock_t start, row, col;
/*
*********************************************************************
Define Error Checking methods
cudaSafeCall: Check data allocate
cudaCheckError: Check kernel function execution
*********************************************************************
*/
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    // Abort with a file:line diagnostic when a CUDA runtime call failed.
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    // First surface launch/configuration errors recorded by the runtime...
    cudaError err = cudaGetLastError();
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
    // ...then block until the device is idle so asynchronous execution
    // errors from the most recent kernel surface as well.
    err = cudaDeviceSynchronize();
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}
/*
*********************************************************************
function name: convolutionRowGPU
parameters:
d_OutputRow: Space for saving results
d_Input: Input image
dimX: Width
dimY: Height
dimK: Kernel Size
*********************************************************************
*/
__global__ void convolutionRowGPU(float* d_OutputRow, float* d_Input, int dimX, int dimY, int dimK)
{
// Horizontal 1-D convolution using a per-block shared-memory tile.
// Expects blockDim == (TILE_W, TILE_H) and the mask (2*dimK+1 taps)
// already uploaded to the __constant__ array d_Kernel.
// Data cache: threadIdx.x , threadIdx.y
// Sized for the worst case (dimK == KERNEL_RADIUS); runtime indexing below
// uses dimK, so dimK must be <= KERNEL_RADIUS or the tile overflows.
__shared__ float data[ TILE_H * (TILE_W + KERNEL_RADIUS * 2) ];
// global mem address of this thread
const int gLoc = threadIdx.x +
blockIdx.x * blockDim.x +
threadIdx.y * dimX +
blockIdx.y * blockDim.y * dimX;
int x; // image based coordinate
// original image based coordinate
const int x0 = threadIdx.x + blockIdx.x * blockDim.x;
const int shift = threadIdx.y * (TILE_W + dimK * 2);
// left halo: zero-pad when the sample falls off the left image edge
x = x0 - dimK;
if ( x < 0 )
data[threadIdx.x + shift] = 0;
else
data[threadIdx.x + shift] = d_Input[ gLoc - dimK];
// right halo: zero-pad when the sample falls off the right image edge
x = x0 + dimK;
if ( x > dimX-1 )
data[threadIdx.x + blockDim.x + shift] = 0;
else
data[threadIdx.x + blockDim.x + shift] = d_Input[gLoc + dimK];
// NOTE(review): each thread loads only two samples, which covers tile +
// halo only for dimK <= TILE_W/2 — confirm for larger masks.
__syncthreads();
// convolution: dot product of the mask with the cached neighborhood
float sum = 0;
x = dimK + threadIdx.x;
for (int i = -dimK; i <= dimK; i++)
sum += data[x + i + shift] * d_Kernel[dimK + i];
d_OutputRow[gLoc] = sum;
__syncthreads();
}
/*
*********************************************************************
function name: convolutionCOlGPU
parameters:
d_OutputCol: Space for saving results
d_Input: Input image
dimX: Width
dimY: Height
dimK: Kernel Size
*********************************************************************
*/
__global__ void convolutionColGPU(float* d_OutputCol, float* d_Input, int dimX, int dimY, int dimK)
{
// Vertical 1-D convolution; mirror image of convolutionRowGPU with the
// halo loaded above/below instead of left/right. Same preconditions:
// blockDim == (TILE_W, TILE_H), mask in d_Kernel, dimK <= KERNEL_RADIUS.
// Data cache: threadIdx.x , threadIdx.y
__shared__ float data[TILE_W * (TILE_H + KERNEL_RADIUS * 2)];
// global mem address of this thread
const int gLoc = threadIdx.x +
blockIdx.x * blockDim.x +
threadIdx.y * dimX +
blockIdx.y * blockDim.y * dimX;
int y; // image based coordinate
// original image based coordinate
const int y0 = threadIdx.y + blockIdx.y * blockDim.y;
const int shift = threadIdx.y * (TILE_W);
// upper halo: zero-pad above the top image edge
y = y0 - dimK;
if ( y < 0 )
data[threadIdx.x + shift] = 0;
else
data[threadIdx.x + shift] = d_Input[ gLoc - (dimX * dimK)];
// lower halo: zero-pad below the bottom image edge
y = y0 + dimK;
const int shift1 = shift + (blockDim.y * TILE_W);
if ( y > dimY-1 )
data[threadIdx.x + shift1] = 0;
else
data[threadIdx.x + shift1] = d_Input[gLoc + (dimX * dimK)];
__syncthreads();
// convolution down the cached column.
// NOTE(review): the loop reads tile rows ty .. ty+2*dimK, but each thread
// only loaded rows ty and ty+blockDim.y — this appears to line up only
// when dimK == TILE_H/2; confirm for other mask sizes.
float sum = 0;
for (int i = 0; i <= dimK*2; i++)
sum += data[threadIdx.x + (threadIdx.y + i) * TILE_W] * d_Kernel[i];
d_OutputCol[gLoc] = sum;
__syncthreads();
}
/*
*********************************************************************
function name: convolutionRowCPU
Do Row Convolution by using CPU
*********************************************************************
*/
void convolutionRowCPU(float* output, float* kernel, float* input, int xSize, int ySize, int kernel_size)
{
    // CPU reference for the row (horizontal) pass.
    // NOTE(review): despite taking kernel_size, the tap loop is hard-coded
    // to 3 (c < 3) and the output has outCol = ySize - 2 columns, so results
    // only match the GPU path for kernel_size == 3 — confirm intent.
    float* temp = new float[kernel_size];
    int outCol = ySize - 2;
    for (int i = floor(kernel_size / 2); i < xSize - (kernel_size / 2); i++)
    {
        for (int j = floor(kernel_size / 2); j < ySize - floor(kernel_size / 2); j++)
        {
            // accumulate the 3 taps around (i, j) along the row
            for (int c = 0; c < 3; c++)
            {
                *(temp + c) = *(kernel + c) * *(input + i * ySize + (j + (c - kernel_size + 2)));
            }
            *(output + (i - 1) * outCol + (j - 1)) = *(temp + 0) + *(temp + 1) + *(temp + 2);
        }
    }
    // BUG FIX: temp was allocated with new[] on every call and never freed.
    delete[] temp;
}
/*
*********************************************************************
function name: convolutionColCPU
Do Col Convolution by using CPU
*********************************************************************
*/
void convolutionColCPU(float* output, float* kernel, float* input, int xSize, int ySize, int kernel_size)
{
    // CPU reference for the column (vertical) pass.
    // NOTE(review): like convolutionRowCPU, the tap loop is hard-coded to 3,
    // so this only matches the GPU path for kernel_size == 3 — confirm intent.
    float* temp = new float[kernel_size];
    int outCol = ySize - 2;
    for (int i = floor(kernel_size / 2); i < xSize - (kernel_size / 2); i++)
    {
        for (int j = floor(kernel_size / 2); j < ySize - floor(kernel_size / 2); j++)
        {
            // accumulate the 3 taps around (i, j) down the column
            for (int c = 0; c < 3; c++)
            {
                *(temp + c) = *(kernel + c) * *(input + (i + (c - kernel_size + 2)) * ySize + j);
            }
            *(output + (i - 1) * outCol + (j - 1)) = *(temp + 0) + *(temp + 1) + *(temp + 2);
        }
    }
    // BUG FIX: temp was allocated with new[] on every call and never freed.
    delete[] temp;
}
int check_input(int dimX, int dimY, int dimK){
    // Returns 1 when every dimension is strictly positive; otherwise prints
    // a diagnostic and returns -1.
    if (dimX <= 0 || dimY <= 0 || dimK <= 0){
        printf("Input for dimX, dimY, dimK must larger than 0");
        return -1;
    }
    return 1;
}
/*
*********************************************************************
Main Function
*********************************************************************
*/
int main(int argc, char *argv[])
{
    // Usage: ./execute_file <dimX> <dimY> <dimK>
    // Runs separable row/col convolution on the GPU, a CPU reference, and
    // reports timing/throughput.
    if (argc == 4){
        printf("Input Data\n");
    }else{
        printf("Error input Parameter \n");
        printf("Please Follow Format to Run Program: ./execute_file <dimX> <dimY> <dimK>\n");
        printf("Please input <dimX>, <dimY>, <dimK> \n");
        printf("dimX and dimY are width and heights for input image and dimK is size for mask \n");
        return 0;
    }
    int dimX = atoi(argv[1]);
    int dimY = atoi(argv[2]);
    int dimK = atoi(argv[3]);
    if (dimK > KERNEL_RADIUS){
        printf("Your Mask Size is too large. \n");
        printf("We recommend you change a reasonable number. \n");
        // BUG FIX: d_Kernel holds KERNEL_W floats and the kernels index it up
        // to 2*dimK, so continuing here would read/write out of bounds in
        // constant memory. Abort instead of just warning.
        return -1;
    }
    if (check_input(dimX, dimY, dimK) == 1){
        printf("Input is Valid \n\n");
    }else{
        return -1;
    }
    srand((unsigned)time(NULL));
    // Image is dimX * dimY floats; mask has dimK taps on each side.
    unsigned int img_size = dimX * dimY;
    const int kernel_length = dimK;
    // Host-side input buffers (plain malloc, so released with free below).
    float* h_Kernel = (float *)malloc(kernel_length * sizeof(float));
    float* h_Input = (float *)malloc(dimX * dimY * sizeof(float));
    // Random mask and image values in [0, 15].
    for (int i = 0; i < kernel_length; ++i)
    {
        h_Kernel[i] = (float)(rand() % 16);
    }
    for (unsigned i = 0; i < img_size; ++i)
    {
        h_Input[i] = (float)(rand() % 16);
    }
    // Host-side result buffers.
    float *h_OutputRowCPU, *h_OutputColCPU, *h_OutputRowGPU, *h_OutputColGPU;
    h_OutputRowCPU = (float *)malloc(img_size * sizeof(float));
    h_OutputColCPU = (float *)malloc(img_size * sizeof(float));
    h_OutputRowGPU = (float *)malloc(img_size * sizeof(float));
    h_OutputColGPU = (float *)malloc(img_size * sizeof(float));
    // Device buffers.
    float *d_Input, *d_OutputRow, *d_OutputCol;
    CudaSafeCall(cudaMalloc((void **)&d_Input, img_size * sizeof(float)));
    CudaSafeCall(cudaMalloc((void **)&d_OutputRow, img_size * sizeof(float)));
    CudaSafeCall(cudaMalloc((void **)&d_OutputCol, img_size * sizeof(float)));
    CudaSafeCall(cudaMemcpy(d_Input, h_Input, img_size * sizeof(float), cudaMemcpyHostToDevice));
    // BUG FIX: byte count was missing sizeof(float) — only kernel_length
    // BYTES of the mask were being copied into constant memory.
    CudaSafeCall(cudaMemcpyToSymbol(d_Kernel, h_Kernel, kernel_length * sizeof(float)));
    dim3 blocks(TILE_W, TILE_H);
    // NOTE(review): truncating division — trailing partial tiles are skipped
    // and the kernels have no bounds checks, so dimX/dimY should be
    // multiples of the tile size.
    dim3 grids(dimX/TILE_W, dimY/TILE_H);
    start = clock();
    convolutionRowGPU<<<grids, blocks>>>(d_OutputRow, d_Input, dimX, dimY, dimK);
    CudaCheckError();
    cudaDeviceSynchronize();
    row = clock();
    double running_time = (double)(row - start) / CLOCKS_PER_SEC;
    printf("Row Convolution by using GPU: %f ms.\n", running_time);
    convolutionColGPU<<<grids, blocks>>>(d_OutputCol, d_Input, dimX, dimY, dimK);
    CudaCheckError();
    cudaDeviceSynchronize();
    // BUG FIX: byte counts were missing sizeof(float) — only a quarter of the
    // results were being copied back.
    CudaSafeCall(cudaMemcpy(h_OutputRowGPU, d_OutputRow, img_size * sizeof(float), cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaMemcpy(h_OutputColGPU, d_OutputCol, img_size * sizeof(float), cudaMemcpyDeviceToHost));
    // CPU reference passes (not timed in this build).
    convolutionRowCPU(h_OutputRowCPU, h_Kernel, h_Input, dimX, dimY, dimK);
    convolutionColCPU(h_OutputColCPU, h_Kernel, h_Input, dimX, dimY, dimK);
    double computation_scale = static_cast<double>(dimX) * static_cast<double>(dimY) * static_cast<double>(dimK);
    double throughput = (computation_scale * 1.0e-9f) / (running_time / 1000.0f);
    printf("Throughput Performance: %f GFLOPs. \n", throughput);
    // Release device memory. BUG FIX: removed cudaFree(d_Kernel) — __constant__
    // symbols are not heap allocations and must not be freed; d_Input was
    // also being leaked.
    cudaFree(d_Input);
    cudaFree(d_OutputRow);
    cudaFree(d_OutputCol);
    // BUG FIX: these buffers came from malloc(), so they must be released
    // with free(); cudaFreeHost() is only valid for cudaMallocHost memory.
    // The two CPU result buffers were also leaked.
    free(h_Kernel);
    free(h_Input);
    free(h_OutputRowCPU);
    free(h_OutputColCPU);
    free(h_OutputRowGPU);
    free(h_OutputColGPU);
    return 0;
}
|
8,314 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <cuda.h>
float *a, *b; // host data
float *c, *c2; // results
__global__ void vecAdd(float *A,float *B,float *C,int N)
{
    // Element-wise vector sum: C[i] = A[i] + B[i], one thread per element.
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Guard: threads launched past the end of the arrays do nothing.
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
void vecAdd_h(float *A1,float *B1, float *C1, float N)
{
    // CPU reference implementation of the element-wise vector sum.
    // (N arrives as a float but is used as the loop bound.)
    for (int idx = 0; idx < N; ++idx)
    {
        C1[idx] = A1[idx] + B1[idx];
    }
}
int main(int argc,char **argv)
{
    // Adds two 10M-element vectors on GPU and CPU, compares timings, and
    // prints the first three results of each (sin^2 + cos^2 == 1, so the
    // expected output is all ones).
    printf("Begin \n");
    int n=10000000;
    int nBytes = n*sizeof(float);
    int block_size, block_no;
    a = (float *)malloc(nBytes);
    b = (float *)malloc(nBytes);
    c = (float *)malloc(nBytes);
    c2 = (float *)malloc(nBytes);
    float *a_d,*b_d,*c_d;
    // n divides block_size exactly, so the grid covers every element.
    block_size=1000;
    block_no = n/block_size;
    dim3 threadPerBlock(block_size,1,1);
    dim3 dimBlock(block_no,1,1);
    for(int i = 0; i < n; i++ ) {
        a[i] = sin(i)*sin(i);
        b[i] = cos(i)*cos(i);
    }
    printf("Allocating device memory on host..\n");
    cudaMalloc((void **)&a_d,nBytes);
    cudaMalloc((void **)&b_d,nBytes);
    cudaMalloc((void **)&c_d,nBytes);
    printf("Copying to device..\n");
    cudaMemcpy(a_d,a,nBytes,cudaMemcpyHostToDevice);
    cudaMemcpy(b_d,b,nBytes,cudaMemcpyHostToDevice);
    clock_t start_d=clock();
    printf("Doing GPU Vector add\n");
    vecAdd<<<dimBlock,threadPerBlock>>>(a_d,b_d,c_d,n);
    // BUG FIX: cudaThreadSynchronize() is deprecated (removed in recent
    // toolkits); cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
    clock_t end_d = clock();
    clock_t start_h = clock();
    printf("Doing CPU Vector add\n");
    vecAdd_h(a,b,c2,n);
    clock_t end_h = clock();
    double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC;
    double time_h = (double)(end_h-start_h)/CLOCKS_PER_SEC;
    // cpy_C holds the cudaError_t of the copy-back (0 == cudaSuccess).
    int cpy_C=cudaMemcpy(c,c_d,nBytes,cudaMemcpyDeviceToHost);
    printf("%d\n",cpy_C);
    printf("Number of elements: %d GPU Time: %f CPU Time: %f\n",n,time_d,time_h);
    for (int i=0; i<3;i++){
        printf("%f\n", c[i]);
    }
    for (int i=0; i<3;i++){
        printf("%f\n", c2[i]);
    }
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    // BUG FIX: the malloc'd host buffers were never released.
    free(a);
    free(b);
    free(c);
    free(c2);
    return 0;
}
|
8,315 | /* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstdio>
#ifdef USE_NVTX
#include "nvToolsExt.h"
const uint32_t colors[] = { 0xff00ff00, 0xff0000ff, 0xffffff00, 0xffff00ff, 0xff00ffff, 0xffff0000, 0xffffffff };
const int num_colors = sizeof(colors)/sizeof(uint32_t);
#define PUSH_RANGE(name,cid) { \
int color_id = cid; \
color_id = color_id%num_colors;\
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name,cid)
#define POP_RANGE
#endif
__global__ void init_data_kernel( int n, double* x)
{
    // Fills x with the descending ramp n, n-1, ..., 1 (one thread/element).
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= n )
        return;
    x[idx] = n - idx;
}
__global__ void daxpy_kernel(int n, double a, double * x, double * y)
{
    // Classic DAXPY: y[i] += a * x[i], one thread per element.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    y[idx] = a * x[idx] + y[idx];
}
__global__ void check_results_kernel( int n, double correctvalue, double * x )
{
    // Device-side validation: print any element that differs from the
    // expected constant value.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    if ( x[idx] != correctvalue )
    {
        printf("ERROR at index = %d, expected = %f, actual: %f\n",idx,correctvalue,x[idx]);
    }
}
void init_host_data( int n, double * x )
{
    // Fills the host buffer with the ascending ramp 0, 1, ..., n-1,
    // wrapped in an NVTX range for profiling.
    PUSH_RANGE("init_host_data",1)
    for (int idx = 0; idx < n; ++idx)
    {
        x[idx] = idx;
    }
    POP_RANGE
}
void init_data(int n, double* x, double* x_d, double* y_d)
{
    // Copies x to the device while concurrently initializing y_d on a
    // second stream, then joins both streams before returning.
    PUSH_RANGE("init_data",2)
    cudaStream_t copy_stream;
    cudaStream_t compute_stream;
    cudaStreamCreate(&copy_stream);
    cudaStreamCreate(&compute_stream);
    cudaMemcpyAsync( x_d, x, n*sizeof(double), cudaMemcpyDefault, copy_stream );
    // BUG FIX: ceil(n/256) performed integer division before the ceil, so
    // the grid was one block short whenever n was not a multiple of 256.
    init_data_kernel<<<(n + 255) / 256, 256, 0, compute_stream>>>(n, y_d);
    cudaStreamSynchronize(copy_stream);
    cudaStreamSynchronize(compute_stream);
    cudaStreamDestroy(compute_stream);
    cudaStreamDestroy(copy_stream);
    POP_RANGE
}
void daxpy(int n, double a, double* x_d, double* y_d)
{
    // Launches the DAXPY kernel and waits for completion.
    PUSH_RANGE("daxpy",3)
    // BUG FIX: ceil(n/256) did integer division first; use ceil-div so the
    // grid covers the tail when n is not a multiple of 256.
    daxpy_kernel<<<(n + 255) / 256, 256>>>(n,a,x_d,y_d);
    cudaDeviceSynchronize();
    POP_RANGE
}
void check_results( int n, double correctvalue, double* x_d )
{
    // Launches the device-side validation kernel (errors are printed from
    // the device; the caller synchronizes later so they get flushed).
    PUSH_RANGE("check_results",4)
    // BUG FIX: ceil(n/256) did integer division first; use ceil-div so the
    // grid covers the tail when n is not a multiple of 256.
    check_results_kernel<<<(n + 255) / 256, 256>>>(n,correctvalue,x_d);
    POP_RANGE
}
void run_test(int n)
{
// End-to-end driver: allocate, initialize (overlapping the H2D copy of x
// with the y-init kernel on separate streams), run daxpy, validate on the
// device, and clean up.
PUSH_RANGE("run_test",0)
double* x;
double* x_d;
double* y_d;
cudaSetDevice(0);
// Pinned host buffer so the async copy in init_data can truly overlap.
cudaMallocHost((void**) &x, n*sizeof(double));
cudaMalloc((void**)&x_d,n*sizeof(double));
cudaMalloc((void**)&y_d,n*sizeof(double));
init_host_data(n, x);
init_data(n,x,x_d,y_d);
daxpy(n,1.0,x_d,y_d);
// y starts as n-i and x as i, so after y += 1.0*x every element equals n.
check_results(n, n, y_d);
cudaFree(y_d);
cudaFree(x_d);
cudaFreeHost(x);
// Flushes device printf output from check_results before returning.
cudaDeviceSynchronize();
POP_RANGE
}
int main()
{
    // Problem size: 4M doubles (1 << 22).
    const int n = 4 * 1024 * 1024;
    run_test(n);
    return 0;
}
|
8,316 | #include <cmath>
__global__ void mylog(double* value)
{
    // Each thread replaces its element with its natural logarithm in place.
    const int idx = threadIdx.x;
    const double v = value[idx];
    value[idx] = std::log(v);
}
|
8,317 | #include "includes.h"
__global__ void kernel(float *g_data, float value)
{
    // Adds `value` to every element in place, one thread per element.
    // NOTE(review): no bounds guard — the launch configuration must match
    // the array length exactly; confirm at the call site.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    g_data[tid] += value;
}
8,318 | /**
* @file microbenchmarking_transfers.cu
* @detail This file describes the implementation of the functions involved in the
* PCIe bandwidth and latency.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#include <stdio.h>
void swap(float & v1, float & v2){
    // Exchange the values held by the two references.
    const float old_first = v1;
    v1 = v2;
    v2 = old_first;
}
int partition(float *array, int left, int right){
// Quicksort partition: picks the middle element as pivot, parks it at
// `right`, partitions the rest with two converging cursors, then drops the
// pivot into its final sorted slot and returns that index.
int part = right;
// median-of-position pivot: move the middle element to the right end
swap(array[part],array[(right+left) / 2]);
--right;
while(true){
// advance the left cursor over elements smaller than the pivot
while(array[left] < array[part]){
++left;
}
// retreat the right cursor over elements >= pivot
while(right >= left && array[part] <= array[right]){
--right;
}
// cursors crossed: partitioning is complete
if(right < left) break;
swap(array[left],array[right]);
++left;
--right;
}
// place the pivot between the two partitions
swap(array[part],array[left]);
return left;
}
void qs(float * array, const int left, const int right){
    // Recursive quicksort over array[left..right], inclusive.
    if (left >= right)
        return;
    const int pivot = partition(array, left, right);
    qs(array, left, pivot - 1);
    qs(array, pivot + 1, right);
}
/**
* @brief Quicksort
* @author Antonio Jose Lazaro Munoz
* @date 17/02/2016
* @details Quicksort
*
* @param array elements array
* @param size array size
*/
void serialQuickSort(float *array, const int size){
    // Sort the whole array ascending via the recursive quicksort above.
    qs(array, 0, size - 1);
}
/**
* @brief Calculate times median
* @author Antonio Jose Lazaro Munoz
* @date 17/02/2016
* @details This function returns the median from a set of times.
*
* @param h_times times array.
* @param N array size.
*
* @return time median.
*/
float getMedianTime(float *h_times, int N)
{
    // Median of N timing samples; works on a private copy so the caller's
    // array is left untouched.
    float median = 0;
    float * h_sorted_times = (float *)malloc(N * sizeof(float));
    for(int n = 0; n < N; n++)
        h_sorted_times[n] = h_times[n];
    //Sort execution times
    serialQuickSort(h_sorted_times, N);
    //Calculate median
    if(N%2 == 0)
    {
        // BUG FIX: even count averages the two MIDDLE elements (0-based
        // indices N/2-1 and N/2). The previous code used N/2 and N/2+1,
        // which is off by one and reads past the array end when N == 2.
        median = (h_sorted_times[N/2 - 1] + h_sorted_times[N/2])/2;
    }
    else
    {
        int p = N/2;
        median = h_sorted_times[p];
    }
    free(h_sorted_times);
    return median;
}
/**
* @brief Calculate PCIe HTD latency.
* @author Antonio Jose Lazaro Munoz
* @date 17/02/2016
* @details This function returns the latency of the PCIe for HTD memory transfers.
*
* @param d_data device data.
* @param h_data host data.
* @param nreps iterations.
* @return HTD PCIe latency (ms).
*/
float getLoHTD(char *d_data, char *h_data, int nreps)
{
// Measures the fixed (latency) cost of a host-to-device copy by timing a
// 1-byte cudaMemcpy nreps times and taking the median to reject outliers.
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
float LoHTD = 0.0;
float *h_times = (float *)malloc(nreps * sizeof(float));
memset(h_times, 0, nreps * sizeof(float));
for(int k = 0; k < nreps; k++)
{
// events recorded on the default stream bracket the blocking copy
cudaEventRecord(start_event, 0);
//we only transfer 1 byte.
cudaMemcpy(d_data, h_data, sizeof(char), cudaMemcpyHostToDevice);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&LoHTD, start_event, stop_event);
h_times[k] = LoHTD;
}
// median (ms) over all repetitions
LoHTD = getMedianTime(h_times, nreps);
free(h_times);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
return LoHTD;
}
/**
* @brief Calculate PCIe DTH latency
* @author Antonio Jose Lazaro Munoz
* @date 17/02/2016
* @details This function returns the latency of the PCIe for DTH memory transfers.
*
* @param d_data Device data.
* @param h_data Host data.
* @param nreps iterations
* @return DTH PCIe latency (ms).
*/
float getLoDTH(char *d_data, char *h_data, int nreps)
{
// Measures the fixed (latency) cost of a device-to-host copy by timing a
// 1-byte cudaMemcpy nreps times and taking the median to reject outliers.
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
float LoDTH = 0.0;
float *h_times = (float *)malloc(nreps * sizeof(float));
memset(h_times, 0, nreps * sizeof(float));
for(int k = 0; k < nreps; k++)
{
// events recorded on the default stream bracket the blocking copy
cudaEventRecord(start_event, 0);
//we only transfer 1 byte.
cudaMemcpy(h_data, d_data, sizeof(char), cudaMemcpyDeviceToHost);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&LoDTH, start_event, stop_event);
h_times[k] = LoDTH;
}
// median (ms) over all repetitions
LoDTH = getMedianTime(h_times, nreps);
free(h_times);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
return LoDTH;
}
/**
* @brief DTH PCIe bandwidth (ms/byte).
* @author Antonio Jose Lazaro Munoz
* @date 17/02/2016
* @details This function returns the bandwidth of the PCIe for DTH memory transfers.
*
* @param d_data Device data.
* @param h_data Host data.
* @param LoDTH DTH PCIe latency.
* @param nreps Iterations.
* @return DTH PCIe bandwidth (ms/byte).
*/
float getGDTH(char *d_data, char *h_data, float LoDTH, int nreps)
{
// Per-byte DTH transfer cost (ms/byte): times copies of 16..512 MB,
// subtracts the per-copy latency LoDTH from the total, and divides by the
// total byte count. `nreps` is accepted for API symmetry but unused here.
float time = 0;
float GDTH = 0.0;
float timeSumGDTH = 0.0;
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
int n = 0;
int total_bytes = 0;
//From 16 MB to 512 MB
for(int size = 16; size <= 512; size=size*2)
{
cudaEventRecord(start_event, 0);
cudaMemcpy(h_data, d_data, size * 1024 * 1024* sizeof(char), cudaMemcpyDeviceToHost);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&time, start_event, stop_event);
timeSumGDTH += time;
total_bytes += size * 1024 * 1024;
n++;
}
// remove n latency contributions, then normalize per byte
GDTH = (timeSumGDTH - n*LoDTH)/total_bytes;
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
return GDTH;
}
/**
* @brief HTD PCIe bandwidth (ms/byte).
* @author Antonio Jose Lazaro Munoz
* @date 17/02/2016
* @details This function returns the bandwidth of the PCIe for HTD memory transfers.
*
* @param d_data Device data.
* @param h_data Host data.
* @param LoHTD HTD PCIe latency.
* @param nreps Iterations.
* @return HTD PCIe bandwidth (ms/byte).
*/
float getGHTD(char *d_data, char *h_data, float LoHTD, int nreps)
{
// Per-byte HTD transfer cost (ms/byte): times copies of 16..512 MB,
// subtracts the per-copy latency LoHTD from the total, and divides by the
// total byte count. `nreps` is accepted for API symmetry but unused here.
float time = 0;
float GHTD = 0.0;
float timeSumGHTD = 0.0;
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
int n = 0;
int total_bytes = 0;
//From 16 MB to 512 MB
for(int size = 16; size <= 512; size=size*2)
{
cudaEventRecord(start_event, 0);
cudaMemcpy(d_data, h_data, size * 1024 * 1024* sizeof(char), cudaMemcpyHostToDevice);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&time, start_event, stop_event);
timeSumGHTD += time;
total_bytes += size * 1024 * 1024;
n++;
}
// remove n latency contributions, then normalize per byte
GHTD = (timeSumGHTD - n*LoHTD)/total_bytes;
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
return GHTD;
}
/**
* @brief Overlap DTH PCIe bandwidth.
* @author Antonio Jose Lazaro Munoz
* @date 17/02/2016
* @details This function returns the bandwidth of the PCIe for HTD memory transfers, when a DTH memory transfer
* is concurrently executed.
*
* @param d_data Device data.
* @param h_data Host data.
* @param LoDTH DTH PCIe latency.
* @param nreps Iterations.
* @return Overlap DTH PCIe bandwidth (byte/ms).
*/
float getOverlappedGDTH(char *d_data, char *h_data, float LoDTH, int nreps)
{
// Per-byte DTH cost while an HTD copy of equal size runs concurrently on a
// second stream (requires 2 copy engines). Only the DTH copy on stream 0
// is timed. Buffers must hold 2x the largest size (offset + length reach
// exactly 1 GB, matching the allocation in microbenchmarkingPCI).
float time = 0;
float GDTH = 0.0;
float timeSumGDTH = 0.0;
cudaStream_t *stream_benchmark = (cudaStream_t *)malloc(2 * sizeof(cudaStream_t));
for(int i = 0; i < 2; i++)
cudaStreamCreate(&(stream_benchmark[i]));
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
int n = 0;
int total_bytes = 0;
//From 16 MB to 512 MB
for(int size = 16; size <= 512; size=size*2)
{
cudaEventRecord(start_event, stream_benchmark[0]);
//DTH
cudaMemcpyAsync(h_data,
d_data,
size * 1024 * 1024* sizeof(char),
cudaMemcpyDeviceToHost, stream_benchmark[0]);
cudaEventRecord(stop_event, stream_benchmark[0]);
//HTD (concurrent, on the second stream and a disjoint buffer region)
cudaMemcpyAsync(d_data + (size * 1024 * 1024),
h_data + (size * 1024 * 1024),
size * 1024 * 1024* sizeof(char),
cudaMemcpyHostToDevice, stream_benchmark[1]);
cudaDeviceSynchronize();
cudaEventElapsedTime(&time, start_event, stop_event);
timeSumGDTH += time;
total_bytes += size * 1024 * 1024;
n++;
}
// remove n latency contributions, then normalize per byte
GDTH = (timeSumGDTH - n*LoDTH)/total_bytes;
for(int i = 0; i < 2; i++)
cudaStreamDestroy(stream_benchmark[i]);
free(stream_benchmark);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
return GDTH;
}
/**
* @brief Overlap HTD PCIe bandwidth.
* @author Antonio Jose Lazaro Munoz
* @date 17/02/2016
* @details This function returns the bandwidth of the PCIe for DTH memory transfers, when a HTD memory transfer
* is concurrently executed.
*
* @param d_data Device data.
* @param h_data Host data.
* @param LoHTD HTD PCIe latency.
* @param nreps Iterations.
* @return Overlap HTD PCIe bandwidth (byte/ms).
*/
float getOverlappedGHTD(char *d_data, char *h_data, float LoHTD, int nreps)
{
// Per-byte HTD cost while a DTH copy of equal size runs concurrently on a
// second stream (requires 2 copy engines). Only the HTD copy on stream 0
// is timed. Buffers must hold 2x the largest size (offset + length reach
// exactly 1 GB, matching the allocation in microbenchmarkingPCI).
float time = 0;
float GHTD = 0.0;
float timeSumGHTD = 0.0;
cudaStream_t *stream_benchmark = (cudaStream_t *)malloc(2 * sizeof(cudaStream_t));
for(int i = 0; i < 2; i++)
cudaStreamCreate(&(stream_benchmark[i]));
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
int n = 0;
int total_bytes = 0;
//From 16 MB to 512 MB
for(int size = 16; size <= 512; size=size*2)
{
cudaEventRecord(start_event, stream_benchmark[0]);
//HTD (timed)
cudaMemcpyAsync(d_data,
h_data,
size * 1024 * 1024* sizeof(char),
cudaMemcpyHostToDevice, stream_benchmark[0]);
cudaEventRecord(stop_event, stream_benchmark[0]);
//DTH (concurrent, on the second stream and a disjoint buffer region)
cudaMemcpyAsync(h_data + (size * 1024 * 1024),
d_data + (size * 1024 * 1024),
size * 1024 * 1024* sizeof(char),
cudaMemcpyDeviceToHost, stream_benchmark[1]);
cudaDeviceSynchronize();
cudaEventElapsedTime(&time, start_event, stop_event);
timeSumGHTD += time;
total_bytes += size * 1024 * 1024;
n++;
}
// remove n latency contributions, then normalize per byte
GHTD = (timeSumGHTD - n*LoHTD)/total_bytes;
for(int i = 0; i < 2; i++)
cudaStreamDestroy(stream_benchmark[i]);
free(stream_benchmark);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
return GHTD;
}
/**
* @brief PCIe microbenchmarking.
* @author Antonio Jose Lazaro Munoz
* @date 17/02/2016
* @details This function calculates the values of PCIe latency and bandwidth
* for HTD and DTH memory transfers.
*
* @param gpu GPU id.
* @param LoHTD PCIe HTD latency (pointer).
* @param LoDTH PCIe DTH latency (pointer).
* @param GHTD HTD PCIe bandwidth (pointer).
* @param overlappedGHTD Overlap HTD PCIe bandwidth (pointer).
* @param GDTH DTH PCIe bandwidth (pointer).
* @param overlappedGDTH Overlap DTH PCIe bandwidth (pointer).
* @param nIter Iterations.
*/
void microbenchmarkingPCI(int gpu, float *LoHTD, float *LoDTH, float *GHTD, float *overlappedGHTD,
float *GDTH, float *overlappedGDTH, int nIter)
{
// Runs all PCIe microbenchmarks on the selected GPU and writes the results
// through the out-parameters. NOTE(review): the overlapped values are only
// written when the device has two copy engines — callers should
// pre-initialize *overlappedGHTD / *overlappedGDTH.
cudaDeviceProp props;
cudaGetDeviceProperties(&props, gpu);
cudaSetDevice(gpu);
// 1 GB staging buffers: large enough for the overlapped tests, which use
// a 512 MB offset plus a 512 MB transfer.
int tam = 1024*1024*1024; // 1GB
// pinned host memory so async copies can overlap in the overlapped tests
char *h_data_benchmark; cudaMallocHost((void**)&h_data_benchmark, tam * sizeof(char));
memset(h_data_benchmark, 0, tam * sizeof(char));
char *d_data_benchmark; cudaMalloc((void **)&d_data_benchmark, tam * sizeof(char));
cudaMemset(d_data_benchmark, 0, tam * sizeof(char));
*LoHTD = getLoHTD(d_data_benchmark, h_data_benchmark, nIter);
*GHTD = getGHTD(d_data_benchmark, h_data_benchmark, *LoHTD, nIter);
// overlapped benchmarks only make sense with dual copy engines
if(props.asyncEngineCount == 2)
*overlappedGHTD = getOverlappedGHTD(d_data_benchmark, h_data_benchmark, *LoHTD, nIter);
*LoDTH = getLoDTH(d_data_benchmark, h_data_benchmark, nIter);
*GDTH = getGDTH(d_data_benchmark, h_data_benchmark, *LoDTH, nIter);
if(props.asyncEngineCount == 2)
*overlappedGDTH = getOverlappedGDTH(d_data_benchmark, h_data_benchmark, *LoDTH, nIter);
cudaFreeHost(h_data_benchmark);
cudaFree(d_data_benchmark);
}
8,319 | #include <iostream>
#include <stdio.h>
using namespace std;
#define DATA_TYPE unsigned char
/* Tree node. The whole point of this program is to compare sizeof(Node)
 * between host and device compilation, so the member order and types must
 * not be changed (padding/alignment is what is being measured). */
class Node
{
public:
Node* left;      // left child pointer
Node* right;     // right child pointer
DATA_TYPE data;  // payload (unsigned char)
};
__global__ void testKer()
{
    // Prints sizeof(Node) as seen by the device compiler.
    // BUG FIX: sizeof yields size_t; printing it with %d is undefined
    // behavior on 64-bit builds — cast and use %llu (supported by device
    // printf).
    printf("Node's size in GPU: %llu", (unsigned long long)sizeof(Node));
}
int main()
{
    // Report the struct size as compiled for the host, then for the device.
    std::cout << "Node's size in CPU:" << sizeof(Node) << std::endl;
    testKer<<<1, 1>>>();
    // Flushes device printf output and tears the context down.
    cudaDeviceReset();
    return 0;
}
|
8,320 | /*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: thrust_test.cu
Language: CUDA
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
/** \class thrust_test.cu
* \brief Cuda kernel code
* \author Phillip Ward, Luke Parkinson, Daniel Micevski, Christopher
* Share, Victorian Partnership for Advanced Computing (VPAC).
* Richard Beare, Monash University
*/
// #include <thrust/host_vector.h>
// #include <thrust/device_vector.h>
// #include <thrust/generate.h>
// #include <thrust/reduce.h>
// #include <thrust/functional.h>
// #include <cstdlib>
int main(void)
{
    // Minimal smoke test: one device allocation (the Thrust exercise is
    // commented out pending its headers being re-enabled).
    int *dInt;
    cudaMalloc(&dInt, sizeof(int)*1);
    // generate random data on the host
    //thrust::host_vector<int> h_vec(100);
    //thrust::generate(h_vec.begin(), h_vec.end(), rand);
    // transfer to device and compute sum
    //thrust::device_vector<int> d_vec(100);
    //int x = thrust::reduce(d_vec.begin(), d_vec.end(), 0, thrust::plus<int>());
    // BUG FIX: the device allocation was leaked.
    cudaFree(dInt);
    return 0;
}
|
8,321 | #include <cstdio>
#include <climits>
#define SERIAL_SCALE 2
#define SERIAL_PART (1<<SERIAL_SCALE)
extern "C" {
__global__
void kernelMain(int *input, int *output){
    // Block-wide minimum: each thread first reduces SERIAL_PART consecutive
    // inputs serially, then a shared-memory inclusive min-scan folds the
    // per-thread minima so mem[1023] holds the block minimum.
    // Assumes blockDim.x == 1024 (shared array size and final-thread check).
    int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
    __shared__ int mem[1024];
    int m=input[thid*SERIAL_PART];
    for(unsigned int i=1;i<SERIAL_PART;++i)
    {
        int t=input[thid*SERIAL_PART+i];
        if(t<m)
            m=t;
    }
    mem[threadIdx.x]=m;
    __syncthreads();
    for(unsigned int shift=1;shift<1024;shift*=2)
    {
        int val=mem[threadIdx.x];
        if(threadIdx.x>=shift)
        {
            if(val>mem[threadIdx.x-shift])
                val=mem[threadIdx.x-shift];
        }
        __syncthreads();
        mem[threadIdx.x]=val;
        // BUG FIX: a second barrier is required after the write; without it
        // the next iteration's reads of mem[] race with this iteration's
        // writes from other threads.
        __syncthreads();
    }
    if(threadIdx.x==1023)
        output[blockIdx.x]=mem[1023];
}
__global__ void kernelPrepare(int *input, int *output, int* args)
{
    // Serial pre-reduction: each thread computes the minimum of one chunk of
    // `count` consecutive inputs (clipped to n) and writes it to output.
    // args[0] = chunk length, args[1] = total element count.
    const unsigned int chunk = args[0];
    const unsigned int total = args[1];
    const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    const unsigned int base = tid * chunk;
    // Threads whose chunk starts past the end have nothing to do.
    if (base >= total)
        return;
    int best = input[base];
    for (unsigned int off = 1; off < chunk && base + off < total; ++off)
    {
        const int candidate = input[base + off];
        if (candidate < best)
            best = candidate;
    }
    output[tid] = best;
}
}
|
void u_init_jac(double ***u, int N, double start_T) {
  // Initialize the N^3 temperature grid for the Jacobi solver:
  // bottom face (y == 0) is held at 0, all other boundary faces at 20,
  // and the interior starts from the supplied initial temperature.
  int i, j, k;
#pragma omp parallel for default(none) \
        shared(u,N,start_T) \
        private(i,j,k) schedule(static)
  for (i = 0; i < N; ++i) {
    for (j = 0; j < N; ++j) {
      for (k = 0; k < N; ++k) {
        /* mapping: i -> z, j -> y, k -> x */
        const int on_boundary = (j == N - 1) || (i == 0) || (i == N - 1) ||
                                (k == 0) || (k == N - 1);
        if (j == 0) {
          u[i][j][k] = 0.0;
        } else if (on_boundary) {
          u[i][j][k] = 20.0;
        } else {
          u[i][j][k] = start_T;
        }
      }
    }
  }
}
void f_init_jac(double ***f, int N) {
  // Initialize the source term for the Jacobi solver: 200 inside the
  // heater box (-2/3 <= z <= 0, -1 <= y <= -1/2, -1 <= x <= -3/8 expressed
  // as fractions of the (N-2)^3 interior), 0 everywhere else.
  int i, j, k;
#pragma omp parallel for default(none) \
        shared(f,N) \
        private(i,j,k) schedule(static)
  for (i = 1; i < N - 1; ++i) {
    for (j = 1; j < N - 1; ++j) {
      for (k = 1; k < N - 1; ++k) {
        const int inside =
            i >= (N - 2) / 6.0 && i < 0.5 * (N - 2) &&
            j >= 0 && j <= 0.25 * (N - 2) &&
            k >= 0 && k <= 5.0 * (N - 2) / 16.0;
        f[i][j][k] = inside ? 200.0 : 0.0;
      }
    }
  }
}
|
8,323 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#define THREAD_PER_BLOCK 512
__global__ void saxpy(int N, double *x, double *y, double *z)
{
    // z[i] = x[i] + y[i] + 1, recomputed 1000 times per element
    // (the repeat loop is deliberate, artificial work for benchmarking).
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N)
        return;
    for (int iter = 0; iter < 1000; ++iter) {
        z[idx] = x[idx] + y[idx];
        z[idx] += 1;
    }
}
// Timing harness for the saxpy kernel: allocates, initializes, copies,
// launches, copies back, and reports the wall-clock cost (ms) of each phase.
int main(void)
{
clock_t start_time = clock();
int N = 1<<20;                     // 1M elements
double *x, *y, *z, *d_x, *d_y, *d_z;
int sum = 0;                       // note: truncates the double results
// Host allocation.
clock_t cpumemalloc_start = clock();
x = (double*)malloc(N*sizeof(double));
y = (double*)malloc(N*sizeof(double));
z = (double*)malloc(N*sizeof(double));
clock_t cpumemalloc_end = clock();
float time_cpumemalloc = 1000*(((double)(cpumemalloc_end - cpumemalloc_start))/CLOCKS_PER_SEC);
printf("Time consumed cpu mem alloc: %f millsec \n", time_cpumemalloc);
// Empty interval: measures timer overhead only.
clock_t deviceset_start = clock();
clock_t deviceset_end = clock();
float time_deviceset = 1000*(((double)(deviceset_end - deviceset_start))/CLOCKS_PER_SEC);
printf("Time consumed deviceset alloc: %f millsec \n", time_deviceset);
// Device allocation.
clock_t gpumemallo_start = clock();
cudaMalloc(&d_x, N*sizeof(double));
cudaMalloc(&d_y, N*sizeof(double));
cudaMalloc(&d_z, N*sizeof(double));
clock_t gpumemallo_end = clock();
float time_gpumemallo = 1000*(((double)(gpumemallo_end - gpumemallo_start))/CLOCKS_PER_SEC);
printf("Time consumed gpu mem alloc: %f millsec \n", time_gpumemallo);
// Host-side initialization.
clock_t value_assign_start = clock();
for (int i = 0; i < N; i++) {
x[i] = 1.0;
y[i] = 2.0;
}
clock_t value_assign_end = clock();
float time_value_assign = 1000*(((double)(value_assign_end - value_assign_start))/CLOCKS_PER_SEC);
printf("Time consumed value_assign alloc: %f millsec \n", time_value_assign);
// Host-to-device copies.
clock_t gpumemcpy_start = clock();
cudaMemcpy(d_x, x, N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(double), cudaMemcpyHostToDevice);
clock_t gpumemcpy_end = clock();
float time_gpucpy = 1000*(((double)(gpumemcpy_end - gpumemcpy_start))/CLOCKS_PER_SEC);
printf("Time consumed gpu memcpy: %f millsec \n", time_gpucpy);
clock_t gpucom_start = clock();
// Perform SAXPY on 1M elements
saxpy<<<(N+THREAD_PER_BLOCK-1)/THREAD_PER_BLOCK,THREAD_PER_BLOCK>>>(N, d_x, d_y, d_z);
// FIX: kernel launches are asynchronous — without this sync the interval
// below measured only launch overhead, not the actual GPU work.
cudaDeviceSynchronize();
clock_t gpucom_end = clock();
float time_gpucom = 1000*(((double)(gpucom_end - gpucom_start))/CLOCKS_PER_SEC);
printf("Time consumed gpu compute: %f millsec \n", time_gpucom);
// Device-to-host copy.
clock_t gpumemcpyback_start = clock();
cudaMemcpy(z, d_z, N*sizeof(double), cudaMemcpyDeviceToHost);
clock_t gpumemcpyback_end = clock();
float time_gpumemcpyback = 1000*(((double)(gpumemcpyback_end - gpumemcpyback_start))/CLOCKS_PER_SEC);
printf("Time consumed gpu memcpy back: %f millsec \n", time_gpumemcpyback);
// Host-side checksum of the result.
clock_t valuesum_start = clock();
for (int i = 0; i < N; ++i) {
sum += z[i];
}
printf("Sum: %d \n", sum);
clock_t valuesum_end = clock();
// FIX: this interval previously reused gpumemcpy_{start,end}, so it
// reported the H2D copy time instead of the summation time.
float time_valuesum = 1000*(((double)(valuesum_end - valuesum_start))/CLOCKS_PER_SEC);
printf("Time consumed gpu valuesum: %f millsec \n", time_valuesum);
// Cleanup.
clock_t gpumemfree_start = clock();
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
clock_t gpumemfree_end = clock();
float time_gpumemfree = 1000*(((double)(gpumemfree_end - gpumemfree_start))/CLOCKS_PER_SEC);
printf("Time consumed gpu mem free: %f millsec \n", time_gpumemfree);
clock_t memfree_start = clock();
free(x);
free(y);
free(z);
clock_t memfree_end = clock();
float time_memfree = 1000*(((double)(memfree_end - memfree_start))/CLOCKS_PER_SEC);
printf("Time consumed cpu mem free: %f millsec \n", time_memfree);
clock_t end_time = clock();
float time_c = 1000*(((double)(end_time - start_time))/CLOCKS_PER_SEC);
// time_c in milliseconds
printf("Time consumed: %f millsec \n", time_c);
return 0;
}
|
8,324 | #include "includes.h"
// Brute-force descriptor matcher: each block scores M2W reference points of
// d_pts1 (starting at bp1) against all NPTS candidates in d_pts2 using
// float4 dot products, and keeps the best-scoring candidate per reference.
// Layout: blockDim = (M2W, M2H); buffer1 rows are padded by one float4
// (NDIM/4 + 1) to reduce shared-memory bank conflicts.
// NOTE(review): M2W, M2H, NDIM, NPTS come from includes.h — the code assumes
// they divide the data evenly; confirm there.
__global__ void Match4(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
__shared__ float4 buffer1[M2W*(NDIM/4 + 1)]; //%%%%
__shared__ float4 buffer2[M2H*NDIM/4]; //%%%%
__shared__ float scores[M2W*M2H];
int tx = threadIdx.x;
int ty = threadIdx.y;
int idx = tx + M2W*ty;
int bp1 = M2W*blockIdx.x;
// Stage this block's M2W reference descriptors into shared memory.
if (ty<M2W)
for (int d=tx;d<NDIM/4;d+=M2W)
for (int j=ty;j<M2W;j+=M2H)
buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d]; //%%%%
__syncthreads();
float max_score = 0.0f;
int index = -1;
// Sweep the candidate set, M2H descriptors at a time.
for (int bp2=0;bp2<NPTS;bp2+=M2H) {
for (int d=tx;d<NDIM/4;d+=M2W)
buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d]; //%%%%
__syncthreads();
// Dot product of reference tx against candidate ty (4 floats per step).
float score = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1 = buffer1[tx*(NDIM/4 + 1) + d]; //%%%%
float4 v2 = buffer2[ty*(NDIM/4) + d]; //%%%%
score += v1.x*v2.x; score += v1.y*v2.y;
score += v1.z*v2.z; score += v1.w*v2.w;
}
scores[idx] = score;
__syncthreads();
// Row ty==0 reduces the M2H candidate scores for its reference point.
if (ty==0) {
for (int i=0;i<M2H;i++) {
if (scores[i*M2W + tx]>max_score) {
max_score = scores[i*M2W + tx];
index = bp2 + i;
}
}
}
__syncthreads();
}
// Publish best score/index for each of the block's M2W reference points.
if (ty==0) {
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
} |
8,325 | #include <stdio.h>
#include <stdlib.h>
// Load a rows x 10 weight matrix from "WMatrix.txt" (comma-separated, 10
// values per line) into CUDA managed memory so both host and device kernels
// can read it. Returns the array of row pointers.
// NOTE(review): no NULL/failure check on fopen or fscanf — a missing data
// file crashes on the first fscanf; confirm WMatrix.txt is deployed, and
// that cols == 10 (the format string is hard-coded to 10 fields).
__host__ double** getWeightMatrix(int rows, int cols)
{
FILE *myFile;
myFile = fopen("WMatrix.txt", "r");
double** mat;
// Managed allocation: one pointer per row, then one cols-double row each.
cudaMallocManaged(&mat, rows * sizeof(double*));
for (int i = 0; i < rows; i++)
{
cudaMallocManaged(&(mat[i]), cols * sizeof(double));
fscanf(myFile, "%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,",
&(mat[i][0]), &(mat[i][1]), &(mat[i][2]), &(mat[i][3]), &(mat[i][4]),
&(mat[i][5]), &(mat[i][6]), &(mat[i][7]), &(mat[i][8]), &(mat[i][9])
);
}
fclose(myFile);
return mat;
}
// Load a 10-element bias vector from "bVector.txt" (comma-separated) into
// CUDA managed memory.
// NOTE(review): as with getWeightMatrix, fopen/fscanf results are unchecked
// and the format string is hard-coded to 10 fields regardless of cols.
__host__ double* getBVector(int cols)
{
FILE *myFile;
myFile = fopen("bVector.txt", "r");
double* vector;
cudaMallocManaged(&vector, cols * sizeof(double));
fscanf(myFile, "%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,%lf,",
&(vector[0]), &(vector[1]), &(vector[2]), &(vector[3]), &(vector[4]),
&(vector[5]), &(vector[6]), &(vector[7]), &(vector[8]), &(vector[9])
);
fclose(myFile);
return vector;
}
// Print a rows x cols matrix, one bracketed line per row with each element
// formatted to 10 decimal places, followed by a trailing blank line.
__host__ void printMatrix(double **mat, int rows, int cols)
{
for (int r = 0; r < rows; r++)
{
printf("[");
for (int c = 0; c < cols; c++)
printf("%0.10lf\t", mat[r][c]);
printf("]\n");
}
printf("\n");
}
// Read test sample `indice` from mnist_test.csv: skip `indice` lines, then
// parse the label followed by `rows` comma-separated pixel values into
// *label and vector[0..rows-1].
// NOTE(review): `cols` is unused; the CSV is assumed to hold one sample per
// line with the label first — confirm the file format. fopen is unchecked.
__host__ void getMNISTTest(int indice, int* label, int* vector, int rows, int cols)
{
if (indice > 10000)
{
printf("Solo hay 10,000 datos de test !");
return;
}
FILE *myFile;
myFile = fopen("mnist_test.csv", "r");
// Skip the first `indice` lines.
for (int i = 0; i < indice; i++)
{
fscanf(myFile, "%*[^\n]\n");
}
fscanf(myFile, "%d", label);
fscanf(myFile, ",");
for (int j = 0; j < rows; j++)
{
fscanf(myFile, "%d,", &(vector[j]));
}
fclose(myFile);
}
// Render a 28-pixel-wide ASCII image: "--" for zero pixels and the label
// digit doubled (e.g. "33") for non-zero pixels; newline every 28 pixels.
__host__ void printImage(int label, int* vector, int rows)
{
for (int idx = 0; idx < rows; idx++)
{
if (vector[idx] == 0)
{
printf("--");
}
else
{
printf("%d%d", label, label);
}
if ((idx + 1) % 28 == 0)
{
printf("\n");
}
}
printf("\n");
}
// Return (as a double, to match the original signature) the index of the
// LAST occurrence of the maximum element in y[0..cols-1]; the `>=`
// comparison means ties resolve to the highest index.
__host__ double getMaxIndex(double* y, int cols)
{
double best = y[0];
int bestIdx = -1;     // i == 0 always satisfies y[0] >= best, so this becomes >= 0
for (int idx = 0; idx < cols; idx++)
{
if (y[idx] >= best)
{
best = y[idx];
bestIdx = idx;
}
}
return bestIdx;
}
// Vector-matrix product: WX[j] = sum_k X[k] * W[k][j], where `cols` is the
// length of X (i.e. the number of rows of W). Launched as
// <<<1, dim3(1, outDim)>>>, so threadIdx.y selects the output element.
// NOTE(review): the trailing __syncthreads() is unnecessary (each thread
// writes only its own WX[j]) but harmless; preserved as-is.
__global__ void productMatrixVectorKernel(int* X, double** W, double* WX, int cols)
{
int j = threadIdx.y;
double sum = 0;
for (int k = 0; k < cols; k++)
{
sum += X[k] * W[k][j];
}
WX[j] = sum;
__syncthreads();
}
// Element-wise vector add y[j] = WX[j] + b[j]; threadIdx.y indexes the
// element to match the <<<1, dim3(1, cols)>>> launch used by the caller.
__global__ void SumVectorVectorKernel(double* WX, double* b, double* y)
{
const int j = threadIdx.y;
const double s = WX[j] + b[j];
y[j] = s;
__syncthreads();
}
// Interactive driver: loads weights/bias once, then repeatedly reads a test
// index from stdin, runs y = W*x + b on the GPU, and prints the argmax
// prediction next to the true label plus an ASCII rendering of the digit.
// NOTE(review): the loop has no exit path — resources are reclaimed only
// when the process is killed.
int main()
{
int rows = 784;
int cols = 10;
int indexTest = 44;
double** W = getWeightMatrix(rows, cols);
double* b = getBVector(cols);
int label;
int* X;
cudaMallocManaged(&X, rows * sizeof(int));
double* WX;
cudaMallocManaged(&WX, cols * sizeof(double));
double* y;
cudaMallocManaged(&y, cols * sizeof(double));
int prediction;
while(1)
{
printf("Indice del Data Set: ");
scanf("%d", &indexTest);
if (indexTest > 10000)
{
printf("Solo hay 10,000 datos\n\n");
continue;
}
getMNISTTest(indexTest, &label, X, rows, cols);
// One block, `cols` threads along y (both kernels index via threadIdx.y).
dim3 blocksPerGrid(1);
dim3 threadsPerBlock(1, cols);
productMatrixVectorKernel<<< blocksPerGrid, threadsPerBlock >>>(X, W, WX, rows);
cudaDeviceSynchronize();
SumVectorVectorKernel<<< blocksPerGrid, threadsPerBlock >>>(WX, b, y);
cudaDeviceSynchronize();
prediction = getMaxIndex(y, cols);
printf("Original %d, Prediccion %d\n", label, prediction);
printImage(label, X, rows);
}
}
|
8,326 | #include <stdio.h>
#include <time.h>
#define NUM_ELEMS 16
#define BLOCK_SIZE 4
// Element-wise product c[i] = a[i] * b[i], staged through shared memory
// (one element per thread) purely to demonstrate shared-memory use.
__global__ void multiply(int *a, int *b, int *c) {
__shared__ int products[BLOCK_SIZE];
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
products[threadIdx.x] = a[gid] * b[gid];
c[gid] = products[threadIdx.x];
}
// Demo driver: multiplies two 16-element vectors on the GPU, prints the
// products, and reports GPU event time plus CPU time (both in ms).
int main() {
//device memory
int *device1, *device2, *device3;
//host memory
int host1[NUM_ELEMS];
int host2[NUM_ELEMS];
int output[NUM_ELEMS];
size_t numBytes = NUM_ELEMS * sizeof(int);
int i = 0; //loop counter
clock_t cpu_time = clock(); //Start CPU clock.
float time = 0.0f;          // accumulates milliseconds
cudaEvent_t start, stop;
//Start GPU clock.
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//Load host1 and host2 with values.
for (i = 0; i < NUM_ELEMS; i++) {
host1[i] = i+1;
host2[i] = i+5;
}
//Allocate memory for device vars.
cudaMalloc((void **)&device1, numBytes);
cudaMalloc((void **)&device2, numBytes);
cudaMalloc((void **)&device3, numBytes);
//Transfer values from host to device.
cudaMemcpy(device1, host1, numBytes, cudaMemcpyHostToDevice);
cudaMemcpy(device2, host2, numBytes, cudaMemcpyHostToDevice);
//Launch multiply kernel: NUM_ELEMS/BLOCK_SIZE blocks of BLOCK_SIZE
//threads (NUM_ELEMS is an exact multiple of BLOCK_SIZE).
multiply <<<NUM_ELEMS/BLOCK_SIZE,BLOCK_SIZE>>>(device1, device2, device3);
//Get result from device to host (blocking copy also syncs the kernel).
cudaMemcpy(output, device3, numBytes, cudaMemcpyDeviceToHost);
//Stop GPU clock - determine how long GPU kernel took to run.
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);   // milliseconds
//Print out values.
for (i = 0; i < NUM_ELEMS; i++) {
printf("%d * %d = %d\n", host1[i], host2[i], output[i]);
}
//Free all variables.
cudaFree(device1);
cudaFree(device2);
cudaFree(device3);
// FIX: events were never destroyed in the original.
cudaEventDestroy(start);
cudaEventDestroy(stop);
//Calculate total runtime.
cpu_time = clock() - cpu_time; //CPU time
// FIX: cudaEventElapsedTime is in milliseconds but the original added CPU
// time in SECONDS; convert the CPU interval to ms before summing.
time += 1000.0f * ((float)cpu_time) / CLOCKS_PER_SEC;
printf("%f\n", time); //print out total runtime (ms)
return 0;
}
|
8,327 | /***************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
***************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include "util.h"
// Histogram pass 1: estimate the input value range. Each block samples the
// first 1/8th of its stride, computes a per-thread mean and standard
// deviation, tree-reduces them in shared memory, and atomically widens
// minmax[0]/minmax[1] to (mean -/+ 10 sigma) / (KB*1024).
// NOTE(review): PRESCAN_THREADS and KB are defined in util.h.
__global__ void histo_prescan_kernel (unsigned int* input, int size, unsigned int* minmax)
{
__shared__ float Avg[PRESCAN_THREADS];
__shared__ float StdDev[PRESCAN_THREADS];
int stride = size/gridDim.x;
int addr = blockIdx.x*stride+threadIdx.x;
int end = blockIdx.x*stride + stride/8; // Only sample 1/8th of the input data
// Compute the average per thread
float avg = 0.0;
unsigned int count = 0;
while (addr < end){
avg += input[addr];
count++;
addr += blockDim.x;
}
avg /= count;
Avg[threadIdx.x] = avg;
// Compute the standard deviation per thread
int addr2 = blockIdx.x*stride+threadIdx.x;
float stddev = 0;
while (addr2 < end){
stddev += (input[addr2]-avg)*(input[addr2]-avg);
addr2 += blockDim.x;
}
stddev /= count;
StdDev[threadIdx.x] = sqrtf(stddev);
// Pairwise shared-memory reduction step used by the tree below.
#define SUM(stride__)\
if(threadIdx.x < stride__){\
Avg[threadIdx.x] += Avg[threadIdx.x+stride__];\
StdDev[threadIdx.x] += StdDev[threadIdx.x+stride__];\
}
// Add all the averages and standard deviations from all the threads
// and take their arithmetic average (as a simplified approximation of the
// real average and standard deviation.
#if (PRESCAN_THREADS >= 32)
for (int stride = PRESCAN_THREADS/2; stride >= 32; stride = stride >> 1){
__syncthreads();
SUM(stride);
}
#endif
// NOTE(review): the SUM steps below run without __syncthreads(), relying
// on warp-synchronous execution of the last 32 threads — this is unsafe
// under Volta+ independent thread scheduling; confirm target architecture.
#if (PRESCAN_THREADS >= 16)
SUM(16);
#endif
#if (PRESCAN_THREADS >= 8)
SUM(8);
#endif
#if (PRESCAN_THREADS >= 4)
SUM(4);
#endif
#if (PRESCAN_THREADS >= 2)
SUM(2);
#endif
if (threadIdx.x == 0){
// The tree stops at two partials; fold them and normalize.
float avg = Avg[0]+Avg[1];
avg /= PRESCAN_THREADS;
float stddev = StdDev[0]+StdDev[1];
stddev /= PRESCAN_THREADS;
// Take the maximum and minimum range from all the blocks. This will
// be the final answer. The standard deviation is taken out to 10 sigma
// away from the average. The value 10 was obtained empirically.
atomicMin(minmax,((unsigned int)(avg-10*stddev))/(KB*1024));
atomicMax(minmax+1,((unsigned int)(avg+10*stddev))/(KB*1024));
}
}
|
8,328 | #include "includes.h"
// Softmax-loss gradient fix-up: for every sample in the batch, subtract 1
// from the probability slot of its ground-truth class (label is stored as
// a float and truncated to an int class index).
__global__ void SoftmaxLossBackprop( const float *label, int num_labels, int batch_size, float *diff ) {
const int sample = blockIdx.x * blockDim.x + threadIdx.x;
if( sample < batch_size ) {
const int cls = static_cast<int>(label[ sample ]);
diff[ sample * num_labels + cls ] -= 1.0f;
}
} |
8,329 | #include <cuda_runtime.h>
// vectorC[i] = vectorA[i] + vectorB[i] for every i < numElements.
__global__ void cudaVectorAddKernel(const int *vectorA, const int *vectorB, int *vectorC, int numElements)
{
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= numElements)
return;
vectorC[idx] = vectorA[idx] + vectorB[idx];
}
// Host wrapper: copies both input vectors to the device, runs the add
// kernel, and copies the result back into vectorC.
extern "C" void cudaVectorAdd(const int *vectorA, const int *vectorB, int *vectorC, int numElements)
{
int *cudaA, *cudaB, *cudaC;
cudaMalloc((void**)&cudaA, sizeof(int) * numElements);
cudaMalloc((void**)&cudaB, sizeof(int) * numElements);
cudaMalloc((void**)&cudaC, sizeof(int) * numElements);
cudaMemcpy(cudaA, vectorA, sizeof(int) * numElements, cudaMemcpyDefault);
cudaMemcpy(cudaB, vectorB, sizeof(int) * numElements, cudaMemcpyDefault);
int threads = 256;
int blocks = (numElements + threads - 1) / threads;
// FIX: the original launched <<<threads, blocks>>> — grid and block
// arguments were swapped, leaving elements uncovered for large inputs.
cudaVectorAddKernel <<< blocks, threads >>> (cudaA, cudaB, cudaC, numElements);
cudaMemcpy(vectorC, cudaC, sizeof(int) * numElements, cudaMemcpyDefault);
// FIX: cudaFree takes the device pointer itself, not its address; the
// original cudaFree((void**)&cudaA) freed nothing and leaked all three
// device buffers.
cudaFree(cudaA);
cudaFree(cudaB);
cudaFree(cudaC);
}
|
8,330 | /**
* Implementation of Hillis-Steele parallel prefix scan.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
/**
 * Hillis-Steele exclusive scan over a single block.
 * Preconditions: launched with ONE block of exactly dataSize threads and
 * 2 * dataSize * sizeof(T) bytes of dynamic shared memory (the kernel
 * double-buffers: indices [0, dataSize) and [dataSize, 2*dataSize)).
 */
template <typename T>
__global__ void exclusiveScanKernel(T *dataOut, const T *dataIn, const size_t dataSize) {
    extern __shared__ T shmData[];
    int thId = threadIdx.x;
    int bufA = 0, bufB = 1;
    // Shift the input right by one (exclusive scan) and seed BOTH halves of
    // the double buffer so the first ping-pong swap reads valid data.
    T seed = (thId == 0) ? 0 : dataIn[thId - 1];
    shmData[bufA * dataSize + thId] = seed;
    shmData[bufB * dataSize + thId] = seed;
    __syncthreads();
    for (int offset = 1; offset < dataSize; offset <<= 1) {
        // Ping-pong: write into one half while reading the other.
        bufA = 1 - bufA;
        bufB = 1 - bufA;
        if (thId >= offset) {
            shmData[bufA * dataSize + thId] = shmData[bufB * dataSize + thId] +
                                              shmData[bufB * dataSize + thId - offset];
        } else {
            shmData[bufA * dataSize + thId] = shmData[bufB * dataSize + thId];
        }
        __syncthreads();
    }
    // Write to output array.
    // FIX: removed the leftover per-thread debug printf, which serialized
    // every thread and spammed one line per element on every launch.
    dataOut[thId] = shmData[bufA * dataSize + thId];
}
//template <typename T>
//void launchScanKernel(T *h_dataOut, const T *h_dataIn, const size_t dataSize) {
/**
 * Host wrapper: runs the single-block exclusive scan on dataSize ints.
 * execTime (optional, may be nullptr) receives the kernel time in ms.
 */
void launchScanKernel(int *h_dataOut, const int *h_dataIn, const size_t dataSize, float *execTime = nullptr) {
    const size_t dataBytes = dataSize * sizeof(int);
    // Declare GPU memory pointers
    int *d_dataOut, *d_dataIn;
    // Set up GPU timers
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Allocate GPU memory
    cudaMalloc((void **) &d_dataOut, dataBytes);
    cudaMalloc((void **) &d_dataIn, dataBytes);
    cudaMemcpy(d_dataIn, h_dataIn, dataBytes, cudaMemcpyHostToDevice);
    // Execute kernel and record runtime
    const unsigned int blocks = 1;
    const unsigned int threads = (dataSize) / blocks;
    cudaEventRecord(start);
    // FIX: the kernel double-buffers in shared memory (it indexes up to
    // 2*dataSize - 1), so it needs 2*dataBytes of dynamic shared memory;
    // the original passed only dataBytes and read/wrote out of bounds.
    exclusiveScanKernel<<<blocks, threads, 2 * dataBytes>>>(d_dataOut, d_dataIn, dataSize);
    cudaEventRecord(stop);
    // Copy back from GPU to CPU
    cudaMemcpy(h_dataOut, d_dataOut, dataBytes, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float timeInMs = 0;
    cudaEventElapsedTime(&timeInMs, start, stop);
    // FIX: execTime defaults to nullptr — guard before dereferencing.
    if (execTime != nullptr) {
        *execTime = timeInMs;
    }
    // FIX: destroy the timing events (they leaked in the original).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Free memory
    cudaFree(d_dataIn);
    cudaFree(d_dataOut);
}
//template void launchScanKernel<int>(int *h_dataOut, const int *h_dataIn, const size_t dataSize); |
8,331 | /******************************************
* Compile:
* nvcc 2.cu -o 2
* Run:
* ./2
*******************************************/
#include <stdio.h>
#include <sys/time.h>
#define height 256
#define width 256
#define filter_size 3
#define stride 1
#define pad 1
#define channels 3
#define block_size_x 32
#define block_size_y 32
#define GET_TIME(now) \
{ \
struct timeval t; \
gettimeofday(&t, NULL); \
now = t.tv_sec + t.tv_usec / 1000000.0; \
}
// Parallel matrix multiplication kernel
// GEMM helper: computes c = a (M x N) * b (N x K) with a flat 1-D launch;
// each thread owns one (row, column) start cell and strides columns by M.
// NOTE(review): `row = idx / M` and `column = idx % M` divide by M (the row
// count) rather than K (the column count) — combined with the do/while
// column stride this still covers cells for the caller's launch geometry,
// but verify the mapping before reusing with other shapes.
__global__ static void matMultCUDA(const float *a, const float *b, float *c, int M, int N, int K, int BLOCK_SIZE)
{
// Thread index within the block.
const int tid = threadIdx.x;
// Block index.
const int bid = blockIdx.x;
// Flattened global index, mapped to an output row/column pair.
const int idx = bid * BLOCK_SIZE + tid;
const int row = idx / M;
int column = idx % M;
do
{
// Standard inner product for one output cell.
if (row < M && column < K)
{
float t = 0;
for (int i = 0; i < N; i++)
{
t += a[row * N + i] * b[i * K + column];
}
c[row * K + column] = t;
}
column += M; // stride to this thread's next column
} while (column < K);
}
// Fetch pixel (row, col) of `channel` from the image, where coordinates are
// given in the PADDED frame: subtract `pad` and return 0 for locations that
// fall outside the real height x width image (zero padding).
float im2col_get_data(float *im, int row, int col, int channel){
const int r = row - pad;
const int c = col - pad;
if (r < 0 || c < 0 || r >= height || c >= width)
return 0;
// im[c + width*(r + height*channel)] == channel-major, row-major layout
return im[c + width*(r + height*channel)];
}
// Unroll the padded image into a matrix: each output row corresponds to one
// (channel, filter-row, filter-col) combination, each output column to one
// convolution output position, so convolution becomes a single GEMM.
void im2col(float* data_im, float* data_col) {
// Output tile geometry after padding and stride.
const int height_col = (height + 2*pad - filter_size) / stride + 1;
const int width_col = (width + 2*pad - filter_size) / stride + 1;
const int channels_col = channels * filter_size * filter_size;
for (int c = 0; c < channels_col; ++c) {
// Decompose the unrolled row index into (filter offset, source channel).
const int w_offset = c % filter_size;
const int h_offset = (c / filter_size) % filter_size;
const int c_im = c / filter_size / filter_size;
for (int h = 0; h < height_col; ++h) {
for (int w = 0; w < width_col; ++w) {
// Padded-frame coordinates of the sampled input pixel.
const int im_row = h_offset + h * stride;
const int im_col = w_offset + w * stride;
const int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_data(data_im, im_row, im_col, c_im);
}
}
}
}
// Driver: random image + filter, im2col on the host, GEMM on the GPU,
// result dumped to result.txt, total wall time printed.
int main()
{
double start, finish, time;
// Output tile geometry after padding/stride.
int height_col = (height + 2*pad - filter_size) / stride + 1;
int width_col = (width + 2*pad - filter_size) / stride + 1;
int channels_col = channels * filter_size * filter_size;
// Host buffers: input image, unrolled im2col matrix, one filter set.
float *im = (float *)malloc(height * width * channels * sizeof(float));
float *col = (float *)malloc(channels_col * height_col * width_col * sizeof(float));
float *filter = (float *)malloc(channels * filter_size * filter_size * sizeof(float));
// Random input and filter values in [0, 0.49].
for (int i = 0; i < height * width * channels ; i++)
{
im[i] = (float)(rand() % 50)/100;
}
for (int i = 0; i < filter_size * filter_size * channels; i++)
{
filter[i] = (float)(rand() % 50)/100;
}
GET_TIME(start);
im2col(im, col);
float *cuda_a, *cuda_b, *cuda_c;
float *c = (float *)malloc(channels * (width_col * height_col) * sizeof(float));
// Device buffers for filter (a), im2col matrix (b), and result (c).
cudaMalloc((void **)&cuda_a, sizeof(float) * channels * (filter_size * filter_size) );
cudaMalloc((void **)&cuda_b, sizeof(float) * channels_col * (width_col * height_col));
cudaMalloc((void **)&cuda_c, sizeof(float) * channels * (width_col * height_col));
cudaMemcpy(cuda_a, filter, sizeof(float) * channels * (filter_size * filter_size), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_b, col, sizeof(float) * channels_col * (width_col * height_col), cudaMemcpyHostToDevice);
int BLOCK_SIZE = height;
const int blocks_num = (channels * (width_col * height_col) + BLOCK_SIZE - 1) / BLOCK_SIZE;
// GEMM on the GPU: c (channels x out) = a (channels x ff) * b (ff x out).
// NOTE(review): b actually has channels_col (= channels*ff) rows while the
// GEMM is called with N = filter_size*filter_size — verify the intended
// per-channel layout before trusting the numerical result.
matMultCUDA<<<blocks_num, BLOCK_SIZE, 0>>>(cuda_a, cuda_b, cuda_c, channels, (filter_size * filter_size), (width_col * height_col), BLOCK_SIZE);
cudaMemcpy(c, cuda_c, sizeof(float) * channels * (width_col * height_col), cudaMemcpyDeviceToHost);
GET_TIME(finish);
time = finish - start;
// Dump the result to result.txt.
FILE *fp = fopen("result.txt", "w");
for (int i = 0; i < channels * (width_col * height_col); i++)
{
fprintf(fp, "%f ", c[i]);
}
// FIX: result.txt was never closed — buffered output could be lost.
fclose(fp);
//Free
cudaFree(cuda_a);
cudaFree(cuda_b);
cudaFree(cuda_c);
free(c);
free(im);
free(col);
// FIX: the filter buffer was leaked in the original.
free(filter);
printf("time: %f s\n\n", time);
return 0;
}
|
8,332 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
const unsigned int BLOCK_SIZE = 512;
// Histogram with per-block privatization: each block accumulates into a
// private histogram in dynamic shared memory (num_bins uints, sized by the
// launch), then merges into the global bins with one atomicAdd per bin.
__global__ void Histo_Kernel(unsigned int* input, unsigned int* bins, unsigned int num_elements,
unsigned int num_bins) {
extern __shared__ unsigned int histo_private[];
// Zero the private histogram cooperatively.
for (int bin = threadIdx.x; bin < num_bins; bin += blockDim.x) {
histo_private[bin] = 0;
}
__syncthreads();
// Grid-stride accumulation into the private histogram.
const int stride = blockDim.x * gridDim.x;
for (int pos = threadIdx.x + blockIdx.x * blockDim.x; pos < num_elements; pos += stride) {
atomicAdd(&(histo_private[(input[pos])]), 1);
}
__syncthreads();
// Merge the private histogram into the global one.
for (int bin = threadIdx.x; bin < num_bins; bin += blockDim.x) {
atomicAdd(&(bins[bin]), histo_private[bin]);
}
__syncthreads();
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
/******************************************************************************
Setup and invoke the histogram kernel.
FIX: the original computed ceil(num_elements/BLOCK_SIZE) — but the division
is INTEGER division, so it truncated before ceil() ever saw a fraction;
any num_elements < BLOCK_SIZE launched ZERO blocks and the kernel never ran.
Use integer ceiling division instead, with a floor of one block.
*******************************************************************************/
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
unsigned int num_bins) {
unsigned int numBlocks = (num_elements + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (numBlocks == 0) numBlocks = 1;
Histo_Kernel<<<numBlocks, BLOCK_SIZE,
sizeof(unsigned int)*num_bins>>>(input, bins, num_elements, num_bins);
}
|
8,333 | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <cuda_runtime.h>
#define N 1024 // vector size
#define TxB 32 // threads x block
/*
 * Kernel: element-wise vector sum c = a + b over the N global elements.
 */
__global__ void add_vect(int *a, int *b, int *c) {
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= N)
return;
c[i] = a[i] + b[i];
}
// Driver: build two random N-element vectors, add them on the GPU, print
// the sums, and release all host and device memory.
int main(void) {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
const int nBytes = N * sizeof(int);
// Host buffers.
a = (int *) malloc(nBytes);
b = (int *) malloc(nBytes);
c = (int *) malloc(nBytes);
// Device buffers.
cudaMalloc((void**) &dev_a, nBytes);
cudaMalloc((void**) &dev_b, nBytes);
cudaMalloc((void**) &dev_c, nBytes);
// Random single-digit operands on the host.
for (int i = 0; i < N; i++) {
a[i] = rand() % 10;
b[i] = rand() % 10;
}
// Stage the operands on the device.
cudaMemcpy(dev_a, a, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, nBytes, cudaMemcpyHostToDevice);
// N is an exact multiple of TxB, so this grid covers every element.
add_vect<<<N /TxB, TxB>>>(dev_a, dev_b, dev_c);
// Fetch and display the result.
cudaMemcpy(c, dev_c, nBytes, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%d\n", c[i]);
}
// Release host memory.
free(a);
free(b);
free(c);
// Release device memory.
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
8,334 | #include <stdio.h>
#include <iostream>
#include <iostream>
#include <fstream>
#include <random>
#define WIDTH 8192
#define LENGHT 8192
#define N_PARTICLES 5000
#define INF 999999.999
#define RADIO 100
#define CELLS_FOR_THREAD 8
using namespace std;
// __constant__ float x_part_dev[N_PARTICLES];
// __constant__ float y_part_dev[N_PARTICLES];
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
cout << cudaGetErrorString(error) << endl; \
} \
} while (0)
__global__
// Block-level MIN reduction: each block reduces 2*blockDim.x consecutive
// inputs (first combine happens during the load) and writes its minimum to
// out[blockIdx.x]. Assumes blockDim.x == 256 (matches the shared array).
void minReduction(float *in, float *out)
{
__shared__ float sharedData[256];
const unsigned int tid = threadIdx.x;
const unsigned int i = blockIdx.x*(blockDim.x*2) + tid; // blockSize = 256
// FIX: the original ADDED the two loaded elements (the sum-reduction
// idiom); a min reduction must keep the smaller of the pair.
const float lo = in[i];
const float hi = in[i + blockDim.x];
sharedData[tid] = (lo < hi) ? lo : hi;
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 32; s >>= 1) {
if (tid < s)
{
sharedData[tid] = (sharedData[tid] < sharedData[tid+s]) ? sharedData[tid] : sharedData[tid+s];
}
__syncthreads();
}
if (tid < 32)
{
// FIX: volatile stops the compiler caching shared values in registers
// during this warp-synchronous tail (there is no __syncthreads here).
volatile float *v = sharedData;
v[tid] = (v[tid] < v[tid+32]) ? v[tid] : v[tid+32];
v[tid] = (v[tid] < v[tid+16]) ? v[tid] : v[tid+16];
v[tid] = (v[tid] < v[tid+8]) ? v[tid] : v[tid+8];
v[tid] = (v[tid] < v[tid+4]) ? v[tid] : v[tid+4];
v[tid] = (v[tid] < v[tid+2]) ? v[tid] : v[tid+2];
v[tid] = (v[tid] < v[tid+1]) ? v[tid] : v[tid+1];
}
if (tid == 0)
{
out[blockIdx.x] = sharedData[0];
}
}
// Pseudo-random float uniformly distributed in [min, max], driven by rand().
float random_float(float min, float max) {
    const float unit = (float)rand() / RAND_MAX;   // in [0, 1]
    return min + unit * (max - min);
}
// Driver: fill a WIDTH*LENGHT grid with 1.0f except one fixed cell (which
// gets a LARGER random value in [2, 19.99] — so the reported minimum is the
// 1.0f background), then run a two-stage GPU min-reduction over
// CELLS_FOR_THREAD sequential chunks of the grid.
// NOTE(review): `chunk` holds gridSize floats, but minReduction launched
// with <<<gridSize, blockSize>>> reads indices up to ~gridSize*blockSize*2
// — this looks like a large out-of-bounds read, and the per-iteration
// memcpy likewise copies only gridSize floats. Verify the intended chunk
// geometry before trusting the result.
int main(int argc, char *argv[]){
// Load data
string input_file_name;
/*
if (argc > 1) {
input_file_name = argv[1];
} else {
cout << "faltó un argumento" << endl;
exit(0);
}
ifstream infile;
cout << input_file_name.c_str() << endl;
infile.open(input_file_name.c_str());
*/
int nP = WIDTH*LENGHT;
float *cells;
// infile >> nP;
// cout << "nP: "<<nP << endl;
cells = (float *)malloc(WIDTH*LENGHT * sizeof(float));
//cells = (float*)malloc(nP*sizeof(float));
// Single distinguished cell; every other cell holds the 1.0f background.
int target_min_pos = 1000000;
for (int i = 0; i<nP; i++){
if (i != target_min_pos){
cells[i] = 1.0f;
} else {
cells[i] = random_float(2.0f, 19.99f);
}
}
// Get memory for structures
float *chunk,*outData,*out2,y[128];
// Define sizes of GPU
int blockSize = 256; // # threads
int gridSize = ((WIDTH*LENGHT)/256)/CELLS_FOR_THREAD; // # blocks
cout << "gridSize: " << gridSize << endl;
// Get memory in GPU for structures
// data for charge function
CUDA_CHECK(cudaMalloc(&chunk, gridSize*sizeof(float))); // 1D array representation for grid 2D
// data for reduction function
CUDA_CHECK(cudaMalloc(&outData, gridSize*sizeof(float)));
CUDA_CHECK(cudaMalloc(&out2, (gridSize/blockSize)*sizeof(float)));
float min = INF;
float *aux;
aux = (float*)malloc(gridSize*sizeof(float));
// Search min load: process the grid one chunk per iteration.
for (size_t i = 0; i < CELLS_FOR_THREAD; i++) {
memcpy(aux, cells + i*gridSize, gridSize * sizeof(float));
// Copy data from CPU to GPU
CUDA_CHECK(cudaMemcpy(chunk, aux, gridSize*sizeof(float), cudaMemcpyHostToDevice));
minReduction<<<gridSize,blockSize>>>(chunk,outData); // outData lenght 32.768
cudaDeviceSynchronize();
// check for errors
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
}
// Second stage reduces the per-block minima down to 128 values.
minReduction<<<gridSize/blockSize,blockSize>>>(outData,out2); // out2 lenght 128
cudaDeviceSynchronize();
cudaMemcpy(y, out2, 128*sizeof(float), cudaMemcpyDeviceToHost);
// check for errors
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
}
// min load: final host-side fold over the 128 partial minima.
for (size_t i = 0; i < 128; i++) {
min = (y[i]<min)?y[i]:min;
}
cout << min << endl;
}
cudaFree(chunk);
cudaFree(outData);
cudaFree(out2);
free(cells);
free(aux);
return 0;
}
|
8,335 | #include <cuda.h>
#include <stdio.h>
#define N (32)
#define BLOCK (8)
// One thread per element of the N x N matrices: c = a + b.
// Assumes the 2-D grid exactly covers the N x N domain (N % BLOCK == 0).
__global__ void
matrix_add(const float *a, const float *b, float *c) {
const int col = threadIdx.x + blockIdx.x*blockDim.x;
const int row = threadIdx.y + blockIdx.y*blockDim.y;
const int idx = row*N + col;
c[idx] = a[idx] + b[idx];
}
// Dump a w x h matrix, one row per line, each element as "%02.0f ".
void
print_matrix(const float *m, const int w, const int h) {
for (int row = 0; row != h; ++row) {
for (int col = 0; col != w; ++col)
printf("%02.0f ", m[row*w + col]);
printf("\n");
}
}
// Allocate a zero-initialized w x h float matrix in device memory.
void
create_matrix_d(float **m, int w, int h) {
const size_t bytes = (size_t)w * h * sizeof(float);
cudaMalloc(m, bytes);
cudaMemset(*m, 0, bytes);
}
// Allocate a zero-initialized w x h float matrix in host memory.
void
create_matrix_h(float **m, int w, int h) {
const size_t bytes = (size_t)w * h * sizeof(float);
*m = (float *) malloc(bytes);
memset(*m, 0, bytes);
}
// Demo driver: adds two N x N matrices on the GPU and prints all three.
int
main(void) {
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int x, y;
create_matrix_h(&h_a, N, N);
create_matrix_h(&h_b, N, N);
create_matrix_h(&h_c, N, N);
create_matrix_d(&d_a, N, N);
create_matrix_d(&d_b, N, N);
create_matrix_d(&d_c, N, N);
// a holds the element index, b holds all ones.
for (y = 0; y != N; ++y) {
for (x = 0; x != N; ++x) {
h_a[y*N + x] = y*N + x;
h_b[y*N + x] = 1;
}
}
cudaMemcpy(d_a, h_a, N*N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, N*N*sizeof(float), cudaMemcpyHostToDevice);
// BLOCK x BLOCK threads per block; N/BLOCK blocks per dimension.
dim3 dimb(BLOCK, BLOCK);
dim3 dimg(N/BLOCK, N/BLOCK);
matrix_add<<<dimg, dimb>>>(d_a, d_b, d_c);
cudaMemcpy(h_c, d_c, N*N*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
printf("first matrix\n");
print_matrix(h_a, N, N);
printf("second matrix\n");
print_matrix(h_b, N, N);
printf("resultant matrix\n");
print_matrix(h_c, N, N);
// FIX: the host matrices were leaked in the original.
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
8,336 | #include <cstdio>
#include <cstdlib>
#include <ctime>
#include <cstring>
#include <algorithm>
#include <cmath>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define PROGRAM_STATE 1
#define HARD_DBG 2
#define DBG 1
#define VECTOR_SIZE_STD 4194304*2 ///100 000 000
#define VECTOR_SIZE_DBG 4194304*2*2 // 128
#define VECTOR_MAX_NUMBER_STD 999 //1000000
#define VECTOR_MAX_NUMBER_DBG 100 // 100
#define THREADS_PER_BLOCK_STD 512
#define THREADS_PER_BLOCK_DBG 128 // 32
#define VECTOR_LENGTH_PER_THREAD_STD 32
#define VECTOR_LENGTH_PER_THREAD_DBG 2 //2
#define VECTOR_SIZE ((PROGRAM_STATE >= DBG) ? VECTOR_SIZE_DBG : VECTOR_SIZE_STD)
#define MAX_NUMBER ((PROGRAM_STATE >= DBG) ? VECTOR_MAX_NUMBER_DBG : VECTOR_MAX_NUMBER_STD)
#define THREADS_PER_BLOCK ((PROGRAM_STATE >= DBG) ? THREADS_PER_BLOCK_DBG : THREADS_PER_BLOCK_STD)
#define VECTOR_LENGTH_PER_THREAD ((PROGRAM_STATE >= DBG) ? VECTOR_LENGTH_PER_THREAD_DBG : VECTOR_LENGTH_PER_THREAD_STD)
/* cuda errors */
/* Report a CUDA failure: print the label, code, and message to stderr,
 * release dev_input if one was supplied, and return true. Returns false
 * (doing nothing) on cudaSuccess. */
bool checkForError(const cudaError_t cudaStatus, const char text[], short* dev_input) {
	if (cudaStatus == cudaSuccess) {
		return false;
	}
	fprintf(stderr, "\n\n%s \nError code: %d \nStatus: %s \n\n", text, cudaStatus, cudaGetErrorString(cudaStatus));
	if (dev_input != NULL) {
		cudaFree(dev_input);
	}
	return true;
}
/* Overload without a buffer to release: report a CUDA failure to stderr
 * and return true; return false on cudaSuccess. */
bool checkForError(const cudaError_t cudaStatus, const char text[]) {
	if (cudaStatus == cudaSuccess) {
		return false;
	}
	fprintf(stderr, "\n\n%s \nError code: %d \nStatus: %s \n\n", text, cudaStatus, cudaGetErrorString(cudaStatus));
	return true;
}
/* info */
/* Print the array as a comma-separated line framed by blank lines, then
 * flush stdout so output survives a later crash. */
void printArray(short* A, int size) {
	printf("\n");
	for (int idx = 0; idx < size; idx++)
		printf("%d, ", A[idx]);
	printf("\n");
	fflush(stdout);
}
/* Verify arr[0..VECTOR_SIZE-1] is non-decreasing: print the index of the
 * first inversion found, or an OK banner if the array is sorted. */
void checkIfCorrectlySorted(short* arr) {
	for (int idx = 0; idx + 1 < VECTOR_SIZE; idx++) {
		if (arr[idx] > arr[idx + 1]) {
			printf("\n\n-----------ERROR!-----------%d\n\n ", idx);
			return;
		}
	}
	printf("\n----------- OK ------------");
}
/* merge sort */
/* Fill the VECTOR_SIZE-element array with pseudo-random values below
 * MAX_NUMBER (seeded from the clock); dump it in hard-debug builds. */
void fillArrayWithNumbers(short* numbers) {
	srand(time(NULL));
	for (int idx = 0; idx < VECTOR_SIZE; idx++) {
		numbers[idx] = rand() % MAX_NUMBER;
	}
	if (PROGRAM_STATE >= HARD_DBG) {
		printArray(numbers, VECTOR_SIZE);
	}
}
__host__
__device__
/* Overflow-safe midpoint of [start, end], biased toward start. */
int getMid(int start, int end) {
	const int half = (end - start) / 2;
	return start + half;
}
__host__
__device__
/* Merge two adjacent sorted runs of arr in place:
 *   left run  = arr[leftStart .. mid],  right run = arr[mid+1 .. rightEnd].
 * Scratch space: the caller reserves a full shadow copy of the vector
 * starting at arr[tmpIndexStart]; L and R alias into that region instead of
 * allocating, which is what makes this callable from device code. */
void merge(short* arr, int leftStart, int rightEnd, int mid, int tmpIndexStart) {
int i, j, k;
int leftHalfSize = mid - leftStart + 1;
int rightHalfSize = rightEnd - mid;
// Scratch views over the shadow copy (same offsets as the source runs).
short* L = &arr[tmpIndexStart + leftStart];
short* R = &arr[tmpIndexStart + mid + 1];
/* Copy data to temp arrays L[] and R[] */
for (i = 0; i < leftHalfSize; i++) {
L[i] = arr[leftStart + i];
}
for (j = 0; j < rightHalfSize; j++) {
R[j] = arr[mid + 1 + j];
}
/* Merge the temp arrays back into arr[l..r]*/
i = 0;
j = 0;
k = leftStart;
while (i < leftHalfSize && j < rightHalfSize) {
if (L[i] <= R[j]) {
arr[k] = L[i];
i++;
}
else {
arr[k] = R[j];
j++;
}
k++;
}
/* Copy the remaining elements of L[], if there are any */
while (i < leftHalfSize) {
arr[k] = L[i];
i++;
k++;
}
/* Copy the remaining elements of R[], if there are any */
while (j < rightHalfSize) {
arr[k] = R[j];
j++;
k++;
}
}
__global__
/* One merge step per thread: thread t merges its own span of
 * vectorLengthPerThread elements (two already-sorted halves split at the
 * midpoint), using the shadow copy at tmpIndexStart as scratch. */
void mergeKernel(short* arr, int vectorLengthPerThread, int vectorLength, int tmpIndexStart) {
	const int threadId = blockDim.x * blockIdx.x + threadIdx.x;
	const int leftStart = threadId * vectorLengthPerThread;
	if (leftStart >= vectorLength)
		return;
	const int rightEnd = leftStart + vectorLengthPerThread - 1;
	const int mid = getMid(leftStart, rightEnd);
	if (PROGRAM_STATE >= HARD_DBG) {
		printf("\n thread: %d, <%d, %d>, mid %d", threadId, leftStart, rightEnd, mid);
	}
	merge(arr, leftStart, rightEnd, mid, tmpIndexStart);
}
__host__
__device__
/* Recursive top-down merge sort over arr[leftStart .. rightEnd], restricted
 * to spans longer than minVectorLength (shorter runs are assumed already
 * sorted by an earlier kernel pass) and clipped to rightEnd <= vectorLength.
 * Uses the shadow copy at tmpIndexStart as merge scratch. */
void mergeSort(short* arr,int leftStart, int rightEnd, int minVectorLength, int vectorLength, int tmpIndexStart) {
if (leftStart < rightEnd && rightEnd - leftStart >= minVectorLength && rightEnd <= vectorLength) {
if (PROGRAM_STATE >= HARD_DBG) {
printf("\n<%d,%d> minVec: %d", leftStart, rightEnd, minVectorLength);
}
int m = getMid(leftStart, rightEnd);
mergeSort(arr, leftStart, m, minVectorLength, vectorLength, tmpIndexStart);
mergeSort(arr, m + 1, rightEnd, minVectorLength, vectorLength, tmpIndexStart);
merge(arr, leftStart, rightEnd, m, tmpIndexStart);
}
}
// GPU entry point: every thread merge-sorts its own contiguous chunk of the
// vector; range/bounds filtering happens inside mergeSort().
__global__
void mergeSortKernel(short* arr, int vectorLengthPerThread, int minVectorLength, int vectorLength, int tmpIndexStart) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int lo = tid * vectorLengthPerThread;
    int hi = lo + vectorLengthPerThread - 1;
    mergeSort(arr, lo, hi, minVectorLength, vectorLength, tmpIndexStart);
}
int main() {
// Hybrid merge sort: GPU passes sort ever-larger per-thread chunks, then a
// final host-side mergeSort combines the per-block sorted chunks.
const int vectorMultiplier = 2;
const int vectorLength = VECTOR_SIZE;
int threadsPerBlock = THREADS_PER_BLOCK;
int vectorLengthPerThread = VECTOR_LENGTH_PER_THREAD;
// NOTE(review): vectorLength / threadsPerBlock is INTEGER division, so the
// ceil() is a no-op; this only covers the vector when it divides evenly.
int numBlocks = ceil(vectorLength / threadsPerBlock);
const int blockVectorLength = vectorLength / numBlocks;
// Twice the data size: the upper half [vectorLength, 2*vectorLength) is the
// scratch region merge() uses for its temporary halves.
const int vectorSizeInBytes = vectorLength * sizeof(short) * 2;
int tmpIndexStart = vectorLength;
short* vector = (short*)malloc(vectorSizeInBytes);
fillArrayWithNumbers(vector);
short* dev_input = NULL;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (checkForError(cudaStatus, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?", dev_input)) {
return cudaStatus;
}
cudaStatus = cudaMalloc((void**)&dev_input, vectorSizeInBytes);
if (checkForError(cudaStatus, "cudaMalloc (dev_input) failed!", dev_input)) {
return cudaStatus;
}
cudaStatus = cudaMemcpy(dev_input, vector, vectorSizeInBytes, cudaMemcpyHostToDevice);
if (checkForError(cudaStatus, "cudaMemcpy (vector -> dev_input) failed!", dev_input)) {
return cudaStatus;
}
printf("\nConfiguration: vector length: %d, threads per block: %d, vector length per thread: %d, num blocks: %d, block vector length: %d\n",
vectorLength, threadsPerBlock, vectorLengthPerThread, numBlocks, blockVectorLength);
int i = 0;
// Bottom-up GPU phase: each iteration doubles the per-thread chunk size
// until a chunk spans one block's share of the vector.
while (vectorLengthPerThread <= blockVectorLength) {
if (PROGRAM_STATE >= DBG) {
printf("\nIter: %d, vector length per thread: %d", i++, vectorLengthPerThread);
}
mergeSortKernel<<<numBlocks, threadsPerBlock>>>(dev_input, vectorLengthPerThread, vectorLengthPerThread / VECTOR_LENGTH_PER_THREAD, vectorLength, tmpIndexStart);
cudaStatus = cudaGetLastError();
if (checkForError(cudaStatus, "mergeKernel launch failed!", dev_input)) {
return cudaStatus;
}
cudaStatus = cudaDeviceSynchronize();
if (checkForError(cudaStatus, "cudaDeviceSynchronize on \"mergeKernel\" returned error code.", dev_input)) {
return cudaStatus;
}
vectorLengthPerThread *= vectorMultiplier;
if (PROGRAM_STATE >= HARD_DBG) {
cudaStatus = cudaMemcpy(vector, dev_input, vectorSizeInBytes, cudaMemcpyDeviceToHost);
if (checkForError(cudaStatus, "cudaMemcpy (dev_input -> vector) failed!")) {
return cudaStatus;
}
printArray(vector, vectorLength);
}
}
if (PROGRAM_STATE < HARD_DBG) {
cudaStatus = cudaMemcpy(vector, dev_input, vectorSizeInBytes, cudaMemcpyDeviceToHost);
if (checkForError(cudaStatus, "cudaMemcpy (dev_input -> vector) failed!")) {
return cudaStatus;
}
}
// Host phase: merge the per-block sorted chunks (segments >= blockVectorLength).
mergeSort(vector, 0, vectorLength - 1, blockVectorLength, vectorLength, tmpIndexStart);
if (PROGRAM_STATE >= HARD_DBG) {
printArray(vector, vectorLength);
}
// NOTE(review): `vector` is never free'd; process exit reclaims it.
cudaFree(dev_input);
cudaStatus = cudaDeviceReset();
if (checkForError(cudaStatus, "cudaDeviceReset failed!")) {
return 1;
}
fflush(stdout);
checkIfCorrectlySorted(vector);
return 0;
} |
8,337 | #include "device_launch_parameters.h"
#include <iostream>
using namespace std;
int main() {
// Enumerate CUDA devices and print the key capability numbers for each.
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for (int i = 0; i < deviceCount; i++) {
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
cout << "GPU device: " << i << ": " << devProp.name << endl;
// NOTE(review): "Globam" typo is in the user-visible label; left untouched.
cout << "Total Globam Mem: " << devProp.totalGlobalMem / 1024 / 1024
<< "MB" << endl;
cout << "SM #num: " << devProp.multiProcessorCount << endl;
cout << "Shared Mem Per Block: " << devProp.sharedMemPerBlock / 1024.0
<< " KB" << endl;
cout << "Max Threads Per Block: " << devProp.maxThreadsPerBlock << endl;
cout << "Regs Per Block: " << devProp.regsPerBlock << endl;
cout << "Max Threads Per SM: " << devProp.maxThreadsPerMultiProcessor
<< endl;
// Warps per SM = max resident threads / 32 (warp size).
cout << "Max Warps Per SM: " << devProp.maxThreadsPerMultiProcessor / 32
<< endl;
}
cudaDeviceReset();
} |
8,338 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<cuda_runtime.h>
void list_env_properties();
// Entry point: just dump the properties of the detected CUDA devices.
int main(int argc, char **argv){
    (void)argc;
    (void)argv;
    list_env_properties();
    return 0;
}
// Query every CUDA device and print a capability summary; the summary is
// only emitted for device 0, and emulation-only devices (major == 9999)
// are ignored.
void list_env_properties(){
    int deviceCount = 0;
    struct cudaDeviceProp properties;
    cudaError_t cudaResultCode = cudaGetDeviceCount(&deviceCount);
    if (cudaResultCode != cudaSuccess) {
        deviceCount = 0;
    }
    /* machines with no GPUs can still report one emulation device */
    for (int device = 0; device < deviceCount; ++device) {
        cudaGetDeviceProperties(&properties, device);
        /* 9999 means emulation only */
        if (properties.major != 9999 && device == 0) {
            printf("name:%s\n", properties.name);
            printf("memory:%ld\n", properties.totalGlobalMem);
            printf("warpsize:%d\n", properties.warpSize);
            printf("max threads per block:%d\n", properties.maxThreadsPerBlock);
            printf("clock rate:%d\n", properties.clockRate);
            printf("multiProcessorCount %d\n",properties.multiProcessorCount);
            printf("maxThreadsPerMultiProcessor %d\n",properties.maxThreadsPerMultiProcessor);
        }
    }
}
|
8,339 | __global__ void wave1Dmac1(double * f_tmp1, double * f_in,
double u, double dt, double dx,
int N){
// Forward-difference step of the 1-D linear advection update:
// f_tmp1[i] = f[i] - u*(dt/dx)*(f[i+1] - f[i]), with a periodic wrap at the
// right boundary. (Presumably the MacCormack predictor sweep, per the
// name -- confirm against the companion kernel.)
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if(tid<N){
int x_p = tid+1;
// Periodic boundary: the right neighbour of the last cell is cell 0.
if(x_p == N) x_p = 0;
double f_tmp = f_in[tid];
f_tmp1[tid]= f_tmp - u*(dt/dx)*(f_in[x_p] - f_tmp);
}
}
|
8,340 | /* test.cu */
// Libraries :
# include <stdlib.h>
# include <stdio.h>
# include <string.h>
# include <time.h>
# include <ctime>
# include <math.h>
# include <unistd.h>
# include <iostream>
# include <fstream>
using namespace std;
#define NB_THREADS 512
// Number of NB_THREADS-sized blocks needed to cover n elements (ceil-div).
int number_of_blocks(int n)
{
    int blocks = n / NB_THREADS;
    if (n % NB_THREADS != 0) {
        blocks++;
    }
    return blocks;
}
// Identity ramp: T[k] = k for the first n entries; extra threads do nothing.
__global__ void ker(int *T, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        T[idx] = idx;
    }
}
// Stress loop: allocate, upload, fill on the GPU, and download vectors of
// doubling size (n = 512 .. 512*2^99). Allocation failures are not checked,
// matching the original experiment's intent.
int main()
{
    int n = 512;
    int e = 9;          // log2(n), printed alongside n
    int i, nb_blocks;
    int *Td, *Th;
    for (i = 0; i < 100; i++)
    {
        printf("n = %d\ne = %d\n", n, e);
        Th = (int*) calloc(n, sizeof(int));
        printf(" --> Th calloc done\n");
        cudaMalloc( (void **) &Td, n * sizeof(int) );
        // Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
        // is the documented drop-in replacement with identical semantics.
        cudaDeviceSynchronize();
        printf(" --> Td cudaMalloc done\n");
        cudaMemcpy( Td, Th, n*sizeof(int), cudaMemcpyHostToDevice );
        cudaDeviceSynchronize();
        printf(" --> cudaMemcpy(Td, Th) done\n");
        nb_blocks = number_of_blocks(n);
        ker<<<nb_blocks, NB_THREADS>>>(Td, n);
        printf(" --> ker(Td) done\n");
        cudaMemcpy( Th, Td, n*sizeof(int), cudaMemcpyDeviceToHost );
        cudaDeviceSynchronize();
        printf(" --> cudaMemcpy(Th, Td) done\n");
        free(Th);
        cudaFree(Td);
        n *= 2;
        e++;
    }
}
|
8,341 | #include "includes.h"
__global__ void g_One_feedforward( float* _inputs, float* _w, float* _b, float* _outputs, int rows, int cols, int channels)
{
// Per-element affine map outputs = inputs * w + b, applied independently for
// each (channel, row); w and b are per-channel vectors of length cols.
// Grid layout: blockIdx.x = row, blockIdx.y = channel; threads stride cols.
int row = blockIdx.x;
int channel = blockIdx.y;
// Offset of this (channel, row) slice in the planar [channel][row][col] layout.
int skip = channel * rows * cols + row * cols;
float* inputs = _inputs + skip;
float* outputs= _outputs+ skip;
// if(threadIdx.x == 0)
// sprintf(logStr, "block(%d %d) skip = %d\n", blockIdx.x, blockIdx.y, skip);
float* w = _w + channel * cols;
float* b = _b + channel * cols;
for(int i = 0; i < cols; i += blockDim.x){
int id = i + threadIdx.x;
if(id < cols){
outputs[id] = inputs[id] * w[id] + b[id];
}
}
} |
8,342 | #include "includes.h"
// Tree-reduce the maximum of each block's slice of dest into the slice's
// first element (dest[b * blockDim.x]). Padding lanes contribute -INFINITY
// so they can never win.
// NOTE(review): the `threadIdx.x >= numPoints` guard only pads correctly for
// the first block -- confirm whether numPoints is meant per-block.
__global__ void kernBlockWiseMax(const size_t numPoints, double* dest) {
// Assumes a 2D grid of 1024x1 1D blocks
int b = blockIdx.y * gridDim.x + blockIdx.x;
int i = b * blockDim.x + threadIdx.x;
__shared__ double blockMax[1024];
if(threadIdx.x >= numPoints) {
blockMax[threadIdx.x] = -INFINITY;
} else {
blockMax[threadIdx.x] = dest[i];
}
__syncthreads();
// Do all the calculations in block shared memory instead of global memory.
// Fix: the original loop condition (threadIdx.x < s) made the upper threads
// leave the loop early and skip the __syncthreads() inside it -- a barrier
// in divergent control flow is undefined behavior. Now ALL threads iterate
// and hit the barrier; only the low s threads touch shared memory.
for(int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s && blockMax[threadIdx.x] < blockMax[threadIdx.x + s]) {
blockMax[threadIdx.x] = blockMax[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x == 0) {
// Just do one global write
dest[i] = blockMax[0];
}
} |
8,343 | #include "includes.h"
__global__ void InputWeightsRTRLDerivativesKernel( float *input, float *hiddenActivationDerivatives, float *recurrentWeights, float *inputWeightRTRLDerivatives, float *previousInputWeightRTRLDerivatives )
{
// RTRL recurrence for input-weight sensitivities. For each flat id mapping
// to a (unit, to, from) triple:
//   p[unit][to][from] = h'(unit) * (1{unit==to} * input[from]
//                                   + sum_i W[unit][i] * p_prev[i][to][from])
int partialId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if (partialId < D_HIDDEN_UNITS * D_HIDDEN_UNITS * D_INPUT_UNITS)
{
// Decompose the flat index into (unit, to, from).
int unitId = partialId / (D_HIDDEN_UNITS * D_INPUT_UNITS);
int weightId = partialId % (D_HIDDEN_UNITS * D_INPUT_UNITS);
int to = weightId / D_INPUT_UNITS;
int from = weightId % D_INPUT_UNITS;
float sum = 0;
for (int i = 0; i < D_HIDDEN_UNITS; i++)
{
sum += recurrentWeights[unitId * D_HIDDEN_UNITS + i] * previousInputWeightRTRLDerivatives[i * (D_HIDDEN_UNITS * D_INPUT_UNITS) + weightId];
}
// (unitId == to) is the Kronecker delta selecting the direct input term.
inputWeightRTRLDerivatives[partialId] = hiddenActivationDerivatives[unitId] * ((unitId == to) * input[from] + sum);
}
} |
8,344 | #include "includes.h"
__global__ void grayScale(unsigned char* imgInput, unsigned char* imgOutput, int Row, int Col) {
// Luma conversion of a 3-byte-per-pixel image using the BT.601 weights
// (0.299 / 0.587 / 0.114); channel order is BGR, since the +2 byte gets the
// 0.299 (red) weight.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// NOTE(review): `row` is bounded by Col and `col` by Row while Row is used
// as the row stride -- this is only consistent if Row is the image WIDTH and
// Col its height; confirm against the caller.
if ((row < Col) && (col < Row)) {
imgOutput[row * Row + col] = imgInput[(row * Row + col) * 3 + 2] * 0.299 + imgInput[(row * Row + col) * 3 + 1] * 0.587 + imgInput[(row * Row + col) * 3] * 0.114;
}
} |
8,345 | #ifdef __cplusplus
extern "C" {
#endif
__global__ void mandelbrot(int* A, const int N, const int largeur, const int hauteur){
// Escape-time Mandelbrot: A[pixel] = number of iterations (capped at N)
// until |z| > 2, for c mapped from the pixel into [-2, 2] x [-2, 2].
int idx = blockDim.x * blockIdx.x + threadIdx.x;
// NOTE(review): the flat index is split with `hauteur` but recombined with
// `largeur` (and the store uses `hauteur` as the stride); these only agree
// when largeur == hauteur -- verify for non-square images.
int y = idx / hauteur;
int x = idx - (y * largeur);
if (y < hauteur && x < largeur)
{
int cpt = 0;
float x1 = 0.;
float y1 = 0.;
float x2 = 0.;
float y2 = 0.;
// c = (a, b): pixel centre mapped to [-2, 2] on both axes.
float a = 4. * x / largeur - 2.;
float b = 4. * y / hauteur - 2.;
float val = x1* x1 + y1 * y1;
while (cpt < N && val <= 4.)
{
cpt ++;
// z <- z^2 + c, expanded into real/imaginary parts.
x2 = x1* x1 - y1 * y1 + a;
y2 = 2. * x1 * y1 + b;
x1 = x2;
y1 = y2;
val = x1* x1 + y1 * y1;
}
A[y*hauteur+x] = cpt;
}
}
__global__ void game(int* A, const int N, const int largeur, const int hauteur){
// One step of Conway's Game of Life over an hauteur x largeur grid, updated
// IN PLACE.
// NOTE(review): neighbours are read from A while other threads write A --
// without double buffering the result can depend on thread scheduling.
int idx = blockDim.x * blockIdx.x + threadIdx.x;
// NOTE(review): index split mixes hauteur/largeur like mandelbrot() above;
// only consistent for square grids.
int y = idx / hauteur;
int x = idx - (y * largeur);
if (y >= hauteur || x >= largeur)
return;
int me = A[idx];
int north = 0 ;
int northEast = 0;
int northWest = 0;
int south = 0;
int southEast = 0;
int southWest = 0;
int east = 0;
int west = 0;
// Border-aware neighbour reads: cells outside the grid count as dead (0).
if (x > 0)
west = A[idx -1];
if (x < largeur - 1)
east = A[idx + 1];
if (y > 0)
north = A[idx - largeur];
if (y < hauteur - 1)
south = A[idx + largeur];
if ((y < hauteur - 1) && (x < largeur - 1))
southEast = A[idx + largeur + 1];
if ((y < hauteur - 1) && (x > 0))
southWest = A[idx + largeur - 1];
if ((y > 0) && (x >0))
northWest = A[idx - largeur - 1];
if ((y > 0) && (x < largeur - 1))
northEast = A[idx - largeur + 1];
int res = north + south + east + west + northEast + northWest + southEast + southWest;
//__syncthreads();
// `&&` binds tighter than `||`: live cells with res<2 die, and ANY cell with
// res>3 is written 0 (overcrowding death for live cells, a no-op for dead
// ones), so the net effect matches the standard rules.
if ((me == 1) && (res < 2) || (res > 3))
A[idx] = 0;
else
if ((me == 0) && (res == 3))
A[idx] = 1;
}
#ifdef __cplusplus
}
#endif
|
8,346 | #include "includes.h"
// Rectified linear unit: clamp negative inputs to 0, pass the rest through
// (NaN propagates, matching the original `a < 0 ? 0 : a` form).
__device__ __forceinline__ float relu(float a) {
    if (a < 0) {
        return 0;
    }
    return a;
}
__global__ void relu_derivative(float *upper_grads, float *upper_values, unsigned int upper_size) {
// Backprop through ReLU: wherever the forward activation was clamped to 0,
// zero the incoming gradient; elsewhere the gradient passes through as-is.
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < upper_size)
if (upper_values[index] == 0)
upper_grads[index] = 0;
} |
8,347 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1024
// Enumeration (rank) sort: each thread counts how many of the N keys order
// before its own key, then scatters its key to that rank in d_b.
// Fix: ties are now broken by original index; the previous version gave all
// duplicate keys the SAME rank, so they overwrote one slot and left other
// slots of d_b stale.
// Launch layout assumed: gridDim.x * blockDim.x == N and blockDim.x == 512.
__global__ void gpu_sort(int *d_a, int *d_b){
__shared__ int tmp[512];
int tid = threadIdx.x;
int ttid = threadIdx.x + blockIdx.x * blockDim.x;
int val = d_a[ttid];
int count = 0;
for(int i = tid; i < N; i += 512){
// Stage one 512-key tile; its base global index is i - tid.
tmp[tid] = d_a[i];
__syncthreads();
int base = i - tid;
for(int j = 0; j < 512; j++){
int other = tmp[j];
// Strictly smaller keys, and equal keys at a lower index, precede val.
if(other < val || (other == val && base + j < ttid)){
count++;
}
}
__syncthreads();
}
d_b[count] = val;
}
int main(){
// Rank-sort N random ints on the GPU, bubble-sort the same data on the CPU,
// and time both with CUDA events.
int sizeByte = sizeof(int)*N;
int *h_a = (int*) malloc(sizeByte);
int *h_b = (int*) malloc(sizeByte);
int *h_a_cpu = (int*) malloc(sizeByte);
int *h_b_cpu = (int*) malloc(sizeByte);
int *d_a, *d_b;
cudaMalloc(&d_a, sizeByte);
cudaMalloc(&d_b, sizeByte);
for(int i=0;i<N;i++){
h_a[i] = rand();
h_a_cpu[i] = h_a[i];
}
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// NOTE(review): the GPU timing below includes both memcpys, not just the kernel.
cudaEventRecord(start,0);
cudaMemcpy(d_a, h_a, sizeByte, cudaMemcpyHostToDevice);
gpu_sort<<<2, 512>>>(d_a,d_b);
cudaMemcpy(h_b, d_b, sizeByte, cudaMemcpyDeviceToHost);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float time = 0;
cudaEventElapsedTime(&time, start, stop);
printf("Time consumption on GPU: %lf\n", time);
// Sanity check: GPU output must be non-decreasing.
for(int i=0;i<N-1;i++){
if(h_b[i]>h_b[i+1]){
printf("Error at index %d\n GPU[%d] = %d\n", i,i,h_b[i]);
break;
}
}
cudaEvent_t start_cpu,stop_cpu;
cudaEventCreate(&start_cpu);
cudaEventCreate(&stop_cpu);
cudaEventRecord(start_cpu,0);
//sort on cpu
for(int i=N;i>0;i--){
for(int j=0;j<i-1;j++){
if(h_a_cpu[j]>h_a_cpu[j+1]){
int tmp = h_a_cpu[j];
h_a_cpu[j] = h_a_cpu[j + 1];
h_a_cpu[j+1] = tmp;
}
}
}
cudaEventRecord(stop_cpu,0);
cudaEventSynchronize(stop_cpu);
float time_cpu = 0;
cudaEventElapsedTime(&time_cpu, start_cpu, stop_cpu);
printf("Time consumption on CPU: %lf\n", time_cpu);
// NOTE(review): host/device buffers and events are never released, and
// h_b_cpu is allocated but unused.
return 0;
}
|
8,348 | #include "TensorSum_Kernel.cuh"
// Add bias[d] to every element of channel d of a planar
// [biasSize][tensorHeight][tensorWidth] tensor; one thread per element.
__global__ void AddBIAS_Kernel(float* tensor, float* bias, int biasSize, int tensorHeight, int tensorWidth)
{
int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
int tensorXYSize = tensorHeight * tensorWidth;
int tensorSize = biasSize * tensorXYSize;
if (threadIndex < tensorSize)
{
// Fix: for the planar [depth][y][x] layout written below, the depth index
// must be threadIndex / tensorXYSize. The original `threadIndex % biasSize`
// mapping is a bijection only when biasSize and tensorXYSize are coprime;
// otherwise some elements received the bias twice and others never.
int threadDepthIndex = threadIndex / tensorXYSize;
int threadXYIndex = threadIndex % tensorXYSize;
tensor[threadDepthIndex * tensorXYSize + threadXYIndex] += bias[threadDepthIndex];
}
}
// Host launcher: one thread per tensor element, 512 threads per block.
void AddBIAS_GPU(float* tensor, float* bias, int biasSize, int tensorHeight, int tensorWidth) {
    const int totalElements = biasSize * tensorHeight * tensorWidth;
    const int blocks = (int)ceil(totalElements / 512.0);
    AddBIAS_Kernel<<<blocks, 512>>>(tensor, bias, biasSize, tensorHeight, tensorWidth);
}
|
8,349 | // Writer: Junhyuck Woo
// Lecture: Multicore Computing
// Organization: Chung-Ang University
// Deadline: June3 20, 2020
// Project #4
// - Thrust
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/device_vector.h>
#include <iostream>
#include <time.h>
using namespace std;
#define N 2000000.0
int main(int argc, char* argv[])
{
// Approximate pi on the GPU with Thrust: for x_i = i/N (i = 0..N-1), sum
// 4 / (1 + x_i^2) / N, i.e. a left-endpoint rectangle rule for the integral
// of 4/(1+x^2) over [0, 1).
float sum = 0;
clock_t start_time, end_time;
double exec_time = 0;
// allocate three device_vectors
thrust::device_vector<float> X(N);
thrust::device_vector<float> Y(N);
thrust::device_vector<float> Z(N);
// Start timer;
start_time = clock();
// initialize X to 0,1,2,3, ....
thrust::sequence(X.begin(), X.end());
thrust::fill(Y.begin(), Y.end(), N);
// Divide X as N
thrust::transform(X.begin(), X.end(), Y.begin(), X.begin(), thrust::divides<float>());
// Calculation
thrust::transform(X.begin(), X.end(), X.begin(), X.begin(), thrust::multiplies<float>());// X = X*X
thrust::fill(Y.begin(), Y.end(), 1.0); // Y <- 1.0
thrust::transform(X.begin(), X.end(), Y.begin(), X.begin(), thrust::plus<float>()); // X = X + 1
thrust::fill(Y.begin(), Y.end(), 4.0); // Y <- 4.0
thrust::transform(Y.begin(), Y.end(), X.begin(), Z.begin(), thrust::divides<float>()); // z = 4.0 / X
thrust::fill(Y.begin(), Y.end(), N);
thrust::transform(Z.begin(), Z.end(), Y.begin(), Z.begin(), thrust::divides<float>()); // z = z / N
// Sum the calculation result
sum = thrust::reduce(Z.begin(), Z.end(), (float)0.0, thrust::plus<float>());
// End timer
end_time = clock();
// clock() measures host CPU time, so this includes launch/sync overhead.
exec_time = (double)(end_time - start_time)*1000 / CLOCKS_PER_SEC;
// Print the result
cout << "N: 2000000.0" << endl;
cout << "Excution Time: " << exec_time << " ms" << endl;
cout << "Result: " << sum << endl;
return 0;
} |
8,350 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
int
n = 512;
float
a, beta_old, beta = 0.0f,
sqydot, ydot, thr = 1e-5f,
*x, *dx, *y, *dy;
#define I (i+1)
#define J (j+1)
#define BLOCKSIZE 256 // Recommended blocksize from cuda ducumentation.
// y = A * x_old on GPU
__global__ void
cudaDy(float * dx, float * dy, int n)
{
// Dense mat-vec with the matrix defined implicitly by
// A[i][j] = 1 / (0.5*(I+J-1)*(I+J-2) + I), where I=(i+1) and J=(j+1) are
// the file-level macros (so `i` and `j` names here are load-bearing).
// Each thread computes one component of y = A*x.
// No bounds guard: the grid must supply exactly n threads.
int i = blockIdx.x*BLOCKSIZE+threadIdx.x;
float ytemp = 0.0f;
for (int j = 0; j<n; j++)
{
ytemp += dx[j]/(0.5f*(I+J-1)*(I+J-2)+I);
}
dy[i]=ytemp;
}
// x = y / sqrt(y dot y) on GPU. 2-norm precalculated on CPU.
__global__ void
cudaDx(float* dx, float* dy, float sqydot)
{
int i = blockIdx.x*BLOCKSIZE+threadIdx.x;
dx[i] = dy[i]/sqydot;
}
int
main ( int argc, char **argv )
{
// Power iteration for the dominant eigenvalue of the implicit matrix from
// cudaDy(): repeat y = A*x, x = y/||y||2 until the estimate beta = y.x
// changes by less than thr between iterations.
if ( argc > 1 )
n = (1 << strtol ( argv[1], NULL, 10 ));
x = (float *) malloc ( n*sizeof(float) );
y = (float *) malloc ( n*sizeof(float) );
// Start from the first unit basis vector.
memset ( x, 0, n*sizeof(float) );
x[0] = 1.0f;
// Allocate similar arrays on device.
cudaMalloc(&dx, n*sizeof(float));
cudaMalloc(&dy, n*sizeof(float));
// Copy initial contents of x one time.
cudaMemcpy(dx, x, n*sizeof(float), cudaMemcpyHostToDevice);
// Set size of thread block
dim3 threadBlock (BLOCKSIZE);
// Set number of thread blocks
// NOTE(review): integer division -- n must be a multiple of BLOCKSIZE since
// the kernels have no bounds guard.
dim3 gridBlock (n/BLOCKSIZE);
do
{
// Calculate y vector
cudaDy <<< gridBlock, threadBlock >>> (dx, dy, n);
// Copy result to host
cudaMemcpy(y, dy, n*sizeof(float), cudaMemcpyDeviceToHost);
// Calculate new beta and y dot product on host.
beta_old = beta;
beta = 0.0f;
ydot = 0.0f;
for ( int j=0; j<n; j++ )
{
beta += y[j] * x[j];
ydot += y[j] * y[j];
}
if ( fabs(beta_old-beta) < thr )
break;
// Precalculate square root on host and send to x vector calculation.
sqydot = sqrt(ydot);
cudaDx <<< gridBlock, threadBlock >>> (dx, dy, sqydot);
// Copy result to host.
cudaMemcpy(x, dx, n*sizeof(float), cudaMemcpyDeviceToHost);
} while ( 1 );
printf ( "%e\n", beta );
free ( x ), free ( y );
cudaFree(dx), cudaFree(dy);
}
|
8,351 | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <ctime>
using namespace std;
#define TILE_SIZEX 32
#define TILE_SIZEY 32
#define sizeImage 512
#define W 3
#define ITERATIONS ( 1 )
//////////////////////////////////////////
// W x W box blur over a sizeImage x sizeImage int image; pixels on the
// one-pixel border are forced to 0 instead of being filtered.
__global__ void boxFilterKernel(int* input, int* output)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row == 0 || col == 0 || row == sizeImage - 1 || col == sizeImage - 1) {
        output[row * sizeImage + col] = 0;
        return;
    }
    int sum = 0;
    for (int wy = 0; wy < W; wy++) {
        for (int wx = 0; wx < W; wx++) {
            sum += input[(row + wy - 1) * sizeImage + (col + wx - 1)];
        }
    }
    // Integer average over the window.
    output[row * sizeImage + col] = sum / (W * W);
}
/********************************************************
Main function
*********************************************************/
// Fill a sizeImage^2 image with random bytes, box-filter it on the GPU,
// and copy the result back to the host.
int main(void)
{
    int *in, *out;
    int *d_in, *d_out;
    int size = sizeImage * sizeImage * sizeof(int);
    int i, j;
    in = (int*)malloc(size);
    // Fix: `out` was malloc'd twice in a row, leaking the first buffer.
    out = (int*)malloc(size);
    cudaMalloc((void**)&d_in, size);
    cudaMalloc((void**)&d_out, size);
    for (i = 0; i < sizeImage; i++)
        for (j = 0; j < sizeImage; j++)
            in[i * sizeImage + j] = (rand() % 256);
    cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
    dim3 dimBlock(TILE_SIZEX, TILE_SIZEY);
    dim3 dimGrid((int)ceil((float)sizeImage / (float)TILE_SIZEX),
                 (int)ceil((float)sizeImage / (float)TILE_SIZEY));
    boxFilterKernel<<<dimGrid, dimBlock>>>(d_in, d_out);
    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
    // Release device and host buffers (the original leaked all of them).
    cudaFree(d_in);
    cudaFree(d_out);
    free(in);
    free(out);
    return 0;
}
|
8,352 | #include "includes.h"
// Elementwise square: d_out[i] = d_in[i]^2.
// Single-block launch assumed: only threadIdx.x is used and there is no
// bounds guard, so blockDim.x must equal the array length.
__global__ void square(float* d_in, float* d_out) {
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f;
} |
8,353 | #include <stdio.h>
#include <stdlib.h>
#define THREAD_PER_BLOCK 2
// Elementwise sum of two n x n matrices: c = a + b, one thread per element.
__global__
void add_matrix(int* a, int* b, int* c,int n)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= n || col >= n) {
        return;
    }
    int idx = row * n + col;
    c[idx] = a[idx] + b[idx];
}
// Naive n x n matrix product: each in-range thread computes one element of
// c = a * b by a full dot product over global memory.
__global__
void mult_matrix(int* a, int* b, int* c,int n)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= n || col >= n) {
        return;
    }
    int acc = 0;
    for (int k = 0; k < n; k++) {
        acc += a[row * n + k] * b[k * n + col];
    }
    c[row * n + col] = acc;
}
__global__
void mult_matrix_shared(int* a, int* b, int* c,int n)
{
// Tiled matrix multiply with THREAD_PER_BLOCK^2 shared-memory tiles.
// Assumes n is a multiple of THREAD_PER_BLOCK and the grid exactly covers
// the matrix: there are no bounds guards on the loads or the final store.
// NOTE(review): the tiles are float while the data is int; exact only while
// values stay within float's 2^24 integer range -- confirm intended.
__shared__ float sub_a[THREAD_PER_BLOCK][THREAD_PER_BLOCK];
__shared__ float sub_b[THREAD_PER_BLOCK][THREAD_PER_BLOCK];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * THREAD_PER_BLOCK + ty;
int Col = bx * THREAD_PER_BLOCK + tx;
int Pvalue = 0;
for (int ph = 0; ph < n/THREAD_PER_BLOCK; ++ph) {
// Stage one tile of a and one of b, then accumulate their product.
sub_a[ty][tx] = a[Row*n + ph*THREAD_PER_BLOCK + tx];
sub_b[ty][tx] = b[(ph*THREAD_PER_BLOCK + ty)*n + Col];
__syncthreads();
for (int k = 0; k < THREAD_PER_BLOCK; ++k) {
Pvalue += sub_a[ty][k] * sub_b[k][tx];
}
// Barrier before the next phase overwrites the tiles.
__syncthreads();
}
c[Row*n + Col] = Pvalue;
}
__global__
void mult_mat_rectangular(int *d_M, int *d_N, int *p,int N){
// Tiled multiply where each block computes TWO horizontally adjacent output
// tiles (columns Col and Col2), software-prefetching the next tiles into
// registers while the current ones are consumed from shared memory.
// NOTE(review): since each block covers 2 tiles in x, the grid's x dimension
// must be N/(2*THREAD_PER_BLOCK). main() launches a full-width grid, which
// makes the Col2 loads/stores of right-half blocks run past the matrix --
// verify the intended grid shape.
__shared__ int Mds[THREAD_PER_BLOCK][THREAD_PER_BLOCK];
__shared__ int Nds[THREAD_PER_BLOCK][THREAD_PER_BLOCK];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by*THREAD_PER_BLOCK + ty;
int Col = bx*2*THREAD_PER_BLOCK + tx;
int Col2 = (bx*2 + 1)*THREAD_PER_BLOCK + tx;
int p1 = 0;
int p2 = 0;
int k = 0;
// Prime the pipeline: fetch the first M tile and both N column tiles.
int prefM = d_M[Row*N + k*THREAD_PER_BLOCK + tx];
int prefN = d_N[(k*THREAD_PER_BLOCK + ty)*N + Col];
int prefN2 = d_N[(k*THREAD_PER_BLOCK + ty)*N + Col2];
Mds[ty][tx] = prefM;
Nds[ty][tx] = prefN;
__syncthreads();
for(int m = 0; m < N/THREAD_PER_BLOCK ; ++m){
// Prefetch this phase's tiles into registers while consuming shared memory.
prefM = d_M[Row*N + m*THREAD_PER_BLOCK + tx];
prefN = d_N[(m*THREAD_PER_BLOCK + ty)*N + Col];
for(int k = 0; k < THREAD_PER_BLOCK; k++){
p1 += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
// Swap in the second column tile and accumulate the Col2 partial.
Nds[ty][tx] = prefN2;
__syncthreads();
prefN2 = d_N[(m*THREAD_PER_BLOCK + ty)*N + Col2];
for(int k = 0; k < THREAD_PER_BLOCK; k++){
p2 += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
Mds[ty][tx] = prefM;
Nds[ty][tx] = prefN;
}
p[Row*N + Col] = p1;
p[Row*N + Col2] = p2;
}
// Print an n x n int matrix, one row per line, values space-separated.
void print_matrix(int* a,int n)
{
    for (int r = 0; r < n; r++)
    {
        for (int c = 0; c < n; c++)
        {
            printf("%d ", a[r * n + c]);
        }
        printf("\n");
    }
}
// Fill an n x n matrix with the constant 1 (random fill kept for reference).
void fill_matrix(int* a,int n)
{
    for (int r = 0; r < n; r++)
    {
        for (int c = 0; c < n; c++)
        {
            //a[i*n+j] = rand()%5+1;
            a[r * n + c] = 1;
        }
    }
}
int main()
{
// Demo driver: fill two 8x8 matrices with ones, multiply them on the GPU,
// and print the result (every entry should equal mat_elem = 8).
int *a,*b,*c;
int *d_a,*d_b,*d_c;
int mat_elem = 8;
int my_size = mat_elem*mat_elem*sizeof(int);
//cudaEvent_t my_start,my_stop;
//cudaEventCreate(&my_start);
//cudaEventCreate(&my_stop);
a = (int*) malloc(my_size);
b = (int*) malloc(my_size);
c = (int*) malloc(my_size);
fill_matrix(a,mat_elem);
fill_matrix(b,mat_elem);
printf("Matrix A\n");
print_matrix(a,mat_elem);
printf("Matrix B\n");
print_matrix(b,mat_elem);
printf("\n");
cudaMalloc((void**)&d_a,my_size);
cudaMalloc((void**)&d_b,my_size);
cudaMalloc((void**)&d_c,my_size);
cudaMemcpy(d_a,a,my_size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,my_size,cudaMemcpyHostToDevice);
dim3 my_block(THREAD_PER_BLOCK,THREAD_PER_BLOCK);
dim3 my_grid((mat_elem + THREAD_PER_BLOCK-1)/my_block.x,(mat_elem + THREAD_PER_BLOCK-1)/my_block.y);
//////////////////////ELAPSED TIME ///////////////////////////////
//cudaEventRecord(my_start,0);
//mult_matrix<<<my_grid,my_block>>>(d_a, d_b, d_c,mat_elem);
// NOTE(review): mult_mat_rectangular computes two tiles per block in x and
// expects a half-width grid; with this full-width grid its Col2 stores go
// out of bounds -- confirm which kernel/grid pairing is intended.
mult_mat_rectangular<<<my_grid,my_block>>>(d_a, d_b, d_c,mat_elem);
//cudaEventRecord(my_stop,0);
//cudaEventSynchronize(my_stop);
/////////////////////////////////////////////////////
//float elapsed_time;
//cudaEventElapsedTime(&elapsed_time,my_start,my_stop);
cudaMemcpy(c,d_c,my_size,cudaMemcpyDeviceToHost);
printf("Matrix C\n");
print_matrix(c,mat_elem);
//printf("time : %f\n",elapsed_time);
// NOTE(review): host and device buffers are never freed.
return 0;
} |
8,354 | #include "includes.h"
__global__ void kExpand(float *images, float* targets, int num_images, int num_input_channels, int image_size_y, int image_size_x, int num_modules_y, int num_modules_x, int kernel_size_y, int kernel_size_x, int padding_y, int padding_x, int stride_y, int stride_x, int num_modules_batch, int module_id_offset) {
// im2col-style expansion: for one module (output position) and one input
// channel, copy the kernel_size_y x kernel_size_x input window into
// `targets` (zero-filled where the window leaves the image), laid out for a
// subsequent GEMM.
// Grid: blockIdx.x = module within this batch, blockIdx.y = channel;
// threads stride over the image batch.
int color = blockIdx.y;
int src_module_id = module_id_offset + blockIdx.x;
int dst_module_id = blockIdx.x;
int module_id_x = src_module_id % num_modules_x;
int module_id_y = src_module_id / num_modules_x;
// NOTE(review): padding is ADDED to the window origin here; most conv
// layouts subtract it -- confirm the caller's sign convention for padding.
int startX = module_id_x * stride_x + padding_x;
int startY = module_id_y * stride_y + padding_y;
int Y, X;
long target_id, source_id;
images += num_images * image_size_x * image_size_y * color;
targets += num_images * (dst_module_id + num_modules_batch * (kernel_size_y * kernel_size_x * color));
for (int y = 0; y < kernel_size_y; y++) {
Y = startY + y;
for (int x = 0; x < kernel_size_x; x++) {
X = startX + x;
target_id = num_images * num_modules_batch * (x + kernel_size_x * y);
source_id = num_images * (X + image_size_x * Y);
if (X < 0 || X >= image_size_x || Y < 0 || Y >= image_size_y) {
// Window pixel outside the image: write zeros.
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
targets[target_id + im] = 0;
}
} else {
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
targets[target_id + im] = images[source_id + im];
}
}
__syncthreads();
}
}
} |
8,355 | #include "includes.h"
// Elementwise vector addition: C[i] = A[i] + B[i] for i in [0, n).
__global__ void vecAdd(float* C, float* A, float* B, int n) {
// Get our global thread ID
int id = blockIdx.x * blockDim.x + threadIdx.x;
// Make sure we do not go out of bounds
if (id < n) {
C[id] = A[id] + B[id];
}
} |
8,356 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <cuda.h>
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(cudaGetLastError());
*/
// Terminate the process when a CUDA runtime call did not return cudaSuccess.
static void checkCudaCall(cudaError_t result) {
    if (result == cudaSuccess) {
        return;
    }
    printf("cuda Error \n");
    exit(1);
}
// Copies deviceA into deviceResult, one element per thread.
// NOTE(review): there is no bounds guard, so the launch must cover the array
// exactly (the caller uses n/threadBlockSize blocks, requiring n to be a
// multiple of the block size).
__global__ void vectorAddKernel(int* deviceA, int* deviceResult) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
// insert operation here
deviceResult[i] = deviceA[i];
}
extern "C"
// Upload v[0..n) to the device, run the (placeholder copy) kernel, and fetch
// the result back. n must be a multiple of the 256-thread block size.
void histogram(int *v, long n){
    int* deviceIn, *deviceOut;
    int threadBlockSize=256;
    checkCudaCall(cudaMalloc((void **) &deviceIn, n * sizeof(int)));
    if (deviceIn == NULL) {
        printf("Error in cudaMalloc! \n");
        return;
    }
    checkCudaCall(cudaMalloc((void **) &deviceOut, n * sizeof(int)));
    if (deviceOut == NULL) {
        checkCudaCall(cudaFree(deviceIn));
        printf("Error in cudaMalloc! \n");
        return;
    }
    // Fix: this copy is a host-to-device upload; the original passed
    // cudaMemcpyDeviceToHost, i.e. the wrong transfer direction.
    checkCudaCall(cudaMemcpy(deviceIn, v, n * sizeof(int), cudaMemcpyHostToDevice));
    vectorAddKernel<<<n/threadBlockSize, threadBlockSize>>>(deviceIn, deviceOut);
    cudaDeviceSynchronize();
    // Fix: the result buffer was a fixed int[256] on the stack while
    // n * sizeof(int) bytes were copied into it -- a stack buffer overflow
    // for n > 256. Size the host buffer from n instead.
    int* result = (int*)malloc(n * sizeof(int));
    checkCudaCall(cudaMemcpy(result, deviceOut, n * sizeof(int), cudaMemcpyDeviceToHost));
    free(result);
    checkCudaCall(cudaFree(deviceIn));
    checkCudaCall(cudaFree(deviceOut));
}
|
8,357 | #include <stdio.h>
int nElem = 6;
dim3 block(3);
dim3 grid((nElem + block.x - 1) / block.x);
int main(void)
{
// Print the grid/block dimensions computed at namespace scope above (grid
// sized to cover nElem = 6 items with blocks of 3 threads).
// NOTE(review): "gird" typo is in the user-visible string; left untouched.
printf("grid.x %d gird.y %d grid.z %d \n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d \n", block.x, block.y, block.z);
return 0;
} |
8,358 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <math.h>
extern "C"
{
__device__ float sim_inv_cdf( float mu, float sigma, float phi_a, float phi_b, curandState localState)
{ //Generate trunc StdNorm by inverse CDF if conditions are stable.
// Draw U ~ Uniform(phi_a, phi_b) and map it through the standard-normal
// inverse CDF, then de-standardize with mu/sigma.
// NOTE(review): localState is received BY VALUE, so the caller's RNG state
// is not advanced by this call -- confirm intended.
float u = ((phi_b - phi_a) * curand_uniform(&localState)) + phi_a;
float x = sigma * normcdfinvf(u) + mu;
return x;
} //Gets valid sample every time--so speed, very wow.
__device__ float simple_rejection( float mu, float sigma, float stdA, float stdB, int maxtries, curandState localState)
{
// Naive rejection: draw standard normals until one lands in [stdA, stdB].
// Returns +inf when all maxtries draws miss (the caller checks isinf()).
float res = 1.0f/0.0f; //set result to inf. For error checking.
float z;
for(int i=0; i < maxtries; i++)
{
z = curand_normal(&localState);
if ( z <= stdB && z >= stdA)
{
res = mu + (sigma*z);
return res;
}
} //end maxtry for loop
return res;
} //end simple rejection-sampler.
__device__ float one_sided_trunc( float mu, float sigma, float stdA, float stdB, curandState localState)
{
// One-sided truncation: rejection sampling with a shifted exponential
// proposal at the optimal rate alpha (Robert-style sampler). A
// right-truncated problem is first reflected into a left-truncated one,
// and the sample is reflected back at the end.
float logRho, z, logU, res, alpha, trunc;
if (isinf(stdA)) trunc=-stdB; else trunc=stdA; //If a is -Inf, then b is truncated. Else, a is truncated.
alpha = (trunc + sqrtf(trunc*trunc + 4))/2; //Optimal alpha
do { //Start rejection sample loop.
z = (-logf( curand_uniform(&localState) )/ alpha) + trunc; //Truncated Exponential
if (trunc < alpha) logRho = -((z-alpha) * (z-alpha)) / 2;
else logRho = ((trunc-alpha)*(trunc-alpha) -(alpha-z)*(alpha-z)) / 2;
logU = logf(curand_uniform(&localState));
} while (logU > logRho);
//If left trunc, do as usual. If right trunc, reflect the z value, then add mu.
if (isinf(stdB)) res = mu + sigma * z;
else res = mu - sigma*z;
return res;
} //end one-sided sampler.
// Two-sided truncation on a narrow interval: uniform proposal over
// [stdA, stdB] with the region-dependent acceptance ratio from Robert,
// "Simulation of truncated normal variables".
__device__ float robert( float mu, float sigma, float stdA, float stdB, curandState localState)
{ //Condns unstable for inv-CDF. Do Robert (2009).
float logrho, z, logu, res;
do {
z = (stdB-stdA) * curand_uniform(&localState) + stdA;
logu = logf( curand_uniform(&localState) );
if ( stdA<=0 && stdB>= 0 ) logrho = -(z*z)/2;
// Fix: when the interval lies on one side of zero, the log acceptance
// ratio is (bound^2 - z^2)/2 <= 0. The original negated it, yielding a
// non-negative logrho that accepted every draw -- the output degenerated
// to Uniform(stdA, stdB) instead of a truncated normal.
else if (stdA > 0) logrho = ((stdA*stdA)-(z*z))/2;
else logrho = ((stdB*stdB)-(z*z))/2;
} while(logu > logrho);
res = sigma*z + mu;
return res;
} //end Robert sampler.
__global__ void truncnormal_kernel(float *result, int n, float *mu,
float *sigma, float *a, float *b, int maxtries,
int mu_len, int sigma_len, int a_len, int b_len)
{
// Draw n samples from N(mu, sigma^2) truncated to [a, b], one per thread.
// Parameter arrays are recycled modulo their lengths. The sampling strategy
// is chosen per-thread from the truncation geometry.
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if (idx >= n) return; //Index is larger than sample size--do no calculation here.
curandState localState;
// Per-thread stream seeded/sequenced by idx: deterministic across runs.
curand_init(idx, idx, 0, &localState);
//Declare vars in thread-local memory.
// NOTE(review): the helpers below take curandState by value, so each call
// starts from this same state snapshot -- confirm that is intended.
float t_a = a[idx % a_len];
float t_b = b[idx % b_len];
float t_mu = mu[idx % mu_len];
float t_sigma = sigma[idx % sigma_len];
float res = 1.0f/0.0f;
float stdA = (t_a - t_mu)/t_sigma; //Standardize truncation points. Done in-thread.
float stdB = (t_b - t_mu)/t_sigma;
float phi_a = normcdff(stdA); //Calculate CDF of trunc points from StdNormal.
float phi_b = normcdff(stdB);
if ( phi_b - phi_a > 0.02f )
{ //If stable conditions, use inverse-CDF.
result[idx] = sim_inv_cdf(t_mu, t_sigma, phi_a, phi_b, localState);
return;
}
else if ( isinf(stdA) || isinf(stdB) )
{ //One-sided truncation.
res = one_sided_trunc( t_mu, t_sigma, stdA, stdB, localState);
result[idx] = res;
return;
}
else if ( stdB-stdA >= sqrtf(6.2831853f) && phi_b-phi_a > 0.0001f)
{ //Two-sided trunc with truncs far away--do the naive rejection sampler
res = simple_rejection(t_mu, t_sigma, stdA, stdB, maxtries, localState);
// NOTE(review): if all maxtries draws miss, res stays inf and result[idx]
// is left UNWRITTEN (no fallback to robert()) -- confirm intended.
if (!isinf(res))
{
result[idx] = res;
return;
}
}
else{ //Do the Robert method.
res = robert(t_mu, t_sigma, stdA, stdB, localState);
result[idx] = res;
return;
} //End truncation regions on same side of mean.
} //end truncnorm kernel
} //end extern C.
|
8,359 | #include<iostream>
#include<cstdlib>
#include<fstream>
#include<string>
#include<sys/time.h>
//#define PRINT_FINAL_RESULT
typedef unsigned long long int UINT;
using namespace std;
// Integer average of the (2*dist+1)^2 square neighbourhood centred on idx,
// read from dev_arr1 and written to dev_arr2[idx].
__device__ void _jacobi_square(int* dev_arr1, int* dev_arr2, int idx, int rowsize, int dist){
    int span = dist + dist + 1;
    int acc = 0;
    for (int dr = -dist; dr <= dist; dr++){
        for (int dc = -dist; dc <= dist; dc++){
            acc += dev_arr1[idx + dr * rowsize + dc];
        }
    }
    dev_arr2[idx] = acc / span / span;
}
// Integer average over a cross-shaped stencil: the vertical segment (centre
// excluded) plus the full horizontal segment (centre included), each of
// half-width dist.
__device__ void _jacobi_cross(int* dev_arr1, int* dev_arr2, int idx, int rowsize, int dist){
    int acc = 0;
    for (int dr = -dist; dr <= dist; dr++){
        if (dr != 0) {
            acc += dev_arr1[idx + dr * rowsize];
        }
    }
    for (int dc = -dist; dc <= dist; dc++){
        acc += dev_arr1[idx + dc];
    }
    // (2*dist+1) cells per arm, centre counted once.
    dev_arr2[idx] = acc / ((dist + dist + 1) * 2 - 1);
}
__global__ void GPU(int *dev_arr1, int *dev_arr2, const int rowsize, const int colsize,
const int n1, const int threadsPerBlock, int padd, int stride){
// One relaxation sweep: each block handles one row (blockIdx.x), with
// threads striding across the n1 interior columns; `padd` skips the halo
// border at the left edge. `colsize` is unused here.
int offset = rowsize * blockIdx.x + padd;
int idx = threadIdx.x + offset;
while (idx < n1 + offset){
//_jacobi_square(dev_arr1, dev_arr2, idx, rowsize, stride);
_jacobi_cross(dev_arr1, dev_arr2, idx, rowsize, stride);
idx += threadsPerBlock;
}
// Make this block's global writes visible device-wide before returning.
__threadfence();
}
// Aborts the process if `err` is not cudaSuccess, printing the CUDA error
// string. Fixed to write the diagnostic to stderr instead of stdout.
// NOTE: __FILE__/__LINE__ expand at this helper's location, not the call
// site; a wrapper macro would be needed to report the caller's line.
void checkGPUError(cudaError err){
    if (cudaSuccess != err){
        fprintf(stderr, "CUDA error in file %s, in line %i: %s\n", __FILE__, __LINE__, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// Runs MAXTRIAL Jacobi-style sweeps of the cross stencil over the (n1 x n2)
// interior of a (rowsize x colsize) padded table, ping-ponging between two
// device buffers, then copies the final iterate back into arr1 and prints
// the elapsed wall time.
// NOTE(review): the arr2 parameter is never used here — presumably a
// leftover from an earlier version; confirm against callers.
void SOR(int n1, int n2, int padd, int *arr1, int *arr2, int MAXTRIAL, int stride){
int rowsize = n1 + 2 * padd;
int colsize = n2 + 2 * padd;
int *dev_arr1, *dev_arr2, *tmp;
int tablesize = rowsize * colsize;
// size_t freeMem, totalMem;
// cudaMemGetInfo(&freeMem, &totalMem);
// cout << "current GPU memory info FREE: " << freeMem << " Bytes, Total: " << totalMem << " Bytes.";
// cout << "colsize: " << colsize << ", rowsize: " << rowsize << ", allocates: " << tablesize * sizeof(int)<< " Bytes." << endl;
cudaError err = cudaMalloc(&dev_arr1, tablesize * sizeof(int));
checkGPUError(err);
err = cudaMalloc(&dev_arr2, tablesize * sizeof(int));
checkGPUError(err);
// Both device buffers are seeded with the same host data so the padded
// border cells are valid whichever buffer is read from.
cudaMemcpy(dev_arr1, arr1, tablesize * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_arr2, arr1, tablesize * sizeof(int), cudaMemcpyHostToDevice);
int threadsPerBlock = min(1024, n1);
int blocksPerGrid = n2;
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
struct timeval tbegin, tend;
gettimeofday(&tbegin, NULL);
//suppose n1 is the row size and the longer array
for (int t = 0; t < MAXTRIAL; t++){
// Base pointer is advanced by padd rows so blockIdx.x == 0 lands on the
// first interior row.
GPU<<<blocksPerGrid, threadsPerBlock>>>(&dev_arr1[padd * rowsize], &dev_arr2[padd * rowsize], rowsize, colsize, n1, threadsPerBlock, padd, stride);
cudaDeviceSynchronize();
// Ping-pong the buffers: this sweep's output is the next sweep's input.
tmp = dev_arr1;
dev_arr1 = dev_arr2;
dev_arr2 = tmp;
}
cudaDeviceSynchronize();
gettimeofday(&tend, NULL);
double s = (double)(tend.tv_sec - tbegin.tv_sec) + (double)(tend.tv_usec - tbegin.tv_usec) / 1000000.0;
// After the final swap dev_arr1 holds the newest iterate.
cudaMemcpy(arr1, dev_arr1, tablesize*sizeof(int), cudaMemcpyDeviceToHost);
#ifdef PRINT_FINAL_RESULT
//display table
cout << "result table: " << endl;
for (int i=0; i<colsize; i++){
for (int j=0; j<rowsize; j++){
cout << arr1[i * rowsize + j] << " ";
}
cout << endl;
}
#endif
cout << "execution time: " << s << " second." << endl;
cudaFree(dev_arr1);
cudaFree(dev_arr2);
}
|
8,360 | #include <stdio.h>
#include <iostream>
#include <chrono>
#include <math.h>
using namespace std::chrono;
using namespace std;
#define TPB 256
#define NUM_PARTICLES 100000
#define NUM_ITERATIONS 1000
#define N (NUM_PARTICLES/TPB + 1)
// State of one simulated particle: 3D position and 3D velocity.
struct particle {
float position[3];
float velocity[3];
};
// Per-axis seeds fed to gen_random (one seed per spatial dimension).
struct seed {
int x;
int y;
int z;
};
__host__ __device__ float gen_random(int seed, int particle_id, int iteration)
{
    // Deterministic pseudo-random value in [0, NUM_PARTICLES): a simple
    // integer hash of (seed, particle_id, iteration), identical on host
    // and device so the two implementations can be compared exactly.
    int hashed = (seed * particle_id + iteration) % NUM_PARTICLES;
    return (float)hashed;
}
__host__ __device__ void updateVelAndPos(particle *particles, seed seed, int iteration, int particle_id)
{
    // Draw a fresh velocity from the per-axis seeds, then integrate the
    // position by one unit timestep (pos += vel). Axes are independent, so
    // interleaving velocity and position updates per axis is equivalent to
    // the original "all velocities, then all positions" order.
    particle &p = particles[particle_id];
    const int axis_seed[3] = {seed.x, seed.y, seed.z};
    for (int axis = 0; axis < 3; axis++) {
        p.velocity[axis] = gen_random(axis_seed[axis], particle_id, iteration);
        p.position[axis] += p.velocity[axis];
    }
}
__global__ void timestepGPU(particle *particles, seed seed, int iteration) {
    // One particle per thread; guard against the padded tail of the last
    // block (N blocks of TPB threads may exceed NUM_PARTICLES).
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= NUM_PARTICLES)
        return;
    updateVelAndPos(particles, seed, iteration, gid);
}
void timestepCPU(particle *particles, seed seed, int iteration) {
    // Sequential reference implementation: advance every particle by one
    // iteration using the same update rule as the GPU kernel.
    int pid = 0;
    while (pid < NUM_PARTICLES) {
        updateVelAndPos(particles, seed, iteration, pid);
        ++pid;
    }
}
int main()
{
    // Run the same particle simulation on CPU and GPU, compare the final
    // positions, and report both runtimes. Fixes over the original:
    //  - particlesGPU was allocated with new[] and immediately overwritten by
    //    cudaMalloc (host memory leak);
    //  - pinned memory was released with cudaFree instead of cudaFreeHost;
    //  - both sides now start from zeroed state, so maxError no longer
    //    compares indeterminate values.
    seed seed = {5, 6, 7};
    // Value-initialisation () zeroes all positions/velocities.
    particle *particlesCPU = new particle[NUM_PARTICLES]();
    particle *particlesGPU2CPU = NULL;   // pinned host staging buffer
    particle *particlesGPU = NULL;       // device buffer
    cudaMallocHost(&particlesGPU2CPU, sizeof(particle) * NUM_PARTICLES);
    for (int p = 0; p < NUM_PARTICLES; p++)
        particlesGPU2CPU[p] = particle();  // zero the staging buffer too
    //////// CPU calculations ////////
    auto startCPU = high_resolution_clock::now();
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        timestepCPU(particlesCPU, seed, i);
    }
    auto stopCPU = high_resolution_clock::now();
    auto durationCPU = duration_cast<milliseconds>(stopCPU - startCPU);
    cout << "---------------\n";
    //////// GPU calculations ////////
    auto startGPU = high_resolution_clock::now();
    cudaMalloc(&particlesGPU, sizeof(particle) * NUM_PARTICLES);
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        // Round-trip host -> device -> host every iteration so the staging
        // buffer always holds the latest state; the transfer cost is part of
        // the measured GPU time, as in the original.
        cudaMemcpy(particlesGPU, particlesGPU2CPU, sizeof(particle) * NUM_PARTICLES, cudaMemcpyHostToDevice);
        timestepGPU<<<N, TPB>>>(particlesGPU, seed, i);
        cudaDeviceSynchronize();
        cudaMemcpy(particlesGPU2CPU, particlesGPU, sizeof(particle) * NUM_PARTICLES, cudaMemcpyDeviceToHost);
    }
    auto stopGPU = high_resolution_clock::now();
    auto durationGPU = duration_cast<milliseconds>(stopGPU - startGPU);
    //////// Compare calculations ////////
    float maxError = 0.0f;
    for (int particle_i = 0; particle_i < NUM_PARTICLES; particle_i++) {
        for (int dim = 0; dim < 3; dim++) {
            maxError = fmax(maxError, fabs(
                particlesGPU2CPU[particle_i].position[dim] - particlesCPU[particle_i].position[dim]
            ));
        }
    }
    std::cout << "Max error: " << maxError << std::endl;
    cudaFreeHost(particlesGPU2CPU);  // pinned memory must be freed with cudaFreeHost
    cudaFree(particlesGPU);
    delete[] particlesCPU;
    cout << "CPU duration in milliseconds: " << durationCPU.count() << endl;
    cout << "GPU duration in milliseconds: " << durationGPU.count() << endl;
    return 0;
}
|
8,361 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* nvcc -o linear_cuda linear_cuda.cu -lm
*
* To run:
* ./linear_cuda
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
// One 2D sample (x, y) for the straight-line fit.
typedef struct point_t{
double x;
double y;
}point_t;
// Number of samples in data[] — kept both as a host global and as a device
// copy (d_n_data is unused in the visible code; presumably for device-side
// loops — confirm before removing).
int n_data = 1000;
__device__ int d_n_data =1000;
point_t data[] = {
{68.96,127.53},{65.01,85.76},{87.32,132.24},{85.08,120.53},
{68.86,126.80},{65.62,111.97},{76.46,129.69},{73.83,107.29},
{45.02,87.66},{68.95,100.22},{72.95,127.57},{69.97,114.45},
{69.74,105.81},{71.56,105.36},{66.04,99.73},{72.47,109.18},
{95.34,132.58},{24.33,60.81},{65.78,100.00},{ 7.31,35.35},
{88.70,127.68},{ 9.06,50.60},{63.57,108.82},{71.06,105.91},
{35.08,75.71},{ 5.40,49.77},{16.95,55.92},{85.51,118.30},
{ 2.40,59.04},{95.63,144.59},{87.35,115.66},{92.97,137.81},
{95.75,139.55},{86.47,126.39},{83.66,133.68},{28.64,74.53},
{47.87,89.95},{ 8.28,32.43},{85.34,132.68},{38.21,44.45},
{95.68,125.22},{76.39,118.03},{90.89,122.64},{64.59,88.16},
{17.45,49.26},{88.35,122.50},{97.29,159.00},{13.76,37.25},
{44.96,93.57},{86.97,126.46},{26.71,54.98},{66.52,104.86},
{27.72,68.65},{45.97,92.40},{61.71,105.62},{23.82,60.72},
{57.51,104.36},{44.17,84.64},{59.08,95.61},{24.68,57.64},
{62.27,113.18},{65.89,99.82},{33.56,67.81},{93.29,144.45},
{74.46,113.15},{26.44,75.49},{88.38,142.05},{72.05,102.19},
{89.67,143.23},{47.87,103.92},{77.77,125.90},{85.02,143.76},
{19.86,50.69},{73.26,122.63},{58.81,100.48},{55.19,78.87},
{86.07,147.56},{48.11,86.36},{38.07,78.48},{57.03,102.88},
{34.27,79.25},{89.14,147.54},{69.68,129.18},{14.69,51.37},
{ 5.53,37.91},{52.35,77.93},{78.30,135.53},{73.35,105.80},
{ 5.31,38.47},{ 9.45,36.88},{17.71,47.50},{63.41,94.28},
{97.90,141.88},{54.39,100.17},{93.88,131.56},{97.73,147.23},
{75.54,117.14},{76.13,135.50},{51.16,86.67},{51.78,92.42},
{50.60,92.68},{53.41,99.78},{96.79,133.74},{16.54,38.54},
{ 5.74,32.37},{16.56,46.26},{70.19,100.04},{30.91,72.43},
{84.11,156.25},{23.03,64.47},{56.39,86.68},{19.27,68.88},
{13.92,60.07},{96.69,147.94},{46.97,95.63},{41.90,77.09},
{67.70,110.96},{93.23,149.54},{20.48,63.12},{71.10,119.84},
{32.40,68.30},{85.18,126.57},{86.91,126.74},{24.29,61.76},
{ 7.57,29.04},{53.31,106.04},{47.42,104.29},{18.03,59.08},
{29.16,80.23},{99.86,149.00},{64.70,105.44},{53.10,109.96},
{89.31,142.34},{90.07,138.83},{93.94,141.90},{41.06,75.52},
{64.16,120.61},{59.67,94.68},{73.53,96.35},{32.83,76.15},
{ 2.15,17.50},{40.67,70.85},{ 3.69,52.57},{72.86,110.61},
{ 4.69,52.94},{79.66,129.22},{61.24,104.26},{60.06,105.03},
{63.95,90.36},{ 8.54,24.83},{37.27,88.49},{35.28,69.49},
{ 1.63,56.98},{61.10,111.69},{ 6.61,56.58},{ 2.36,29.57},
{72.06,121.00},{42.28,78.50},{78.45,127.41},{34.64,89.91},
{73.45,139.16},{ 0.51,44.90},{91.48,165.90},{27.37,61.15},
{33.80,72.12},{52.21,87.56},{99.03,160.18},{37.29,72.46},
{48.24,98.94},{11.87,50.81},{81.70,110.47},{60.23,98.78},
{16.63,66.26},{24.27,71.76},{99.53,131.86},{75.14,115.86},
{18.63,42.32},{65.68,107.98},{11.98,63.83},{91.97,125.57},
{64.16,100.09},{11.31,39.43},{14.60,50.13},{65.90,105.92},
{67.79,102.00},{48.40,98.05},{ 5.73,43.72},{40.18,64.53},
{97.11,154.61},{84.39,124.80},{75.42,131.44},{31.79,62.32},
{61.98,77.29},{88.47,147.40},{58.14,105.39},{68.76,124.48},
{81.44,128.43},{75.76,138.16},{81.65,135.84},{73.71,114.01},
{27.58,61.17},{75.96,142.08},{76.34,115.53},{ 8.29,46.24},
{79.85,136.90},{43.94,109.71},{86.56,123.14},{89.91,138.47},
{33.08,76.88},{32.44,83.79},{14.62,39.60},{51.16,77.77},
{89.31,130.62},{88.49,129.58},{18.82,53.62},{19.32,61.80},
{33.04,71.10},{37.12,63.13},{65.92,109.67},{84.93,131.02},
{61.27,109.78},{62.67,112.65},{75.64,122.50},{55.05,87.99},
{46.89,85.81},{48.24,98.30},{53.21,95.11},{41.53,65.36},
{47.44,88.03},{58.20,120.96},{25.08,60.30},{24.15,55.72},
{19.14,54.67},{79.71,146.44},{99.99,164.00},{71.63,105.81},
{53.70,97.55},{77.19,123.67},{23.40,82.08},{67.81,111.97},
{ 4.15,26.88},{11.43,34.86},{34.23,88.30},{38.68,78.98},
{59.86,116.72},{99.99,144.33},{64.87,107.33},{88.26,136.75},
{48.78,90.87},{47.84,93.68},{89.39,140.84},{ 8.73,35.47},
{34.62,78.93},{60.41,111.93},{10.88,45.98},{37.95,58.17},
{18.59,70.39},{99.22,150.22},{64.17,111.38},{22.04,47.32},
{78.28,130.70},{68.19,103.80},{31.25,72.37},{16.62,47.74},
{55.87,101.58},{17.34,39.23},{35.69,75.42},{58.62,97.64},
{ 6.52,29.17},{40.58,91.40},{65.55,99.45},{41.15,78.13},
{62.27,111.89},{11.71,42.01},{75.78,121.16},{19.56,67.95},
{92.11,147.71},{13.30,50.97},{ 8.66,42.25},{72.26,121.14},
{62.56,117.12},{36.94,80.80},{28.93,67.37},{76.71,122.06},
{72.67,128.50},{87.02,131.77},{17.54,48.18},{22.53,79.49},
{53.04,84.45},{17.77,62.25},{40.29,87.77},{63.05,101.03},
{73.15,127.17},{26.54,49.42},{66.87,94.86},{73.19,122.96},
{59.87,93.83},{48.56,93.25},{95.18,140.41},{87.93,144.89},
{26.72,60.18},{73.98,118.58},{47.11,87.54},{91.70,129.40},
{21.05,65.09},{39.77,87.22},{15.52,52.98},{85.86,136.30},
{14.31,63.49},{34.07,73.69},{58.68,81.45},{ 3.90,24.63},
{12.31,71.15},{23.14,48.98},{49.94,77.10},{23.31,50.64},
{22.09,53.63},{ 7.58,55.10},{43.46,69.88},{ 9.49,40.86},
{33.39,71.79},{34.47,69.23},{ 4.20,50.76},{93.26,164.56},
{36.40,78.93},{92.22,126.15},{82.28,132.66},{63.56,112.14},
{ 7.50,46.47},{72.48,102.94},{12.47,42.55},{45.53,103.53},
{ 5.85,39.90},{62.85,104.22},{ 1.94,29.59},{ 9.02,36.32},
{78.15,117.25},{93.71,131.84},{35.21,72.45},{ 6.07,45.70},
{20.68,43.62},{57.40,86.08},{ 5.28,44.22},{85.62,141.01},
{ 9.53,45.18},{18.03,61.91},{50.22,95.06},{ 2.63,50.96},
{48.49,96.73},{11.12,54.22},{37.12,77.51},{ 5.33,57.55},
{16.76,47.02},{ 8.03,34.18},{70.23,128.52},{16.56,52.69},
{63.37,108.58},{83.57,142.86},{91.49,132.78},{ 9.37,51.24},
{18.78,52.63},{79.98,126.36},{73.47,128.03},{95.86,147.32},
{ 4.73,60.22},{84.14,149.15},{88.99,138.22},{72.60,130.12},
{88.98,128.37},{26.06,67.67},{35.28,76.42},{58.90,96.32},
{45.55,75.07},{ 3.29,43.62},{ 4.47,38.75},{50.13,99.31},
{33.01,81.84},{40.11,84.34},{50.97,100.60},{97.86,158.95},
{96.50,121.60},{ 9.18,46.59},{19.24,65.28},{76.18,111.45},
{76.50,119.12},{12.55,63.80},{97.12,133.37},{ 5.93,36.40},
{33.25,69.64},{84.92,118.79},{66.74,89.72},{92.66,143.98},
{61.87,110.39},{90.93,148.82},{10.67,33.91},{41.69,69.79},
{ 7.38,45.76},{14.67,47.62},{56.02,79.59},{ 4.78,45.69},
{28.83,57.34},{22.07,58.37},{53.41,76.67},{43.34,82.03},
{88.11,125.88},{67.00,104.43},{78.81,112.15},{76.83,136.31},
{ 3.09,34.19},{31.33,75.78},{71.77,107.29},{66.08,85.43},
{ 0.45,21.58},{88.37,141.69},{10.62,29.70},{85.76,126.94},
{34.79,75.21},{78.52,129.32},{86.33,114.24},{16.52,58.29},
{39.36,62.89},{15.57,53.76},{59.81,92.10},{58.43,93.88},
{14.50,61.10},{55.43,104.94},{66.38,96.65},{28.48,79.21},
{50.99,120.90},{38.07,77.51},{31.14,70.22},{11.99,57.21},
{41.77,82.32},{79.03,94.12},{19.43,59.39},{79.12,123.58},
{85.45,118.30},{67.74,109.74},{62.84,102.58},{ 8.40,45.54},
{87.57,145.11},{26.07,58.81},{63.87,109.66},{47.17,93.24},
{67.35,108.91},{94.26,128.83},{10.75,62.95},{ 2.51,42.89},
{45.79,86.02},{32.68,78.07},{ 4.71,33.98},{57.11,73.85},
{19.18,62.78},{42.38,83.67},{44.88,92.97},{89.97,143.27},
{ 6.76,39.38},{16.41,52.96},{47.36,85.61},{91.90,130.97},
{56.10,117.91},{86.81,133.28},{92.18,123.40},{16.36,58.04},
{64.35,101.99},{75.90,124.29},{43.32,83.52},{89.66,135.32},
{78.56,110.50},{39.59,86.55},{ 7.93,44.82},{46.39,84.26},
{88.63,140.71},{19.31,57.72},{ 9.16,59.68},{23.16,40.90},
{93.83,140.28},{53.68,98.26},{ 2.33,46.84},{34.58,72.81},
{71.27,107.16},{70.72,106.57},{ 2.35,47.62},{20.48,52.38},
{83.20,126.52},{46.03,76.24},{37.58,74.51},{69.44,131.27},
{36.64,62.15},{94.83,148.08},{58.65,100.06},{73.16,139.40},
{84.49,118.86},{ 9.43,52.92},{49.27,92.82},{69.92,121.16},
{55.95,97.27},{ 7.24,37.61},{40.55,78.33},{41.55,77.72},
{59.77,122.83},{76.75,102.31},{27.69,80.01},{48.67,90.20},
{36.85,80.87},{84.52,124.20},{41.71,92.17},{29.19,66.22},
{74.49,108.35},{29.78,60.30},{16.23,41.24},{ 6.34,19.78},
{89.98,131.97},{40.26,91.10},{72.53,113.90},{95.16,137.85},
{60.83,100.78},{58.74,91.69},{24.47,58.96},{83.63,115.99},
{26.27,61.48},{33.63,74.52},{80.42,113.21},{39.78,82.67},
{81.95,122.15},{25.35,65.00},{23.53,68.26},{ 7.01,39.79},
{21.32,56.11},{10.82,38.47},{ 4.62,41.09},{32.92,62.30},
{88.21,145.20},{66.31,103.93},{46.19,71.34},{80.10,119.02},
{29.82,63.25},{93.02,148.19},{45.49,70.06},{81.25,113.65},
{23.57,74.86},{78.64,122.69},{20.22,43.72},{74.17,105.40},
{67.71,94.15},{17.74,50.42},{95.90,159.78},{54.73,110.59},
{92.53,146.46},{91.50,138.82},{18.92,63.64},{19.22,35.06},
{30.02,69.95},{84.45,136.00},{68.04,109.76},{57.38,100.38},
{71.74,123.01},{19.09,48.68},{86.06,135.82},{70.58,115.69},
{97.76,139.68},{42.58,91.52},{57.93,92.63},{55.97,87.95},
{25.56,76.97},{80.24,120.12},{ 2.99,41.31},{56.64,84.16},
{24.89,66.51},{82.41,109.51},{45.73,83.49},{ 9.62,34.83},
{ 3.74,58.22},{11.68,57.95},{32.51,65.83},{77.26,114.26},
{49.82,84.79},{28.71,54.87},{37.75,72.29},{59.31,106.63},
{51.90,94.91},{20.46,70.92},{85.50,127.14},{81.14,136.98},
{55.69,112.07},{48.79,99.85},{24.46,55.17},{26.47,68.48},
{42.43,87.73},{80.52,114.92},{84.14,140.89},{86.86,141.71},
{12.68,30.35},{ 3.73,42.17},{ 7.60,60.63},{64.34,126.44},
{85.86,122.89},{57.23,114.70},{47.53,82.24},{30.72,79.85},
{52.33,111.71},{30.61,82.25},{97.16,144.89},{30.87,80.86},
{17.41,52.54},{81.92,121.35},{19.04,51.89},{42.32,75.01},
{83.73,119.97},{39.70,76.42},{83.70,133.99},{11.67,59.68},
{77.52,135.54},{65.02,98.96},{ 7.11,51.56},{66.44,108.60},
{28.34,93.97},{71.65,110.83},{13.83,64.17},{79.87,120.42},
{89.87,141.39},{79.64,122.06},{51.93,89.38},{26.51,77.05},
{10.92,38.97},{ 5.97,40.70},{77.61,119.67},{13.14,44.19},
{87.71,116.88},{57.53,92.75},{27.62,70.09},{29.27,69.03},
{20.73,65.84},{38.19,60.27},{86.61,124.87},{90.66,135.23},
{77.13,144.86},{13.31,45.32},{35.55,71.23},{80.84,145.99},
{93.69,155.92},{ 5.83,50.37},{ 7.94,44.67},{84.78,125.79},
{11.87,61.17},{35.87,79.17},{48.95,81.77},{42.87,68.65},
{43.10,86.84},{89.16,143.31},{38.42,87.05},{76.17,111.65},
{20.53,54.64},{66.33,106.64},{19.73,61.52},{80.44,143.66},
{46.08,69.80},{17.62,46.53},{41.39,81.66},{76.12,114.60},
{32.26,65.65},{98.98,121.50},{ 1.96,31.81},{71.69,109.95},
{46.76,88.26},{38.41,77.51},{35.47,63.13},{62.18,104.82},
{28.42,67.86},{96.24,138.38},{41.83,64.70},{80.31,149.33},
{50.88,88.60},{ 1.66,37.83},{99.91,142.80},{57.39,82.67},
{12.86,46.49},{58.79,90.08},{65.58,109.78},{49.54,88.45},
{ 6.37,41.62},{67.51,108.56},{18.24,50.92},{77.98,131.81},
{90.20,148.03},{21.38,75.94},{42.88,93.95},{96.38,149.32},
{ 5.07,44.28},{52.75,80.06},{50.70,108.89},{81.88,115.56},
{ 4.60,37.51},{38.17,73.12},{14.99,45.82},{10.97,47.18},
{75.32,101.50},{95.95,137.43},{50.57,95.91},{ 0.07,55.83},
{32.42,56.02},{66.81,110.09},{59.14,102.50},{39.88,88.37},
{57.85,100.14},{52.50,80.54},{22.84,72.37},{38.72,82.42},
{14.48,47.15},{82.81,125.64},{57.23,95.95},{19.63,63.02},
{90.48,125.97},{71.53,110.76},{25.76,87.23},{28.43,58.78},
{98.37,151.15},{55.71,88.79},{74.61,117.52},{20.01,65.00},
{58.04,88.71},{32.14,66.53},{39.87,82.46},{49.01,94.39},
{84.59,123.84},{ 3.08,32.69},{50.45,106.16},{26.42,68.12},
{53.29,86.20},{ 2.72,57.51},{ 1.54,29.53},{35.49,72.18},
{79.91,138.59},{85.79,129.25},{88.35,154.88},{44.52,78.35},
{30.21,69.77},{77.29,120.37},{89.62,142.68},{64.90,111.51},
{98.16,141.77},{15.36,49.63},{ 0.37,37.14},{82.93,132.02},
{27.58,59.23},{ 6.08,43.11},{89.91,137.00},{20.28,61.45},
{94.17,138.67},{93.67,124.21},{36.48,94.01},{83.43,119.68},
{83.33,144.57},{21.92,67.84},{39.66,63.24},{30.35,60.92},
{46.61,82.91},{27.87,68.14},{82.22,138.02},{86.29,143.66},
{10.95,57.85},{43.78,89.55},{40.95,99.32},{30.04,71.57},
{32.84,78.86},{93.70,142.56},{86.15,129.33},{80.62,121.02},
{53.96,82.26},{58.19,108.04},{67.13,112.76},{16.76,73.94},
{20.60,67.45},{51.76,90.16},{23.21,66.24},{55.99,102.86},
{73.70,125.40},{31.94,81.57},{78.51,121.66},{79.90,113.17},
{ 9.68,26.24},{67.92,121.15},{78.62,120.05},{98.45,122.39},
{57.05,101.16},{96.08,134.21},{41.81,68.90},{92.42,139.92},
{50.30,83.41},{28.67,69.79},{77.25,127.93},{57.85,103.22},
{44.48,75.12},{67.49,113.63},{26.51,68.03},{12.33,31.87},
{29.27,75.12},{81.10,128.69},{72.49,122.98},{ 6.01,41.59},
{79.87,129.32},{10.51,28.90},{60.30,111.99},{ 1.26,52.98},
{ 4.88,28.91},{ 2.98,46.82},{88.60,147.69},{24.52,54.10},
{89.48,141.16},{40.27,92.25},{74.78,117.52},{30.04,81.65},
{ 5.71,28.41},{52.20,102.10},{48.34,88.34},{60.97,106.85},
{63.86,107.83},{28.10,62.35},{89.62,129.03},{38.97,77.26},
{88.69,155.39},{27.49,62.47},{87.41,132.05},{82.11,134.45},
{79.18,118.30},{59.62,100.75},{ 8.45,54.09},{ 6.00,61.12},
{ 8.44,49.34},{35.73,81.23},{18.75,34.33},{ 9.78,51.38},
{44.86,73.93},{78.69,135.02},{11.04,45.15},{24.43,49.65},
{42.03,58.97},{40.40,82.21},{24.07,86.92},{54.24,113.12},
{54.77,89.25},{62.12,105.79},{70.63,126.01},{15.77,75.29},
{39.32,65.92},{84.30,139.09},{92.34,147.50},{40.31,78.05},
{13.49,50.25},{87.67,138.67},{47.38,97.39},{82.71,133.50},
{63.75,92.03},{88.34,142.39},{31.86,78.46},{60.26,97.14},
{16.03,39.72},{91.76,142.42},{78.15,126.26},{ 3.06,37.66},
{20.55,45.39},{57.54,97.64},{72.00,128.48},{21.09,68.82},
{ 2.45,35.53},{74.77,122.90},{80.88,118.20},{76.70,126.19},
{72.69,114.68},{ 3.74,39.81},{72.18,113.83},{68.12,126.70},
{57.45,112.45},{11.52,48.76},{44.65,77.15},{53.14,79.26},
{75.78,135.05},{ 5.83,31.59},{92.02,140.96},{29.75,69.06},
{60.58,99.74},{46.53,77.98},{19.95,64.23},{86.68,149.10},
{45.06,76.86},{51.39,80.24},{80.79,122.30},{73.59,116.53},
{50.96,100.97},{94.21,153.47},{86.68,124.49},{28.62,65.69},
{ 8.78,42.38},{53.35,90.09},{59.11,97.29},{13.06,50.81},
{42.57,91.35},{13.41,43.16},{68.72,108.20},{68.55,104.63},
{62.61,108.58},{85.64,113.26},{98.84,160.36},{ 0.19,36.29},
{58.07,93.16},{33.71,71.58},{41.97,70.27},{17.98,48.84},
{96.32,152.67},{21.67,67.96},{19.69,66.15},{45.43,89.15},
{36.83,70.71},{89.54,149.37},{74.30,121.36},{53.12,111.79},
{ 2.34,35.53},{57.07,119.67},{20.49,57.49},{72.87,128.30},
{46.63,94.42},{36.77,100.65},{90.66,140.08},{68.92,112.37},
{82.84,145.28},{56.07,108.27},{12.12,64.91},{94.05,147.84},
{56.37,84.90},{72.85,120.36},{27.81,64.37},{88.73,151.39},
{62.88,105.87},{15.31,46.92},{31.82,81.06},{39.65,101.87},
{84.27,120.37},{88.79,139.21},{14.19,45.44},{65.84,118.07},
{72.67,109.95},{38.81,76.52},{91.48,139.40},{59.40,115.85},
{17.04,42.52},{ 1.75,27.23},{83.42,127.38},{50.29,80.97},
{81.42,123.22},{29.92,76.14},{ 7.17,37.08},{68.79,107.63},
{50.84,104.49},{87.27,141.04},{32.07,56.88},{99.92,145.67},
{14.57,53.89},{58.88,91.73},{18.80,37.56},{38.30,76.37},
{75.07,131.78},{12.57,54.81},{66.46,119.08},{58.35,101.90},
{29.52,72.98},{31.11,68.93},{95.28,133.59},{50.97,98.95},
{96.48,124.92},{14.72,59.72},{33.57,73.66},{ 6.46,49.69},
{89.52,161.37},{40.24,81.66},{98.37,140.08},{58.30,101.01},
{27.93,62.99},{51.38,89.40},{79.18,124.01},{44.73,86.13},
{15.53,41.29},{ 9.66,39.98},{54.08,83.34},{44.47,78.96},
{89.22,124.26},{57.98,107.28},{62.38,112.62},{74.98,125.71},
{43.69,96.91},{67.33,104.22},{56.90,78.79},{96.01,144.67},
{45.66,69.55},{93.00,141.30},{77.86,125.94},{49.47,83.31},
{88.73,138.14},{ 2.74,42.22},{35.73,67.54},{45.61,75.28}
};
double residual_error(double x, double y, double m, double c) {
    // Squared vertical distance between the point (x, y) and the line
    // y = m*x + c.
    double diff = m * x + c - y;
    return diff * diff;
}
__device__ double d_residual_error(double x, double y, double m, double c) {
    // Device-side twin of residual_error(): squared residual of (x, y)
    // against the line y = m*x + c.
    const double diff = (m * x) + c - y;
    return diff * diff;
}
double rms_error(double m, double c) {
    // Root-mean-square residual of the candidate line (m, c) over the
    // n_data host samples in data[].
    double total = 0.0;
    for (int i = 0; i < n_data; i++) {
        total += residual_error(data[i].x, data[i].y, m, c);
    }
    return sqrt(total / n_data);
}
// One thread per data point: thread i writes the squared residual of point i
// (against the line *m, *c) into error_sum_arr[i]; the host reduces the array.
// NOTE(review): no bounds guard — assumes the launch supplies exactly 1000
// threads (the <<<100,10>>> configuration used in main); confirm at call sites.
__global__ void d_rms_error(double *m, double *c,double *error_sum_arr,point_t *d_data) {
int i = threadIdx.x + blockIdx.x *blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x,d_data[i].y, *m, *c);
}
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
    // Stores finish - start in nanoseconds into *difference. Returns 0 on a
    // strictly positive span, non-zero otherwise (finish <= start).
    long long int secs = finish->tv_sec - start->tv_sec;
    long long int nanos = finish->tv_nsec - start->tv_nsec;
    if (nanos < 0) {
        // Borrow one second when the nanosecond part underflows.
        secs -= 1;
        nanos += 1000000000;
    }
    *difference = secs * 1000000000 + nanos;
    return !(*difference > 0);
}
int main(){
    // Gradient search for the least-RMS-error line y = m*x + c over data[]:
    // from the base estimate (bm, bc), generate 8 neighbouring estimates one
    // `step` away in m-c space (directions om/oc), score each on the GPU, and
    // move the base to the best neighbour; stop when none improves on it.
    // Fixes over the original: error_sum_total was read uninitialised on its
    // first use (UB); a duplicated, self-corrupting recomputation of e[i] was
    // removed; deprecated cudaThreadSynchronize() replaced.
    int i;
    double bm = 1.3;
    double bc = 10;
    double be;                     // RMS error of the current base estimate
    double dm[8];
    double dc[8];
    double e[8];
    double step = 0.01;
    double best_error = 999999999;
    int best_error_i = 0;          // initialised so it is never read unset
    int minimum_found = 0;
    // The 8 step directions in (m, c) space.
    double om[] = {0,1,1, 1, 0,-1,-1,-1};
    double oc[] = {1,1,0,-1,-1,-1, 0, 1};
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    cudaError_t error;
    double *d_dm;
    double *d_dc;
    double *d_error_sum_arr;
    point_t *d_data;
    be = rms_error(bm, bc);
    error = cudaMalloc(&d_dm, (sizeof(double) * 8));
    if(error){
        fprintf(stderr,"cudaMalloc on d_dm returned %d %s\n",error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaMalloc(&d_dc, (sizeof(double) * 8));
    if(error){
        fprintf(stderr,"cudaMalloc on d_dc returned %d %s\n",error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
    if(error){
        fprintf(stderr,"cudaMalloc on d_error_sum_arr returned %d %s\n",error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaMalloc(&d_data, sizeof(data));
    if(error){
        fprintf(stderr,"cudaMalloc on d_data returned %d %s\n",error,
                cudaGetErrorString(error));
        exit(1);
    }
    while(!minimum_found) {
        // Generate the 8 candidate estimates around the current base.
        for(i = 0; i < 8; i++) {
            dm[i] = bm + (om[i] * step);
            dc[i] = bc + (oc[i] * step);
        }
        error = cudaMemcpy(d_dm, dm, (sizeof(double)*8), cudaMemcpyHostToDevice);
        if(error){
            fprintf(stderr,"cudaMemcpy to d_dm returned %d %s\n",error,
                    cudaGetErrorString(error));
        }
        error = cudaMemcpy(d_dc, dc, (sizeof(double)*8), cudaMemcpyHostToDevice);
        if(error){
            fprintf(stderr,"cudaMemcpy to d_dc returned %d %s\n",error,
                    cudaGetErrorString(error));
        }
        error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
        if(error){
            fprintf(stderr,"cudaMemcpy to d_data returned %d %s\n",error,
                    cudaGetErrorString(error));
        }
        for(i = 0; i < 8; i++){
            double h_error_sum_arr[1000];
            double error_sum_total = 0;  // was uninitialised in the original
            double error_sum_mean;
            // One thread per data point: 100 blocks x 10 threads == n_data.
            d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
            cudaDeviceSynchronize();     // cudaThreadSynchronize() is deprecated
            error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000),
                               cudaMemcpyDeviceToHost);
            if(error){
                fprintf(stderr,"cudaMemcpy to error_sum returned %d %s\n",error,
                        cudaGetErrorString(error));
            }
            // Reduce the per-point squared residuals to one RMS value.
            for(int j = 0; j < n_data; j++){
                error_sum_total += h_error_sum_arr[j];
            }
            error_sum_mean = error_sum_total / n_data;
            e[i] = sqrt(error_sum_mean);
            if(e[i] < best_error){
                best_error = e[i];
                best_error_i = i;
            }
        }
        // Move the base if any neighbour improved it, otherwise stop.
        if(best_error < be){
            be = best_error;
            bm = dm[best_error_i];
            bc = dc[best_error_i];
        } else {
            minimum_found = 1;
        }
    }
    error = cudaFree(d_dm);
    if(error){
        fprintf(stderr,"cudaFree on d_dm returned %d %s\n",error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaFree(d_dc);
    if(error){
        fprintf(stderr,"cudaFree on d_dc returned %d %s\n",error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaFree(d_data);
    if(error){
        fprintf(stderr,"cudaFree on d_data returned %d %s\n",error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaFree(d_error_sum_arr);
    if(error){
        fprintf(stderr,"cudaFree on d_error_sum_arr returned %d %s\n",error,
                cudaGetErrorString(error));
        exit(1);
    }
    printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
           (time_elapsed/1.0e9));
    return 0;
}
|
8,362 | #include <iostream>
#include <cstdio>
using namespace std;
__global__
// Each thread allocates an N-double buffer on the device heap, writes and
// prints the last element, then frees it. Device-side new returns NULL when
// the device heap is exhausted, so the result must be checked before use.
void fun(int N)
{
    double *a = new double[N];
    if (a == NULL) {
        // Heap exhausted (e.g. too many concurrent threads): skip instead of
        // dereferencing a null pointer.
        printf("device heap allocation failed\n");
        return;
    }
    a[N-1] = 3.1415926;
    printf("a[] = %f\n", a[N-1]);
    delete[] a;
}
int main()
{
    // Launch ten device threads, each exercising in-kernel heap allocation,
    // then block until the kernel finishes so its printf output is flushed.
    const int buf_len = 10;
    fun<<<1, 10>>>(buf_len);
    cudaDeviceSynchronize();
}
|
8,363 | #include <stdlib.h>
#include <stdio.h>
#include "convolution.cuh"
// Device-side initialisation of the convolution kernel ("noyau"): presumably
// fills the tap offsets (indi, indj) and coefficients C for a kernel of
// radius R scaled by coeff — confirm against the exercise statement.
__global__ void kernel_init_noyau(
/* IN */
int R,
float coeff,
/* OUT */
int *indi,
int *indj,
float *C
)
{
// TODO: to be written (original comment: "A ECRIRE")
}
// Builds the device-side kernel descriptor from the host descriptor h_noyau
// and radius R (exercise stub). Returns NULL until implemented: falling off
// the end of a non-void function, as the original did, is undefined behaviour.
noyau_t *init_noyau(noyau_t *h_noyau, int R)
{
    // TODO: to be written (original comment: "A ECRIRE")
    (void)h_noyau;
    (void)R;
    return NULL;
}
// Releases the host (h_noyau) and device (d_noyau) kernel descriptors
// (exercise stub — presumably cudaFree/free of the members; confirm against
// the allocation done in init_noyau).
void free_noyau(noyau_t *h_noyau, noyau_t *d_noyau)
{
// TODO: to be written (original comment: "A ECRIRE")
}
// Reference to element (i, j) of a pitched 2D float array: row i begins
// i*pitch BYTES after the base (pitch as returned by cudaMallocPitch), and
// column j is a float offset within that row.
__device__ float &elt_ref(void *base_addr, size_t pitch, int i, int j)
{
float *p_elt = (float*)((char*)base_addr + i*pitch) + j;
return *p_elt;
}
// Global-memory convolution: each thread computes one output pixel (i, j),
// offset by the kernel radius R so the taps stay inside the R-cell border,
// as the weighted sum of the KMAX (indi, indj, C) taps stored in d_noyau.
__global__ void convol_gl(
noyau_t *d_noyau,
/* IN */
float *d_buf_A,
size_t pitchA,
int Ni,
int Nj,
/* OUT */
float *d_buf_B,
size_t pitchB /* IN */
)
{
// Shift thread coordinates by R so (i, j) indexes the interior region.
int i = d_noyau->R + blockIdx.x * blockDim.x + threadIdx.x;
int j = d_noyau->R + blockIdx.y * blockDim.y + threadIdx.y;
if (i < Ni+d_noyau->R && j < Nj+d_noyau->R)
{
float tmp_B = 0;
for(int k = 0 ; k < d_noyau->KMAX ; k++)
{
// Each tap k reads the input at (i, j) plus its stored offset.
const float val_A =
elt_ref(d_buf_A, pitchA, i+d_noyau->indi[k], j+d_noyau->indj[k]);
tmp_B += d_noyau->C[k] * val_A;
}
elt_ref(d_buf_B, pitchB, i, j) = tmp_B;
}
}
// Shared-memory variant of convol_gl (exercise stub): same interface,
// presumably intended to stage input tiles in shared memory before applying
// the taps — confirm against the exercise statement.
__global__ void convol_sh(
noyau_t *d_noyau,
/* IN */
float *d_buf_A,
size_t pitchA,
int Ni,
int Nj,
/* OUT */
float *d_buf_B,
size_t pitchB /* IN */
)
{
// TODO: to be written (original comment: "A ECRIRE")
}
|
8,364 | #include "includes.h"
//!!nvcc -c test.cu --compiler-options -fPIC
//!g++ -o program -L/usr/local/cuda/lib64 main.cpp test.o -lcuda -lcudart
// Single-result kernel: *c = (*a) raised to (*b) via the float intrinsic.
// NOTE(review): all operands are single pointers, so this is presumably
// launched <<<1,1>>> (every thread would write the same *c) — confirm callers.
// The name shadows ::pow; acceptable for a standalone .cu module.
__global__ void pow(float *a,float *b,float *c)
{
*c = powf(*a,*b);
} |
8,365 | #include <cstdio>
#include <cassert>
// Prints the seed, performs atomic adds at a hard-coded raw address, and
// asserts the seed is non-zero.
// NOTE(review): 12312433432 is a fixed integer reinterpreted as a device
// pointer — almost certainly not a valid allocation. This looks like a
// deliberate fault-injection test of CUDA error reporting rather than real
// initialisation; confirm intent before reusing.
__global__ void init_random_numbers(unsigned int seed) {
printf("seed = %d\n", seed);
atomicAdd((int *)(12312433432), 123);
atomicAdd((float *)(12312433432), 123.0f);
assert(seed != 0);
}
int main() {
    // Launch the kernel, then synchronize: kernel launches are asynchronous,
    // so without this the process can exit before the kernel runs and its
    // printf/assert output (or the error from the bad-address atomics) is
    // never surfaced.
    init_random_numbers<<<1024, 1024>>>(1);
    cudaDeviceSynchronize();
    return 0;
}
|
8,366 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
__host__ __device__ int h_d_max(int a, int b)
{
    // Larger of two ints, callable from both host and device code.
    if (a > b)
        return a;
    return b;
}
__host__ __device__ float h_d_average(const float *vec, int n)
{
    // Arithmetic mean of vec[0..n-1]. Returns 0 for a non-positive count
    // instead of dividing by zero (the original performed 0.0f / 0).
    if (n <= 0)
        return 0.0f;
    float sum = 0;
    int i;
    for (i = 0; i < n; i++)
        sum += vec[i];
    return sum / n;
}
void h_win_average(const float *A, float **B, size_t size, size_t n)
{
    // Host reference for the windowed average: (*B)[i] is the mean of the n
    // inputs at indices i-n .. i-1, clamped to index 0 at the left edge (the
    // window excludes element i itself, matching d_naive_win_average).
    // Rewritten to drop the original's variable-length array (float v_els[n],
    // a non-standard GNU extension in C++) and the O(n) temp copy per output.
    for (size_t i = 0; i < size; i++) {
        float sum = 0.0f;
        for (size_t j = 0; j < n; j++) {
            // Same clamped index the original got via h_d_max(0, i - n + j).
            int idx = (int)i - (int)n + (int)j;
            sum += A[idx > 0 ? idx : 0];
        }
        (*B)[i] = sum / n;
    }
}
__device__ int get_global_idx_2d_2d()
{
    // Flatten a 2D grid of 2D blocks into one linear thread index
    // (row-major in both the grid and the block).
    const int blocks_before = blockIdx.y * gridDim.x + blockIdx.x;
    const int threads_per_block = blockDim.x * blockDim.y;
    const int local = threadIdx.y * blockDim.x + threadIdx.x;
    return blocks_before * threads_per_block + local;
}
// Tree reduction of one block's slice of g_idata in dynamic shared memory;
// thread 0 writes (block sum / size) to g_odata[blockIdx.x].
// NOTE(review): g_idata[i] has no bounds guard — assumes gridDim.x*blockDim.x
// input elements exist. The interleaved tid % (2*s) scheme also assumes
// blockDim.x is a power of two, otherwise s_data[tid + s] can read past the
// shared allocation. Confirm launch configuration at call sites.
__device__ void d_average(const float *g_idata, float *g_odata, unsigned int size)
{
extern __shared__ float s_data[];
unsigned int tid = threadIdx.x;
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
s_data[tid] = g_idata[i];
__syncthreads();
unsigned int s;
for (s = 1; s < blockDim.x; s *= 2) {
if (tid % (2 * s) == 0) {
s_data[tid] += s_data[tid + s];
}
// Barrier is outside the divergent if: all threads of the block reach it.
__syncthreads();
}
if (tid == 0)
g_odata[blockIdx.x] = s_data[0] / size;
}
__global__ void d_naive_win_average(const float *A, float *B, size_t n_els, size_t n)
{
    // One output per thread: B[i] = mean of the n inputs at indices i-n..i-1,
    // clamped to index 0 at the left edge (matches h_win_average).
    // Fix: the original called __syncthreads() inside this divergent branch,
    // which is illegal (not every thread of the block reaches it) — and it
    // was useless anyway since the kernel touches no shared memory.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n_els) {
        float sum = 0;
        for (int j = 0; j < n; j++) {
            sum += A[h_d_max(0, i - n + j)];
        }
        B[i] = sum / n;
    }
}
int main(void)
{
cudaError_t err = cudaSuccess;
int win_size = 2;
int num_el = 100000;
size_t size = num_el * sizeof(float);
float *h_vec = (float*)malloc(size);
// Timer
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if (h_vec == NULL) {
fprintf(stderr, "Failed to allocate host vector!\n");
exit(EXIT_FAILURE);
}
// Initialise the host input vector
for (int i = 0; i < num_el; ++i)
h_vec[i] = rand() / (float)RAND_MAX;
// Run host version
float *h_avg = (float*)malloc(sizeof(float) * num_el); //[num_el];
h_win_average(h_vec, &h_avg, num_el, win_size);
printf("HOST VERSION:\n");
int i;
// for (i = 0; i < num_el; i++)
// printf("%f ", h_vec[i]);
printf("\n");
// for (i = 0; i < num_el; i++)
// printf("%f ", h_avg[i]);
printf("\n");
// ===============================================================================================================================
printf("DEVICE NAIVE VERSION:\n");
// Allocate the device input vector h_vec
float *d_vec = NULL;
err = cudaMalloc((void **)&d_vec, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector h_vec (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_avg = NULL;
err = cudaMalloc((void **)&d_avg, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector h_avg (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_vec, h_vec, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to copy vector h_vec from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int threads_per_block = 256;
int blocks_per_grid = 1 + ((num_el - 1) / threads_per_block);
cudaEventRecord(start, 0);
d_naive_win_average<<<blocks_per_grid, threads_per_block>>>(d_vec, d_avg, num_el, win_size);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Copy the device result vector in device memory to the host result vector in host memory.
err = cudaMemcpy(h_avg, d_avg, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to copy vector d_avg from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// for (i = 0; i < num_el; i++)
// printf("%f ", h_avg[i]);
printf("\n");
printf("Time elapsed for naive parallel implementation: %f\n", time);
// --------------------------------------------------
cudaEventRecord(start, 0);
d_naive_win_average<<<blocks_per_grid, threads_per_block>>>(d_vec, d_avg, num_el, win_size);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Copy the device result vector in device memory to the host result vector in host memory.
err = cudaMemcpy(h_avg, d_avg, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to copy vector d_avg from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// for (i = 0; i < num_el; i++)
// printf("%f ", h_avg[i]);
printf("\n");
printf("Time elapsed for naive parallel implementation: %f\n", time);
// ===============================================================================================================================
/*dim3 threads_per_block_2d(32 , 32);
dim3 blocks_per_grid_2d(num_el / threads_per_block_2d.x, num_el / threads_per_block_2d.y);
printf("x: %d, y: %d\n\n", blocks_per_grid_2d.x, blocks_per_grid_2d.y);
cudaEventRecord(start, 0);
d_naive_win_average<<<threads_per_block_2d , blocks_per_grid_2d>>>(d_vec, d_avg, num_el, win_size);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Copy the device result vector in device memory to the host result vector in host memory.
err = cudaMemcpy(h_avg, d_avg, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to copy vector d_avg from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// for (i = 0; i < num_el; i++)
// printf("%f ", h_avg[i]);
printf("\n");
printf("Time elapsed for naive parallel implementation 2D: %f\n", time);*/
return 0;
}
|
8,367 | #include <cuda.h>
typedef uchar4 Color; // .x->R, .y->G, .z->B, .w->A
__host__ __device__ double3 operator+ (double3 v1, double3 v2)
{
    // Component-wise sum of two double3 vectors.
    double3 sum = v1;
    sum.x += v2.x;
    sum.y += v2.y;
    sum.z += v2.z;
    return sum;
}
__host__ __device__ double3 operator- (double3 v1, double3 v2)
{
    // Component-wise difference v1 - v2.
    double3 diff = v1;
    diff.x -= v2.x;
    diff.y -= v2.y;
    diff.z -= v2.z;
    return diff;
}
__host__ __device__ double3 operator* (double3 v1, double3 v2)
{
    // Component-wise (Hadamard) product, not a dot or cross product.
    double3 prod = v1;
    prod.x *= v2.x;
    prod.y *= v2.y;
    prod.z *= v2.z;
    return prod;
}
__host__ __device__ double3 operator/ (double3 v1, double3 v2)
{
    // Component-wise quotient; no zero-divisor check (matches original contract).
    double3 quot = v1;
    quot.x /= v2.x;
    quot.y /= v2.y;
    quot.z /= v2.z;
    return quot;
}
__host__ __device__ double3 operator* (double scalar, double3 vec)
{
    // Uniform scaling of a double3 by a scalar.
    double3 scaled = vec;
    scaled.x *= scalar;
    scaled.y *= scalar;
    scaled.z *= scalar;
    return scaled;
}
__host__ __device__ Color operator* (double scalar, Color vec)
{
    // Scale the RGB channels of a Color (uchar4); the alpha channel (.w) is
    // passed through unchanged. Fix: retval previously started default-
    // constructed, leaving .w uninitialized — operator+ (Color) copies its
    // first operand, so this now matches that convention.
    // NOTE(review): scalar*channel is truncated back into unsigned char;
    // results outside [0,255] wrap — confirm callers keep scalar in range.
    Color retval = vec;   // start from vec so .w is preserved
    retval.x = scalar*vec.x;
    retval.y = scalar*vec.y;
    retval.z = scalar*vec.z;
    return retval;
}
__host__ __device__ Color operator+ (Color vec1, Color vec2)
{
    // Channel-wise RGB sum; alpha (.w) is taken from vec1 via the initial copy.
    // Unsigned char addition wraps on overflow, as in the original.
    Color out = vec1;
    out.x = vec1.x + vec2.x;
    out.y = vec1.y + vec2.y;
    out.z = vec1.z + vec2.z;
    return out;
}
|
8,368 | #ifndef QUEUE_HASH_GPU_CU
#define QUEUE_HASH_GPU_CU
///
/// Fixed Memory Rnaged Queue
///
// mimic the template of CPU code
#define Type short
//
// looped fixed-range queue.
// _nFrontInx point to the head, and _nEndInx points to the first NULL element
//
struct range_queue_gpu
{
    // Fixed-capacity circular queue over caller-supplied storage.
    // _nFrontInx indexes the head element; _nEndInx indexes the first free
    // slot. Overflowing push and underflowing pop are silent no-ops.
    __device__
    range_queue_gpu(Type *pT, unsigned count)
    {
        _data = pT;
        _count = count;
        _nFrontInx = 0;
        _nEndInx = 0;
        _currCount = 0;
    }
    // Drop the head element; does nothing on an empty queue.
    __device__
    void pop()
    {
        if (_currCount == 0) return;
        _nFrontInx = (_nFrontInx + 1) % _count;
        --_currCount;
    }
    // Append an element by copy; does nothing on a full queue.
    __device__
    void push(Type &t)
    {
        if (_currCount >= _count) return;
        _data[_nEndInx] = t;
        _nEndInx = (_nEndInx + 1) % _count;
        ++_currCount;
    }
    // Reference to the head element (undefined when the queue is empty).
    __device__
    Type &front()
    {
        return _data[_nFrontInx];
    }
    // Number of elements currently stored.
    __device__
    unsigned size()
    {
        return _currCount;
    }
private:
    // Member order preserved from the original (layout-compatible).
    Type *_data;
    unsigned _nFrontInx;
    unsigned _nEndInx;
    unsigned _currCount;
    unsigned _count;
};
// the hash_map could be a directly indexed array
//
#endif |
8,369 | //
#include <stdio.h>
#include <sys/time.h>
#include <stdint.h>
#include <unistd.h>
#define RealType double
// conversions constants
#define deg2rad 0.0174532925199433
#define rad2deg 57.2957795130823
// max array sizes
#define MAX_N_SPOTS 6000000 // max nr of observed spots that can be stored
#define MAX_N_STEPS 1000 // Max nr of pos steps, when stepping along the diffracted ray
#define MAX_N_OR 36000 // max nr of trial orientations that can be stored (360/0.01);
#define MAX_N_MATCHES 1 // max nr of grain matches for 1 spot
#define MAX_N_RINGS 500 // max nr of rings that can be stored (applies to the arrays ringttheta, ringhkl, etc)
#define MAX_N_HKLS 5000 // max nr of hkls that can be stored
#define MAX_N_OMEGARANGES 72 // max nr of omegaranges in input file (also max no of box sizes)
#define N_COL_THEORSPOTS 8 // number of items that is stored for each calculated spot (omega, eta, etc)
#define N_COL_OBSSPOTS 9 // number of items stored for each obs spots
#define N_COL_GRAINSPOTS 17 // nr of columns for output: y, z, omega, differences for spots of grain matches
#define N_COL_GRAINMATCHES 16 // nr of columns for output: the Matches (summary)
#define MAX_LINE_LENGTH 4096
#define MAX_N_FRIEDEL_PAIRS 50
#define N_COLS_FRIEDEL_RESULTS 16
#define N_COLS_ORIENTATION_NUMBERS 3
// the binsizes used for the binning
RealType EtaBinSize = 0;
RealType OmeBinSize = 0;
// some macros for math calculations
#define crossProduct(a,b,c) \
(a)[0] = (b)[1] * (c)[2] - (c)[1] * (b)[2]; \
(a)[1] = (b)[2] * (c)[0] - (c)[2] * (b)[0]; \
(a)[2] = (b)[0] * (c)[1] - (c)[0] * (b)[1];
#define dot(v,q) \
((v)[0] * (q)[0] + \
(v)[1] * (q)[1] + \
(v)[2] * (q)[2])
#define CalcLength(x,y,z) sqrt((x)*(x) + (y)*(y) + (z)*(z))
#define CHECK(call){ \
const cudaError_t error = call; \
if (error != cudaSuccess){ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(-10*error); \
} \
}
RealType cpuSecond(){
    // Wall-clock time in seconds (microsecond resolution) via gettimeofday;
    // used for coarse host-side timing.
    struct timeval now;
    gettimeofday(&now, NULL);
    return (RealType)now.tv_sec + (RealType)now.tv_usec * 1.e-6;
}
// All user-supplied indexing parameters, filled by ReadParams() from a
// "<KeyName> value(s)" text file. Units are noted per field.
struct ParametersStruct {
   int RingNumbers[MAX_N_RINGS];           // the ring numbers to use for indexing (1, 2, 4, etc)
   int SpaceGroupNum;                      // space-group number (1..230)
   RealType LatticeConstant;               // [Angstrom]
   RealType Wavelength;                    // Wavelength of incoming beam [Angstrom]
   RealType Distance;                      // Distance between sample and detector [micron]
   RealType Rsample;                       // Radius of the sample [micron]
   RealType Hbeam;                         // Height of the beam [micron]
   RealType StepsizePos;                   // step size in position [micron]
   RealType StepsizeOrient;                // step size in orientation (rotation around the plane normal) [degrees]
   int NrOfRings;                          // No of rings to use (not explicit input by user, but set via RingNumbers[])
   RealType RingRadii[MAX_N_RINGS];        // Radii of the rings [micron]. Used internally: ring radius of ring 1 is at index 1, etc.
   RealType RingRadiiUser[MAX_N_RINGS];    // Radii of the rings [micron]. Stores only radii of the used rings!! Used for user input.
   RealType MarginOme;                     // Margin in Omega [degrees], when assigning theoretical spots to experimental spots. (|omeT-omeO| < MarginOme)
   RealType MarginEta;                     // Margin in eta [degrees], ,,
   RealType MarginRad;                     // Margin in radius [micron], ,,
   RealType MarginRadial;                  // Margin in radial direction (orthogonal to the ring) [micron], ,,
   RealType EtaBinSize;                    // Size of bin for eta [degrees]
   RealType OmeBinSize;                    // Size of bin for omega [degrees]
   RealType ExcludePoleAngle;              // Spots can be excluded at the poles: the range is |Eta| < ExcludePoleAngle and 180-|Eta| < ExcludePoleAngle [degrees]
   RealType MinMatchesToAcceptFrac;        // Minimum fraction (matched_spots/exp_spots) to accept an orientation+position.
   RealType BoxSizes[MAX_N_OMEGARANGES][4];    // for each omegarange a box (window: left right bottom top) that defines the spots to include during indexing [micron]
   RealType OmegaRanges[MAX_N_OMEGARANGES][2]; // Omegaranges: min, max [degrees], multiple possible.
   char OutputFolder[MAX_LINE_LENGTH];     // output folder
   int NoOfOmegaRanges;                    // Automatically set from Omegaranges (not explicit input by user)
   char SpotsFileName[MAX_LINE_LENGTH];    // filename containing observed spots (see top for definition of columns)
   char IDsFileName [MAX_LINE_LENGTH];     // filename containing the spot-ids that will be used for indexing
   int UseFriedelPairs;                    // 0=do not use friedelpairs 1=try to use friedelpairs
   RealType ABCABG[6];                     // A, B, C, Alpha, Beta, Gamma for the structure
};
// Read the indexing parameter file (one "<KeyName> value(s)" entry per line)
// and fill *Params.
//
// Returns 0 on success, 1 if the file cannot be opened. Unrecognized
// non-empty lines produce a warning but do not abort.
//
// Fixes vs. the original:
//  - fp is now fclose()d (it was leaked).
//  - the "empty line" test used strncmp(line, "", 0), which matches EVERY
//    line, so the unrecognized-line warning was unreachable dead code; empty
//    lines are now detected explicitly.
//  - counters that index fixed-size arrays are bounds-guarded.
int ReadParams(char FileName[], struct ParametersStruct * Params){
    FILE *fp;
    char line[MAX_LINE_LENGTH];
    char dummy[MAX_LINE_LENGTH];
    char *str;
    int NrOfBoxSizes = 0;
    int cmpres;
    int NoRingNumbers = 0; // should end up equal to Params->NrOfRings
    Params->NrOfRings = 0;
    Params->NoOfOmegaRanges = 0;
    fp = fopen(FileName, "r");
    if (fp==NULL) {
        printf("Cannot open file: %s.\n", FileName);
        return(1);
    }
    fflush(stdout);
    // now get the params: format: "string" value(s)
    while (fgets(line, MAX_LINE_LENGTH, fp) != NULL) {
        str = "RingNumbers ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            if (NoRingNumbers < MAX_N_RINGS) { // guard against array overrun
                sscanf(line, "%s %d", dummy, &(Params->RingNumbers[NoRingNumbers]) );
                NoRingNumbers++;
            }
            continue;
        }
        str = "SpaceGroup ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %d", dummy, &(Params->SpaceGroupNum) );
            continue;
        }
        str = "LatticeParameter ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            // First scan keeps the legacy single-constant field; second scan
            // reads the full a,b,c,alpha,beta,gamma set when present.
            sscanf(line, "%s %lf", dummy, &(Params->LatticeConstant) );
            sscanf(line, "%s %lf %lf %lf %lf %lf %lf", dummy, &(Params->ABCABG[0]), &(Params->ABCABG[1]),
                &(Params->ABCABG[2]), &(Params->ABCABG[3]), &(Params->ABCABG[4]), &(Params->ABCABG[5]));
            continue;
        }
        str = "Wavelength ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->Wavelength) );
            continue;
        }
        str = "Distance ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->Distance) );
            continue;
        }
        str = "Rsample ";
        cmpres = strncmp(line, str, strlen(str));
        if ( cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->Rsample) );
            continue;
        }
        str = "Hbeam ";
        cmpres = strncmp(line, str, strlen(str));
        if ( cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->Hbeam) );
            continue;
        }
        str = "StepsizePos ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->StepsizePos) );
            continue;
        }
        str = "StepsizeOrient ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->StepsizeOrient) );
            continue;
        }
        str = "MarginOme ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->MarginOme) );
            continue;
        }
        str = "MarginRadius ";
        cmpres = strncmp(line, str , strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->MarginRad) );
            continue;
        }
        str = "MarginRadial ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->MarginRadial) );
            continue;
        }
        str = "EtaBinSize ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->EtaBinSize) );
            continue;
        }
        str = "OmeBinSize ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->OmeBinSize) );
            continue;
        }
        str = "MinMatchesToAcceptFrac ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->MinMatchesToAcceptFrac) );
            continue;
        }
        str = "ExcludePoleAngle ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->ExcludePoleAngle) );
            continue;
        }
        str = "RingRadii ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            if (Params->NrOfRings < MAX_N_RINGS) { // guard against array overrun
                sscanf(line, "%s %lf", dummy, &(Params->RingRadiiUser[Params->NrOfRings]));
                Params->NrOfRings = Params->NrOfRings + 1;
            }
            continue;
        }
        str = "OmegaRange ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            if (Params->NoOfOmegaRanges < MAX_N_OMEGARANGES) { // guard against array overrun
                sscanf(line, "%s %lf %lf", dummy, &(Params->OmegaRanges[Params->NoOfOmegaRanges][0]),
                    &(Params->OmegaRanges[Params->NoOfOmegaRanges][1]));
                (Params->NoOfOmegaRanges)++;
            }
            continue;
        }
        str = "BoxSize ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            if (NrOfBoxSizes < MAX_N_OMEGARANGES) { // guard against array overrun
                sscanf(line, "%s %lf %lf %lf %lf", dummy, &(Params->BoxSizes[NrOfBoxSizes][0]),
                    &(Params->BoxSizes[NrOfBoxSizes][1]),
                    &(Params->BoxSizes[NrOfBoxSizes][2]),
                    &(Params->BoxSizes[NrOfBoxSizes][3]));
                NrOfBoxSizes++;
            }
            continue;
        }
        str = "SpotsFileName ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %s", dummy, Params->SpotsFileName );
            continue;
        }
        str = "IDsFileName ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %s", dummy, Params->IDsFileName );
            continue;
        }
        str = "MarginEta ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %lf", dummy, &(Params->MarginEta) );
            continue;
        }
        str = "UseFriedelPairs ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %d", dummy, &(Params->UseFriedelPairs) );
            continue;
        }
        str = "OutputFolder ";
        cmpres = strncmp(line, str, strlen(str));
        if (cmpres == 0) {
            sscanf(line, "%s %s", dummy, Params->OutputFolder );
            continue;
        }
        // skip empty lines quietly (the old zero-length strncmp matched
        // every line, which silenced the warning below)
        if (line[0] == '\0' || line[0] == '\n' || line[0] == '\r') {
            continue;
        }
        // if string not recognized: print warning all other cases
        printf("Warning: skipping line in parameters file:\n");
        printf("%s\n", line);
    }
    fclose(fp); // previously leaked
    // make a Params->RingRadii for internal use: ringno is directly the index in array (RingRadii[5] = ringradius from ring 5)
    int i;
    for (i = 0 ; i < MAX_N_RINGS ; i++ ) {
        Params->RingRadii[i] = 0;
    }
    for (i = 0 ; i < Params->NrOfRings ; i++ ) {
        Params->RingRadii[Params->RingNumbers[i]] = Params->RingRadiiUser[i];
    }
    return(0);
}
__device__ int FindRowInMatrix(RealType *aMatrixp, int nrows, int ncols, int SearchColumn, int aVal){
    // Linear scan down SearchColumn of a row-major nrows x ncols matrix;
    // return the first row whose entry equals aVal, or -1 if none matches.
    // (Comparison is RealType == int, as in the original — exact match only.)
    int row = 0;
    while (row < nrows) {
        if (aMatrixp[row*ncols + SearchColumn] == aVal) return row;
        row++;
    }
    return -1;
}
__device__ RealType CalcEtaAngle(RealType y, RealType z) {
    // Angle (degrees) of the point (y,z) from the +z axis in the detector
    // plane; sign convention: negative when y > 0.
    RealType eta = rad2deg * acos(z/sqrt(y*y+z*z));
    return (y > 0) ? -eta : eta;
}
// Build the 3x3 rotation matrix R for a rotation of `angle` degrees about
// `axis` (Rodrigues rotation formula). A zero axis yields the identity.
// Fix: the axis norm was recomputed three times; it is now computed once
// (identical floating-point results, since the product form is unchanged).
__device__ void AxisAngle2RotMatrix(RealType axis[3], RealType angle, RealType R[3][3]){
    if ( (axis[0] == 0) && (axis[1] == 0) && (axis[2] == 0) ) {
        // Degenerate axis: return the identity matrix.
        R[0][0] = 1;
        R[1][0] = 0;
        R[2][0] = 0;
        R[0][1] = 0;
        R[1][1] = 1;
        R[2][1] = 0;
        R[0][2] = 0;
        R[1][2] = 0;
        R[2][2] = 1;
        return;
    }
    // Unit axis components (norm computed once).
    RealType invLen = 1/sqrt(axis[0]*axis[0] + axis[1]*axis[1] + axis[2]*axis[2]);
    RealType u = axis[0]*invLen;
    RealType v = axis[1]*invLen;
    RealType w = axis[2]*invLen;
    RealType angleRad = deg2rad * angle;
    RealType rcos = cos(angleRad);
    RealType rsin = sin(angleRad);
    R[0][0] =      rcos + u*u*(1-rcos);
    R[1][0] =  w * rsin + v*u*(1-rcos);
    R[2][0] = -v * rsin + w*u*(1-rcos);
    R[0][1] = -w * rsin + u*v*(1-rcos);
    R[1][1] =      rcos + v*v*(1-rcos);
    R[2][1] =  u * rsin + w*v*(1-rcos);
    R[0][2] =  v * rsin + u*w*(1-rcos);
    R[1][2] = -u * rsin + v*w*(1-rcos);
    R[2][2] =      rcos + w*w*(1-rcos);
    return;
}
// Return the rotational symmetry range (degrees) for the hkl family of ring
// RingNr, determined by the space-group number (IntParamArr[0]) and the
// lattice parameters packed at RTParamArr[13 + MAX_N_RINGS ..].
// Fix: habs/kabs/labs are now zero-initialized; previously they were read
// uninitialized whenever RingNr was absent from HKLints (UB). With the
// zero init that case now falls into the nzeros==3 branch and returns 0.
__device__ RealType CalcRotationAngle (int RingNr, int *HKLints, int *IntParamArr,
	RealType *RTParamArr){
	int habs = 0, kabs = 0, labs = 0;
	// Find the |h|,|k|,|l| of the first HKL entry belonging to this ring.
	for (int i=0;i<MAX_N_HKLS;i++){
		if (HKLints[i*4+3] == RingNr){
			habs = abs(HKLints[i*4+0]);
			kabs = abs(HKLints[i*4+1]);
			labs = abs(HKLints[i*4+2]);
			break;
		}
	}
	int SGNum = IntParamArr[0];
	RealType ABCABG[6];
	for (int i=0;i<6;i++) ABCABG[i] = RTParamArr[13 + MAX_N_RINGS + i];
	int nzeros = 0;
	if (habs == 0) nzeros++;
	if (kabs == 0) nzeros++;
	if (labs == 0) nzeros++;
	if (nzeros == 3) return 0;
	if (SGNum == 1 || SGNum == 2){			// triclinic
		return 360;
	}else if (SGNum >= 3 && SGNum <= 15){	// monoclinic
		if (nzeros != 2) return 360;
		else if (ABCABG[3] == 90 && ABCABG[4] == 90 && labs != 0){
			return 180;
		}else if (ABCABG[3] == 90 && ABCABG[5] == 90 && habs != 0){
			return 180;
		// NOTE(review): this condition duplicates the previous one
		// (ABCABG[3] && ABCABG[5]); the kabs case likely intended
		// ABCABG[4] && ABCABG[5] — confirm before changing (kept as-is).
		}else if (ABCABG[3] == 90 && ABCABG[5] == 90 && kabs != 0){
			return 180;
		}else return 360;
	}else if (SGNum >= 16 && SGNum <= 74){	// orthorhombic
		if (nzeros !=2) return 360;
		else return 180;
	}else if (SGNum >= 75 && SGNum <= 142){	// tetragonal
		if (nzeros == 0) return 360;
		else if (nzeros == 1 && labs == 0 && habs == kabs){
			return 180;
		}else if (nzeros == 2){
			if (labs == 0){
				return 180;
			}else{
				return 90;
			}
		}else return 360;
	}else if (SGNum >= 143 && SGNum <= 167){	// trigonal
		if (nzeros == 0) return 360;
		else if (nzeros == 2 && labs != 0) return 120;
		else return 360;
	}else if (SGNum >= 168 && SGNum <= 194){	// hexagonal
		if (nzeros == 2 && labs != 0) return 60;
		else return 360;
	}else if (SGNum >= 195 && SGNum <= 230){	// cubic
		if (nzeros == 2) return 90;
		else if (nzeros == 1){
			// NOTE(review): an nzeros==1 family with no equal pair falls
			// through to the final return 0 — confirm this is intended.
			if (habs == kabs || kabs == labs || habs == labs) return 180;
		} else if (habs == kabs && kabs == labs) return 120;
		else return 360;
	}
	return 0;
}
__device__ void MatrixMultF33(RealType m[3][3], RealType n[3][3], RealType res[3][3]){
    // res = m * n for row-major 3x3 matrices. Summation order per element
    // matches the original (left-to-right over the inner index).
    for (int r=0; r<3; r++) {
        for (int c=0; c<3; c++) {
            res[r][c] = m[r][0]*n[0][c] + m[r][1]*n[1][c] + m[r][2]*n[2][c];
        }
    }
}
__device__ void MatrixMultF(RealType m[3][3], RealType v[3], RealType r[3]){
    // r = m * v (3x3 matrix applied to a column vector).
    for (int row = 0; row < 3; row++)
        r[row] = m[row][0]*v[0] + m[row][1]*v[1] + m[row][2]*v[2];
}
// Rotate v1 by alpha degrees about the z axis into v2.
// Fix: cos and sin were each evaluated twice when building the matrix; they
// are now evaluated once (identical values, fewer transcendental calls).
__device__ void RotateAroundZ(RealType v1[3], RealType alpha, RealType v2[3]){
	RealType ca = cos(alpha*deg2rad);
	RealType sa = sin(alpha*deg2rad);
	RealType mat[3][3] = {{ ca, -sa, 0 },
	                      { sa,  ca, 0 },
	                      {  0,   0, 1 }};
	MatrixMultF(mat, v1, v2);
}
// Solve for the sample rotation angles omega (degrees) at which the
// scattering vector (x,y,z) meets the diffraction condition for the given
// theta, plus the corresponding eta angles. Writes up to 2 solutions into
// omegas[]/etas[] and returns the solution count.
// Fix: the quadratic coefficients and discriminant were re-evaluated up to
// six times each; they are hoisted into locals with the exact same
// expression forms, so floating-point results are unchanged.
__device__ int CalcOmega(RealType x, RealType y, RealType z, RealType theta, RealType omegas[4], RealType etas[4]) {
	int nsol = 0;
	RealType v = sin(theta*deg2rad)*sqrt(x*x + y*y + z*z);
	if ( fabs(y) < 1e-4 ) {
		// Degenerate case y ~ 0: -x*cos(omega) = v directly.
		if (x != 0) {
			if (fabs(-v/x) <= 1) {
				omegas[nsol] = acos(-v/x)*rad2deg;
				nsol = nsol + 1;
				omegas[nsol] = -acos(-v/x)*rad2deg;
				nsol = nsol + 1;
			}
		}
	} else {
		// Quadratic in c = cos(omega): a*c^2 + b*c + cc = 0.
		RealType b  = (2*v*x) / (y*y);
		RealType a  = 1 + ((x*x) / (y*y));
		RealType cc = ((v*v) / (y*y)) - 1;
		RealType discr = b*b - 4*a*cc;
		if (discr >= 0) {
			RealType sq = sqrt(discr);
			RealType cosome1 = (-b + sq)/(2*a);
			if (fabs(cosome1) <= 1) {
				RealType ome1 = acos(cosome1);
				// Pick the omega sign that better satisfies the original
				// equation -x*cos(omega) + y*sin(omega) = v.
				if (fabs(-x*cos(ome1) + y*sin(ome1) - v) < fabs(-x*cos(-ome1) + y*sin(-ome1) - v)) {
					omegas[nsol] = ome1*rad2deg;
				} else {
					omegas[nsol] = -ome1*rad2deg;
				}
				nsol = nsol + 1;
			}
			RealType cosome2 = (-b - sq)/(2*a);
			if (fabs(cosome2) <= 1) {
				RealType ome2 = acos(cosome2);
				if (fabs(-x*cos(ome2) + y*sin(ome2) - v) < fabs(-x*cos(-ome2) + y*sin(-ome2) - v)) {
					omegas[nsol] = ome2*rad2deg;
				} else {
					omegas[nsol] = -ome2*rad2deg;
				}
				nsol = nsol + 1;
			}
		}
	}
	// Eta for each omega: rotate the g-vector and measure its detector angle.
	RealType gw[3];
	RealType gv[3] = {x, y, z};
	for (int indexOme = 0; indexOme < nsol; indexOme++) {
		RotateAroundZ(gv, omegas[indexOme], gw);
		etas[indexOme] = CalcEtaAngle(gw[1], gw[2]);
	}
	return nsol;
}
// Compute the theoretical diffraction spots for one trial orientation.
// For every HKL entry (n_arr[1] of them, 7 values each: G-vector in [0..2],
// ring number in [3], theta in [5] — layout inferred from indexing here,
// confirm against the host packing code), rotate the G-vector by
// OrientMatrix, solve for the omega/eta solutions, and keep each spot that
// is not excluded at the poles and falls inside one of the NOmegaRanges
// omega/box windows in OmeBoxArr (6 values per range: y-min/max, z-min/max,
// ome-min/max). Kept spots are written N_COL_THEORSPOTS values apart into
// spots[] (yl, zl, omega, ring number; remaining columns filled later by
// the caller). Returns the number of spots written.
__device__ int CalcDiffrSpots_Furnace(RealType OrientMatrix[3][3],
	RealType *RingRadii, RealType *OmeBoxArr, int NOmegaRanges, RealType ExcludePoleAngle, RealType *spots, RealType *hkls, int *n_arr)
{
	int OmegaRangeNo;
	int KeepSpot;
	RealType Ghkl[3];
	RealType Gc[3];
	RealType omegas[4];
	RealType etas[4];
	RealType yl;
	RealType zl;
	int nspotsPlane;
	int spotnr = 0;
	for (int indexhkl=0; indexhkl < n_arr[1] ; indexhkl++)  {
		Ghkl[0] = hkls[indexhkl*7+0];
		Ghkl[1] = hkls[indexhkl*7+1];
		Ghkl[2] = hkls[indexhkl*7+2];
		// Rotate the G-vector into the sample frame for this orientation.
		MatrixMultF(OrientMatrix,Ghkl, Gc);
		nspotsPlane = CalcOmega(Gc[0], Gc[1], Gc[2], hkls[indexhkl*7+5], omegas, etas);
		for (int i=0 ; i<nspotsPlane ; i++) {
			// Skip spots too close to the detector poles.
			if ((fabs(etas[i]) < ExcludePoleAngle ) || ((180-fabs(etas[i])) < ExcludePoleAngle)) continue;
			// Detector-plane position from eta and the ring radius.
			yl = -(sin(deg2rad * etas[i])*RingRadii[(int)(hkls[indexhkl*7+3])]);
			zl =   cos(deg2rad * etas[i])*RingRadii[(int)(hkls[indexhkl*7+3])];
			// Keep the spot if it lies inside any omega range's window.
			for (OmegaRangeNo = 0 ; OmegaRangeNo < NOmegaRanges ; OmegaRangeNo++ ) {
				KeepSpot = 0;
				if ((omegas[i] > OmeBoxArr[OmegaRangeNo*6+4]) &&
					(omegas[i] < OmeBoxArr[OmegaRangeNo*6+5]) &&
					(yl > OmeBoxArr[OmegaRangeNo*6+0]) &&
					(yl < OmeBoxArr[OmegaRangeNo*6+1]) &&
					(zl > OmeBoxArr[OmegaRangeNo*6+2]) &&
					(zl < OmeBoxArr[OmegaRangeNo*6+3]) ) {
					KeepSpot = 1;
					break;
				}
			}
			if (KeepSpot) {
				spots[spotnr*N_COL_THEORSPOTS+0] = yl;
				spots[spotnr*N_COL_THEORSPOTS+1] = zl;
				spots[spotnr*N_COL_THEORSPOTS+2] = omegas[i];
				spots[spotnr*N_COL_THEORSPOTS+3] = hkls[indexhkl*7+3];
				spotnr++;
			}
		}
	}
	return spotnr;
}
// Kernel: one thread per (position step, trial orientation) pair, flattened
// into overallPos = nPos*nOrients + orientPos, launched 1-D with maxPos
// total work items. For its pair it displaces the precomputed theoretical
// spots (AllTheorSpots, written by ReturnDiffractionSpots) to the candidate
// grain position and matches them against binned observed spots (data/ndata
// index ObsSpots by ring/eta/omega bin). Outputs per work item: matched spot
// IDs into AllGrainSpots, match count into nMatchedArr, mean internal angle
// into IAs, and the candidate position into GS.
// NOTE(review): RTParamArr is a packed host-side parameter array; the index
// meanings used here (e.g. [2] ~ Hbeam, [3] ~ StepsizePos, [5+MAX_N_RINGS+4/5]
// ~ eta/ome bin sizes) are inferred from usage — confirm against the host
// packing code before relying on them.
__global__ void CompareDiffractionSpots(RealType *AllTheorSpots, RealType *RTParamArr,
	int maxPos, RealType *ResultArr, int PosResultArr, int *nTspotsArr,
	int *data, int *ndata, RealType *ObsSpots, RealType *etamargins, int *AllGrainSpots,
	RealType *IAs, int *n_arr, int *nMatchedArr, int n_min, int nOrients, RealType *GS){
	int nPos, orientPos, overallPos; // Position Calculate!!
	overallPos = blockIdx.x * blockDim.x + threadIdx.x;
	// Bounds guard: grids rarely divide the work evenly.
	if (overallPos >= maxPos){
		return;
	}
	// Decompose the flat index into position step and orientation number.
	nPos = overallPos / nOrients;
	orientPos = overallPos % nOrients;
	nMatchedArr[overallPos] = 0;
	int n = n_min + nPos;
	// Per-orientation slice of the precomputed theoretical spots.
	RealType *TheorSpots;
	TheorSpots = AllTheorSpots + n_arr[1]*2*N_COL_THEORSPOTS*orientPos;
	// Per-work-item output slice for matched spot IDs.
	int *GrainSpots;
	GrainSpots = AllGrainSpots + overallPos * n_arr[1] * 2;
	// Friedel-pair result row for this candidate (16 columns).
	RealType y0, z0, xi, yi, zi, ys, zs,omega,RefRad;
	y0 = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 7];
	z0 = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 8];
	xi = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 9];
	yi = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 10];
	zi = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 11];
	ys = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 12];
	zs = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 13];
	omega = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 14];
	RefRad = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 15];
	RealType Displ_y, Displ_z;
	int nTspots, nMatched, MatchFound;
	RealType diffOmeBest, diffOme;
	long long int Pos;
	int nspots, DataPos, spotRow,spotRowBest;
	RealType omeo, ometh, gvo[3], gvth[3], lo, lth, tmp, go[3], gth[3],gs[3];
	RealType n_eta_bins, n_ome_bins, t;
	n_eta_bins = ceil(360.0 / RTParamArr[5 + MAX_N_RINGS + 4]);
	n_ome_bins = ceil(360.0 / RTParamArr[5 + MAX_N_RINGS + 5]);
	// Candidate grain position gs: step n along the diffracted ray from
	// (ys,zs), rotated back by -omega into the sample frame.
	gs[0] = ((RTParamArr[3])*(n/xi)*xi*cos(omega*deg2rad)) +
			((ys - y0 + (RTParamArr[3])*(n/xi)*yi)*sin(omega*deg2rad));
	gs[1] = ((ys - y0 + (RTParamArr[3])*(n/xi)*yi)*cos(
			omega*deg2rad)) - ((RTParamArr[3])*(n/xi)*xi*sin(omega*deg2rad));
	gs[2] = zs - z0 + (RTParamArr[3])*(n/xi)*zi;
	GS[overallPos*3 + 0] = gs[0];
	GS[overallPos*3 + 1] = gs[1];
	GS[overallPos*3 + 2] = gs[2];
	nMatched = 0;
	nTspots = nTspotsArr[orientPos];
	IAs[overallPos] = 0;
	// Reject positions outside the beam height (half-height each side).
	if (fabs(zs - z0 + (RTParamArr[3])*(n/xi)*zi) > RTParamArr[2] /2) {
		nMatchedArr[overallPos] = 0;
		return;
	}
	for (int sp = 0 ; sp < nTspots ; sp++) {
		// Displace each theoretical spot for this candidate position and
		// recompute eta and the radial offset (columns 4..7).
		ometh = TheorSpots[sp*N_COL_THEORSPOTS+2];
		t = (gs[0]*cos(deg2rad * ometh) - gs[1]*sin(deg2rad * ometh))/xi;
		Displ_y = ((gs[0]*sin(deg2rad * ometh))+ (gs[1]*cos(deg2rad * ometh))) - t* yi;
		Displ_z = gs[2] - t*zi;
		TheorSpots[sp*N_COL_THEORSPOTS+4] = TheorSpots[sp*N_COL_THEORSPOTS+0] +  Displ_y;
		TheorSpots[sp*N_COL_THEORSPOTS+5] = TheorSpots[sp*N_COL_THEORSPOTS+1] +  Displ_z;
		TheorSpots[sp*N_COL_THEORSPOTS+6] = CalcEtaAngle( TheorSpots[sp*N_COL_THEORSPOTS+4],
			TheorSpots[sp*N_COL_THEORSPOTS+5]);
		TheorSpots[sp*N_COL_THEORSPOTS+7] = sqrt(TheorSpots[sp*N_COL_THEORSPOTS+4] * TheorSpots[sp*N_COL_THEORSPOTS+4] +
			TheorSpots[sp*N_COL_THEORSPOTS+5] * TheorSpots[sp*N_COL_THEORSPOTS+5]) -
			RTParamArr[5 + (int)TheorSpots[sp*N_COL_THEORSPOTS+3]];
		MatchFound = 0;
		diffOmeBest = 100000;
		// Flat (ring, eta-bin, ome-bin) index into the binned observed data.
		Pos = (((int) TheorSpots[sp*N_COL_THEORSPOTS+3])-1)*n_eta_bins*n_ome_bins
			+ ((int)(floor((180+TheorSpots[sp*N_COL_THEORSPOTS+6])/RTParamArr[5 + MAX_N_RINGS + 4])))*n_ome_bins +
			((int)floor((180+TheorSpots[sp*N_COL_THEORSPOTS+2])/RTParamArr[5 + MAX_N_RINGS + 5]));
		nspots = ndata[Pos*2];
		if (nspots == 0){
			continue;
		}
		DataPos = ndata[Pos*2+1];
		// Among observed spots in this bin, take the one with the smallest
		// omega difference that passes the radial/eta/radius tolerances.
		for (int iSpot = 0 ; iSpot < nspots; iSpot++ ) {
			spotRow = data[DataPos + iSpot];
			if ( fabs(TheorSpots[sp*N_COL_THEORSPOTS+7] - ObsSpots[spotRow*9+8]) < RTParamArr[5 + MAX_N_RINGS + 3] )  {
				if ( fabs(RefRad - ObsSpots[spotRow*9+3]) < RTParamArr[5 + MAX_N_RINGS + 2] ) {
					if ( fabs(TheorSpots[sp*N_COL_THEORSPOTS+6] - ObsSpots[spotRow*9+6]) < etamargins[(int) TheorSpots[sp*N_COL_THEORSPOTS+3]] ) {
						diffOme = fabs(TheorSpots[sp*N_COL_THEORSPOTS+2] - ObsSpots[spotRow*9+2]);
						if ( diffOme < diffOmeBest ) {
							diffOmeBest = diffOme;
							spotRowBest = spotRow;
							MatchFound = 1;
						}
					}
				}
			}
		}
		if (MatchFound == 1) {
			GrainSpots[nMatched] = (int) ObsSpots[spotRowBest*9+4];
			// Internal angle between the observed and theoretical
			// g-vectors, accumulated into IAs (averaged below).
			omeo = ObsSpots[spotRowBest*9+2];
			ometh = TheorSpots[sp*N_COL_THEORSPOTS+2];
			RotateAroundZ(gs,omeo,go);
			RotateAroundZ(gs,ometh,gth);
			gvo[0] = (-1 + (RTParamArr[0] - go[0])/CalcLength((RTParamArr[0] - go[0]),(ObsSpots[spotRowBest*9+0] - go[1]),
				(ObsSpots[spotRowBest*9+1] - go[2]))) * cos(-omeo*deg2rad) - ((ObsSpots[spotRowBest*9+0] - go[1])/
				CalcLength((RTParamArr[0] - go[0]),(ObsSpots[spotRowBest*9+0] - go[1]),(ObsSpots[spotRowBest*9+1]
				- go[2]))) * sin(-omeo*deg2rad);
			gvo[1] = (-1 + (RTParamArr[0] - go[0])/CalcLength((RTParamArr[0] - go[0]),(ObsSpots[spotRowBest*9+0] - go[1]),
				(ObsSpots[spotRowBest*9+1] - go[2]))) * sin(-omeo*deg2rad) + ((ObsSpots[spotRowBest*9+0] - go[1])/
				CalcLength((RTParamArr[0] - go[0]),(ObsSpots[spotRowBest*9+0] - go[1]),(ObsSpots[spotRowBest*9+1]
				- go[2]))) * cos(-omeo*deg2rad);
			gvo[2] = (ObsSpots[spotRowBest*9+1] - go[2])/CalcLength((RTParamArr[0] - go[0]),(ObsSpots[spotRowBest*9+0] - go[1]),
				(ObsSpots[spotRowBest*9+1] - go[2]));
			gvth[0] = (-1 + (RTParamArr[0] - gth[0])/CalcLength((RTParamArr[0] - gth[0]),(TheorSpots[sp*N_COL_THEORSPOTS+0]
				- gth[1]),(TheorSpots[sp*N_COL_THEORSPOTS+1] - gth[2]))) * cos(-ometh*deg2rad) - ((TheorSpots[sp*N_COL_THEORSPOTS+0]
				- gth[1])/CalcLength((RTParamArr[0] - gth[0]),(TheorSpots[sp*N_COL_THEORSPOTS+0] - gth[1]),(TheorSpots[sp*N_COL_THEORSPOTS+1]
				- gth[2]))) * sin(-ometh*deg2rad);
			gvth[1] = (-1 + (RTParamArr[0] - gth[0])/CalcLength((RTParamArr[0] - gth[0]),(TheorSpots[sp*N_COL_THEORSPOTS+0]
				- gth[1]),(TheorSpots[sp*N_COL_THEORSPOTS+1] - gth[2]))) * sin(-ometh*deg2rad) + ((TheorSpots[sp*N_COL_THEORSPOTS+0]
				- gth[1])/CalcLength((RTParamArr[0] - gth[0]),(TheorSpots[sp*N_COL_THEORSPOTS+0] - gth[1]),(TheorSpots[sp*N_COL_THEORSPOTS+1]
				- gth[2]))) * cos(-ometh*deg2rad);
			gvth[2] = (TheorSpots[sp*N_COL_THEORSPOTS+1] - gth[2])/CalcLength((RTParamArr[0] - gth[0]),(TheorSpots[sp*N_COL_THEORSPOTS+0]
				- gth[1]),(TheorSpots[sp*N_COL_THEORSPOTS+1] - gth[2]));
			lo = CalcLength(gvo[0],gvo[1],gvo[2]);
			lth = CalcLength(gvth[0],gvth[1],gvth[2]);
			// Clamp before acos to guard against rounding outside [-1,1].
			tmp = dot(gvo,gvth)/(lo*lth);
			if (tmp >1) tmp = 1;
			else if (tmp < -1) tmp = -1;
			IAs[overallPos] += rad2deg * acos(tmp);
			nMatched++;
		}
	}
	// Mean internal angle over matches. NOTE(review): nMatched can be 0 here,
	// giving 0/0 (NaN) in IAs — confirm downstream consumers tolerate this.
	IAs[overallPos] /= (RealType)nMatched;
	nMatchedArr[overallPos] = nMatched;
}
// Kernel: one thread per trial orientation (1-D launch, norients work items).
// Builds the orientation matrix for this trial — align the plane normal
// (hkl -> hklnormal rotation), then spin about the normal by
// orient * step (RTParamArr[4]) — computes its theoretical diffraction
// spots into this orientation's slice of AllTheorSpots, and stores the
// 3x3 matrix row-major into Orientations[9*orient ..].
// Fix: bounds guard was `orient > norients`, which let orient == norients
// through and wrote one element past the end of nSpotsArr / Orientations /
// AllTheorSpots; it must be `>=`.
__global__ void ReturnDiffractionSpots(RealType *RTParamArr, RealType *OmeBoxArr,
	int *IntParamArr, RealType *AllTheorSpots, RealType *hkls, int *n_arr, int PosResultArr,
	RealType *ResultArr, int norients, int *nSpotsArr, RealType *Orientations){
	int orient = blockIdx.x * blockDim.x + threadIdx.x;
	if (orient >= norients) return;
	// Per-orientation output slice for theoretical spots.
	RealType *TheorSpots = AllTheorSpots + n_arr[1]*2*N_COL_THEORSPOTS*orient;
	RealType hkl[3], hklnormal[3];
	hkl[0] = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 0];
	hkl[1] = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 1];
	hkl[2] = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 2];
	hklnormal[0] = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 3];
	hklnormal[1] = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 4];
	hklnormal[2] = ResultArr[PosResultArr * N_COLS_FRIEDEL_RESULTS + 5];
	// Rotation axis bringing hkl onto hklnormal.
	RealType v[3];
	crossProduct(v, hkl, hklnormal);
	RealType RotMat[3][3];
	RealType RotMat2[3][3];
	RealType RotMat3[3][3];
	// RotMat: rotate hkl onto hklnormal (angle from the normalized dot product).
	AxisAngle2RotMatrix(v, rad2deg * acos(dot(hkl, hklnormal)/
		(sqrt(hkl[0]*hkl[0] + hkl[1]*hkl[1] + hkl[2]*hkl[2])*sqrt(
		hklnormal[0]*hklnormal[0] + hklnormal[1]*hklnormal[1] +
		hklnormal[2]*hklnormal[2]))), RotMat);
	// RotMat2: spin about the plane normal by this trial's step angle.
	AxisAngle2RotMatrix(hklnormal, orient*RTParamArr[4], RotMat2);
	MatrixMultF33(RotMat2, RotMat, RotMat3);
	nSpotsArr[orient] = CalcDiffrSpots_Furnace(RotMat3,
		RTParamArr + 5, OmeBoxArr, IntParamArr[1],
		RTParamArr[5 + MAX_N_RINGS + 6], TheorSpots, hkls,n_arr);
	// Store the orientation matrix row-major.
	int PosUse = 9*orient;
	Orientations[PosUse + 0] = RotMat3[0][0];
	Orientations[PosUse + 1] = RotMat3[0][1];
	Orientations[PosUse + 2] = RotMat3[0][2];
	Orientations[PosUse + 3] = RotMat3[1][0];
	Orientations[PosUse + 4] = RotMat3[1][1];
	Orientations[PosUse + 5] = RotMat3[1][2];
	Orientations[PosUse + 6] = RotMat3[2][0];
	Orientations[PosUse + 7] = RotMat3[2][1];
	Orientations[PosUse + 8] = RotMat3[2][2];
}
// Kernel: one thread per Friedel-pair result row (1-D launch, sumTotal work
// items). For each row it computes bookkeeping counts used to size later
// launches: [0] number of trial orientations (symmetry range / orientation
// step), [1] number of position steps along the diffracted ray, [2] total
// work items = (2*positions + 1) * orientations.
__global__ void MakeOrientations(RealType *ResultArr, int *HKLints,
	int *IntParamArr, RealType *RTParamArr, int *ResultOut, int sumTotal){
	int ID = blockIdx.x * blockDim.x + threadIdx.x;
	// Bounds guard for the 1-D launch.
	if (ID >= sumTotal) return;
	RealType y0, xi, yi, ys;
	y0 = ResultArr[ID * N_COLS_FRIEDEL_RESULTS + 7];
	xi = ResultArr[ID * N_COLS_FRIEDEL_RESULTS + 9];
	yi = ResultArr[ID * N_COLS_FRIEDEL_RESULTS + 10];
	ys = ResultArr[ID * N_COLS_FRIEDEL_RESULTS + 12];
	RealType RotationAngles = CalcRotationAngle(((int) ResultArr[ID * N_COLS_FRIEDEL_RESULTS + 6]), HKLints, IntParamArr, RTParamArr);
	// NOTE(review): the cast binds to RotationAngles first — this is
	// ((int)RotationAngles)/step, then truncated on assignment. Since
	// CalcRotationAngle only returns whole-degree values (0/60/90/120/180/360)
	// this matches (int)(RotationAngles/step), but confirm if that ever changes.
	ResultOut[ID*N_COLS_ORIENTATION_NUMBERS + 0] = (int) RotationAngles/RTParamArr[4];
	// Number of steps along the ray: larger root of the sample-boundary
	// quadratic (RTParamArr[1] presumably Rsample, RTParamArr[3] presumably
	// StepsizePos — confirm against host packing). The "+ 20" margin is a
	// magic constant from the original — meaning unconfirmed.
	ResultOut[ID*N_COLS_ORIENTATION_NUMBERS + 1] = (int)((((-(2*yi*(ys-y0))+sqrt((2*yi*(ys-y0))*(2*yi*(ys-y0))
												- 4*(xi*xi + yi*yi)*((ys-y0)*(ys-y0) - RTParamArr[1]*RTParamArr[1]
												)))/(2*(xi*xi + yi*yi)) + 20)*xi)/(RTParamArr[3]));
	// Total work items: positions are stepped symmetrically (-n .. +n), hence 2n+1.
	ResultOut[ID*N_COLS_ORIENTATION_NUMBERS + 2] = (2*ResultOut[ID*N_COLS_ORIENTATION_NUMBERS + 1] + 1) * ResultOut[ID*N_COLS_ORIENTATION_NUMBERS + 0];
}
// Given an observed spot at (ys, zs) with diffraction angles (ttheta, eta,
// omega) on ring 'ringno', scan ObsSpotsLab (n_arr[0] rows, stride 9) for its
// Friedel pair: the reflection at omega shifted by 180 degrees.  A candidate
// must match the ring number, lie within OmeTol of the mirrored omega, within
// RadiusTol of 2*Ring_rad in transformed radius, and inside the eta window
// derived from the sample radius (Rsample) and beam height (Hbeam).  Each
// accepted pair appends one row of N_COLS_FRIEDEL_RESULTS values (hkl, plane
// normal, ideal spot position, originating spot info, RefRad) into the
// ResultArray slab reserved for 'rowID'.  Returns the number of pairs found.
// NOTE(review): NrFriedel is not clamped against MAX_N_FRIEDEL_PAIRS;
// presumably the tolerance window guarantees few matches -- confirm so the
// per-row slab cannot overflow.
__device__ int TryFriedel(RealType ys, RealType zs,
RealType ttheta, RealType eta, RealType omega, int ringno,
RealType Ring_rad, RealType Rsample, RealType Hbeam, RealType OmeTol,
RealType RadiusTol, RealType *ObsSpotsLab, RealType *hkls, int *n_arr,
RealType *RTParamArr, RealType *ResultArray, int rowID, RealType RefRad){
int NrFriedel = 0;
RealType OmeF;
// The Friedel mate appears half a rotation away in omega.
if (omega < 0 ) OmeF = omega + 180;
else OmeF = omega - 180;
int quadr_coeff2 = 0, quadr_coeff, coeff_y0 = 0, coeff_z0 = 0;
RealType eta_Hbeam, y0_max_z0, y0_min_z0, y0_max = 0, y0_min = 0, z0_min = 0, z0_max = 0;
// Effective beam height is enlarged by the projection of the scattering
// cone (ttheta) over the sample radius at this eta.
if (eta > 90) eta_Hbeam = 180 - eta;
else if (eta < -90) eta_Hbeam = 180 - fabs(eta);
else eta_Hbeam = 90 - fabs(eta);
Hbeam = Hbeam + 2*(Rsample*tan(ttheta*deg2rad))*(sin(eta_Hbeam*deg2rad));
// Angular margins around the ring pole/equator inside which the ring can
// still intersect the illuminated sample volume.
RealType eta_pole = (1 + rad2deg*acos(1-(Hbeam/Ring_rad)));
RealType eta_equator = (1 + rad2deg*acos(1-(Rsample/Ring_rad)));
// Classify eta into a quadrant; coeff_y0/coeff_z0 pick the sign of the
// ring-circle root used to bound the ideal spot position below.
if ((eta >= eta_pole) && (eta <= (90-eta_equator)) ) { // % 1st quadrant
quadr_coeff = 1;
coeff_y0 = -1;
coeff_z0 = 1;
} else if ( (eta >=(90+eta_equator)) && (eta <= (180-eta_pole)) ) {//% 4th quadrant
quadr_coeff = 2;
coeff_y0 = -1;
coeff_z0 = -1;
} else if ( (eta >= (-90+eta_equator) ) && (eta <= -eta_pole) ) { // % 2nd quadrant
quadr_coeff = 2;
coeff_y0 = 1;
coeff_z0 = 1;
} else if ( (eta >= (-180+eta_pole) ) && (eta <= (-90-eta_equator)) ) { // % 3rd quadrant
quadr_coeff = 1;
coeff_y0 = 1;
coeff_z0 = -1;
} else quadr_coeff = 0;
// Ideal-spot window: bounded by sample radius in y and beam height in z.
RealType y0_max_Rsample = ys + Rsample;
RealType y0_min_Rsample = ys - Rsample;
RealType z0_max_Hbeam = zs + 0.5 * Hbeam;
RealType z0_min_Hbeam = zs - 0.5 * Hbeam;
if (quadr_coeff == 1) {
y0_max_z0 = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_max_Hbeam * z0_max_Hbeam));
y0_min_z0 = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_min_Hbeam * z0_min_Hbeam));
} else if (quadr_coeff == 2) {
y0_max_z0 = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_min_Hbeam * z0_min_Hbeam));
y0_min_z0 = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_max_Hbeam * z0_max_Hbeam));
}
if (quadr_coeff > 0) {
y0_max = min(y0_max_Rsample, y0_max_z0);
y0_min = max(y0_min_Rsample, y0_min_z0);
} else {
// eta falls in a pole/equator band: bound only one coordinate directly
// and derive the other from the ring-circle equation below.
if ((eta > -eta_pole) && (eta < eta_pole )) {
y0_max = y0_max_Rsample;
y0_min = y0_min_Rsample;
coeff_z0 = 1;
} else if (eta < (-180+eta_pole)) {
y0_max = y0_max_Rsample;
y0_min = y0_min_Rsample;
coeff_z0 = -1;
} else if (eta > (180-eta_pole)) {
y0_max = y0_max_Rsample;
y0_min = y0_min_Rsample;
coeff_z0 = -1;
} else if (( eta > (90-eta_equator)) && (eta < (90+eta_equator)) ) {
quadr_coeff2 = 1;
z0_max = z0_max_Hbeam;
z0_min = z0_min_Hbeam;
coeff_y0 = -1;
} else if ((eta > (-90-eta_equator)) && (eta < (-90+eta_equator)) ) {
quadr_coeff2 = 1;
z0_max = z0_max_Hbeam;
z0_min = z0_min_Hbeam;
coeff_y0 = 1;
}
}
// Complete the (y0, z0) window from the ring-circle equation.
if ( quadr_coeff2 == 0 ) {
z0_min = coeff_z0 * sqrt((Ring_rad * Ring_rad)-(y0_min * y0_min));
z0_max = coeff_z0 * sqrt((Ring_rad * Ring_rad)-(y0_max * y0_max));
} else {
y0_min = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_min * z0_min));
y0_max = coeff_y0 * sqrt((Ring_rad * Ring_rad)-(z0_max * z0_max));
}
// Mirror the window through the origin to obtain the expected window of the
// Friedel pair, preserving the observed offsets (dY*, dZ*).
RealType dYMin = ys - y0_min;
RealType dYMax = ys - y0_max;
RealType dZMin = zs - z0_min;
RealType dZMax = zs - z0_max;
RealType YMinFrIdeal = y0_min;
RealType YMaxFrIdeal = y0_max;
RealType ZMinFrIdeal = -z0_min;
RealType ZMaxFrIdeal = -z0_max;
RealType YMinFr = YMinFrIdeal - dYMin;
RealType YMaxFr = YMaxFrIdeal - dYMax;
RealType ZMinFr = ZMinFrIdeal + dZMin;
RealType ZMaxFr = ZMaxFrIdeal + dZMax;
RealType Eta1, Eta2;
Eta1 = CalcEtaAngle((YMinFr + ys),(ZMinFr - zs));
Eta2 = CalcEtaAngle((YMaxFr + ys),(ZMaxFr - zs));
RealType EtaMinF = min(Eta1,Eta2);
RealType EtaMaxF = max(Eta1,Eta2);
RealType yf, zf, EtaTransf, radius, IdealY, IdealZ, xi,yi,zi, hklnormal[3], hkl[3];
// Scan every observed spot for candidates inside the window.
for (int r=0 ; r < n_arr[0] ; r++) {
if ( ((int)ObsSpotsLab[r*9+5]) != ringno ) continue; // Not a Friedel pair
if ( fabs(ObsSpotsLab[r*9+2] - OmeF) > OmeTol) continue; // Not a Friedel pair
yf = ObsSpotsLab[r*9+0];
zf = ObsSpotsLab[r*9+1];
EtaTransf = CalcEtaAngle(yf + ys, zf - zs);
radius = sqrt((yf + ys)*(yf + ys) + (zf - zs)*(zf - zs));
if ( fabs(radius - 2*Ring_rad) > RadiusTol) continue;
if (( EtaTransf < EtaMinF) || (EtaTransf > EtaMaxF) ) continue;
// Project the midpoint of the spot pair back onto the ring to get the
// ideal (noise-free) spot position.
IdealY = Ring_rad*(ys - ((-ObsSpotsLab[r*9+0] + ys)/2))/sqrt((
ys - ((-ObsSpotsLab[r*9+0] + ys)/2))*(ys - ((-ObsSpotsLab[r*9+0] +
ys)/2))+(zs - (( ObsSpotsLab[r*9+1] + zs)/2))*(zs - ((
ObsSpotsLab[r*9+1] + zs)/2)));
IdealZ = Ring_rad*(zs - (( ObsSpotsLab[r*9+1] + zs)/2))/sqrt((
ys - ((-ObsSpotsLab[r*9+0] + ys)/2))*(ys - ((-ObsSpotsLab[r*9+0] +
ys)/2))+(zs - (( ObsSpotsLab[r*9+1] + zs)/2))*(zs - ((
ObsSpotsLab[r*9+1] + zs)/2)));
// Unit vector toward the ideal spot (RTParamArr[0] = detector distance),
// rotated back by -omega to give the plane normal in the lab frame.
xi = RTParamArr[0]/CalcLength(RTParamArr[0],IdealY,IdealZ);
yi = IdealY/CalcLength(RTParamArr[0],IdealY,IdealZ);
zi = IdealZ/CalcLength(RTParamArr[0],IdealY,IdealZ);
hklnormal[0] = (-1 + xi) * cos(-omega*deg2rad) - yi * sin(-omega*deg2rad);
hklnormal[1] = (-1 + xi) * sin(-omega*deg2rad) + yi * cos(-omega*deg2rad);
hklnormal[2] = zi;
// Take the first hkl entry recorded for this ring.
for (int i=0;i<n_arr[1];i++){
if ((int) hkls[i*7+3] == ringno){
hkl[0] = hkls[i*7+0];
hkl[1] = hkls[i*7+1];
hkl[2] = hkls[i*7+2];
break;
}
}
// Append one result row for this Friedel pair.
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 0] = hkl[0];
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 1] = hkl[1];
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 2] = hkl[2];
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 3] = hklnormal[0];
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 4] = hklnormal[1];
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 5] = hklnormal[2];
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 6] = (RealType) ringno;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 7] = IdealY;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 8] = IdealZ;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 9] = xi;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 10] = yi;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 11] = zi;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 12] = ys;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 13] = zs;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 14] = omega;
ResultArray[rowID * MAX_N_FRIEDEL_PAIRS * N_COLS_FRIEDEL_RESULTS + NrFriedel * N_COLS_FRIEDEL_RESULTS + 15] = RefRad;
NrFriedel++;
}
return NrFriedel;
}
// Kernel: one thread per requested spot ID (n_arr[2] IDs total).
// Looks the ID up in the observed-spot table (column 4) and, when Friedel
// matching is enabled (IntParamArr[2] == 1), searches for the spot's Friedel
// pair via TryFriedel, writing candidate plane normals into ResultArray and
// the per-row match count into nNormals.
// Launch: 1-D grid covering at least n_arr[2] threads (guarded below).
__global__ void FriedelFinding (int *SpotIDs, RealType *ObsSpotsLab,
RealType *hkls, int *n_arr, int *IntParamArr, RealType *RTParamArr, RealType *ResultArray, int *nNormals){
	int rowID = blockIdx.x * blockDim.x + threadIdx.x;
	if (rowID >= n_arr[2]) return;
	int SpotID = SpotIDs[rowID];
	int SpotRowNo = FindRowInMatrix(ObsSpotsLab, n_arr[0], N_COL_OBSSPOTS, 4, SpotID);
	if (SpotRowNo == -1) {
		printf("WARNING: SpotId %d not found in spots file! Ignoring this spotID. n_spots = %d\n", SpotID, n_arr[0]);
		// BUGFIX: record an explicit zero so the host, which copies and
		// iterates the whole nNormals array, never reads uninitialized memory.
		nNormals[rowID] = 0;
		return;
	}
	RealType RefRad = ObsSpotsLab[SpotRowNo*9+3];
	int nPlaneNormals = 0;
	if (IntParamArr[2] == 1) {
		nPlaneNormals = TryFriedel(ObsSpotsLab[SpotRowNo*9+0], ObsSpotsLab[SpotRowNo*9+1],
			ObsSpotsLab[SpotRowNo*9+7], ObsSpotsLab[SpotRowNo*9+6], ObsSpotsLab[SpotRowNo*9+2], (int) ObsSpotsLab[SpotRowNo*9+5],
			RTParamArr[(int) ObsSpotsLab[SpotRowNo*9+5] + 5], RTParamArr[1], RTParamArr[2], RTParamArr[5 + MAX_N_RINGS + 0],
			RTParamArr[5 + MAX_N_RINGS + 3],ObsSpotsLab, hkls, n_arr, RTParamArr, ResultArray,rowID,RefRad);
	}
	// BUGFIX: previously this store lived inside the branch above, leaving
	// nNormals[rowID] uninitialized whenever Friedel matching was disabled;
	// the host unconditionally reads every entry when compacting results.
	nNormals[rowID] = nPlaneNormals;
}
// Host driver for the MIDAS GPU indexer.
// Pipeline: read parameters and spot IDs -> copy observed spots / hkl tables
// to the device -> FriedelFinding kernel -> compact results -> MakeOrientations
// kernel -> per-combination ReturnDiffractionSpots + CompareDiffractionSpots
// loop -> pick the best match per spot ID -> write AllInfo.bin / SpotsInfo.bin.
// BUGFIXES vs. original: memcpy sizes for the final result compaction were in
// elements instead of bytes; added fopen NULL checks; added missing fclose /
// free / cudaFree calls.
int main(int argc, char *argv[]){
	printf("\n\n\t\t\tGPU Indexer v1.0\nContact hsharma@anl.gov in case of questions about the MIDAS project.\n\n");
	RealType iStart = cpuSecond();
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp,0);
	size_t gpuGlobalMem = deviceProp.totalGlobalMem;
	fprintf(stderr, "GPU global memory = %zu MBytes\n", gpuGlobalMem/(1024*1024));
	size_t freeMem, totalMem;
	cudaMemGetInfo(&freeMem, &totalMem);
	fprintf(stderr, "Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
	char folder[4096];
	struct ParametersStruct Parameters;
	char ParamFN[4096];
	getcwd(folder,sizeof(folder));
	sprintf(ParamFN,"%s/%s",folder,argv[1]);
	printf("Reading parameters from file: %s.\n", ParamFN);
	int returncode = ReadParams(ParamFN, &Parameters);
	int *SpotIDs_h;
	SpotIDs_h = (int *) malloc(sizeof(*SpotIDs_h)* MAX_N_SPOTS);
	char spotIDsfn[4096];
	sprintf(spotIDsfn,"%s/%s",folder,Parameters.IDsFileName);
	fflush(stdout);
	int nSpotIDs=0;
	FILE *IDsFile = fopen(spotIDsfn,"r");
	if (IDsFile == NULL){
		printf("Could not open spot IDs file %s\n", spotIDsfn);
		return 1;
	}
	char line[MAX_LINE_LENGTH];
	while (fgets(line,MAX_LINE_LENGTH,IDsFile)!=NULL){
		SpotIDs_h[nSpotIDs] = atoi(line);
		nSpotIDs++;
	}
	fclose(IDsFile); // was never closed
	cudaMemGetInfo(&freeMem, &totalMem);
	fprintf(stderr, "Memcpy to spotIDs Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
	RealType hkls[MAX_N_HKLS*7];
	int HKLints[MAX_N_HKLS*4];
	const char *hklfn = "hkls.csv";
	FILE *hklf = fopen(hklfn,"r");
	if (hklf == NULL){
		printf("Could not open hkl file %s\n", hklfn);
		return 1;
	}
	char aline[1024],dummy[1024];
	fgets(aline,1000,hklf); // skip header line
	int Rnr,i;
	int hi,ki,li;
	RealType hc,kc,lc,RRd,Ds,tht;
	int n_hkls_h = 0;
	// Keep only hkl rows whose ring number appears in the parameter file.
	while (fgets(aline,1000,hklf)!=NULL){
		sscanf(aline, "%d %d %d %lf %d %lf %lf %lf %lf %s %lf",&hi,&ki,&li,&Ds,&Rnr,&hc,&kc,&lc,&tht,dummy,&RRd);
		for (i=0;i<Parameters.NrOfRings;i++){
			if (Rnr == Parameters.RingNumbers[i]){
				HKLints[n_hkls_h*4+0] = hi;
				HKLints[n_hkls_h*4+1] = ki;
				HKLints[n_hkls_h*4+2] = li;
				HKLints[n_hkls_h*4+3] = Rnr;
				hkls[n_hkls_h*7+0] = hc;
				hkls[n_hkls_h*7+1] = kc;
				hkls[n_hkls_h*7+2] = lc;
				hkls[n_hkls_h*7+3] = (RealType)Rnr;
				hkls[n_hkls_h*7+4] = Ds;
				hkls[n_hkls_h*7+5] = tht;
				hkls[n_hkls_h*7+6] = RRd;
				n_hkls_h++;
			}
		}
	}
	fclose(hklf); // was never closed
	char datafn[4096];
	sprintf(datafn,"%s/%s",folder,"Data.bin");
	char ndatafn[4096];
	sprintf(ndatafn,"%s/%s",folder,"nData.bin");
	char spotsfn[4096];
	sprintf(spotsfn,"%s/%s",folder,"Spots.bin");
	char extrafn[4096];
	sprintf(extrafn,"%s/%s",folder,"ExtraInfo.bin");
	FILE *fData = fopen(datafn,"r");
	FILE *fnData = fopen(ndatafn,"r");
	FILE *fSpots = fopen(spotsfn,"r");
	FILE *fExtraInfo = fopen(extrafn,"r");
	if (fData == NULL || fnData == NULL || fSpots == NULL || fExtraInfo == NULL){
		printf("Could not open one of the binary input files (Data.bin, nData.bin, Spots.bin, ExtraInfo.bin).\n");
		return 1;
	}
	fclose(fExtraInfo); // opened but never read in this path
	RealType *hkls_d, *etamargins_d;
	int *HKLints_d;
	RealType etamargins[MAX_N_RINGS];
	// Per-ring eta tolerance: angular size of MarginEta at the ring radius
	// plus half an orientation step; zero radius means "ring unused".
	for ( i = 0 ; i < MAX_N_RINGS ; i++) {
		if ( Parameters.RingRadii[i] == 0) {
			etamargins[i] = 0;
		}else {
			etamargins[i] = rad2deg * atan(Parameters.MarginEta/Parameters.RingRadii[i]) + 0.5 * Parameters.StepsizeOrient;
		}
	}
	cudaMalloc((RealType **)&hkls_d,n_hkls_h*7*sizeof(RealType));
	cudaMalloc((int **)&HKLints_d,n_hkls_h*4*sizeof(int));
	cudaMalloc((RealType **)&etamargins_d,MAX_N_RINGS*sizeof(RealType));
	cudaMemcpy(hkls_d,hkls,n_hkls_h*7*sizeof(RealType),cudaMemcpyHostToDevice);
	cudaMemcpy(HKLints_d,HKLints,n_hkls_h*4*sizeof(int),cudaMemcpyHostToDevice);
	cudaMemcpy(etamargins_d,etamargins,MAX_N_RINGS*sizeof(RealType),cudaMemcpyHostToDevice);
	int nspids = nSpotIDs, *sps;
	cudaMalloc((int **)&sps,nspids*sizeof(int));
	cudaMemcpy(sps,SpotIDs_h,nspids*sizeof(int),cudaMemcpyHostToDevice);
	RealType *ObsSpotsLab, *spots_h;
	fseek(fSpots,0L,SEEK_END);
	long long sizeSpots = ftell(fSpots);
	rewind(fSpots);
	spots_h = (RealType *)malloc(sizeSpots);
	fread(spots_h,sizeSpots,1,fSpots);
	fclose(fSpots); // was never closed
	cudaMalloc((RealType **)&ObsSpotsLab,(size_t)sizeSpots);
	cudaMemcpy(ObsSpotsLab,spots_h,sizeSpots,cudaMemcpyHostToDevice);
	free(spots_h);
	// Spots.bin holds 9 doubles per observed spot.
	int n_spots_h = ((int)sizeSpots)/(9*sizeof(double));
	cudaMemGetInfo(&freeMem, &totalMem);
	fprintf(stderr, "End data Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
	cudaMemGetInfo(&freeMem, &totalMem);
	fprintf(stderr, "FewSpotIDs Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
	// n_arr packs [n_spots, n_hkls, n_spot_ids] for the kernels.
	int *n_arr, n_arr_h[3];
	cudaMalloc((int **)&n_arr,sizeof(int)*3);
	n_arr_h[0] = n_spots_h;
	n_arr_h[1] = n_hkls_h;
	n_arr_h[2] = nspids;
	cudaMemcpy(n_arr,n_arr_h,3*sizeof(int),cudaMemcpyHostToDevice);
	cudaMemGetInfo(&freeMem, &totalMem);
	fprintf(stderr, "SpotsInfo Theor and BestGrains Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
	int *IntParamArr, IntParamArr_h[3];
	IntParamArr_h[0] = Parameters.SpaceGroupNum;
	IntParamArr_h[1] = Parameters.NoOfOmegaRanges;
	IntParamArr_h[2] = Parameters.UseFriedelPairs;
	cudaMalloc((int **)&IntParamArr, sizeof(int)*3);
	cudaMemcpy(IntParamArr,IntParamArr_h,sizeof(int)*3,cudaMemcpyHostToDevice);
	// Flat real-valued parameter block shared by all kernels; layout:
	// [0..4] scalars, [5..5+MAX_N_RINGS) ring radii, then 8 margins/bins,
	// then 6 lattice parameters (ABCABG).
	RealType *RTParamArr, RTParamArr_h[5 + MAX_N_RINGS + 8 + 6];
	RTParamArr_h[0] = Parameters.Distance;
	RTParamArr_h[1] = Parameters.Rsample;
	RTParamArr_h[2] = Parameters.Hbeam;
	RTParamArr_h[3] = Parameters.StepsizePos;
	RTParamArr_h[4] = Parameters.StepsizeOrient;
	for (int cntr=0;cntr<MAX_N_RINGS;cntr++) RTParamArr_h[5+cntr] = Parameters.RingRadii[cntr];
	RTParamArr_h[5+MAX_N_RINGS+0] = Parameters.MarginOme;
	RTParamArr_h[5+MAX_N_RINGS+1] = Parameters.MarginEta;
	RTParamArr_h[5+MAX_N_RINGS+2] = Parameters.MarginRad;
	RTParamArr_h[5+MAX_N_RINGS+3] = Parameters.MarginRadial;
	RTParamArr_h[5+MAX_N_RINGS+4] = Parameters.EtaBinSize;
	RTParamArr_h[5+MAX_N_RINGS+5] = Parameters.OmeBinSize;
	RTParamArr_h[5+MAX_N_RINGS+6] = Parameters.ExcludePoleAngle;
	RTParamArr_h[5+MAX_N_RINGS+7] = Parameters.MinMatchesToAcceptFrac;
	for (int cntr=0;cntr<6;cntr++) RTParamArr_h[5+MAX_N_RINGS+8+cntr] = Parameters.ABCABG[cntr];
	cudaMalloc((RealType **)&RTParamArr,(19+MAX_N_RINGS)*sizeof(RealType));
	cudaMemcpy(RTParamArr,RTParamArr_h,(19+MAX_N_RINGS)*sizeof(RealType),cudaMemcpyHostToDevice);
	// Omega ranges and detector box sizes, 6 values per range.
	RealType *OmeBoxArr, OmeBoxArr_h[Parameters.NoOfOmegaRanges * 6];
	for (int cntr=0;cntr<Parameters.NoOfOmegaRanges;cntr++){
		OmeBoxArr_h[cntr*6 + 0] = Parameters.BoxSizes[cntr][0];
		OmeBoxArr_h[cntr*6 + 1] = Parameters.BoxSizes[cntr][1];
		OmeBoxArr_h[cntr*6 + 2] = Parameters.BoxSizes[cntr][2];
		OmeBoxArr_h[cntr*6 + 3] = Parameters.BoxSizes[cntr][3];
		OmeBoxArr_h[cntr*6 + 4] = Parameters.OmegaRanges[cntr][0];
		OmeBoxArr_h[cntr*6 + 5] = Parameters.OmegaRanges[cntr][1];
	}
	cudaMalloc((RealType **)&OmeBoxArr,Parameters.NoOfOmegaRanges * 6 * sizeof(RealType));
	cudaMemcpy(OmeBoxArr,OmeBoxArr_h,Parameters.NoOfOmegaRanges * 6 * sizeof(RealType),cudaMemcpyHostToDevice);
	int dim = nspids;
	dim3 block (256);
	dim3 grid ((dim/block.x)+1);
	printf("Time elapsed before FriedelFinding: %fs\n",cpuSecond()-iStart);
	RealType *ResultArray;
	int *nNormals;
	cudaMalloc((RealType **)&ResultArray,sizeof(RealType)*nspids*MAX_N_FRIEDEL_PAIRS*N_COLS_FRIEDEL_RESULTS);
	cudaMalloc((int **)&nNormals,sizeof(int)*nspids);
	cudaMemGetInfo(&freeMem, &totalMem);
	fprintf(stderr, "Finding Friedel Pairs Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
	FriedelFinding<<<grid,block>>>(sps, ObsSpotsLab, hkls_d,n_arr,IntParamArr,RTParamArr,ResultArray,nNormals);
	CHECK(cudaPeekAtLastError());
	CHECK(cudaDeviceSynchronize());
	int *data, *nData, *data_h, *nData_h;
	fseek(fData,0L,SEEK_END);
	long long sizeData = ftell(fData);
	rewind(fData);
	data_h = (int *)malloc(sizeData);
	fread(data_h,sizeData,1,fData);
	fclose(fData); // was never closed
	cudaMalloc((int **)&data,(size_t)sizeData);
	cudaMemcpy(data,data_h,sizeData,cudaMemcpyHostToDevice);
	free(data_h);
	cudaMemGetInfo(&freeMem, &totalMem);
	fprintf(stderr, "Memcpy data Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
	fseek(fnData,0L,SEEK_END);
	long long sizenData = ftell(fnData);
	rewind(fnData);
	nData_h = (int *)malloc(sizenData);
	fread(nData_h,sizenData,1,fnData);
	fclose(fnData); // was never closed
	cudaMalloc((int **)&nData,(size_t)sizenData);
	cudaMemcpy(nData,nData_h,sizenData,cudaMemcpyHostToDevice);
	free(nData_h);
	cudaMemGetInfo(&freeMem, &totalMem);
	fprintf(stderr, "Memcpy ndata Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
	int *nNormals_h;
	nNormals_h = (int *) malloc(sizeof(int) * nspids);
	cudaMemcpy(nNormals_h, nNormals, sizeof(int) * nspids, cudaMemcpyDeviceToHost);
	RealType *ResultArray_h;
	ResultArray_h = (RealType *) malloc(sizeof(RealType)*nspids*MAX_N_FRIEDEL_PAIRS*N_COLS_FRIEDEL_RESULTS);
	cudaMemcpy(ResultArray_h,ResultArray,sizeof(RealType)*nspids*MAX_N_FRIEDEL_PAIRS*N_COLS_FRIEDEL_RESULTS,cudaMemcpyDeviceToHost);
	cudaFree(ResultArray);
	// Prefix-sum the per-spot plane-normal counts to get compact row offsets.
	int sumTotal=0, *startingIDs;
	startingIDs = (int *) malloc(sizeof(int) * nspids);
	for (int i=0;i<nspids;i++){
		startingIDs[i] = sumTotal;
		sumTotal += nNormals_h[i];
	}
	// Compact the sparse per-spot result slabs into one dense array.
	RealType *ResultArr, *ResultArr_h;
	int currentpos = 0, outerpos = 0, totalpos = 0;
	ResultArr_h = (RealType *) malloc(sizeof(RealType)*N_COLS_FRIEDEL_RESULTS*sumTotal);
	for (int i=0;i<nspids;i++){
		currentpos = 0;
		for (int j=0;j<nNormals_h[i];j++){
			memcpy(ResultArr_h + (totalpos * N_COLS_FRIEDEL_RESULTS),
				ResultArray_h + (outerpos*MAX_N_FRIEDEL_PAIRS*N_COLS_FRIEDEL_RESULTS + currentpos *N_COLS_FRIEDEL_RESULTS),
				sizeof(RealType)*N_COLS_FRIEDEL_RESULTS);
			currentpos++;
			totalpos++;
		}
		outerpos++;
	}
	if (totalpos != sumTotal){
		printf("Something wrong.\n");
		return 0;
	}
	cudaMemGetInfo(&freeMem, &totalMem);
	fprintf(stderr, "Memcpy data Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
	printf("Time elapsed before making orientations: %fs\n",cpuSecond()-iStart);
	dim3 blocka (32);
	dim3 grida ((sumTotal/blocka.x)+1);
	cudaMalloc((RealType **)&ResultArr,sizeof(RealType)*N_COLS_FRIEDEL_RESULTS*sumTotal);
	CHECK(cudaMemcpy(ResultArr, ResultArr_h,sizeof(RealType)*N_COLS_FRIEDEL_RESULTS*sumTotal,cudaMemcpyHostToDevice));
	int *ResultMakeOrientations, *ResultMakeOrientations_h;
	cudaMalloc((int **)&ResultMakeOrientations,N_COLS_ORIENTATION_NUMBERS*sumTotal*sizeof(int));
	cudaMemset(ResultMakeOrientations,0,N_COLS_ORIENTATION_NUMBERS*sumTotal*sizeof(int));
	//// Now generate candidates and match
	MakeOrientations<<<grida,blocka>>>(ResultArr, HKLints_d, IntParamArr, RTParamArr, ResultMakeOrientations,sumTotal);
	CHECK(cudaPeekAtLastError());
	CHECK(cudaDeviceSynchronize());
	ResultMakeOrientations_h = (int *) malloc(N_COLS_ORIENTATION_NUMBERS*sumTotal*sizeof(int));
	cudaMemcpy(ResultMakeOrientations_h,ResultMakeOrientations,N_COLS_ORIENTATION_NUMBERS*sumTotal*sizeof(int),cudaMemcpyDeviceToHost);
	cudaMemGetInfo(&freeMem, &totalMem);
	fprintf(stderr, "Memcpy before data Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
	// Size the per-combination scratch buffers by the largest job counts.
	long long int totalJobs = 0;
	int maxJobs=0, maxJobsOrient=0;
	for (int i=0;i<sumTotal;i++){
		totalJobs += ResultMakeOrientations_h[i*N_COLS_ORIENTATION_NUMBERS + 2];
		if (ResultMakeOrientations_h[i*N_COLS_ORIENTATION_NUMBERS + 2] > maxJobs) maxJobs = ResultMakeOrientations_h[i*N_COLS_ORIENTATION_NUMBERS + 2];
		if (ResultMakeOrientations_h[i*N_COLS_ORIENTATION_NUMBERS + 0] > maxJobsOrient) maxJobsOrient = ResultMakeOrientations_h[i*N_COLS_ORIENTATION_NUMBERS + 0];
	}
	printf("Total Jobs: %lld, MaxJobs for one combination: %d\n",totalJobs,maxJobs);
	RealType *AllTheorSpots, *IAs, *IAs_h, *GS, *Orientations, *GS_h, *Orientations_h, *AllInfo;
	int *AllGrainSpots,*nSpotsArr,*nMatchedArr,*nMatchedArr_h,*nSpotsArr_h, *SpotsInfoTotal;
	cudaMalloc((RealType **)&AllTheorSpots,maxJobsOrient*n_hkls_h*N_COL_THEORSPOTS*2*sizeof(RealType));
	cudaMalloc((int **)&AllGrainSpots,maxJobs*n_hkls_h*2*sizeof(int));
	cudaMalloc((int **)&nSpotsArr,maxJobsOrient*sizeof(int));
	cudaMalloc((RealType **)&IAs,maxJobs*sizeof(RealType));
	cudaMalloc((int **)&nMatchedArr,maxJobs*sizeof(int));
	cudaMemset(nMatchedArr,0,maxJobs*sizeof(int));
	nMatchedArr_h = (int *) malloc(maxJobs*sizeof(int));
	nSpotsArr_h = (int *) malloc(maxJobsOrient*sizeof(int));
	IAs_h = (RealType *) malloc(maxJobs*sizeof(RealType));
	memset(nMatchedArr_h,0,maxJobs*sizeof(int));
	cudaMemGetInfo(&freeMem, &totalMem);
	fprintf(stderr, "Memcpy ndata Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
	RealType bestFraction, tempFraction;
	int nJobsOrient, posResultArr, nJobsTotal, n_min, BestPosition;
	RealType bestIA, tempIA;
	cudaMalloc((RealType **)&GS,3*maxJobs*sizeof(RealType));
	cudaMalloc((RealType **)&Orientations,9*maxJobsOrient*sizeof(RealType));
	GS_h = (RealType *) malloc(3*maxJobs*sizeof(RealType));
	Orientations_h = (RealType *) malloc(9*maxJobsOrient*sizeof(RealType));
	AllInfo = (RealType *) malloc(N_COL_GRAINMATCHES*sumTotal*sizeof(RealType));
	memset(AllInfo,0,N_COL_GRAINMATCHES*sumTotal*sizeof(RealType));
	SpotsInfoTotal = (int *) malloc(sumTotal*n_hkls_h*2*sizeof(int));
	memset(SpotsInfoTotal,0,sumTotal*n_hkls_h*2*sizeof(int));
	printf("Time elapsed before calculation of matches: %fs\n",cpuSecond()-iStart);
	// For every plane-normal candidate: simulate diffraction spots for each
	// trial orientation, compare against observed spots, and keep the best
	// (fraction matched, then smallest internal angle) result.
	for (int jobNr=0;jobNr<sumTotal;jobNr++){//sumTotal
		posResultArr = jobNr;
		nJobsOrient = ResultMakeOrientations_h[jobNr*N_COLS_ORIENTATION_NUMBERS + 0];
		dim3 blockb (32);
		dim3 gridb ((nJobsOrient/blockb.x)+1);
		ReturnDiffractionSpots<<<gridb,blockb>>>(RTParamArr,OmeBoxArr,IntParamArr,
			AllTheorSpots,hkls_d,n_arr,posResultArr,ResultArr,nJobsOrient,nSpotsArr,
			Orientations);
		CHECK(cudaPeekAtLastError());
		CHECK(cudaDeviceSynchronize());
		cudaMemcpy(nSpotsArr_h,nSpotsArr,nJobsOrient*sizeof(int),cudaMemcpyDeviceToHost);
		cudaMemcpy(Orientations_h,Orientations,nJobsOrient*9*sizeof(RealType),cudaMemcpyDeviceToHost);
		nJobsTotal = ResultMakeOrientations_h[jobNr*N_COLS_ORIENTATION_NUMBERS + 2];
		dim3 blockc (32);
		dim3 gridc ((nJobsTotal/blockc.x)+1);
		n_min = -ResultMakeOrientations_h[jobNr*N_COLS_ORIENTATION_NUMBERS + 1];
		CompareDiffractionSpots<<<gridc,blockc>>>(AllTheorSpots,RTParamArr,
			nJobsTotal, ResultArr, posResultArr, nSpotsArr, data, nData, ObsSpotsLab,
			etamargins_d, AllGrainSpots, IAs, n_arr, nMatchedArr, n_min, nJobsOrient,GS);
		CHECK(cudaPeekAtLastError());
		CHECK(cudaDeviceSynchronize());
		cudaMemcpy(nMatchedArr_h,nMatchedArr,nJobsTotal*sizeof(int),cudaMemcpyDeviceToHost);
		cudaMemcpy(GS_h,GS,nJobsTotal*3*sizeof(RealType),cudaMemcpyDeviceToHost);
		cudaMemcpy(IAs_h,IAs,nJobsTotal*sizeof(RealType),cudaMemcpyDeviceToHost);
		bestFraction = 0.0;
		bestIA = 1000.0;
		for (int idx=0;idx<nJobsTotal;idx++){
			// Jobs are laid out position-major: idx % (2*nPos+1) == orientation.
			tempFraction = ((RealType)nMatchedArr_h[idx])/((RealType)nSpotsArr_h[idx%(-2*n_min + 1 )]);
			tempIA = IAs_h[idx];
			if (tempFraction > bestFraction && tempFraction <= 1 && tempFraction >= 0){
				bestIA = tempIA;
				bestFraction = tempFraction;
				BestPosition = idx;
			}else if(tempFraction == bestFraction && tempIA < bestIA){
				bestIA = tempIA;
				BestPosition = idx;
			}
		}
		if (bestFraction >= Parameters.MinMatchesToAcceptFrac){
			cudaMemcpy(SpotsInfoTotal+jobNr*n_hkls_h*2, AllGrainSpots+BestPosition*n_hkls_h*2,nMatchedArr_h[BestPosition]*sizeof(int),cudaMemcpyDeviceToHost);
			AllInfo[jobNr*N_COL_GRAINMATCHES + 0] = bestIA;
			AllInfo[jobNr*N_COL_GRAINMATCHES + 1] = Orientations_h[BestPosition%(-2*n_min+1)*9 + 0];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 2] = Orientations_h[BestPosition%(-2*n_min+1)*9 + 1];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 3] = Orientations_h[BestPosition%(-2*n_min+1)*9 + 2];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 4] = Orientations_h[BestPosition%(-2*n_min+1)*9 + 3];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 5] = Orientations_h[BestPosition%(-2*n_min+1)*9 + 4];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 6] = Orientations_h[BestPosition%(-2*n_min+1)*9 + 5];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 7] = Orientations_h[BestPosition%(-2*n_min+1)*9 + 6];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 8] = Orientations_h[BestPosition%(-2*n_min+1)*9 + 7];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 9] = Orientations_h[BestPosition%(-2*n_min+1)*9 + 8];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 10] = GS_h[BestPosition*3 + 0];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 11] = GS_h[BestPosition*3 + 1];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 12] = GS_h[BestPosition*3 + 2];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 13] = (RealType)nSpotsArr_h[BestPosition%(-2*n_min+1)];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 14] = (RealType)nMatchedArr_h[BestPosition];
			AllInfo[jobNr*N_COL_GRAINMATCHES + 15] = bestFraction;
		}
	}
	printf("Time elapsed after calculation of matches: %fs\n",cpuSecond()-iStart);
	// Now sort all the results.
	RealType *SaveAllInfo;
	int *SaveSpotsInfoAll;
	SaveAllInfo = (RealType *) malloc(nspids*(N_COL_GRAINMATCHES+1)*sizeof(RealType));
	SaveSpotsInfoAll = (int *) malloc(nspids*n_hkls_h*2*sizeof(int));
	memset(SaveAllInfo,0,nspids*(N_COL_GRAINMATCHES+1)*sizeof(RealType));
	memset(SaveSpotsInfoAll,0,nspids*n_hkls_h*2*sizeof(int));
	int StartingPosition, EndPosition, bestPos;
	for (int i=0;i<nspids;i++){
		StartingPosition = startingIDs[i];
		EndPosition = StartingPosition + nNormals_h[i];
		bestFraction = 0.0;
		bestIA = 1000.0;
		bestPos = -1;
		for (int PlanePos=StartingPosition; PlanePos<EndPosition; PlanePos++){
			tempIA = AllInfo[PlanePos*N_COL_GRAINMATCHES + 0];
			tempFraction = AllInfo[PlanePos*N_COL_GRAINMATCHES + 15];
			if (tempFraction > bestFraction){
				bestFraction = tempFraction;
				bestPos = PlanePos;
				bestIA = tempIA;
			} else if (tempFraction == bestFraction && tempIA < bestIA){
				bestIA = tempIA;
				bestPos = PlanePos;
			}
		}
		if (bestPos >-1){
			SaveAllInfo[i*(N_COL_GRAINMATCHES+1) + 0] = (RealType)SpotIDs_h[i];
			// BUGFIX: memcpy takes a size in BYTES; the original passed the
			// element counts, silently truncating each copied row to
			// N_COL_GRAINMATCHES / n_hkls_h*2 bytes.
			memcpy(SaveAllInfo+i*(N_COL_GRAINMATCHES+1) + 1,AllInfo + bestPos*N_COL_GRAINMATCHES, N_COL_GRAINMATCHES*sizeof(RealType));
			memcpy(SaveSpotsInfoAll+i*n_hkls_h*2, SpotsInfoTotal + bestPos*n_hkls_h*2, n_hkls_h*2*sizeof(int));
		}
	}
	printf("Time elapsed after sorting the results: %fs\n",cpuSecond()-iStart);
	char outfnall[MAX_LINE_LENGTH], outfnspots[MAX_LINE_LENGTH];
	sprintf(outfnall, "%s/AllInfo.bin",Parameters.OutputFolder);
	sprintf(outfnspots, "%s/SpotsInfo.bin",Parameters.OutputFolder);
	FILE *fAllInfo = fopen(outfnall,"w"), *fSpotsInfo = fopen(outfnspots,"w");
	fwrite(SaveAllInfo,nspids*(N_COL_GRAINMATCHES+1)*sizeof(RealType),1,fAllInfo);
	fwrite(SaveSpotsInfoAll,nspids*n_hkls_h*2*sizeof(int),1,fSpotsInfo);
	fclose(fAllInfo);
	fclose(fSpotsInfo);
	// Release host buffers (several were leaked in the original).
	free(nMatchedArr_h);
	free(nSpotsArr_h);
	free(IAs_h);
	free(GS_h);
	free(Orientations_h);
	free(AllInfo);
	free(SpotsInfoTotal);
	free(SaveAllInfo);
	free(SaveSpotsInfoAll);
	free(SpotIDs_h);
	free(nNormals_h);
	free(ResultArray_h);
	free(startingIDs);
	free(ResultArr_h);
	free(ResultMakeOrientations_h);
	cudaDeviceSynchronize();
	cudaFree(GS);
	cudaFree(Orientations);
	cudaFree(AllTheorSpots);
	cudaFree(AllGrainSpots);
	cudaFree(nSpotsArr);
	cudaFree(IAs);
	cudaFree(nMatchedArr);
	cudaFree(data);
	cudaFree(nData);
	cudaFree(sps);
	cudaFree(ObsSpotsLab);
	cudaFree(ResultArr);
	cudaFree(hkls_d);
	cudaFree(HKLints_d);
	cudaFree(etamargins_d);
	cudaFree(n_arr);
	cudaFree(IntParamArr);
	cudaFree(RTParamArr);
	cudaFree(OmeBoxArr);
	cudaFree(nNormals);               // was leaked
	cudaFree(ResultMakeOrientations); // was leaked
	cudaDeviceReset();
	printf("Time elapsed: %fs\n",cpuSecond()-iStart);
	return 0;
}
|
8,370 | #include <stdio.h>
// Theards per block
#ifndef TPB
#define TPB 256
#endif
// Blocks
#ifndef NB
#define NB 1
#endif
__global__ void helloWorldKernel();
int main()
{
    // Launch configuration comes from the compile-time knobs NB (blocks)
    // and TPB (threads per block).
    const dim3 gridDims(NB);
    const dim3 blockDims(TPB);
    helloWorldKernel<<<gridDims, blockDims>>>();
    // Block the host until the device finishes so that every kernel-side
    // printf is flushed before the process exits.
    cudaDeviceSynchronize();
    return 0;
}
__global__ void helloWorldKernel()
{
    // Flat global thread index across the 1-D grid.
    const int globalId = blockDim.x * blockIdx.x + threadIdx.x;
    printf("Hello World! My threadId is %d\n", globalId);
}
|
8,371 | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
///////////////////////////////////////////////////////////////////////////////
// On G80-class hardware 24-bit multiplication takes 4 clocks per warp
// (the same as for floating point multiplication and addition),
// whereas full 32-bit multiplication takes 16 clocks per warp.
// So if integer multiplication operands are guaranteed to fit into 24 bits
// (always lie withtin [-8M, 8M - 1] range in signed case),
// explicit 24-bit multiplication is preferred for performance.
///////////////////////////////////////////////////////////////////////////////
#define IMUL(a, b) __mul24(a, b)
///////////////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on GPU
// Parameters restrictions:
// 1) ElementN is strongly preferred to be a multiple of warp size to
// meet alignment constraints of memory coalescing.
// 2) ACCUM_N must be a power of two.
///////////////////////////////////////////////////////////////////////////////
#define ACCUM_N 1024
// Computes vectorN scalar products d_C[vec] = dot(d_A[vec], d_B[vec]) where
// each vector has elementN elements.  One thread block handles one vector at
// a time (grid-stride over vectors); within a block, ACCUM_N partial sums are
// accumulated in shared memory and combined with a tree reduction.
// Requirements (see file header): ACCUM_N must be a power of two; elementN
// should be a multiple of the warp size for coalescing.
__global__ void scalarProdGPU(
float *d_C,
float *d_A,
float *d_B,
int vectorN,
int elementN
){
//Accumulators cache
__shared__ float accumResult[ACCUM_N];
////////////////////////////////////////////////////////////////////////////
// Cycle through every pair of vectors,
// taking into account that vector counts can be different
// from total number of thread blocks
////////////////////////////////////////////////////////////////////////////
for(int vec = blockIdx.x; vec < vectorN; vec += gridDim.x){
int vectorBase = IMUL(elementN, vec);
int vectorEnd = vectorBase + elementN;
////////////////////////////////////////////////////////////////////////
// Each accumulator cycles through vectors with
// stride equal to number of total number of accumulators ACCUM_N
// At this stage ACCUM_N is only preferred be a multiple of warp size
// to meet memory coalescing alignment constraints.
////////////////////////////////////////////////////////////////////////
for(int iAccum = threadIdx.x; iAccum < ACCUM_N; iAccum += blockDim.x){
float sum = 0;
// Strided partial dot product: accumulator iAccum sums elements
// iAccum, iAccum+ACCUM_N, iAccum+2*ACCUM_N, ... of this vector pair.
for(int pos = vectorBase + iAccum; pos < vectorEnd; pos += ACCUM_N)
sum += d_A[pos] * d_B[pos];
accumResult[iAccum] = sum;
}
////////////////////////////////////////////////////////////////////////
// Perform tree-like reduction of accumulators' results.
// ACCUM_N has to be power of two at this stage
////////////////////////////////////////////////////////////////////////
for(int stride = ACCUM_N / 2; stride > 0; stride >>= 1){
// Barrier is outside any divergent branch: every thread reaches it,
// separating the previous level's writes from this level's reads.
__syncthreads();
for(int iAccum = threadIdx.x; iAccum < stride; iAccum += blockDim.x)
accumResult[iAccum] += accumResult[stride + iAccum];
}
// Final barrier so thread 0 sees the completed reduction before the
// single write of this vector's result.
__syncthreads();
if(threadIdx.x == 0) d_C[vec] = accumResult[0];
}
}
|
8,372 | /*
Transformer function helper function.
Written by tomztyang,
2021/08/23
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// Relative-position-encoding forward pass for keys: for each (query, local
// neighbor, head) triple, accumulates the dot product between the neighbor's
// key feature vector and a lookup-table row selected by the quantized
// relative position.  Each thread contributes one hdim element via atomicAdd,
// so output must be zero-initialized by the caller.
__global__ void rpe_k_forward(
int b, int total_query_num, int local_size,
int total_key_num, int nhead, int hdim, int l,
const int *query_batch_cnt, const int *key_batch_cnt, const int* index_pair_batch,
const int *index_pair,
const float *relpos, const float* lookup_table, const float* key_features,
float *output) {
// dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
// params query_batch_cnt: [b]
// params key_batch_cnt: [b]
// params index_pair_batch: [total_query_num]
// params index_pair: [total_query_num, local_size]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params key_features: [total_key_num, nhead, hdim]
// params output: [total_query_num, local_size, nhead]
// Grid: x flattens (query, local neighbor) pairs; y = head; z = hidden dim.
int index = blockIdx.x * blockDim.x + threadIdx.x;
int head_idx = blockIdx.y;
int hdim_idx = blockIdx.z;
if (index >= total_query_num * local_size ||
head_idx >= nhead ||
hdim_idx >= hdim) return;
if (index_pair[index] == -1){
// Ignore index.
return;
}
int query_idx = index / local_size;
int key_idx = index % local_size;
int batch_idx = index_pair_batch[query_idx];
// Offset of this batch's first key: prefix sum over key_batch_cnt
// (O(b) per thread; b is assumed small).
int key_start_idx = 0;
for (int i = 0; i < batch_idx; i++){
key_start_idx += key_batch_cnt[i];
}
// 1. Obtain key features.
// index_pair holds batch-local key indices; add the batch offset to get
// the global row in key_features.
key_start_idx += index_pair[index];
key_features += key_start_idx * nhead * hdim + head_idx * hdim + hdim_idx;
// 2. Obtain quantize relative position.
// Clamp floor(relpos) into [0, l-1] to index the lookup table safely.
relpos += index;
int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
// 3. Obtain output position.
// All hdim blocks (blockIdx.z) accumulate into the same output element,
// hence the atomicAdd.
output += index * nhead + head_idx;
atomicAdd(
output,
key_features[0] * lookup_table[0]);
}
void rpe_k_launcher(
    int b, int total_query_num, int local_size,
    int total_key_num, int nhead, int hdim, int l,
    const int *query_batch_cnt, const int *key_batch_cnt, const int* index_pair_batch,
    const int *index_pair,
    const float *relpos, const float* lookup_table, const float* key_features,
    float *output){
    // Host-side launcher for rpe_k_forward.
    // params query_batch_cnt: [b]
    // params key_batch_cnt: [b]
    // params index_pair_batch: [total_query_num]
    // params index_pair: [total_query_num, local_size]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params key_features: [total_key_num, nhead, hdim]
    // params output: [total_query_num, local_size, nhead]
    dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    dim3 threads(THREADS_PER_BLOCK);
    rpe_k_forward<<<blocks, threads>>>(
        b, total_query_num, local_size, total_key_num, nhead, hdim, l,
        query_batch_cnt, key_batch_cnt, index_pair_batch,
        index_pair, relpos, lookup_table, key_features,
        output);
    // Fix: kernel launches fail silently; surface configuration errors here.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "rpe_k_forward launch failed: %s\n", cudaGetErrorString(err));
    }
}
__global__ void rpe_k_backward(
    int b, int total_query_num, int local_size,
    int total_key_num, int nhead, int hdim, int l,
    const int *query_batch_cnt, const int *key_batch_cnt, const int* index_pair_batch,
    const int *index_pair,
    const float *relpos, const float* lookup_table, const float* key_features,
    float *grad_out, float * grad_lookup_table, float * grad_key_features) {
    // Backward pass of rpe_k_forward: the forward computes
    // out += key * table, so d(key) += grad_out * table and
    // d(table) += grad_out * key. One thread per (query, local key) pair,
    // per head (blockIdx.y), per channel (blockIdx.z).
    // dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    // params query_batch_cnt: [b]
    // params key_batch_cnt: [b]
    // params index_pair_batch: [total_query_num]
    // params index_pair: [total_query_num, local_size]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params key_features: [total_key_num, nhead, hdim]
    // params grad_out: [total_query_num, local_size, nhead]
    // params grad_lookup_table: [l, nhead, hdim]
    // params grad_key_features: [total_key_num, nhead, hdim]
    int index = blockIdx.x * blockDim.x + threadIdx.x;  // flat (query, local key) id
    int head_idx = blockIdx.y;
    int hdim_idx = blockIdx.z;
    if (index >= total_query_num * local_size ||
        head_idx >= nhead ||
        hdim_idx >= hdim) return;
    if (index_pair[index] == -1){
        // Ignore index (padded / invalid neighbor slot).
        return;
    }
    int query_idx = index / local_size;
    int key_idx = index % local_size;  // NOTE(review): unused, kept to mirror the forward kernel
    int batch_idx = index_pair_batch[query_idx];
    // Offset of this batch's first key in the flattened key array.
    int key_start_idx = 0;
    for (int i = 0; i < batch_idx; i++){
        key_start_idx += key_batch_cnt[i];
    }
    // 1. Obtain key features.
    key_start_idx += index_pair[index];
    key_features += key_start_idx * nhead * hdim + head_idx * hdim + hdim_idx;
    grad_key_features += key_start_idx * nhead * hdim + head_idx * hdim + hdim_idx;
    // 2. Obtain quantize relative position (clamped into [0, l - 1]).
    relpos += index;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
    grad_lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
    // 3. Obtain output position.
    grad_out += index * nhead + head_idx;
    // Atomics: multiple pairs may share the same key row / lookup-table bin.
    atomicAdd(
        grad_key_features,
        grad_out[0] * lookup_table[0]);
    atomicAdd(
        grad_lookup_table,
        grad_out[0] * key_features[0]);
}
void rpe_k_grad_launcher(
    int b, int total_query_num, int local_size,
    int total_key_num, int nhead, int hdim, int l,
    const int *query_batch_cnt, const int *key_batch_cnt, const int* index_pair_batch,
    const int *index_pair,
    const float *relpos, const float* lookup_table, const float* key_features,
    float *grad_out, float* grad_lookup_table, float* grad_key_features){
    // Host-side launcher for rpe_k_backward.
    // params query_batch_cnt: [b]
    // params key_batch_cnt: [b]
    // params index_pair_batch: [total_query_num]
    // params index_pair: [total_query_num, local_size]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params key_features: [total_key_num, nhead, hdim]
    // params grad_out: [total_query_num, local_size, nhead]
    // params grad_lookup_table: [l, nhead, hdim]
    // params grad_key_features: [total_key_num, nhead, hdim]
    dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    dim3 threads(THREADS_PER_BLOCK);
    rpe_k_backward<<<blocks, threads>>>(
        b, total_query_num, local_size, total_key_num, nhead, hdim, l,
        query_batch_cnt, key_batch_cnt, index_pair_batch,
        index_pair, relpos, lookup_table, key_features,
        grad_out, grad_lookup_table, grad_key_features);
    // Fix: kernel launches fail silently; surface configuration errors here.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "rpe_k_backward launch failed: %s\n", cudaGetErrorString(err));
    }
}
|
8,373 | #include <iostream>
#include <memory>
__global__ void tensor_1d_assign(float *tensor, size_t tensor_size) {
  // Each thread writes its own flat global index into one element.
  const int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= tensor_size) return;
  tensor[idx] = idx;
}
void tensor_1d_test() {
  // Fill a 1Mi-element device buffer with its indices on the GPU, copy it
  // back, and verify element i holds the value i.
  constexpr int kCount = 1024 * 1024;
  float *device_ptr;
  cudaMalloc(&device_ptr, sizeof(float) * kCount);
  const int block_dim = 256;
  const int grid_dim = kCount / block_dim;
  tensor_1d_assign<<<grid_dim, block_dim>>>(device_ptr, kCount);
  auto host_ptr = std::unique_ptr<float[]>(new float[kCount]);
  // cudaMemcpy synchronizes with the preceding kernel launch.
  cudaMemcpy(host_ptr.get(), device_ptr, sizeof(float) * kCount,
             cudaMemcpyDeviceToHost);
  cudaFree(device_ptr);
  int first_bad = -1;
  for (int i = 0; i < kCount && first_bad < 0; ++i) {
    if (host_ptr[i] != i) first_bad = i;
  }
  if (first_bad < 0) {
    std::cout << "ok" << std::endl;
  } else {
    std::cout << "host_ptr[" << first_bad << "] = " << host_ptr[first_bad]
              << std::endl;
    std::cout << "wrong" << std::endl;
  }
}
__global__ void tensor_2d_assign(float *tensor, int width, int height,
                                 size_t pitch) {
  // Writes row-major sequential values into a pitched 2-D allocation.
  // Grid mapping: x covers rows (height), y covers columns (width).
  int i = blockDim.x * blockIdx.x + threadIdx.x;  // row index
  int j = blockDim.y * blockIdx.y + threadIdx.y;  // column index
  if (!(i < height && j < width)) {
    return;
  }
  // Rows are 'pitch' bytes apart, which may exceed width * sizeof(float).
  float *tensor_row =
      reinterpret_cast<float *>(reinterpret_cast<char *>(tensor) + i * pitch);
  tensor_row[j] = i * width + j;
}
void tensor_2d_test() {
  // Fill a pitched 2-D device allocation with row-major sequential values,
  // copy it back with cudaMemcpy2D, and verify on the host.
  const int width = 4096;
  const int height = 1024;
  size_t pitch = 0u;
  float *device_ptr;
  cudaMallocPitch(&device_ptr, &pitch, width * sizeof(float), height);
  dim3 block_dim(16, 16);
  // Kernel maps x -> row (height) and y -> column (width).
  dim3 grid_dim(height / 16, width / 16);
  tensor_2d_assign<<<grid_dim, block_dim>>>(device_ptr, width, height, pitch);
  auto host_ptr = std::unique_ptr<float[]>(new float[width * height]);
  cudaMemcpy2D(host_ptr.get(), width * sizeof(float), device_ptr, pitch,
               width * sizeof(float), height, cudaMemcpyDeviceToHost);
  cudaFree(device_ptr);
  bool is_ok = true;
  auto value = 0;
  for (auto i = 0; i < height; ++i) {
    for (auto j = 0; j < width; ++j, ++value) {
      if (host_ptr[width * i + j] != value) {
        std::cout << host_ptr[width * i + j] << std::endl;
        // Fix: the mismatch was detected but never recorded, so the
        // function always reported "ok" regardless of the data.
        is_ok = false;
        break;
      }
    }
    if (!is_ok) {
      break;
    }
  }
  if (is_ok) {
    std::cout << "ok" << std::endl;
  } else {
    std::cout << "wrong" << std::endl;
  }
}
__global__ void tensor_3d_assign(cudaPitchedPtr pitched_ptr, int depth,
                                 int height, int width) {
  // One thread per element: grid x/y/z map to depth/height/width.
  // NOTE: no bounds check — the launch grid must exactly cover the extent.
  const int d_idx = blockDim.x * blockIdx.x + threadIdx.x;
  const int h_idx = blockDim.y * blockIdx.y + threadIdx.y;
  const int w_idx = blockDim.z * blockIdx.z + threadIdx.z;
  const size_t pitch = pitched_ptr.pitch;
  // Slices are (pitch * height) bytes apart; rows are 'pitch' bytes apart.
  char *base = static_cast<char *>(pitched_ptr.ptr);
  int *row = reinterpret_cast<int *>(base + d_idx * (pitch * height) +
                                     h_idx * pitch);
  row[w_idx] = (d_idx * height + h_idx) * width + w_idx;
}
void tensor_3d_test() {
  // Fill a pitched 3-D device allocation with sequential values on the GPU,
  // copy it back with cudaMemcpy3D, and verify on the host.
  const int depth = 32;
  const int height = 64;
  const int width = 128;
  cudaExtent extent = make_cudaExtent(width * sizeof(int), height, depth);
  cudaPitchedPtr pitched_ptr;
  cudaMalloc3D(&pitched_ptr, extent);
  dim3 block_dim(4, 4, 4);
  dim3 grid_dim(depth / 4, height / 4, width / 4);
  tensor_3d_assign<<<grid_dim, block_dim>>>(pitched_ptr, depth, height, width);
  int host_ptr[32][64][128];  // 1 MiB on the stack
  // Fix: cudaMemcpy3DParms must be zero-initialized. The struct also carries
  // srcPos/dstPos/srcArray/dstArray fields, which were previously left as
  // stack garbage, making the copy parameters undefined.
  cudaMemcpy3DParms memcpy_params = {};
  memcpy_params.srcPtr = pitched_ptr;
  memcpy_params.dstPtr.ptr = host_ptr;
  memcpy_params.dstPtr.pitch = width * sizeof(int);
  memcpy_params.dstPtr.xsize = width;
  memcpy_params.dstPtr.ysize = height;
  memcpy_params.kind = cudaMemcpyDeviceToHost;
  // Extent width is in bytes for non-array copies.
  memcpy_params.extent.depth = depth;
  memcpy_params.extent.height = height;
  memcpy_params.extent.width = width * sizeof(int);
  cudaMemcpy3D(&memcpy_params);
  cudaFree(pitched_ptr.ptr);
  bool is_ok = true;
  int value = 0;
  for (int d = 0; d < depth; ++d) {
    for (int h = 0; h < height; ++h) {
      for (int w = 0; w < width; ++w, ++value) {
        if (host_ptr[d][h][w] != value) {
          std::cout << "wrong result. host_ptr[" << d << "][" << h << "][" << w
                    << "] = " << host_ptr[d][h][w] << ",, value = " << value
                    << std::endl;
          is_ok = false;
          break;
        }
      }
      if (!is_ok)
        break;
    }
    if (!is_ok)
      break;
  }
  if (is_ok) {
    std::cout << "ok" << std::endl;
  } else {
    std::cout << "wrong" << std::endl;
  }
}
int main() {
  // Only the 3-D smoke test is enabled; the 1-D/2-D variants can be
  // re-enabled for debugging.
  // tensor_1d_test();
  // tensor_2d_test();
  tensor_3d_test();
  return 0;
}
8,374 |
/* Execution Format : ./<exe> <drug_result_1_dict_compounds.txt> <drug_result_2_dict_compounds.txt> <drug_result_1_dict_proteins.txt> <drug_result_2_dict_proteins.txt> <para.txt> <drug name>
*/
#include <stdio.h>
#include <errno.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/dir.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#define INITIAL_SIZE (102397)  /* initial bucket count of the host-side dictionary */
#define MULTIPLIER (97)        /* polynomial hash multiplier */
typedef struct dict *Dict;
/* create a new empty dictionary */
Dict DictCreate(void);
/* destroy a dictionary */
void DictDestroy(Dict);
/* insert a new key-value pair into an existing dictionary */
void DictInsert(Dict, const char *key, int val);
/* return the most recently inserted value associated with a key */
/* or 0 if no matching key is present */
struct elt * DictSearch(Dict, const char *key);
/* delete the most recently inserted record with the given key */
/* if there is no such record, has no effect */
void DictDelete(Dict, const char *key);
// Dictionary Code from : http://www.cs.yale.edu/homes/aspnes/pinewiki/C(2f)HashTables.html?highlight=(CategoryAlgorithmNotes)
//Structure for nodes in dictionary (host-side hash-chain node)
struct elt {
    struct elt *next;  /* next node in this bucket's collision chain */
    char *key;         /* strdup'ed keyword, owned by the dictionary */
    int value;         /* occurrence count for this keyword */
};
//Structure for dictionary
struct dict {
    int size; /* size of the pointer table */
    int n; /* number of elements stored */
    struct elt **table; /* array of bucket head pointers */
};
//Structure for nodes in the CUDA hashtable (device-side, fixed-size key)
typedef struct node {
    char key[80];      /* NUL-terminated keyword (at most 79 chars) */
    int index;         /* position of this keyword in the value arrays */
    struct node *next; /* next node in the device bucket chain */
} Node;
/* dictionary initialization code used in both DictCreate and grow */
/* Allocates a dictionary header plus 'size' empty buckets; exits the
   process on allocation failure. */
Dict internalDictCreate(int size)
{
    Dict d = (Dict)malloc(sizeof(*d));
    if (d == NULL) {
        printf("d malloc failed\n");
        exit(0);
    }
    assert(d != 0);
    d->size = size;
    d->n = 0;
    d->table = (elt **)malloc(sizeof(struct elt *) * d->size);
    if (d->table == NULL) {
        printf("d->table malloc failed\n");
        exit(0);
    }
    assert(d->table != 0);
    /* Every bucket starts out empty. */
    for (int i = 0; i < d->size; i++) {
        d->table[i] = 0;
    }
    return d;
}
//Function to create dictionary
//Allocates an empty dictionary with the default INITIAL_SIZE bucket count.
Dict DictCreate()
{
    return internalDictCreate(INITIAL_SIZE);
}
//Function to free dictionary
//Releases every chain node, each node's duplicated key, the bucket table,
//and finally the dictionary header itself.
void DictDestroy(Dict d)
{
    for (int bucket = 0; bucket < d->size; bucket++) {
        struct elt *e = d->table[bucket];
        while (e != 0) {
            struct elt *next = e->next;  /* save before freeing the node */
            free(e->key);
            //free(e->value);
            free(e);
            e = next;
        }
    }
    free(d->table);
    free(d);
}
//Function to compute hash value
//Polynomial rolling hash over the bytes of s: h = h * MULTIPLIER + byte.
static unsigned long hash_function(const char *s)
{
    unsigned long h = 0;
    for (const unsigned char *p = (unsigned const char *)s; *p != '\0'; ++p) {
        h = h * MULTIPLIER + *p;
    }
    return h;
}
/* insert a new key-value pair into an existing dictionary */
/* Duplicates the key and pushes the new element at the head of its bucket's
   chain, so it shadows any older entry with the same key. */
void DictInsert(Dict d, const char *key, int val)
{
    struct elt *e;
    unsigned long h;
    assert(key);
    assert(val);  /* NOTE(review): aborts on val == 0 — zero values cannot be inserted */
    e = (elt*)malloc(sizeof(*e));
    if(e==NULL){
        printf("e malloc failed\n");
        exit(0);
    }
    assert(e);
    e->key = strdup(key);
    //e->value = (int*)malloc(sizeof(int));
    e->value = val;
    /* Head insertion into the key's bucket. */
    h = hash_function(key) % d->size;
    e->next = d->table[h];
    d->table[h] = e;
    d->n++;
    return;
}
/* return the most recently inserted Node associated with a key */
/* or NULL if no matching key is present */
struct elt * DictSearch(Dict d, const char *key)
{
    /* Newest entries sit at the head of the chain, so the first match is
       the most recent insertion. */
    struct elt *e = d->table[hash_function(key) % d->size];
    while (e != 0) {
        if (strcmp(e->key, key) == 0) {
            /* got it */
            return e;
        }
        e = e->next;
    }
    return NULL;
}
/* delete the most recently inserted record with the given key */
/* if there is no such record, has no effect */
void DictDelete(Dict d, const char *key)
{
    struct elt **prev; /* what to change when elt is deleted */
    struct elt *e; /* what to delete */
    /* Walk the bucket chain through a pointer-to-pointer so head and
       interior nodes are unlinked by the same code path. */
    for(prev = &(d->table[hash_function(key) % d->size]);
        *prev != 0;
        prev = &((*prev)->next)) {
        if(!strcmp((*prev)->key, key)) {
            /* got it: unlink, then free the node and its duplicated key */
            e = *prev;
            *prev = e->next;
            free(e->key);
            //free(e->value);
            free(e);
            return;
        }
    }
}
//Function to compute normal distribution of a value, equivalent to Python's CDF.norm from NVIDIA CUDA samples
//http://stackoverflow.com/questions/2328258/cumulative-normal-distribution-function-in-c-c
//Polynomial approximation of the standard normal CDF; evaluated in double
//precision, result narrowed to float on return.
__device__ float CND(float d)
{
    const double A1 = 0.31938153;
    const double A2 = -0.356563782;
    const double A3 = 1.781477937;
    const double A4 = -1.821255978;
    const double A5 = 1.330274429;
    const double RSQRT2PI = 0.39894228040143267793994605993438;  // 1/sqrt(2*pi)
    double
    K = 1.0 / (1.0 + 0.2316419 * fabs(d));
    double
    cnd = RSQRT2PI * exp(- 0.5 * d * d) *
          (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5)))));
    // The polynomial gives the tail for d <= 0; mirror it for positive d.
    if (d > 0)
        cnd = 1.0 - cnd;
    return cnd;
}
//Function to remove specific characters from input string, used to remove carriage returns
//Compacts str in place, dropping every occurrence of 'garbage'.
void removeChar(char *str, char garbage) {
    char *rd = str;
    char *wr = str;
    while (*rd != '\0') {
        *wr = *rd++;
        if (*wr != garbage) {
            ++wr;  /* keep the byte only when it is not the target char */
        }
    }
    *wr = '\0';
    return;
}
//Function used by qsort to sort the records based on number of tokens
//Each record looks like "text;count;...": the comparator extracts the integer
//in the second ';'-separated field of both records and orders ascending.
//a and b point to char* elements, as qsort requires.
int sort(const void* a, const void* b)
{
    /* Work on copies because strtok_r mutates its input. */
    char *ia = strdup(*(const char **)a);
    char *ib = strdup(*(const char **)b);
    char *split1, *saveptr, *saveptr1;
    split1 = strtok_r(ia, ";", &saveptr);
    split1 = strtok_r(NULL, ";", &saveptr);
    int x = atoi(split1);
    split1 = strtok_r(ib, ";", &saveptr1);
    split1 = strtok_r(NULL, ";", &saveptr1);
    int y = atoi(split1);
    /* Fix: the duplicated buffers were leaked on every comparison. */
    free(ia);
    free(ib);
    return (x - y);
}
/*Kernel function performs sampling and Z-score, P-value calculation
It is designed in this way: One block does one sampling and every thread processes one record. In case number of records to be processed exceed 1024, then some threads will take more than one stride. That is, some threads process more than one record during one sampling.
Stages in kernel function are:
1) Build the dictionary 'd_hashtab', for O(1) time lookup of keyword while sampling, first thread will ensure all the keys 'd_r1_dict_keys' are linked in the hashtable.
2) Generate the random numbers and sort them. Every thread will generate a random number and first thread in every block will sort the random numbers using iterative quick sort function. In case number of random numbers required are higher than 1024, then some threads will take more strides to generate the required number of random numbers.
3) Shared memory initialization for sampling. This is required, as during sampling if keyword is found then we increment the count.
4) Sampling: every block performs one sampling, and every thread processes at least one record. Each thread extracts the keywords/tokens in its record and looks each keyword up in the dictionary; if found, the corresponding shared-memory counter is incremented.
5) Copy data to global memory from shared memory for Z-score and P-value calculation.
6) Z-score and P-value calculation. In this stage, one thread will compute Z-score and P-value for atleast one record. In case, the number of threads are fewer than number of records, then some threads will take more strides to compute the Z-score and P-value.
Generally, this would never happen, because the number of threads (blocks*threadCount) is always higher than the number of records.
Threads operate on the 'd_r1_dict_value', producing Z-score and P-value for each array of integers.
Note: Shared memory s_random_r1_key array has 2 types of value: random numbers and array for keywords found during sampling.
First part is the random numbers, second part is used for incrementing array of keywords for sampling.
Arguments passed to kernel function:
* d_r1_dict_list - is the list of keywords from dictionary 1, created in the CPU.
* d_r2_str - is the list of records to be used for sampling.
* d_r1_dict_value - Global values for vector produced from sampling.
* sampleTimes - number of samples.
* sampleSize - size of the sample.
* randomRange - maximum value of each random number.
* r1_dict_cnt - number of keywords in dictionary 1.
* d_z_score - array to hold Z-scores.
* d_p_value - array to hold P-values.
* d_r1_dict_keys - keywords of dictionary 1, to populate hashtable in kernel function.
* sampleStrides - maximum number of strides every thread will take for sampling.
* threadCount - number of threads per block.
*/
/* Sampling kernel: one block performs one sampling; each thread processes one
   record per stride. Counts, for every dictionary-1 keyword, how often it
   occurs in a random sample of records drawn from d_r2_str (fixed 1000-byte
   record slots, '~' separates tokens, '^' terminates a slot), and writes the
   per-block counts into column (blockIdx.x + 1 + samplesCompleted) of
   d_r1_dict_value, laid out as [r1_dict_cnt][sampleTimes+1].
   Dynamic shared memory: r1_dict_cnt ints (per-block keyword counters). */
__global__ void deviceDDI(char * d_r2_str, int * d_r1_dict_value, int sampleTimes, int sampleSize, int randomRange, int r1_dict_cnt, Node *d_r1_dict_keys, int sampleStrides, int threadCount, int samplesCompleted){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j, k, x = 0, ind1, random;
    unsigned hashval;
    extern __shared__ int s_r1_value_list[];   // per-block keyword hit counters
    __shared__ Node *s_hashtab[5003];          // bucket heads into d_r1_dict_keys
    //s_random_r1_key array has 2 types of value: random numbers and array for keywords found during sampling.
    //First part is the random numbers, second part is used for incrementing array of keywords for sampling.
    //Build the dictionary
    // Thread 0 of each block builds the hash table and zeroes the counters;
    // the other threads wait at the __syncthreads() below.
    if(threadIdx.x==0){
        //Initialize the hash table
        for(j=0;j<5003;j++){
            s_hashtab[j] = 0;
        }
        j=0;
        k=0;
        //Build the dictionary
        /*while(d_r1_dict_list[k]!='^'){
        x=0;
        hashval = 0;
        while(d_r1_dict_list[k]!='~'){
        d_r1_dict_keys[j].key[x++] = d_r1_dict_list[k];
        hashval = ((int)d_r1_dict_list[k++] + 97*hashval)%5003;
        }
        d_r1_dict_keys[j].key[x] = '\0';
        d_r1_dict_keys[j].index = j;
        d_r1_dict_keys[j].next = (s_hashtab[hashval]==0?NULL:s_hashtab[hashval]);
        s_hashtab[hashval] = &d_r1_dict_keys[j++];
        k++;
        }*/
        // Chain every keyword node into its bucket (hash: h = h*97 + ch, mod 5003).
        for(j=0;j<r1_dict_cnt;j++){
            x=0;
            hashval = 0;
            while(d_r1_dict_keys[j].key[x]!='\0'){
                hashval = ((int)d_r1_dict_keys[j].key[x++] + 97*hashval)%5003;
            }
            d_r1_dict_keys[j].next = (s_hashtab[hashval]==0?NULL:s_hashtab[hashval]);
            s_hashtab[hashval] = &d_r1_dict_keys[j];
        }
        //Initialize the shared memory
        for(j=0;j<(r1_dict_cnt);j++){
            s_r1_value_list[j] = 0;
        }
    }
    __syncthreads();
    //Generate the random numbers
    curandState_t state;
    curand_init(clock64(), i, 0, &state);
    //Sampling
    for(j=0;(j<sampleStrides)&&((threadIdx.x+(j*threadCount))<sampleSize);j++){
        char str_split[80];
        x=0;
        hashval = 0;
        // Pick a random record slot; record k-range is [random*1000, random*1000+1000).
        random = curand(&state)%randomRange;
        for(k=random*1000;k<random*1000+1000;k++){
            if(d_r2_str[k] == '^')
                break;
            if(d_r2_str[k] != '~'){
                // Still inside a token: collect the byte and roll the hash.
                str_split[x++] = d_r2_str[k];
                hashval = ((int)d_r2_str[k] + 97*hashval)%5003;
            }
            else{
                str_split[x] = '\0';
                // Token complete: look it up in the shared hash table.
                Node *np = s_hashtab[hashval];
                while((np!= NULL)&&(np!=0)){
                    ind1 = 0;
                    while((np->key[ind1] != '\0')&&(ind1<x)){
                        if(np->key[ind1] == str_split[ind1])
                            ind1++;
                        else
                            break;
                    }
                    if((np->key[ind1] == '\0')&&(ind1==x)){
                        // Exact match: count one hit for this keyword.
                        atomicAdd(&s_r1_value_list[(np->index)],1);
                        break;
                    }
                    if(np->next == NULL||np->next==0)
                        break;
                    np = np->next;
                }
                // Reset the token accumulator for the next token.
                x=0;
                hashval = 0;
            }
        }
    }
    __syncthreads();
    //Copy to global memory from shared memory
    if(threadIdx.x==0){
        for(j=0;j<(r1_dict_cnt);j++){
            d_r1_dict_value[(j)*(sampleTimes+1)+blockIdx.x+1+samplesCompleted] = s_r1_value_list[j];
        }
    }
}
/* One thread per keyword: computes the Z-score and P-value of the observed
   count (column 0 of d_r1_dict_value) against the sampleTimes sampled counts
   (columns 1..sampleTimes). Row layout: [r1_dict_cnt][sampleTimes+1]. */
__global__ void deviceZP(int * d_r1_dict_value, int sampleTimes,int r1_dict_cnt, float * d_z_score, float * d_p_value){
    int x, j, i = blockDim.x * blockIdx.x + threadIdx.x;
    float mean =0,sd;
    if(i<r1_dict_cnt){
        x = 0;
        sd = 0;
        // Sum of the sampled counts for keyword i.
        for(j=1;j<=sampleTimes;j++){
            x += d_r1_dict_value[(i*(sampleTimes+1))+j];
        }
        mean = x/(sampleTimes);  // NOTE(review): integer division truncates the mean — confirm intended
        // Population standard deviation of the samples.
        for(j=1;j<=sampleTimes;j++){
            sd += (d_r1_dict_value[(i*(sampleTimes+1))+j]-mean)*(d_r1_dict_value[(i*(sampleTimes+1))+j]-mean);
        }
        sd = sqrt(sd/(sampleTimes));
        //if(std != 0)
        //Better approach to check if standard deviation is equal to zero or not, because standard deviation is floating point
        if(fabs(sd)>pow(10.0,-7))
            d_z_score[i] = (d_r1_dict_value[i*(sampleTimes+1)] - mean)/sd;
        else{
            // Degenerate case: all sampled counts equal the mean.
            if(d_r1_dict_value[i*(sampleTimes+1)] != (int)mean)
                d_z_score[i] = d_r1_dict_value[i*(sampleTimes+1)]*100;
            else
                d_z_score[i] = -100;
        }
        // One-sided P-value from the normal CDF approximation.
        d_p_value[i] = 1-CND(d_z_score[i]);
        //printf("Z-score = %f, p-value = %f\n",d_z_score[i],d_p_value[i]);
    }
}
//Function to partition records while sorting based on Z-score, called by quickSort
//Partitions a[l..r] for a DESCENDING sort around pivot a[l], mirroring every
//swap in the parallel index[] array. Returns the pivot's final position.
int partition( float a[], int index[], int l, int r) {
    int i, j, t;
    float temp;
    float pivot = a[l];
    i = l; j = r+1;
    while( 1)
    {
        /* Fix: test the bound BEFORE reading a[i]. The original evaluated
           a[i] first, reading a[r+1] out of bounds whenever every element
           in a[l+1..r] was >= pivot. */
        do ++i; while( i <= r && a[i] >= pivot );
        do --j; while( a[j] < pivot );  /* stops at l at the latest: a[l] == pivot */
        if( i >= j ) break;
        temp = a[i]; a[i] = a[j]; a[j] = temp;
        t = index[i];
        index[i] = index[j];
        index[j] = t;
    }
    /* Move the pivot into its final slot j. */
    temp = a[l]; a[l] = a[j]; a[j] = temp;
    t = index[l];
    index[l] = index[j];
    index[j] = t;
    return j;
}
//Function to quicksort the records based on Z-score
//Recursive quicksort over a[l..r] (descending order, via partition),
//keeping the parallel index[] array in step with every move.
void quickSort(float a[], int index[], int l, int r)
{
    if (l >= r)
        return;  /* zero or one element: already sorted */
    int p = partition(a, index, l, r);
    quickSort(a, index, l, p - 1);
    quickSort(a, index, p + 1, r);
}
int main(int argc, char *argv[])
{
if(argc!=7){
printf("\nIncorrect arguments passed, Please pass <Compounds with interactions>, <Compounds without interactions>, <Proteins with interactions>, <Proteins without interactions>, <PMID Substances>, <para.txt>, <Drug Name> as arguments\n");
exit(1);
}
FILE *inp_r1, *inp_r2, *inp_para, *op1, *op2;
char rmode[2] = "r";
char str1[10000];
char *split0,*split1, *saveptr, *saveptr1, *saveptr2;
char *inp2_list[100000];
//char * r1_dict_list;
char filename1[100], filename2[100], cutoffstr[20], pvaluestr[20];
size_t len = 0;
Dict d_cinp1;
int cutoff, sampleTimes;
float p_value, elapsedTime, totalTime=0;
//float z_score;
int i=0, j=0, k=0, x;
int r1_cnt, r2_cnt, r1_dict_cnt, threadCount, sampleStrides;
printf("Drug name = %s\n",argv[6]);
printf("Read input files\n");
cudaEvent_t start, stop;
d_cinp1 = DictCreate();
//Read the parameters from para.txt - 4th argument
inp_para = fopen(argv[5],rmode);
if (inp_para == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[5]);
exit(1);
}
while(1)
{
fscanf(inp_para,"%[^\n]%*c", str1);
if(feof(inp_para)) break;
split0 = strtok_r(str1, "\t", &saveptr);
split1 = strtok_r(NULL, "\t", &saveptr);
removeChar(split0,'\r');
removeChar(split1,'\r');
if( strcmp(split0,"sampleTimes") == 0)
{
char temp[20];
strcpy(temp, split1);
sampleTimes = atoi(temp);
}
else if( strcmp(split0,"cutoff") == 0)
{
char temp[20];
strcpy(temp, split1);
strcpy(cutoffstr,temp);
cutoff = atoi(temp);
}
else if( strcmp(split0,"p_value") == 0)
{
char temp[20];
strcpy(temp, split1);
strcpy(pvaluestr,temp);
p_value = atof(temp);
}
/*else if( strcmp(split0,"z_score") == 0)
{
char temp[20];
strcpy(temp, split1);
z_score = atof(temp);
}*/
}
fclose(inp_para);
printf("Number of Samples = %d\n",sampleTimes);
if(sampleTimes <=0){
printf("Incorrect number of samples specified = %d, value of atleast 1 is expected\n", sampleTimes);
exit(0);
}
// Reading the dictionary of compounds of result 1 - 1st argument
// Create and populate dictionary 'd_cinp1' while reading the records
inp_r1 = fopen(argv[1], rmode);
if (inp_r1 == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[1]);
exit(1);
}
r1_cnt = 0;
r1_dict_cnt = 0;
while(1){
fscanf(inp_r1, "%[^\n]%*c", str1);
if( feof(inp_r1)) break;
removeChar(str1,'\r');
r1_cnt++;
len = strlen(str1);
for(i=0;(i<len);i++){
char *newstr = (char*)malloc(len+1);
if(newstr==NULL){
printf("malloc to newstr failed\n");
exit(0);
}
j=0;
while(str1[i] != '~'){
newstr[j++] = str1[i++];
}
newstr[j] = '\0';
struct elt * e = DictSearch(d_cinp1,newstr);
if(e!=NULL){
e->value++;
}
else{
DictInsert(d_cinp1,newstr,1);
r1_dict_cnt++;
}
free(newstr);
}
}
fclose(inp_r1);
// Reading the list of result 2- 2nd argument
inp_r2 = fopen(argv[2], rmode);
if (inp_r2 == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[2]);
exit(1);
}
r2_cnt = 0;
while (1)
{
fscanf(inp_r2, "%[^\n]%*c", str1);
if( feof(inp_r2)) break;
removeChar(str1,'\r');
inp2_list[r2_cnt] = (char*)malloc(strlen(str1)+1);
if(inp2_list[r2_cnt]==NULL){
printf("malloc to inp2_list[r2_cnt] failed\n");
exit(0);
}
strcpy(inp2_list[r2_cnt++],str1);
}
fclose(inp_r2);
printf("Input files read completed\n");
printf("Sample size = %d\n", r1_cnt);
//Sort inp2_list based on the number of tokens or length
qsort(inp2_list,r2_cnt,sizeof(char *), sort);
printf("Pre-process records for kernel launch\n");
//r1_dict_list = (char*)malloc(80*r1_dict_cnt);
//populate value list for dictionary 1
cudaError_t err = cudaSuccess;
int * r1_dict_value;
//pinned memory for optimized usage of memory transfer bandwidth
err = cudaMallocHost((void**)&r1_dict_value, sizeof(int)*r1_dict_cnt*(sampleTimes+1));
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate r1_dict_value host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Device value list for dictionary 1
int *d_r1_dict_value;
Node * r1_dict_keys = (Node*)malloc(sizeof(Node)*r1_dict_cnt);
if(r1_dict_keys==NULL){
printf("malloc to r1_dict_keys failed\n");
exit(0);
}
j=0;
k=0;
for(i=0;i<d_cinp1->size;i++)
{
if(d_cinp1->table[i]!=0){
while(1)
{
//char * key = strdup(d_cinp1->table[i]->key);
//int subind = 0;
/*for(subind=0;subind<strlen(key);subind++){
r1_dict_list[k++] = key[subind];
}
r1_dict_list[k++] = '~';
r1_dict_value[j*(sampleTimes+1)] = d_cinp1->table[i]->value;
*/
//for(subind=0;subind<strlen(key);subind++){
//r1_dict_keys[j].key[x++] = key[subind];
//}
strcpy(r1_dict_keys[j].key,d_cinp1->table[i]->key);
r1_dict_keys[j].index = j;
r1_dict_keys[j].next = NULL;
r1_dict_value[j*(sampleTimes+1)] = d_cinp1->table[i]->value;
j++;
//free(key);
if(d_cinp1->table[i]->next!= NULL)
d_cinp1->table[i] = d_cinp1->table[i]->next;
else
break;
}
}
}
//r1_dict_list[k] = '^';
DictDestroy(d_cinp1);
/*x=0;
while(r1_dict_list[x] != '^'){
while(r1_dict_list[x] != '~')
printf("%c",r1_dict_list[x++]);
printf("\n");
x++;
}*/
//Strip off the number of tokens from every record in list 2.
for(i=0;i<r2_cnt;i++){
split0 = strtok_r(inp2_list[i], ";", &saveptr1);
}
//Process the records for shipping to kernel
char * temp1 = (char*) malloc(1000*r2_cnt*sizeof(char));
if(temp1==NULL){
printf("temp1 malloc failed\n");
exit(0);
}
char * d_r2_str;
j=0;
for(i=0;i<r2_cnt;i++){
for(k=0,x=0;k<1000;k++){
while(x<strlen(inp2_list[i])){
temp1[j++] = inp2_list[i][x++];
k++;
}
temp1[j++] = '^';
}
free(inp2_list[i]);
}
//char* d_r1_dict_list;
//Allocate global memory for dictionary 1 keywords
/*err = cudaMalloc((void **)&d_r1_dict_list,80*sizeof(char)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_list (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_list,r1_dict_list,80*sizeof(char)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_list (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}*/
//Allocate global memory for input list 2 records
err = cudaMalloc((void **)&d_r2_str,1000*sizeof(char)*r2_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r2_str,temp1,1000*sizeof(char)*r2_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate global memory for dictionary 1 value list
err = cudaMalloc((void **)&d_r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_value,r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
Node * d_r1_dict_keys = NULL;
err = cudaMalloc((void **)&d_r1_dict_keys,sizeof(Node)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_keys,r1_dict_keys,sizeof(Node)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Sampling for compounds begin\n");
for(i=0;i<sampleTimes;i=i+256){
threadCount = (r1_cnt>1024)?1024:r1_cnt;
//sampleStrides: maximum number of strides every thread needs to take for sampling
sampleStrides = ceil(r1_cnt/threadCount);
printf("Kernel deviceDDI launched with %d blocks of %d threads each\n", (sampleTimes-i)>256?256:(sampleTimes-i), threadCount);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
deviceDDI<<<(sampleTimes-i)>256?256:(sampleTimes-i), threadCount, (sizeof(int)*(r1_dict_cnt))>>>(d_r2_str, d_r1_dict_value, sampleTimes, r1_cnt, r2_cnt, r1_dict_cnt, d_r1_dict_keys, sampleStrides, threadCount,i);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
err = cudaGetLastError();
if(err != cudaSuccess){
fprintf(stderr,"Failed to launch deviceDDI kernel device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(r1_dict_value,d_r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value from device to Host(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_value, r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value from host to device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_keys,r1_dict_keys,sizeof(Node)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r2_str,temp1,1000*sizeof(char)*r2_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
printf("Sampling for compounds completed\n");
err = cudaFree(d_r2_str);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_r1_dict_keys);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(temp1);
float *d_z_score;
float *z_score_arr;
//Allocate array for Z-score, pinned memory for optimized usage of memory transfer bandwidth
err = cudaMallocHost((void**)&z_score_arr, sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate z-score host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_z_score,sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_z_score (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_p_value;
float *p_value_arr;
//Allocate array for P-value, pinned memory for optimized usage of memory transfer bandwidth
err = cudaMallocHost((void**)&p_value_arr, sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate p-value host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_p_value,sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_p_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Z-score and P-value calculation for Compounds begin\n");
printf("Kernel deviceZP launched with %d blocks of %d threads each\n", (int)ceil(r1_dict_cnt/256.0), 256);
totalTime += elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
deviceZP<<<ceil(r1_dict_cnt/256.0), 256>>>(d_r1_dict_value, sampleTimes, r1_dict_cnt, d_z_score, d_p_value);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
err = cudaGetLastError();
if(err != cudaSuccess){
fprintf(stderr,"Failed to launch deviceZP kernel device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Z-score, P-value calculation completed\n");
err = cudaMemcpy(z_score_arr,d_z_score,sizeof(float)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy from z-score device to host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(p_value_arr,d_p_value,sizeof(float)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy from p-value device to host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_r1_dict_value);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_p_value);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_p_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_z_score);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_z_score (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Initialize the sortedIndex array, as sortedIndex will have the values sorted with quickSort based on descending order of Z-score
//After sorting sortedIndex contains the new index of Z-score.
int sortedIndex[r1_dict_cnt];
for(i=0;i<r1_dict_cnt;i++){
sortedIndex[i] = i;
}
quickSort(z_score_arr, sortedIndex, 0, r1_dict_cnt-1);
//Read PMID list file and populate dictionary d_pmid
/*inp_pmid = fopen(argv[5], rmode);
if (inp_pmid == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[5]);
exit(1);
}
while (1)
{
fscanf(inp_pmid, "%[^\n]%*c", str1);
removeChar(str1,'\r');
if(feof(inp_pmid)) break;
split0 = strtok_r(str1, ";", &saveptr);
split1 = strtok_r(NULL, ";", &saveptr);
DictInsertC(d_drugs_app_wdrwn, split0, split1);
}
fclose(inp_pmid);
*/
printf("Write extracted compounds to output files\n");
//Write to output files
strcpy(filename1, argv[6]);
strcat(filename1, "_temp_result1_Substance_compounds_cutoff_");
strcat(filename1,cutoffstr);
strcat(filename1,"_p_");
strcat(filename1,pvaluestr);
strcat(filename1,".txt");
strcpy(filename2, argv[6]);
strcat(filename2, "_temp_result1_Substance_compounds_cutoff_");
strcat(filename2,cutoffstr);
strcat(filename2,".txt");
op1 = fopen(filename1, "w");
fprintf(op1,"Term Pair\tMeSHID\tDistribution\tZ-Score\tP-value\n");
op2 = fopen(filename2, "w");
fprintf(op2,"Term Pair\tMeSHID\tDistribution\tZ-Score\tP-value\n");
k=0;
for(i=0;i<r1_dict_cnt;i++){
//char key[80];
//j=0;
/*while(r1_dict_list[k] != '~'){
key[j++] = r1_dict_list[k++];
}
key[j] = '\0';
*/
if((r1_dict_value[(sortedIndex[i]*(sampleTimes+1))]>=cutoff) && (p_value_arr[sortedIndex[i]]<=p_value)){
fprintf(op1,"%s;%s\t[",argv[6],r1_dict_keys[i].key);
for(j=0;j<sampleTimes;j++)
fprintf(op1,"%d, ",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j]);
fprintf(op1,"%d]\t%f\t%f\n",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j], z_score_arr[i],p_value_arr[sortedIndex[i]]);
}
if((r1_dict_value[(sortedIndex[i]*(sampleTimes+1))]>=cutoff) && (p_value_arr[sortedIndex[i]]<=1.0)){
fprintf(op2,"%s;%s\t[",argv[6],r1_dict_keys[i].key);
for(j=0;j<=sampleTimes;j++)
fprintf(op2,"%d, ",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j]);
fprintf(op2,"%d]\t%f\t%f\n",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j], z_score_arr[i],p_value_arr[sortedIndex[i]]);
}
k++;
}
fclose(op1);
fclose(op2);
printf("Compounds output files written\n");
//free(r1_dict_list);
free(r1_dict_keys);
err = cudaFreeHost(p_value_arr);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free pinned host p_value_arr (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFreeHost(z_score_arr);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free pinned host z_score_arr (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFreeHost(r1_dict_value);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free pinned host r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
d_cinp1 = DictCreate();
printf("Processing proteins\n");
printf("Read input files\n");
// Reading the dictionary of proteins of result 1 - 3rd argument
inp_r1 = fopen(argv[3], rmode);
if (inp_r1 == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[3]);
exit(1);
}
//r1_cnt = 0;
r1_dict_cnt = 0;
while(1){
fscanf(inp_r1, "%[^\n]%*c", str1);
if( feof(inp_r1)) break;
removeChar(str1,'\r');
//r1_cnt++;
len = strlen(str1);
for(i=0;(i<len);i++){
char *newstr = (char*)malloc(len+1);
if(newstr==NULL){
printf("newstr malloc failed\n");
exit(0);
}
j=0;
while(str1[i] != '~'){
newstr[j++] = str1[i++];
}
newstr[j] = '\0';
struct elt * e = DictSearch(d_cinp1,newstr);
if(e!=NULL){
e->value++;
}
else{
DictInsert(d_cinp1,newstr,1);
r1_dict_cnt++;
}
free(newstr);
}
}
fclose(inp_r1);
// Reading the list of result 2- 4th argument
inp_r2 = fopen(argv[4], rmode);
if (inp_r2 == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[4]);
exit(1);
}
r2_cnt = 0;
while (1)
{
fscanf(inp_r2, "%[^\n]%*c", str1);
if( feof(inp_r2)) break;
removeChar(str1,'\r');
inp2_list[r2_cnt] = (char*)malloc(strlen(str1)+1);
if(inp2_list[r2_cnt]==NULL){
printf("inp2_list[r2_cnt] malloc failed\n");
exit(0);
}
strcpy(inp2_list[r2_cnt++],str1);
}
fclose(inp_r2);
printf("Input files read completed\n");
//Sort inp2_list based on the number of tokens
qsort(inp2_list,r2_cnt,sizeof(char *), sort);
printf("Pre-process records for kernel launch\n");
//r1_dict_list = (char*)malloc(80*r1_dict_cnt);
//populate value list for dictionary 1
//int r1_dict_value[r1_dict_cnt*(sampleTimes+1)];
//pinned memory for optimized usage of memory transfer bandwidth
err = cudaMallocHost((void**)&r1_dict_value, sizeof(int)*r1_dict_cnt*(sampleTimes+1));
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate r1_dict_value host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Process the input data for shipping
free(r1_dict_keys);
r1_dict_keys = (Node*)malloc(sizeof(Node)*r1_dict_cnt);
if(r1_dict_keys==NULL){
printf("r1_dict_keys malloc failed\n");
exit(0);
}
j=0;
k=0;
for(i=0;i<d_cinp1->size;i++)
{
if(d_cinp1->table[i]!=0){
while(1)
{
//char * key = strdup(d_cinp1->table[i]->key);
/*ind2_r1_dict_key[2*j] = k;
k += strlen(key);
ind2_r1_dict_key[(2*j)+1] = k;
*/
//int subind = 0;
//for(x=ind2_r1_dict_key[2*j];x<ind2_r1_dict_key[(2*j)+1];x++){
/*for(subind=0;subind<strlen(key);subind++){
r1_dict_list[k++] = key[subind];
}
r1_dict_list[k++] = '~';
r1_dict_value[j*(sampleTimes+1)] = d_cinp1->table[i]->value;
*/
strcpy(r1_dict_keys[j].key,d_cinp1->table[i]->key);
r1_dict_keys[j].index = j;
r1_dict_keys[j].next = NULL;
r1_dict_value[j*(sampleTimes+1)] = d_cinp1->table[i]->value;
j++;
//free(key);
if(d_cinp1->table[i]->next!= NULL)
d_cinp1->table[i] = d_cinp1->table[i]->next;
else
break;
}
}
}
//r1_dict_list[k] = '^';
DictDestroy(d_cinp1);
//Strip off the number of tokens from list 2 records
for(i=0;i<r2_cnt;i++){
split0 = strtok_r(inp2_list[i], ";", &saveptr2);
}
temp1 = (char*) malloc(1000*r2_cnt*sizeof(char));
if(temp1 == NULL){
printf("temp1 malloc failed\n");
exit(0);
}
j=0;
for(i=0;i<r2_cnt;i++){
/*ind2_r2_str[2*i] = k;
k += strlen(inp2_list[i]);
ind2_r2_str[(2*i)+1] = k;
*/
//for(j=ind2_r2_str[2*i];j<ind2_r2_str[(2*i)+1];j++){
for(k=0,x=0;k<1000;k++){
while(x<strlen(inp2_list[i])){
temp1[j++] = inp2_list[i][x++];
k++;
}
if(k<1000)
temp1[j++] = '^';
}
free(inp2_list[i]);
}
err = cudaMalloc((void **)&d_r1_dict_keys,sizeof(Node)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_keys,r1_dict_keys,sizeof(Node)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate global memory for list 2 records
err = cudaMalloc((void **)&d_r2_str,1000*sizeof(char)*r2_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r2_str,temp1,1000*sizeof(char)*r2_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate global memory for index of dictionary 1 value list
err = cudaMalloc((void **)&d_r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_value,r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Sampling for proteins begin\n");
for(i=0;i<sampleTimes;i=i+256){
threadCount = (r1_cnt>1024)?1024:r1_cnt;
//sampleStrides: maximum number of strides every thread need to take for sampling
sampleStrides = ceil(r1_cnt/threadCount);
printf("Kernel deviceDDI launched with %d blocks of %d threads each\n", (sampleTimes-i)>256?256:(sampleTimes-i), threadCount);
totalTime += elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
deviceDDI<<<(sampleTimes-i)>256?256:(sampleTimes-i), threadCount, (sizeof(int)*(r1_dict_cnt))>>>( d_r2_str, d_r1_dict_value, sampleTimes, r1_cnt, r2_cnt, r1_dict_cnt, d_r1_dict_keys, sampleStrides, threadCount,i);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
err = cudaGetLastError();
if(err != cudaSuccess){
fprintf(stderr,"Failed to launch deviceDDI kernel device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_keys,r1_dict_keys,sizeof(Node)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(r1_dict_value,d_r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value from device to Host(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_value, r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value from host to device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r2_str,temp1,1000*sizeof(char)*r2_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
printf("Sampling completed\n");
err = cudaFree(d_r1_dict_keys);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_r2_str);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(temp1);
//Allocate Z-score array pinned memory for optimized usage of memory transfer bandwidth
err = cudaMallocHost((void**)&z_score_arr, sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate z-score host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//z_score_arr = (float*)malloc(sizeof(float)*r1_dict_cnt);
float * d_z_score_p;
err = cudaMalloc((void **)&d_z_score_p,sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_z_score_p (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate P-value array pinned memory for optimized usage of memory transfer bandwidth
err = cudaMallocHost((void**)&p_value_arr, sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate p-value host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_p_value,sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_p_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_r1_dict_keys);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Z-score, P-value calculation for proteins begin\n");
printf("Kernel deviceZP launch with %d blocks of %d threads each\n", (int)ceil(r1_dict_cnt/256.0),256);
totalTime += elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
deviceZP<<<ceil(r1_dict_cnt/256.0), 256>>>(d_r1_dict_value, sampleTimes, r1_dict_cnt, d_z_score_p, d_p_value);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
err = cudaGetLastError();
if(err != cudaSuccess){
fprintf(stderr,"Failed to launch deviceZP kernel device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Z-score, P-value calculation for proteins completed\n");
printf( "\n******** Total Running Time of Kernel = %0.5f seconds ******* \n", (elapsedTime+totalTime)/1000);
printf("Copy output data to host memory\n");
err = cudaMemcpy(p_value_arr,d_p_value,sizeof(float)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy from p-value device to host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_r1_dict_value);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_p_value);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_p_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//float *d_z_score;
//float *z_score_arr1 = (float*)malloc(sizeof(float)*r1_dict_cnt);;
err = cudaMemcpy(z_score_arr,d_z_score_p,sizeof(float)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy from z-score device to host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_z_score_p);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_z_score (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Initialize sortedIndex2, this will hold correct index of the dictionary 1 records after sorting based on descending order of Z-score
int sortedIndex2[r1_dict_cnt];
for(i=0;i<r1_dict_cnt;i++){
sortedIndex2[i] = i;
}
//Sort the array based on descending order of Z-score
quickSort(z_score_arr, sortedIndex2, 0, r1_dict_cnt-1);
//Write to output files
strcpy(filename1, argv[6]);
strcat(filename1, "_temp_result1_Substance_proteins_cutoff_");
strcat(filename1,cutoffstr);
strcat(filename1,"_p_");
strcat(filename1,pvaluestr);
strcpy(filename2, argv[6]);
strcat(filename2, "_temp_result1_Substance_proteins_cutoff_");
strcat(filename2,cutoffstr);
strcat(filename2,".txt");
printf("Write With star output files\n");
op1 = fopen(filename1, "w");
fprintf(op1,"Term Pair\tMeSHID\tDistribution\tZ-Score\tP-value\n");
op2 = fopen(filename2, "w");
fprintf(op2,"Term Pair\tMeSHID\tDistribution\tZ-Score\tP-value\n");
k=0;
for(i=0;i<r1_dict_cnt;i++){
//char key[80];
//j=0;
/*while(r1_dict_list[k] != '~'){
key[j++] = r1_dict_list[k++];
}*/
//key[j] = '\0';
if((r1_dict_value[(sortedIndex2[i]*(sampleTimes+1))]>=cutoff) && (p_value_arr[sortedIndex2[i]]<=p_value)){
fprintf(op1,"%s;%s\t[",argv[6],r1_dict_keys[i].key);
for(j=0;j<sampleTimes;j++)
fprintf(op1,"%d, ",r1_dict_value[(sortedIndex2[i]*(sampleTimes+1))+j]);
fprintf(op1,"%d]\t%f\t%f\n",r1_dict_value[(sortedIndex2[i]*(sampleTimes+1))+j], z_score_arr[i],p_value_arr[sortedIndex2[i]]);
}
if((r1_dict_value[(sortedIndex2[i]*(sampleTimes+1))]>=cutoff) && (p_value_arr[sortedIndex2[i]]<=1.0)){
fprintf(op2,"%s;%s\t[",argv[6],r1_dict_keys[i].key);
for(j=0;j<=sampleTimes;j++)
fprintf(op2,"%d, ",r1_dict_value[(sortedIndex2[i]*(sampleTimes+1))+j]);
fprintf(op2,"%d]\t%f\t%f\n",r1_dict_value[(sortedIndex2[i]*(sampleTimes+1))+j], z_score_arr[i],p_value_arr[sortedIndex2[i]]);
}
k++;
}
fclose(op1);
fclose(op2);
printf("Processing completed\n");
//free(r1_dict_list);
free(r1_dict_keys);
//free(temp1);
err = cudaFreeHost(p_value_arr);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free pinned host p_value_arr (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFreeHost(z_score_arr);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free pinned host z_score_arr (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFreeHost(r1_dict_value);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free pinned host r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
} |
8,375 | #include "includes.h"
// Tiled matrix multiply C = A * B for square, row-major width x width matrices.
// Grid/block layout: 2D blocks of TILE_WIDTH x TILE_WIDTH threads, grid sized
// to cover the output (partial tiles allowed).
// Fix: the original iterated width/TILE_WIDTH tiles (truncating) and stored
// C[row*width+col] unconditionally, so any width not a multiple of TILE_WIDTH
// skipped the remainder tile and read/wrote out of bounds.
__global__ void matrixMul(float* A, float* B, float* C, int width)
{
    __shared__ float As[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Bs[TILE_WIDTH][TILE_WIDTH];
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    float c_val = 0.0f;
    // ceil-division so the final partial tile is processed too
    int numTiles = (width + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int i = 0; i < numTiles; i++)
    {
        int aCol = i * TILE_WIDTH + threadIdx.x;
        int bRow = i * TILE_WIDTH + threadIdx.y;
        // Zero-fill out-of-range entries so they contribute nothing to the dot product.
        As[threadIdx.y][threadIdx.x] = (row < width && aCol < width) ? A[row * width + aCol] : 0.0f;
        Bs[threadIdx.y][threadIdx.x] = (bRow < width && col < width) ? B[bRow * width + col] : 0.0f;
        __syncthreads(); // tile fully loaded before anyone reads it
        for (int k = 0; k < TILE_WIDTH; k++)
            c_val += As[threadIdx.y][k] * Bs[k][threadIdx.x];
        __syncthreads(); // all reads done before the next iteration overwrites smem
    }
    // Guard the store: edge blocks have threads outside the matrix.
    if (row < width && col < width)
        C[row * width + col] = c_val;
} |
8,376 | //
// Created by zhangjian on 19-6-20.
//
|
8,377 | //multiplication of two matrices using a kernel with a 2d grid and 2d blocks
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Computes C = A * B for row-major integer matrices:
// A is rowA x colA, B is colA x colB, C is rowA x colB.
// One thread per output element on a 2D grid; the grid may overshoot
// the matrix, so out-of-range threads exit early.
__global__ void multiMatrix(int* A, int* B, int*C, int colA, int colB, int rowA){
    const int col = blockDim.x * blockIdx.x + threadIdx.x; // column of C
    const int row = blockDim.y * blockIdx.y + threadIdx.y; // row of C
    if (col >= colB || row >= rowA)
        return;
    int acc = 0;
    for (int k = 0; k < colA; ++k)
        acc += A[row * colA + k] * B[k * colB + col];
    C[row * colB + col] = acc;
}
int main(){
    // One thread per output element; 16x16 thread blocks.
    const int BLOCK_SIZE = 16;
    // Matrix shapes: A is rowA x colA, B is rowB x colB, C is rowA x colB.
    // const dims make the host buffers plain fixed-size arrays (the original
    // non-const ints produced non-standard VLAs in C++).
    const int rowA = 15;
    const int colA = 15;
    const int rowB = colA; // inner dimensions must agree for A*B
    const int colB = 10;
    // Host buffers
    int h_A[colA*rowA], h_B[colB*rowB], h_C[colB*rowA];
    // Device buffers
    int *d_A, *d_B, *d_C;
    cudaError_t err;
    // Allocate device memory, checking each call (the original ignored all
    // CUDA return codes, so failures surfaced only as garbage output).
    if ((err = cudaMalloc((void**)&d_A, (colA*rowA)*sizeof(int))) != cudaSuccess ||
        (err = cudaMalloc((void**)&d_B, (colB*rowB)*sizeof(int))) != cudaSuccess ||
        (err = cudaMalloc((void**)&d_C, (colB*rowA)*sizeof(int))) != cudaSuccess){
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Initialize inputs with 1..N so results are easy to eyeball.
    for(int i = 0; i < (colA*rowA); i++){
        h_A[i] = i+1;
    }
    for(int i = 0; i < (colB*rowB); i++){
        h_B[i] = i+1;
    }
    // Copy host memory to device memory
    if ((err = cudaMemcpy(d_A, h_A, (colA*rowA)*sizeof(int), cudaMemcpyHostToDevice)) != cudaSuccess ||
        (err = cudaMemcpy(d_B, h_B, (colB*rowB)*sizeof(int), cudaMemcpyHostToDevice)) != cudaSuccess){
        fprintf(stderr, "host-to-device copy failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // 2D grid sized by ceil-division so partial tiles are covered.
    unsigned int gridRows = (rowA + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int gridCols = (colB + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(gridCols, gridRows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // Launch the kernel
    multiMatrix<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, colA, colB, rowA);
    // Kernel launches are asynchronous and report config errors via cudaGetLastError.
    if ((err = cudaGetLastError()) != cudaSuccess){
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // This blocking copy also synchronizes with the kernel.
    if ((err = cudaMemcpy(h_C, d_C, (colB*rowA)*sizeof(int), cudaMemcpyDeviceToHost)) != cudaSuccess){
        fprintf(stderr, "result copy failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Print C, one row per line.
    for(int i = 0; i < (rowA*colB); i++){
        printf("%d ", h_C[i]);
        if(((i+1) % colB) == 0)
            printf("\n");
    }
    // Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
8,378 | #include "includes.h"
// Column (vertical) convolution pass over an 8-bit image using the 1-D
// constant-memory mask Global_Mask (Mask_size taps, radius n = Mask_size/2).
// Each thread block caches its tile plus an upper and lower apron of n rows
// in shared memory, then convolves along y.
// NOTE(review): `result[pos] = sum` stores an int into unsigned char with no
// clamping — confirm the mask is normalized so sum stays within [0, 255].
// NOTE(review): there is no guard on pos when the grid overshoots the image;
// callers presumably launch an exactly-covering grid — verify.
__global__ void sobelFilterShared2(unsigned char *data, unsigned char *result, int width, int height){
    const int ty = threadIdx.y;
    const int tx = threadIdx.x;
    // mask radius; each thread loads two values (upper + lower) into smem
    const int n = Mask_size / 2;
    // 1-D shared array holding the block's tile plus 2*n apron rows
    __shared__ int s_data[BLOCKSIZE * (BLOCKSIZE + Mask_size * 2)];
    // global mem address of the current thread in the whole grid
    const int pos = tx + blockIdx.x * blockDim.x + ty * width + blockIdx.y * blockDim.y * width;
    int y; // image-row coordinate being bounds-checked
    // row of this thread in the original image
    const int y0 = ty + blockIdx.y * blockDim.y;
    const int shift = ty * (BLOCKSIZE);
    // case1: upper apron — zero-fill when the fetched row is above the image
    y = y0 - n;
    if ( y < 0 )
        s_data[tx + shift] = 0;
    else
        s_data[tx + shift] = data[ pos - (width * n)];
    // case2: lower apron — the fetched row is y0 + n, so that is what must
    // be tested. Fix: the original recomputed y = y0 - n (copy-paste of
    // case1), so bottom-edge blocks read data[] past the end of the image.
    y = y0 + n;
    const int shift1 = shift + blockDim.y * BLOCKSIZE;
    if ( y > height - 1)
        s_data[tx + shift1] = 0;
    else
        s_data[tx + shift1] = data[pos + (width * n)];
    __syncthreads();
    // convolution along y over the cached rows
    int sum = 0;
    for (int i = 0; i <= n*2; i++)
        sum += s_data[tx + (ty+i) * BLOCKSIZE] * Global_Mask[i];
    result[pos] = sum;
} |
8,379 | #include <stdio.h>
// Enumerate all CUDA devices and print each one's name.
int main(int argc, char* argv[]) {
    int count = 0;
    cudaDeviceProp prop;
    // Check the API result: with no driver/device present, count is
    // unreliable and the original silently printed nothing (or worse).
    cudaError_t err = cudaGetDeviceCount(&count);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    for (int i = 0; i < count; i++) {
        err = cudaGetDeviceProperties(&prop, i);
        if (err != cudaSuccess) {
            fprintf(stderr, "cudaGetDeviceProperties(%d) failed: %s\n", i, cudaGetErrorString(err));
            return 1;
        }
        printf("Device name: %s\n", prop.name);
    }
    return 0;
}
|
8,380 |
// Update the electric field in every cell from the current density:
// Ecx[i] -= Jx[i] * dt. Written as a grid-stride loop, so it is correct
// for any launch configuration (ncells may exceed the total thread count).
__global__ void calc_field(int ncells, float dt, float *Jx, float *Ecx){
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < ncells; i += stride) {
        Ecx[i] -= Jx[i] * dt;
    }
}
|
8,381 | /*
Hello world of wave propagation in CUDA. FDTD acoustic wave propagation in a homogeneous medium. Second order accurate in time and eighth order in space.
Oleg Ovcharenko
Vladimir Kazei, 2019
oleg.ovcharenko@kaust.edu.sa
vladimir.kazei@kaust.edu.sa
*/
#include "stdio.h"
#include "math.h"
#include "stdlib.h"
#include "string.h"
/*
Add this to c_cpp_properties.json if linting isn't working for CUDA libraries
"includePath": [
"/usr/local/cuda-10.0/targets/x86_64-linux/include",
"${workspaceFolder}/**"
],
*/
#include "cuda.h"
#include "cuda_runtime.h"
// Check error codes for CUDA functions
#define CHECK(call) \
{ \
cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
} \
}
#define PI 3.14159265359
// Padding for FD scheme
#define HALO 4
#define HALO2 8
// FD stencil coefficients
#define a0 -2.8472222f
#define a1 1.6000000f
#define a2 -0.2000000f
#define a3 0.0253968f
#define a4 -0.0017857f
// Block dimensions
#define BDIMX 32
#define BDIMY 32
// Shared memory tile dimensions
#define SDIMX BDIMX + HALO2
#define SDIMY BDIMY + HALO2
// Constant device memory
__constant__ float c_coef[5]; /* coefficients for 8th order fd */
__constant__ int c_isrc; /* source location, ox */
__constant__ int c_jsrc; /* source location, oz */
__constant__ int c_nx; /* x dim */
__constant__ int c_ny; /* y dim */
__constant__ int c_nt; /* time steps */
__constant__ float c_dt2dx2; /* dt2 / dx2 for fd*/
// Save a device wavefield snapshot as raw floats, filename snap/snap_tag_it_ny_nx
void saveSnapshotIstep(int it, float *data, int nx, int ny, const char *tag)
{
    /*
    it      :timestep id
    data    :pointer to an array in device memory
    nx, ny  :model dimensions
    tag     :user-defined file identifier
    */
    // Host staging buffer for the device wavefield
    unsigned int isize = nx * ny * sizeof(float);
    float *iwave = (float *)malloc(isize);
    if (iwave == NULL)
    {
        fprintf(stderr, "saveSnapshotIstep: malloc of %u bytes failed\n", isize);
        return;
    }
    CHECK(cudaMemcpy(iwave, data, isize, cudaMemcpyDeviceToHost));
    // Build "snap/snap_<tag>_<it>_<ny>_<nx>"; snprintf bounds the write
    // (the original sprintf into 32 bytes could overflow with a long tag).
    char fname[64];
    snprintf(fname, sizeof(fname), "snap/snap_%s_%i_%i_%i", tag, it, ny, nx);
    // "wb": this is a raw binary dump, so open in binary mode
    // (identical on POSIX; matters on platforms that translate "\n").
    FILE *fp_snap = fopen(fname, "wb");
    if (fp_snap == NULL)
    {
        fprintf(stderr, "saveSnapshotIstep: cannot open %s (does snap/ exist?)\n", fname);
        free(iwave);
        return;
    }
    fwrite(iwave, sizeof(float), nx * ny, fp_snap);
    printf("\tSave...%s: nx = %i ny = %i it = %i tag = %s\n", fname, nx, ny, it, tag);
    fflush(stdout);
    fclose(fp_snap);
    free(iwave);
    return;
}
// Inject the source term: the single thread whose global coordinates match
// the source location (c_isrc, c_jsrc) adds the wavelet sample for step it.
__global__ void kernel_add_wavelet(float *d_u, float *d_wavelet, int it)
{
    /*
    d_u       :device wavefield that receives the source term
    d_wavelet :device array holding the source signature
    it        :time step id
    */
    const unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
    // All threads except the one at the source location do nothing.
    if (ix != c_isrc || iy != c_jsrc)
        return;
    d_u[iy * c_nx + ix] += d_wavelet[it];
}
// Load one block's tile of `global` into the shared tile `shared`, together
// with a HALO-wide apron on all four sides, so an 8th-order stencil can read
// sx +/- HALO and sy +/- HALO without further global-memory traffic.
// Apron cells falling outside the physical domain are zero-filled.
// NOTE(review): the caller must invoke this from every in-domain thread and
// follow it with __syncthreads() before reading `shared`.
__device__ void set_halo(float *global, float shared[][SDIMX], int tx, int ty, int sx, int sy, int gx, int gy, int nx, int ny)
{
/*
global :pointer to an array in global memory (gmem)
shared :2D array in shared device memory, SDIMY x SDIMX
tx, ty :thread id's in a block
sx, sy :thread id's in a shared memory tile (= tx + HALO, ty + HALO)
gx, gy :thread id's in the entire computational domain
nx, ny :domain dimensions
*/
// Each thread copies one value from gmem into smem
shared[sy][sx] = global[gy * nx + gx];
// Populate halo regions in smem for left, right, top and bottom boundaries of a block
// if thread near LEFT border of a block
if (tx < HALO)
{
// if global left
if (gx < HALO)
{
// zero-fill outside the domain (0.0 fixes the field at the edge; the
// original called this "reflective" — effectively a Dirichlet boundary)
shared[sy][sx - HALO] = 0.0;
}
else
{
// if block left: fetch the neighbouring block's columns
shared[sy][sx - HALO] = global[gy * nx + gx - HALO];
}
}
// if thread near RIGHT border of a block, or within HALO of the domain's right edge
if ((tx >= (BDIMX - HALO)) || ((gx + HALO) >= nx))
{
// if global right
if ((gx + HALO) >= nx)
{
// zero-fill outside the domain
shared[sy][sx + HALO] = 0.0;
}
else
{
// if block right
shared[sy][sx + HALO] = global[gy * nx + gx + HALO];
}
}
// if thread near BOTTOM border of a block
if (ty < HALO)
{
// if global bottom
if (gy < HALO)
{
// zero-fill outside the domain
shared[sy - HALO][sx] = 0.0;
}
else
{
// if block bottom
shared[sy - HALO][sx] = global[(gy - HALO) * nx + gx];
}
}
// if thread near TOP border of a block, or within HALO of the domain's top edge
if ((ty >= (BDIMY - HALO)) || ((gy + HALO) >= ny))
{
// if global top
if ((gy + HALO) >= ny)
{
// zero-fill outside the domain
shared[sy + HALO][sx] = 0.0;
}
else
{
// if block top
shared[sy + HALO][sx] = global[(gy + HALO) * nx + gx];
}
}
}
// FD kernel: one time step of the acoustic wave equation, 2nd-order in time
// and 8th-order in space. Reads u2 (wavefield at t-1) and u1 (t-2), writes
// the new wavefield into u1 in place.
// Grid/block layout: 2D grid of BDIMX x BDIMY blocks covering c_nx x c_ny.
__global__ void kernel_2dfd(float *d_u1, float *d_u2, float *d_vp)
{
    // Cache model dims in registers (cheaper than repeated cmem reads in the loop)
    const int nx = c_nx;
    const int ny = c_ny;
    // FD coefficient dt2 / dx2
    const float dt2dx2 = c_dt2dx2;
    // Thread address (ty, tx) in a block
    const unsigned int tx = threadIdx.x;
    const unsigned int ty = threadIdx.y;
    // Thread address (sy, sx) in shared memory (offset by HALO for the apron)
    const unsigned int sx = threadIdx.x + HALO;
    const unsigned int sy = threadIdx.y + HALO;
    // Thread address (gy, gx) in global memory
    const unsigned int gx = blockIdx.x * blockDim.x + tx;
    const unsigned int gy = blockIdx.y * blockDim.y + ty;
    // Global linear index
    const unsigned int idx = gy * nx + gx;
    // Shared-memory tiles (smem) for the two wavefields and the velocity model
    __shared__ float s_u1[SDIMY][SDIMX];
    __shared__ float s_u2[SDIMY][SDIMX];
    __shared__ float s_vp[SDIMY][SDIMX];
    // Whether this thread maps to a point of the physical domain
    const bool inside = (gx < nx) && (gy < ny);
    if (inside)
    {
        // Copy tile + halo regions from gmem into smem
        set_halo(d_u1, s_u1, tx, ty, sx, sy, gx, gy, nx, ny);
        set_halo(d_u2, s_u2, tx, ty, sx, sy, gx, gy, nx, ny);
        set_halo(d_vp, s_vp, tx, ty, sx, sy, gx, gy, nx, ny);
    }
    // Fix: the barrier must be reached by ALL threads of a block. The original
    // placed __syncthreads() inside the divergence guard above, which is
    // undefined behavior whenever nx or ny is not a multiple of the block dims.
    __syncthreads();
    if (inside)
    {
        // Central point of fd stencil, o o o o x o o o o
        float du2_xx = c_coef[0] * s_u2[sy][sx];
        float du2_yy = c_coef[0] * s_u2[sy][sx];
#pragma unroll
        for (int d = 1; d <= HALO; d++)
        {
            du2_xx += c_coef[d] * (s_u2[sy][sx - d] + s_u2[sy][sx + d]);
            du2_yy += c_coef[d] * (s_u2[sy - d][sx] + s_u2[sy + d][sx]);
        }
        // Second order wave equation (2.0f: avoid a silent double promotion)
        d_u1[idx] = 2.0f * s_u2[sy][sx] - s_u1[sy][sx] + s_vp[sy][sx] * s_vp[sy][sx] * (du2_xx + du2_yy) * dt2dx2;
    }
    // No trailing barrier: nothing reads shared memory after this point.
}
/*
===================================================================================
MAIN
===================================================================================
*/
int main(int argc, char *argv[])
{
// Model dimensions
int nx = 512; /* x dim */
int ny = 512; /* z dim */
size_t nxy = nx * ny;
size_t nbytes = nxy * sizeof(float);/* bytes to store nx * ny */
float dx = 10.0; /* grid step, assume dy = dx, m */
// Allocate memory for velocity model
float _vp = 3300.0; /* p-wave velocity, m/s */
float *h_vp;
h_vp = (float *)malloc(nbytes);
for (int i = 0; i < nxy; i++)
{
h_vp[i] = _vp; /* assume homogeneous velocity model */
}
printf("MODEL:\n");
printf("\t%i x %i\t:ny x nx\n", ny, nx);
printf("\t%f\t:dx\n", dx);
printf("\t%f\t:h_vp[0]\n", h_vp[0]);
// Time stepping
float t_total = 1.5; /* total time of wave propagation, sec */
float dt = 0.5 * dx / _vp; /* time step assuming constant vp, sec */
int nt = round(t_total / dt); /* number of time steps */
int snap_step = round(0.1 * nt); /* save snapshot every ... steps */
printf("TIME STEPPING:\n");
printf("\t%e\t:t_total\n", t_total);
printf("\t%e\t:dt\n", dt);
printf("\t%i\t:nt\n", nt);
// Source
float f0 = 10.0; /* source dominant frequency, Hz */
float t0 = 1.2 / f0; /* source padding to move wavelet from left of zero */
int isrc = round((float)nx / 2); /* source location, ox */
int jsrc = round((float)ny / 2); /* source location, oz */
float *h_wavelet, *h_time;
float tbytes = nt * sizeof(float);
h_time = (float *)malloc(tbytes);
h_wavelet = (float *)malloc(tbytes);
// Fill source waveform vecror
float a = PI * PI * f0 * f0; /* const for wavelet */
float dt2dx2 = (dt * dt) / (dx * dx); /* const for fd stencil */
for (int it = 0; it < nt; it++)
{
h_time[it] = it * dt;
// Ricker wavelet (Mexican hat), second derivative of Gaussian
h_wavelet[it] = 1e10 * (1.0 - 2.0 * a * pow(h_time[it] - t0, 2)) * exp(-a * pow(h_time[it] - t0, 2));
h_wavelet[it] *= dt2dx2;
}
printf("SOURCE:\n");
printf("\t%f\t:f0\n", f0);
printf("\t%f\t:t0\n", t0);
printf("\t%i\t:isrc - ox\n", isrc);
printf("\t%i\t:jsrc - oy\n", jsrc);
printf("\t%e\t:dt2dx2\n", dt2dx2);
printf("\t%f\t:min wavelength [m]\n",(float)_vp / (2*f0));
printf("\t%f\t:ppw\n",(float)_vp / (2*f0) / dx);
// Allocate memory on device
printf("Allocate and copy memory on the device...\n");
float *d_u1, *d_u2, *d_vp, *d_wavelet;
CHECK(cudaMalloc((void **)&d_u1, nbytes)) /* wavefield at t-2 */
CHECK(cudaMalloc((void **)&d_u2, nbytes)) /* wavefield at t-1 */
CHECK(cudaMalloc((void **)&d_vp, nbytes)) /* velocity model */
CHECK(cudaMalloc((void **)&d_wavelet, tbytes)); /* source term for each time step */
// Fill allocated memory with a value
CHECK(cudaMemset(d_u1, 0, nbytes))
CHECK(cudaMemset(d_u2, 0, nbytes))
// Copy arrays from host to device
CHECK(cudaMemcpy(d_vp, h_vp, nbytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_wavelet, h_wavelet, tbytes, cudaMemcpyHostToDevice));
// Copy constants to device constant memory
float coef[] = {a0, a1, a2, a3, a4};
CHECK(cudaMemcpyToSymbol(c_coef, coef, 5 * sizeof(float)));
CHECK(cudaMemcpyToSymbol(c_isrc, &isrc, sizeof(int)));
CHECK(cudaMemcpyToSymbol(c_jsrc, &jsrc, sizeof(int)));
CHECK(cudaMemcpyToSymbol(c_nx, &nx, sizeof(int)));
CHECK(cudaMemcpyToSymbol(c_ny, &ny, sizeof(int)));
CHECK(cudaMemcpyToSymbol(c_nt, &nt, sizeof(int)));
CHECK(cudaMemcpyToSymbol(c_dt2dx2, &dt2dx2, sizeof(float)));
printf("\t%f MB\n", (4 * nbytes + tbytes)/1024/1024);
printf("OK\n");
// Print out specs of the main GPU
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, 0));
printf("GPU0:\t%s\t%d.%d:\n", deviceProp.name, deviceProp.major, deviceProp.minor);
printf("\t%lu GB:\t total Global memory (gmem)\n", deviceProp.totalGlobalMem / 1024 / 1024 / 1000);
printf("\t%lu MB:\t total Constant memory (cmem)\n", deviceProp.totalConstMem / 1024);
printf("\t%lu MB:\t total Shared memory per block (smem)\n", deviceProp.sharedMemPerBlock / 1024);
printf("\t%d:\t total threads per block\n", deviceProp.maxThreadsPerBlock);
printf("\t%d:\t total registers per block\n", deviceProp.regsPerBlock);
printf("\t%d:\t warp size\n", deviceProp.warpSize);
printf("\t%d x %d x %d:\t max dims of block\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
printf("\t%d x %d x %d:\t max dims of grid\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
CHECK(cudaSetDevice(0));
// Print out CUDA domain partitioning info
printf("CUDA:\n");
printf("\t%i x %i\t:block dim\n", BDIMY, BDIMX);
printf("\t%i x %i\t:shared dim\n", SDIMY, SDIMX);
printf("CFL:\n");
printf("\t%f\n", _vp * dt / dx);
// Setup CUDA run
dim3 block(BDIMX, BDIMY);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
// MAIN LOOP
printf("Time loop...\n");
for (int it = 0; it < nt; it++)
{
// These kernels are in the same stream so they will be executed one by one
kernel_add_wavelet<<<grid, block>>>(d_u2, d_wavelet, it);
kernel_2dfd<<<grid, block>>>(d_u1, d_u2, d_vp);
CHECK(cudaDeviceSynchronize());
// Exchange time steps
float *d_u3 = d_u1;
d_u1 = d_u2;
d_u2 = d_u3;
// Save snapshot every snap_step iterations
if ((it % snap_step == 0))
{
printf("%i/%i\n", it+1, nt);
saveSnapshotIstep(it, d_u3, nx, ny,"u3");
}
}
printf("OK\n");
CHECK(cudaGetLastError());
printf("Clean memory...");
delete[] h_vp;
delete[] h_time;
delete[] h_wavelet;
CHECK(cudaFree(d_u1));
CHECK(cudaFree(d_u2));
CHECK(cudaFree(d_vp));
CHECK(cudaFree(d_wavelet));
printf("OK\n");
CHECK(cudaDeviceReset());
return 0;
}
|
8,382 | // the cpu variant
// CPU reference inclusive prefix sum: output[i] = input[0] + ... + input[i].
// Accumulation order matches the sequential left-to-right definition.
void inclusive_scan(float* output, float* input, int length) {
    if (length <= 0) return;
    float running = input[0];
    output[0] = running;
    for (int idx = 1; idx < length; ++idx) {
        running += input[idx];
        output[idx] = running;
    }
}
// CPU reference exclusive prefix sum: output[0] = 0 and
// output[i] = input[0] + ... + input[i-1]. Same float summation order
// as the inclusive variant, shifted by one element.
void exclusive_scan(float* output, float* input, int length) {
    if (length <= 0) return;
    float running = 0.0f;
    for (int idx = 0; idx < length; ++idx) {
        output[idx] = running;
        running += input[idx];
    }
} |
8,383 | #include <stdio.h>
#include "helpers.cuh"
// Device-side scoring of one character pair: equal characters earn
// ALIGN_GAIN, unequal ones cost MISALIGN_PENALTY (both from helpers.cuh).
__device__
int match_direct(char a, char b) {
    return (a == b) ? ALIGN_GAIN : MISALIGN_PENALTY;
}
// Host-side twin of match_direct: identical scoring rule, callable from CPU
// verification code.
int match_direct_host(char a, char b) {
    return (a == b) ? ALIGN_GAIN : MISALIGN_PENALTY;
}
// BLOSUM50 substitution matrix, 26x26 row-major, indexed by letter - 'A'
// (see match_blosum). Rows J, O and U are placeholders (no amino acid maps
// to those letters). Device copy lives in constant memory; all lanes of a
// warp reading the same entry get the broadcast path.
__constant__ int BLOSUM_50[26 * 26] =
/*A */ {5, -2, -1, -2, -1, -3, 0, -2, -1, 0, -1, -2, -1, -1, 0, -1, -1, -2, 1, 0, 0, 0, -3, -1, -2, -1,
/*B */ -2, 5, -3, 5, 1, -4, -1, 0, -4, 0, 0, -4, -3, 4, 0, -2, 0, -1, 0, 0, 0, -4, -5, -1, -3, 2,
/*C */ -1, -3, 13, -4, -3, -2, -3, -3, -2, 0, -3, -2, -2, -2, 0, -4, -3, -4, -1, -1, 0, -1, -5, -2, -3, -3,
/*D */ -2, 5, -4, 8, 2, -5, -1, -1, -4, 0, -1, -4, -4, 2, 0, -1, 0, -2, 0, -1, 0, -4, -5, -1, -3, 1,
/*E */ -1, 1, -3, 2, 6, -3, -3, 0, -4, 0, 1, -3, -2, 0, 0, -1, 2, 0, -1, -1, 0, -3, -3, -1, -2, 5,
/*F */ -3, -4, -2, -5, -3, 8, -4, -1, 0, 0, -4, 1, 0, -4, 0, -4, -4, -3, -3, -2, 0, -1, 1, -2, 4, -4,
/*G */ 0, -1, -3, -1, -3, -4, 8, -2, -4, 0, -2, -4, -3, 0, 0, -2, -2, -3, 0, -2, 0, -4, -3, -2, -3, -2,
/*H */ -2, 0, -3, -1, 0, -1, -2, 10, -4, 0, 0, -3, -1, 1, 0, -2, 1, 0, -1, -2, 0, -4, -3, -1, 2, 0,
/*I */ -1, -4, -2, -4, -4, 0, -4, -4, 5, 0, -3, 2, 2, -3, 0, -3, -3, -4, -3, -1, 0, 4, -3, -1, -1, -3,
/**J*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
/*K */ -1, 0, -3, -1, 1, -4, -2, 0, -3, 0, 6, -3, -2, 0, 0, -1, 2, 3, 0, -1, 0, -3, -3, -1, -2, 1,
/*L */ -2, -4, -2, -4, -3, 1, -4, -3, 2, 0, -3, 5, 3, -4, 0, -4, -2, -3, -3, -1, 0, 1, -2, -1, -1, -3,
/*M */ -1, -3, -2, -4, -2, 0, -3, -1, 2, 0, -2, 3, 7, -2, 0, -3, 0, -2, -2, -1, 0, 1, -1, -1, 0, -1,
/*N */ -1, 4, -2, 2, 0, -4, 0, 1, -3, 0, 0, -4, -2, 7, 0, -2, 0, -1, 1, 0, 0, -3, -4, -1, -2, 0,
/**O*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
/*P */ -1, -2, -4, -1, -1, -4, -2, -2, -3, 0, -1, -4, -3, -2, 0, 10, -1, -3, -1, -1, 0, -3, -4, -2, -3, -1,
/*Q */ -1, 0, -3, 0, 2, -4, -2, 1, -3, 0, 2, -2, 0, 0, 0, -1, 7, 1, 0, -1, 0, -3, -1, -1, -1, 4,
/*R */ -2, -1, -4, -2, 0, -3, -3, 0, -4, 0, 3, -3, -2, -1, 0, -3, 1, 7, -1, -1, 0, -3, -3, -1, -1, 0,
/*S */ 1, 0, -1, 0, -1, -3, 0, -1, -3, 0, 0, -3, -2, 1, 0, -1, 0, -1, 5, 2, 0, -2, -4, -1, -2, 0,
/*T */ 0, 0, -1, -1, -1, -2, -2, -2, -1, 0, -1, -1, -1, 0, 0, -1, -1, -1, 2, 5, 0, 0, -3, 0, -2, -1,
/**U*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
/*V */ 0, -4, -1, -4, -3, -1, -4, -4, 4, 0, -3, 1, 1, -3, 0, -3, -3, -3, -2, 0, 0, 5, -3, -1, -1, -3,
/*W */ -3, -5, -5, -5, -3, 1, -3, -3, -3, 0, -3, -2, -1, -4, 0, -4, -1, -3, -4, -3, 0, -3, 15, -3, 2, -2,
/*X */ -1, -1, -2, -1, -1, -2, -2, -1, -1, 0, -1, -1, -1, -1, 0, -2, -1, -1, -1, 0, 0, -1, -3, -1, -1, -1,
/*Y */ -2, -3, -3, -3, -2, 4, -3, 2, -1, 0, -2, -1, 0, -2, 0, -3, -1, -1, -2, -2, 0, -1, 2, -1, 8, -2,
/*Z */ -1, 2, -3, 1, 5, -4, -2, 0, -3, 0, 1, -3, -1, 0, 0, -1, 4, 0, 0, -1, 0, -3, -2, -1, -2, 5};
// Host-memory copy of the same matrix for CPU-side scoring
// (see match_blosum_host). Must stay in sync with BLOSUM_50 above.
int BLOSUM_50_direct[26 * 26] =
/*A */ {5, -2, -1, -2, -1, -3, 0, -2, -1, 0, -1, -2, -1, -1, 0, -1, -1, -2, 1, 0, 0, 0, -3, -1, -2, -1,
/*B */ -2, 5, -3, 5, 1, -4, -1, 0, -4, 0, 0, -4, -3, 4, 0, -2, 0, -1, 0, 0, 0, -4, -5, -1, -3, 2,
/*C */ -1, -3, 13, -4, -3, -2, -3, -3, -2, 0, -3, -2, -2, -2, 0, -4, -3, -4, -1, -1, 0, -1, -5, -2, -3, -3,
/*D */ -2, 5, -4, 8, 2, -5, -1, -1, -4, 0, -1, -4, -4, 2, 0, -1, 0, -2, 0, -1, 0, -4, -5, -1, -3, 1,
/*E */ -1, 1, -3, 2, 6, -3, -3, 0, -4, 0, 1, -3, -2, 0, 0, -1, 2, 0, -1, -1, 0, -3, -3, -1, -2, 5,
/*F */ -3, -4, -2, -5, -3, 8, -4, -1, 0, 0, -4, 1, 0, -4, 0, -4, -4, -3, -3, -2, 0, -1, 1, -2, 4, -4,
/*G */ 0, -1, -3, -1, -3, -4, 8, -2, -4, 0, -2, -4, -3, 0, 0, -2, -2, -3, 0, -2, 0, -4, -3, -2, -3, -2,
/*H */ -2, 0, -3, -1, 0, -1, -2, 10, -4, 0, 0, -3, -1, 1, 0, -2, 1, 0, -1, -2, 0, -4, -3, -1, 2, 0,
/*I */ -1, -4, -2, -4, -4, 0, -4, -4, 5, 0, -3, 2, 2, -3, 0, -3, -3, -4, -3, -1, 0, 4, -3, -1, -1, -3,
/**J*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
/*K */ -1, 0, -3, -1, 1, -4, -2, 0, -3, 0, 6, -3, -2, 0, 0, -1, 2, 3, 0, -1, 0, -3, -3, -1, -2, 1,
/*L */ -2, -4, -2, -4, -3, 1, -4, -3, 2, 0, -3, 5, 3, -4, 0, -4, -2, -3, -3, -1, 0, 1, -2, -1, -1, -3,
/*M */ -1, -3, -2, -4, -2, 0, -3, -1, 2, 0, -2, 3, 7, -2, 0, -3, 0, -2, -2, -1, 0, 1, -1, -1, 0, -1,
/*N */ -1, 4, -2, 2, 0, -4, 0, 1, -3, 0, 0, -4, -2, 7, 0, -2, 0, -1, 1, 0, 0, -3, -4, -1, -2, 0,
/**O*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
/*P */ -1, -2, -4, -1, -1, -4, -2, -2, -3, 0, -1, -4, -3, -2, 0, 10, -1, -3, -1, -1, 0, -3, -4, -2, -3, -1,
/*Q */ -1, 0, -3, 0, 2, -4, -2, 1, -3, 0, 2, -2, 0, 0, 0, -1, 7, 1, 0, -1, 0, -3, -1, -1, -1, 4,
/*R */ -2, -1, -4, -2, 0, -3, -3, 0, -4, 0, 3, -3, -2, -1, 0, -3, 1, 7, -1, -1, 0, -3, -3, -1, -1, 0,
/*S */ 1, 0, -1, 0, -1, -3, 0, -1, -3, 0, 0, -3, -2, 1, 0, -1, 0, -1, 5, 2, 0, -2, -4, -1, -2, 0,
/*T */ 0, 0, -1, -1, -1, -2, -2, -2, -1, 0, -1, -1, -1, 0, 0, -1, -1, -1, 2, 5, 0, 0, -3, 0, -2, -1,
/**U*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
/*V */ 0, -4, -1, -4, -3, -1, -4, -4, 4, 0, -3, 1, 1, -3, 0, -3, -3, -3, -2, 0, 0, 5, -3, -1, -1, -3,
/*W */ -3, -5, -5, -5, -3, 1, -3, -3, -3, 0, -3, -2, -1, -4, 0, -4, -1, -3, -4, -3, 0, -3, 15, -3, 2, -2,
/*X */ -1, -1, -2, -1, -1, -2, -2, -1, -1, 0, -1, -1, -1, -1, 0, -2, -1, -1, -1, 0, 0, -1, -3, -1, -1, -1,
/*Y */ -2, -3, -3, -3, -2, 4, -3, 2, -1, 0, -2, -1, 0, -2, 0, -3, -1, -1, -2, -2, 0, -1, 2, -1, 8, -2,
/*Z */ -1, 2, -3, 1, 5, -4, -2, 0, -3, 0, 1, -3, -1, 0, 0, -1, 4, 0, 0, -1, 0, -3, -2, -1, -2, 5};
// Device-side BLOSUM50 lookup. Letters are mapped to matrix row/column by
// subtracting 'A' (65); callers must pass uppercase A-Z.
__device__
int match_blosum(char a, char b) {
    int row = a - 65;
    int col = b - 65;
    return BLOSUM_50[row * 26 + col];
}
// Host-side BLOSUM50 lookup against the plain-memory copy of the matrix.
int match_blosum_host(char a, char b) {
    int row = a - 65;
    int col = b - 65;
    return BLOSUM_50_direct[row * 26 + col];
}
// Device-side traceback for linear-gap alignment: walk the CellDecision
// matrix backwards from bestCell (or from (len1, len2) when globalAlign)
// until the Nil sentinel, then replay the path forwards to build the two
// gapped strings. Returned seq1/seq2 are device-heap allocations the caller
// must free.
__device__
AlignedPair backtrace(const char *seq1, unsigned long len1, const char *seq2, unsigned long len2,
CellDecision *decisions, BestCell bestCell, bool globalAlign) {
if (globalAlign) {
bestCell.i = len1;
bestCell.j = len2;
}
// Find path ending at best cell
// A path can visit at most every row and every column once: len1+len2+1.
Direction* path = (Direction*) malloc((len1+len2+1) * sizeof(Direction));
path[0] = decisions[(bestCell.i)*(len2+1) + bestCell.j].direction;
int pathLen = 0;
// Diagonal consumes one char of each sequence; Left only of seq2; Above
// only of seq1. The decision matrix has len2+1 columns.
while (path[pathLen] != Nil) {
if (path[pathLen] == Diagonal) {
bestCell.i--;
bestCell.j--;
}
else if (path[pathLen] == Left) {
bestCell.j--;
}
else if (path[pathLen] == Above) {
bestCell.i--;
}
path[++pathLen] = decisions[(bestCell.i)*(len2+1) + bestCell.j].direction;
}
char* aligned1 = (char*) malloc(sizeof(char) * (pathLen + 1));
char* aligned2 = (char*) malloc(sizeof(char) * (pathLen + 1));
int p = 0;
// Align fragments
// bestCell now sits at the path start; replay forwards, emitting '-' for a
// gap in the corresponding sequence.
for (int pathPos = pathLen-1; pathPos >= 0; pathPos--, p++) {
if (path[pathPos] == Diagonal) {
aligned1[p] = seq1[bestCell.i++];
aligned2[p] = seq2[bestCell.j++];
} else if (path[pathPos] == Left) {
aligned1[p] = '-';
aligned2[p] = seq2[bestCell.j++];
} else if (path[pathPos] == Above) {
aligned1[p] = seq1[bestCell.i++];
aligned2[p] = '-';
}
}
aligned1[p] = '\0';
aligned2[p] = '\0';
free(path);
return (AlignedPair) {aligned1, aligned2, pathLen};
}
// Single-thread kernel wrapper around backtrace(): runs the traceback on the
// device and copies the resulting strings (plus NUL) into the pre-allocated
// buffers inside *alignedPair, freeing the device-heap temporaries.
__global__
void backtraceRunner(const char *seq1, unsigned long len1, const char *seq2, unsigned long len2,
CellDecision *decisions, BestCell bestCell, bool globalAlign, AlignedPair* alignedPair) {
    AlignedPair local = backtrace(seq1, len1, seq2, len2, decisions, bestCell, globalAlign);
    size_t bytes = sizeof(char) * (local.len + 1);
    memcpy(alignedPair->seq1, local.seq1, bytes);
    memcpy(alignedPair->seq2, local.seq2, bytes);
    alignedPair->len = local.len;
    free(local.seq1);
    free(local.seq2);
}
// Device-side traceback for Gotoh affine-gap alignment. Besides the main
// decision matrix it consults the vertical/horizontal gap matrices so a gap
// run is followed all the way back to the cell where it was opened
// (GapStart). forceBottom*Gap force the first traceback move when the
// optimal score ends inside a gap. Returned strings are device-heap
// allocations the caller must free.
__device__
AlignedPair backtrace_gotoh(const char *seq1, unsigned long len1, const char *seq2, unsigned long len2,
CellDecision *decisions, GapDecision *vertical, GapDecision *horizontal,
BestCell bestCell, bool globalAlign, bool forceBottomVerticalGap, bool forceBottomHorizontalGap) {
if (globalAlign) {
bestCell.i = len1;
bestCell.j = len2;
}
// Find path ending at best cell
Direction* path = (Direction*) malloc((len1+len2+1) * sizeof(Direction));
// Horizontal override wins over vertical if both flags are set.
path[0] = forceBottomVerticalGap ? Above : decisions[(bestCell.i)*(len2+1) + bestCell.j].direction;
path[0] = forceBottomHorizontalGap ? Left : path[0];
int pathLen = 0;
while (path[pathLen] != Nil) {
if (path[pathLen] == Diagonal) {
bestCell.i--;
bestCell.j--;
pathLen++;
}
else if (path[pathLen] == Left) {
// Follow the horizontal gap run leftwards until its opening cell.
do {
bestCell.j--;
pathLen++;
// The last path[pathLen] will be overwritten when the loop finishes
path[pathLen] = Left;
} while (bestCell.j > 0 && horizontal[(bestCell.i)*(len2+1) + bestCell.j + 1].gap != GapStart);
}
else if (path[pathLen] == Above) {
// Follow the vertical gap run upwards until its opening cell.
do {
bestCell.i--;
pathLen++;
// The last path[pathLen] will be overwritten when the loop finishes
path[pathLen] = Above;
} while (bestCell.i > 0 && vertical[(bestCell.i + 1)*(len2+1) + bestCell.j].gap != GapStart);
}
path[pathLen] = decisions[(bestCell.i)*(len2+1) + bestCell.j].direction;
}
char* aligned1 = (char*) malloc(sizeof(char) * (pathLen + 1));
char* aligned2 = (char*) malloc(sizeof(char) * (pathLen + 1));
int p = 0;
// Align fragments
// Replay the recorded path forwards from the start cell to emit the gapped
// strings; '-' marks a gap in the corresponding sequence.
for (int pathPos = pathLen-1; pathPos >= 0; pathPos--, p++) {
if (path[pathPos] == Diagonal) {
aligned1[p] = seq1[bestCell.i++];
aligned2[p] = seq2[bestCell.j++];
} else if (path[pathPos] == Left) {
aligned1[p] = '-';
aligned2[p] = seq2[bestCell.j++];
} else if (path[pathPos] == Above) {
aligned1[p] = seq1[bestCell.i++];
aligned2[p] = '-';
}
}
aligned1[p] = '\0';
aligned2[p] = '\0';
free(path);
return (AlignedPair) {aligned1, aligned2, pathLen};
}
// Single-thread kernel wrapper around backtrace_gotoh(): performs the
// affine-gap traceback on the device and copies the resulting strings (plus
// NUL) into the caller-provided AlignedPair, freeing the temporaries.
__global__
void backtraceGotohRunner(const char *seq1, unsigned long len1, const char *seq2, unsigned long len2,
CellDecision *decisions, GapDecision *vertical, GapDecision *horizontal,
BestCell bestCell, bool globalAlign, bool forceBottomVerticalGap, bool forceBottomHorizontalGap,
AlignedPair* alignedPair) {
    AlignedPair local = backtrace_gotoh(seq1, len1, seq2, len2,
                                        decisions, vertical, horizontal,
                                        bestCell, globalAlign,
                                        forceBottomVerticalGap, forceBottomHorizontalGap);
    size_t bytes = sizeof(char) * (local.len + 1);
    memcpy(alignedPair->seq1, local.seq1, bytes);
    memcpy(alignedPair->seq2, local.seq2, bytes);
    alignedPair->len = local.len;
    free(local.seq1);
    free(local.seq2);
}
// Debug kernel: prints both input sequences, space-separated, on one line.
// Intended for a <<<1,1>>> launch (device printf is serialized and slow).
__global__
void printSeqs(char *d_seq1, unsigned long len1, char *d_seq2, unsigned long len2) {
    printf("Solving ");
    for (unsigned long pos = 0; pos < len1; ++pos)
        printf("%c", d_seq1[pos]);
    printf(" ");
    for (unsigned long pos = 0; pos < len2; ++pos)
        printf("%c", d_seq2[pos]);
    printf("\n");
}
// Score an already-aligned pair (equal-length, NUL-terminated, gaps as '-'):
// every gap column costs GAP_PENALTY, every residue column is scored by
// match_host. Iteration stops at the end of seq1.
int score_aligned_pair(char* seq1, char* seq2) {
    int total = 0;
    for (; *seq1; ++seq1, ++seq2) {
        if (*seq1 == '-' || *seq2 == '-')
            total += GAP_PENALTY;
        else
            total += match_host(*seq1, *seq2);
    }
    return total;
}
// Score an aligned pair under the Gotoh affine-gap model: opening a gap run
// costs GAP_START, each further column of the same run costs GAP_EXTEND.
// A run in one sequence is terminated by a gap in the other sequence or by
// a residue column.
int score_gotoh(char* seq1, char* seq2) {
    int total = 0;
    bool gap_run_in_seq1 = false; // currently extending a '-' run in seq1
    bool gap_run_in_seq2 = false; // currently extending a '-' run in seq2
    for (; *seq1; ++seq1, ++seq2) {
        if (*seq1 == '-') {
            gap_run_in_seq2 = false;
            total += gap_run_in_seq1 ? GAP_EXTEND : GAP_START;
            gap_run_in_seq1 = true;
        }
        else if (*seq2 == '-') {
            gap_run_in_seq1 = false;
            total += gap_run_in_seq2 ? GAP_EXTEND : GAP_START;
            gap_run_in_seq2 = true;
        }
        else {
            gap_run_in_seq1 = false;
            gap_run_in_seq2 = false;
            total += match_host(*seq1, *seq2);
        }
    }
    return total;
}
|
8,384 | #include "includes.h"
// Reinitialize a free-list heap: one thread per slot writes the slot ids in
// descending order (numBlock-1 .. 0), and thread 0 resets the heap pointer
// to the last valid index.
__global__ void ResetHeap_kernel(int *mplHeap, int *mplHeapPtr, int numBlock)
{
    int slot = threadIdx.x + blockDim.x * blockIdx.x;
    if (slot >= numBlock)
        return;
    if (slot == 0)
        mplHeapPtr[0] = numBlock - 1;
    mplHeap[slot] = numBlock - slot - 1;
} |
8,385 | /*
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <device_functions.h>
#include <string.h>
__global__ void whatever(char *d_a,char *d_res,int len)
{
int i=0;
int flag=1;
int in=blockIdx.x*blockDim.x+threadIdx.x;
if(d_a[in]==' ')
d_res[in]='f';
else if((d_a[in]>=65 && d_a[in]<=90)||(d_a[in]>=97 && d_a[in]<=122))
d_res[in]='f';
else
{
for(i=in-1;i>=0 && d_a[i]!=' ';i--)
{
if((d_a[i]>=65 && d_a[i]<=90)||(d_a[i]>=97 && d_a[i]<=122))
{
flag=0;
break;
}
}
if(flag==0)
d_res[in]='f';
else
d_res[in]='t';
}
}
int main()
{
int i;
char h_a[1000];
char h_res[1000];
char *d_a;
char *d_res;
printf("Enter the string: \n");
gets(h_a);
//puts(h_a);
int len=strlen(h_a);
int size=sizeof(char)*len;
cudaMalloc((void **)&d_a,size);
cudaMalloc((void **)&d_res,size);
cudaMemcpy(d_a,h_a,size,cudaMemcpyHostToDevice);
whatever<<<1,len>>>(d_a,d_res,len);
cudaMemcpy(h_res,d_res,size,cudaMemcpyDeviceToHost);
printf("OUTPUT :\n");
for(i=0;i<len;i++)
printf("%c, ",h_res[i]);
printf("\n\n");
cudaFree(d_a);
cudaFree(d_res);
}
*/ |
8,386 | #include "includes.h"
// Grid-stride copy of num_elems int4 values from src to dest (used for
// peer-to-peer transfers); __restrict__ lets the compiler assume the two
// buffers do not alias.
__global__ void copyp2p( int4* __restrict__ dest, int4 const* __restrict__ src, size_t num_elems)
{
    size_t stride = blockDim.x * gridDim.x;
#pragma unroll(5)
    for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num_elems; idx += stride)
    {
        dest[idx] = src[idx];
    }
} |
8,387 | // kernel
// Element-wise vector addition C[i] = A[i] + B[i]; one thread per element,
// guarded for the partial last block.
__global__ void kernel_vec_add(const float* A, const float* B, float* C, int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N) {
        C[idx] = A[idx] + B[idx];
    }
}
// C-linkage host wrapper: launches kernel_vec_add over n elements with
// 256-thread blocks (ceil-div grid size covers any n).
extern "C"
void ivk_krnl_vec_add(const float *a, const float *b, float *c, int n)
{
    const int threads_per_block = 256;
    const int num_blocks = (n + threads_per_block - 1) / threads_per_block;
    kernel_vec_add<<<num_blocks, threads_per_block>>>(a, b, c, n);
}
|
8,388 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <string.h>
#include <math.h>
#define N 8 //Filas
#define M 8 //Columnas
// N x M matrix multiply c = a * b, one thread per output element; the 2D
// launch (block/thread indices) is flattened into a linear index.
// FIX: the accumulator was declared `int`, so every float partial product
// was truncated toward zero before being added — the result was an integer
// approximation of the dot product. It must accumulate in float.
__global__ void multMatrices(float *c, float *a, float *b){
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    int ix = row*N + col;
    if (ix < N*M) {
        float acc = 0.0f;                 // was: int adder = 0;
        for (int i = 0; i < N; ++i)
            acc += a[row*N + i] * b[i*N + col];
        c[ix] = acc;
    }
}
int main() {
    // Three N x M float matrices on host and device.
    const int memsize = sizeof(float) * N * M;
    float *h_a = (float *)malloc(memsize);
    float *h_b = (float *)malloc(memsize);
    float *h_c = (float *)malloc(memsize);
    // Initialize every element (inputs and result buffer) to 1.0f.
    for (int i = 0; i < N*M; ++i) {
        h_a[i] = 1.0f;
        h_b[i] = 1.0f;
        h_c[i] = 1.0f;
    }
    float *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, memsize);
    cudaMalloc(&d_b, memsize);
    cudaMalloc(&d_c, memsize);
    cudaMemcpy(d_a, h_a, memsize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, memsize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, h_c, memsize, cudaMemcpyHostToDevice);
    // 4x4 grid of 2x2 blocks => 8x8 threads, one per matrix element.
    dim3 block(4,4);
    dim3 thread(2,2);
    printf("El numero de bloques es %d, y el numero de hilos es %d\n", block.x, thread.x);
    multMatrices <<<block,thread>>> (d_c, d_a, d_b);
    // The blocking memcpy on the default stream also synchronizes with the
    // kernel above before reading the result.
    cudaMemcpy(h_c, d_c, memsize, cudaMemcpyDeviceToHost);
    printf("Resultado multiplicacion de matrices: \n");
    for (int i = 0; i < N*M; ++i) {
        printf("%f, ", h_c[i]);
        if (i != 0 && i % N == (N-1))
            printf("\n");
    }
    printf("\n");
    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
8,389 | #include <cstdio>
#include <cuda.h>
#include <string>
#include <ctime>
#include <chrono>
#include <queue>
#include <vector>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
// #include "graph.h"
// #include "bfsCPU.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#define DeviceNum 1
// CSR-style adjacency representation: vertex v's neighbours are
// adjacencyList[edgesOffset[v] .. edgesOffset[v] + edgesSize[v] - 1].
struct Graph {
std::vector<int> adjacencyList; // all edges
std::vector<int> edgesOffset; // offset to adjacencyList for every vertex
std::vector<int> edgesSize; //number of edges for every vertex
int numVertices = 0;
int numEdges = 0;
};
Graph G;
// Map a vertex id to its owning device: vertices of the global graph G are
// split into DeviceNum equal contiguous ranges.
int getDev(int which)
{
    int verticesPerDevice = G.numVertices / DeviceNum;
    return which / verticesPerDevice;
}
void readGraph(Graph &G, int argc, char **argv);
void readGraphFromFile(Graph &G, int argc, char **argv);
void bfsCPU(int start, Graph &G, std::vector<int> &distance,
std::vector<int> &parent, std::vector<bool> &visited);
// Return a pointer into device `devicenum`'s partition of d_distance for
// global vertex `which`; each device holds nodenum/DeviceNum contiguous
// entries, so the global id is rebased to a local index.
inline __device__
int* getLevel(int which, int **d_distance, int nodenum, int devicenum)
{
    int partSize = nodenum / DeviceNum;
    int localIndex = which - partSize * devicenum;
    return &(d_distance[devicenum][localIndex]);
}
// void *args[] = {&begin, &end, &G.numVertices, &level, &d_adjacencyList[i], &d_edgesOffset[i], &d_edgesSize[i], &d_distance[i], &d_parent[i],
// &changed};
// __global__
// void multiBfs(int begin, int end, int nodenum, int level, int deviceid, int *d_adjacencyList, int* d_edgesOffset,
// int *d_edgesSize, int **d_distance, int **d_parent, int *changed) {
// int threadid = blockIdx.x * blockDim.x + threadIdx.x;
// int valueChange = 0;
// int u = threadid + begin;
// // int u = threadid;
// //printf("blockid=%d blockdim=%d threadid=%d dev=%d, u=%d\n",blockIdx.x, blockDim.x, threadIdx.x,deviceid, u);
// if(u == 2)
// {
// //*changed = 1;
// printf("blockid=%d blockdim=%d threadid=%d dev=%d, u=%d \n",blockIdx.x, blockDim.x, threadIdx.x, deviceid, u);
// printf("inside u==2 d_edgesize=%d\n", d_edgesSize[u]);
// for(int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; ++i)
// {
// int v = d_adjacencyList[i];
// printf("from %d to %d on dev %d dis=%d end=%d %d \n",u, v, deviceid, d_distance[0][u], end, d_distance[0][0]);
// }
// }
// int mylevel = -1;
// if(u < end){
// mylevel = *getLevel(u, d_distance, nodenum, deviceid);
// //if(u == 0)
// printf("u=%d mylevel=%d\n",u, mylevel);
// }
// if(u < end && mylevel == level)
// {
// printf("u=%d\n", u);
// for(int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; ++i)
// {
// int v = d_adjacencyList[i];
// if(level + 1 < *getLevel(v, d_distance, nodenum, deviceid))
// {
// printf("v=%d\n", v);
// *getLevel(v, d_distance, nodenum, deviceid) = level + 1;
// *getLevel(v, d_parent, nodenum, deviceid) = i;
// valueChange = 1;
// }
// }
// }
// if(valueChange){
// *changed = valueChange;
// }
// }
// Level-synchronous multi-GPU BFS step: each thread owns one vertex u in
// [begin, end). If u is on the current frontier (stored level == level),
// relax all of u's outgoing edges; any improvement sets *changed so the
// host keeps iterating levels.
__global__
void multiBfs(int begin, int end, int nodenum, int level, int deviceid, int *d_adjacencyList, int* d_edgesOffset,
int *d_edgesSize, int **d_distance, int **d_parent, int *changed) {
int threadid = blockIdx.x * blockDim.x + threadIdx.x;
int valueChange = 0;
int u = threadid + begin;
// int u = threadid;
//printf("blockid=%d blockdim=%d threadid=%d dev=%d, u=%d\n",blockIdx.x, blockDim.x, threadIdx.x,deviceid, u);
int mylevel = -1;
if(u < end){
mylevel = *getLevel(u, d_distance, nodenum, deviceid);
//printf("u=%d mylevel=%d\n",u, mylevel);
}
if(u < end && mylevel == level)
{
// NOTE(review): the distance/parent updates below are not atomic; threads
// discovering the same v race. The final level is still level+1, but the
// recorded parent edge may come from any discoverer — confirm acceptable.
for(int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; ++i)
{
int v = d_adjacencyList[i];
if(level + 1 < *getLevel(v, d_distance, nodenum, deviceid))
{
*getLevel(v, d_distance, nodenum, deviceid) = level + 1;
// Parent is stored as the index of the discovering edge, not the vertex.
*getLevel(v, d_parent, nodenum, deviceid) = i;
valueChange = 1;
}
}
}
if(valueChange){
*changed = valueChange;
}
}
// Frontier-queue BFS step (multi-GPU): each thread pops one vertex u from
// d_currentQueue and tries to claim each unvisited neighbour for level+1.
// A claimed vertex v is appended to the d_nextQueue partition of its owning
// device (which = v / part); d_nextQueue holds one numVertices-sized
// partition per device and nextQueueSize[which] reserves slots atomically.
__global__
void queueBfs(int level, int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_distance, int *d_parent,
int queueSize, int *nextQueueSize, int *d_currentQueue, int *d_nextQueue, int part, int deviceid, int numVertices) {
int thid = blockIdx.x * blockDim.x + threadIdx.x;
if (thid < queueSize) {
int u = d_currentQueue[thid];
// printf("dev %d searching %d\n",deviceid, u);
for (int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; i++) {
int v = d_adjacencyList[i];
// printf("out v=%d dev=%d size=%d dis=%d\n", v, deviceid, d_edgesSize[u], d_distance[v]);
// atomicMin both tests and claims v: only the thread that observed
// INT_MAX as the previous value wins, writes the parent edge and
// enqueues v exactly once.
if (d_distance[v] == INT_MAX && atomicMin(&d_distance[v], level + 1) == INT_MAX) {
d_parent[v] = i;
int which = (v / part);
int position = atomicAdd(&nextQueueSize[which], 1);
d_nextQueue[position + which * numVertices] = v;
// printf("new v=%d dev=%d size=%d dis=%d pos=%d which=%d part=%d\n",
// v, deviceid, d_edgesSize[u], d_distance[v], position, which, part);
}
}
}
// __syncthreads();
// if(thid == 0)
// {
// for(int i = 0; i < 8; ++i)
// {
// printf("%d ", d_nextQueue[i]);
// }
// printf("dev=%d\n", deviceid);
// }
}
// __global__
// void queueBfs(int level, int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_distance, int *d_parent,
// int queueSize, int *nextQueueSize, int *d_currentQueue, int *d_nextQueue, int part, int deviceid, int numVertices) {
// int thid = blockIdx.x * blockDim.x + threadIdx.x;
// int remote = (deviceid + 1) % 2;
// if (thid < queueSize) {
// int u = d_currentQueue[thid];
// printf("searching %d\n", u);
// for (int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; i++) {
// int v = d_adjacencyList[i];
// printf("out v=%d dev=%d size=%d dis=%d\n", v, deviceid, d_edgesSize[u], d_distance[v]);
// if (d_distance[v] == INT_MAX && atomicMin(&d_distance[v], level + 1) == INT_MAX) {
// d_parent[v] = i;
// int which = (v / part);
// int position = atomicAdd(&nextQueueSize[which], 1);
// d_nextQueue[position + which * numVertices] = v;
// printf("new v=%d dev=%d size=%d dis=%d pos=%d which=%d part=%d\n", v, deviceid, d_edgesSize[u], d_distance[v], position, which, part);
// }
// }
// }
// }
// Single-GPU frontier-queue BFS step: each thread processes one queued
// vertex; neighbours claimed via atomicMin are assigned level+1 and appended
// to the shared next-frontier queue.
__global__
void queueBfsSingle(int level, int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_distance, int *d_parent,
int queueSize, int *nextQueueSize, int *d_currentQueue, int *d_nextQueue) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= queueSize)
        return;
    int u = d_currentQueue[tid];
    int first = d_edgesOffset[u];
    int last = first + d_edgesSize[u];
    for (int e = first; e < last; e++) {
        int v = d_adjacencyList[e];
        // atomicMin returning INT_MAX means this thread claimed v first.
        if (d_distance[v] == INT_MAX && atomicMin(&d_distance[v], level + 1) == INT_MAX) {
            d_parent[v] = e;   // parent stored as the discovering edge index
            int slot = atomicAdd(nextQueueSize, 1);
            d_nextQueue[slot] = v;
        }
    }
}
// Run and time the sequential reference BFS from startVertex; results land
// in the caller-provided distance/parent/visited vectors.
void runCpu(int startVertex, Graph &G, std::vector<int> &distance,
std::vector<int> &parent, std::vector<bool> &visited) {
    printf("Starting sequential bfs.\n");
    const auto t0 = std::chrono::steady_clock::now();
    bfsCPU(startVertex, G, distance, parent, visited);
    const auto t1 = std::chrono::steady_clock::now();
    long elapsedMs = std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count();
    printf("Elapsed time in milliseconds : %li ms.\n\n", elapsedMs);
}
// Abort the program with `msg` and the numeric code when a CUDA driver-API
// call did not return CUDA_SUCCESS.
void checkError(CUresult error, std::string msg) {
    if (error == CUDA_SUCCESS)
        return;
    printf("%s: %d\n", msg.c_str(), error);
    exit(1);
}
// CUdevice cuDevice;
// CUcontext cuContext;
// CUmodule cuModule;
// CUdevice cuDevice2[DeviceNum];
// CUcontext cuContext2[DeviceNum];
// CUmodule cuModule2[DeviceNum];
// CUfunction cuSimpleBfs;
// // CUfunction multiBfs[DeviceNum];
// CUfunction cuQueueBfs;
// CUfunction cuNextLayer;
// CUfunction cuCountDegrees;
// CUfunction cuScanDegrees;
// CUfunction cuAssignVerticesNextQueue;
// CUdeviceptr d_adjacencyList;
// CUdeviceptr d_edgesOffset;
// CUdeviceptr d_edgesSize;
// CUdeviceptr d_distance;
// CUdeviceptr d_parent;
// CUdeviceptr d_currentQueue;
// CUdeviceptr d_nextQueue;
// CUdeviceptr d_degrees;
// Pinned-host scratch array (legacy single-GPU path).
int *incrDegrees;
// Per-device graph (CSR arrays) and BFS state. The pointer tables are
// __managed__ (unified memory) so both host setup code and kernels can
// index them by device id.
__managed__ int* d_adjacencyList2[DeviceNum];
__managed__ int* d_edgesOffset2[DeviceNum];
__managed__ int* d_edgesSize2[DeviceNum];
__managed__ int* d_distance2[DeviceNum];
__managed__ int* d_parent2[DeviceNum];
__managed__ int* d_currentQueue2[DeviceNum];
// __managed__ int* d_nextQueue2[DeviceNum];
__managed__ int* d_nextQueue2[DeviceNum];
__managed__ int* d_degrees2[DeviceNum];
__managed__ int *incrDegrees2[DeviceNum];
// Per device-pair frontier queues and their element counts.
__managed__ int* localQueue[DeviceNum][DeviceNum];
__managed__ int* remoteQueue[DeviceNum][DeviceNum];
__managed__ int nextQueueSize[DeviceNum][DeviceNum];
__managed__ int queueSize[DeviceNum];
// void initCuda(Graph &G) {
// //initialize CUDA
// cuInit(0);
// checkError(cuDeviceGet(&cuDevice, 0), "cannot get device 0");
// checkError(cuCtxCreate(&cuContext, 0, cuDevice), "cannot create context");
// checkError(cuModuleLoad(&cuModule, "bfsCUDA.ptx"), "cannot load module");
// checkError(cuModuleGetFunction(&cuSimpleBfs, cuModule, "simpleBfs"), "cannot get kernel handle");
// checkError(cuModuleGetFunction(&cuQueueBfs, cuModule, "queueBfs"), "cannot get kernel handle");
// checkError(cuModuleGetFunction(&cuNextLayer, cuModule, "nextLayer"), "cannot get kernel handle");
// checkError(cuModuleGetFunction(&cuCountDegrees, cuModule, "countDegrees"), "cannot get kernel handle");
// checkError(cuModuleGetFunction(&cuScanDegrees, cuModule, "scanDegrees"), "cannot get kernel handle");
// checkError(cuModuleGetFunction(&cuAssignVerticesNextQueue, cuModule, "assignVerticesNextQueue"),
// "cannot get kernel handle");
// //copy memory to device
// checkError(cuMemAlloc(&d_adjacencyList, G.numEdges * sizeof(int)), "cannot allocate d_adjacencyList");
// checkError(cuMemAlloc(&d_edgesOffset, G.numVertices * sizeof(int)), "cannot allocate d_edgesOffset");
// checkError(cuMemAlloc(&d_edgesSize, G.numVertices * sizeof(int)), "cannot allocate d_edgesSize");
// checkError(cuMemAlloc(&d_distance, G.numVertices * sizeof(int)), "cannot allocate d_distance");
// checkError(cuMemAlloc(&d_parent, G.numVertices * sizeof(int)), "cannot allocate d_parent");
// checkError(cuMemAlloc(&d_currentQueue, G.numVertices * sizeof(int)), "cannot allocate d_currentQueue");
// checkError(cuMemAlloc(&d_nextQueue, G.numVertices * sizeof(int)), "cannot allocate d_nextQueue");
// checkError(cuMemAlloc(&d_degrees, G.numVertices * sizeof(int)), "cannot allocate d_degrees");
// checkError(cuMemAllocHost((void **) &incrDegrees, sizeof(int) * G.numVertices), "cannot allocate memory");
// checkError(cuMemcpyHtoD(d_adjacencyList, G.adjacencyList.data(), G.numEdges * sizeof(int)),
// "cannot copy to d_adjacencyList");
// checkError(cuMemcpyHtoD(d_edgesOffset, G.edgesOffset.data(), G.numVertices * sizeof(int)),
// "cannot copy to d_edgesOffset");
// checkError(cuMemcpyHtoD(d_edgesSize, G.edgesSize.data(), G.numVertices * sizeof(int)),
// "cannot copy to d_edgesSize");
// }
// Allocate and populate per-device copies of the graph (CSR arrays) and the
// BFS working buffers.
// FIX: the original wrapped each runtime call in a comma expression --
// `(cudaMalloc(...), "message")` -- which discarded both the error code and
// the message, so every failure went unnoticed. Calls are now checked.
void initCuda2(Graph &G) {
    // Local checker for runtime-API calls (checkError() above takes the
    // driver-API CUresult type, so it cannot be reused here).
    auto ck = [](cudaError_t err, const char *msg) {
        if (err != cudaSuccess) {
            printf("%s: %s\n", msg, cudaGetErrorString(err));
            exit(1);
        }
    };
    // Peer access requires >= 2 GPUs; DeviceNum may be 1, so these calls are
    // deliberately best-effort and their errors are cleared, not fatal.
    printf("Enabling peer access between GPU%d and GPU%d...\n", 0, 1);
    cudaSetDevice(0);
    cudaDeviceEnablePeerAccess(1, 0);
    cudaSetDevice(1);
    cudaDeviceEnablePeerAccess(0, 0);
    cudaGetLastError(); // clear any sticky peer-access failure
    for (int i = 0; i < DeviceNum; ++i) {
        ck(cudaSetDevice(i), "cannot set device");
        ck(cudaMalloc(&d_adjacencyList2[i], G.numEdges * sizeof(int)), "cannot allocate d_adjacencyList2");
        ck(cudaMalloc(&d_edgesOffset2[i], G.numVertices * sizeof(int)), "cannot allocate d_edgesOffset2");
        ck(cudaMalloc(&d_edgesSize2[i], G.numVertices * sizeof(int)), "cannot allocate d_edgesSize2");
        ck(cudaMalloc(&d_distance2[i], G.numVertices * sizeof(int)), "cannot allocate d_distance2");
        ck(cudaMalloc(&d_parent2[i], G.numVertices * sizeof(int)), "cannot allocate d_parent2");
        ck(cudaMalloc(&d_currentQueue2[i], G.numVertices * sizeof(int)), "cannot allocate d_currentQueue2");
        ck(cudaMalloc(&d_degrees2[i], G.numVertices * sizeof(int)), "cannot allocate d_degrees2");
        ck(cudaMallocHost((void **) &incrDegrees2[i], sizeof(int) * G.numVertices), "cannot allocate memory");
        // Next-frontier queue holds one partition per device; 2*numVertices
        // matches the original allocation.
        ck(cudaMalloc(&d_nextQueue2[i], 2 * G.numVertices * sizeof(int)), "cannot allocate d_nextQueue2");
        ck(cudaMemcpy(d_adjacencyList2[i], G.adjacencyList.data(), G.numEdges * sizeof(int), cudaMemcpyHostToDevice),
           "cannot copy to d_adjacencyList2");
        ck(cudaMemcpy(d_edgesOffset2[i], G.edgesOffset.data(), G.numVertices * sizeof(int), cudaMemcpyHostToDevice),
           "cannot copy to d_edgesOffset2");
        ck(cudaMemcpy(d_edgesSize2[i], G.edgesSize.data(), G.numVertices * sizeof(int), cudaMemcpyHostToDevice),
           "cannot copy to d_edgesSize2");
    }
}
// void finalizeCuda() {
// //free memory
// checkError(cuMemFree(d_adjacencyList), "cannot free memory for d_adjacencyList");
// checkError(cuMemFree(d_edgesOffset), "cannot free memory for d_edgesOffset");
// checkError(cuMemFree(d_edgesSize), "cannot free memory for d_edgesSize");
// checkError(cuMemFree(d_distance), "cannot free memory for d_distance");
// checkError(cuMemFree(d_parent), "cannot free memory for d_parent");
// checkError(cuMemFree(d_currentQueue), "cannot free memory for d_parent");
// checkError(cuMemFree(d_nextQueue), "cannot free memory for d_parent");
// checkError(cuMemFreeHost(incrDegrees), "cannot free memory for incrDegrees");
// }
// Verify a BFS result: compare every computed distance against the
// reference distances from the sequential CPU run, and abort the program
// on the first mismatch (printing vertex, got, expected).
void checkOutput(std::vector<int> &distance, std::vector<int> &expectedDistance, Graph &G) {
    for (int v = 0; v < G.numVertices; ++v) {
        if (distance[v] == expectedDistance[v])
            continue;
        printf("%d %d %d\n", v, distance[v], expectedDistance[v]);
        printf("Wrong output!\n");
        exit(1);
    }
    printf("Output OK!\n\n");
}
// void initializeCudaBfs(int startVertex, std::vector<int> &distance, std::vector<int> &parent, Graph &G) {
// //initialize values
// std::fill(distance.begin(), distance.end(), std::numeric_limits<int>::max());
// std::fill(parent.begin(), parent.end(), std::numeric_limits<int>::max());
// distance[startVertex] = 0;
// parent[startVertex] = 0;
// checkError(cuMemcpyHtoD(d_distance, distance.data(), G.numVertices * sizeof(int)),
// "cannot copy to d)distance");
// checkError(cuMemcpyHtoD(d_parent, parent.data(), G.numVertices * sizeof(int)),
// "cannot copy to d_parent");
// int firstElementQueue = startVertex;
// cuMemcpyHtoD(d_currentQueue, &firstElementQueue, sizeof(int));
// }
// Reset BFS state on every device: all distances/parents start at INT_MAX,
// the start vertex gets distance 0, and the start vertex is seeded into the
// current queue of the device that owns it (per getDev).
void initializeCudaBfs2(int startVertex, std::vector<int> &distance, std::vector<int> &parent, Graph &G) {
//initialize values
std::fill(distance.begin(), distance.end(), std::numeric_limits<int>::max());
std::fill(parent.begin(), parent.end(), std::numeric_limits<int>::max());
distance[startVertex] = 0;
parent[startVertex] = 0;
for(int i = 0; i < DeviceNum; ++i)
{
cudaSetDevice(i);
// NOTE(review): the (call, "message") comma expression discards the
// cudaError_t — this looks like a stripped checkError() wrapper. The
// string literal has no effect; errors here go unnoticed. TODO restore
// real error checking.
(cudaMemcpy(d_distance2[i], distance.data(), G.numVertices * sizeof(int), cudaMemcpyHostToDevice),
"cannot copy to d)distance multi");
(cudaMemcpy(d_parent2[i], parent.data(), G.numVertices * sizeof(int), cudaMemcpyHostToDevice),
"cannot copy to d_parent multi");
// Only the owning device's queue receives the start vertex; the other
// devices begin with an empty frontier.
if(getDev(startVertex) == i){
int firstElementQueue = startVertex;
cudaMemcpy(d_currentQueue2[i], &firstElementQueue, sizeof(int), cudaMemcpyHostToDevice);
}
}
}
// Gather the BFS result from all devices. Each GPU holds its own copy of the
// distance array (only partially updated by its share of the work), so the
// final distance of a vertex is the elementwise minimum over all per-device
// copies.
// Fix: the original hard-coded exactly two devices (d_distance2[0] and
// d_distance2[1]); this version iterates over DeviceNum, matching the rest
// of the file.
void finalizeCudaBfs(std::vector<int> &distance, std::vector<int> &parent, Graph &G) {
    //copy memory from device
    std::vector<int> deviceCopy(distance.size());
    std::fill(distance.begin(), distance.end(), std::numeric_limits<int>::max());
    for (int dev = 0; dev < DeviceNum; ++dev) {
        cudaMemcpy(deviceCopy.data(), d_distance2[dev], G.numVertices * sizeof(int), cudaMemcpyDeviceToHost);
        for (int i = 0; i < G.numVertices; ++i) {
            if (deviceCopy[i] < distance[i])
                distance[i] = deviceCopy[i];
        }
    }
    // parent is intentionally not copied back (see the commented-out copy in
    // the original source); only distances are verified by checkOutput.
}
// void runCudaSimpleBfs(int startVertex, Graph &G, std::vector<int> &distance,
// std::vector<int> &parent) {
// initializeCudaBfs(startVertex, distance, parent, G);
// int *changed;
// checkError(cuMemAllocHost((void **) &changed, sizeof(int)), "cannot allocate changed");
// //launch kernel
// printf("Starting simple parallel bfs.\n");
// auto start = std::chrono::steady_clock::now();
// *changed = 1;
// int level = 0;
// while (*changed) {
// *changed = 0;
// void *args[] = {&G.numVertices, &level, &d_adjacencyList, &d_edgesOffset, &d_edgesSize, &d_distance, &d_parent,
// &changed};
// checkError(cuLaunchKernel(cuSimpleBfs, G.numVertices / 1024 + 1, 1, 1,
// 1024, 1, 1, 0, 0, args, 0),
// "cannot run kernel simpleBfs");
// cuCtxSynchronize();
// level++;
// }
// auto end = std::chrono::steady_clock::now();
// long duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// printf("Elapsed time in milliseconds : %li ms.\n", duration);
// finalizeCudaBfs(distance, parent, G);
// }
// Multi-GPU "simple" BFS: each level, every device scans its slice
// [begin, end) of the vertex set with the multiBfs kernel; the managed flag
// *changed tells the host whether any vertex was relaxed and another level
// is needed.
// Fixes: printf of size_t used %d (undefined behavior on LP64) -> %zu, and
// the managed allocation of `changed` was leaked -> cudaFree at the end.
void runCudaSimpleBfsMulti(int startVertex, Graph &G, std::vector<int> &distance,
std::vector<int> &parent) {
initializeCudaBfs2(startVertex, distance, parent, G);
// Managed memory so the host and every device can read/write the flag.
int *changed;
cudaMallocManaged(&changed, 1 * sizeof(int));
//launch kernel
printf("Starting simple parallel bfs.\n");
// Enlarge the device printf FIFO; multiBfs apparently prints heavily.
// NOTE(review): cudaDeviceSetLimit applies to the current device only —
// with DeviceNum > 1 only one device gets the enlarged FIFO. TODO confirm.
size_t temp = 1;
cudaDeviceGetLimit(&temp, cudaLimitPrintfFifoSize);
printf("limit=%zu\n", temp);
cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 11474836);
cudaDeviceGetLimit(&temp, cudaLimitPrintfFifoSize);
printf("limit=%zu\n", temp);
auto start = std::chrono::steady_clock::now();
*changed = 1;
int level = 0;
while (*changed) {
*changed = 0;
printf("level=%d\n", level);
for(int i = 0; i < DeviceNum; ++i)
{
// Partition vertices evenly; the last device takes the remainder.
int part = G.numVertices / DeviceNum;
printf("part: %d\n", part);
int begin = i * part;
int end = (i + 1) * part;
if(i == DeviceNum - 1)
end = G.numVertices;
int deviceid = i;
printf("device id :%d\n", i);
cudaSetDevice(i);
printf("block=%d thread=%d\n", G.numVertices / 1024 + 1, 1024 / DeviceNum);
multiBfs <<<G.numVertices / 1024 + 1, 1024 / DeviceNum, 0, 0>>>(begin, end, G.numVertices,
level, deviceid, d_adjacencyList2[i],
d_edgesOffset2[i], d_edgesSize2[i], d_distance2, d_parent2,
changed);
}
// Wait for every device before the host inspects *changed.
for(int i = 0; i < DeviceNum; ++i)
{
cudaSetDevice(i);
cudaDeviceSynchronize();
}
level++;
}
auto end = std::chrono::steady_clock::now();
long duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
printf("Elapsed time in milliseconds : %li ms.\n", duration);
cudaFree(changed);  // was leaked in the original
finalizeCudaBfs(distance, parent, G);
}
// hkz
// hkz
// Multi-GPU queue-based BFS. Each device processes its own frontier queue
// with the queueBfs kernel; newly discovered vertices destined for device j
// are written into device i's d_nextQueue2 at offset j * numVertices, then
// exchanged between devices with cudaMemcpyPeer before the next level.
void runCudaQueueBfs(int startVertex, Graph &G, std::vector<int> &distance,
std::vector<int> &parent) {
initializeCudaBfs2(startVertex, distance, parent, G);
//int *nextQueueSize;
//checkError(cuMemAllocHost((void **) &nextQueueSize, sizeof(int)), "cannot allocate nextQueueSize");
//launch kernel
printf("Starting queue parallel bfs.\n");
auto start = std::chrono::steady_clock::now();
//int queueSize = 1;
//*nextQueueSize = 0;
// Seed: only the device owning startVertex begins with a non-empty queue;
// all nextQueueSize counters start at zero.
for(int i = 0; i < DeviceNum; ++i)
{
if(getDev(startVertex) == i)
{
queueSize[i] = 1;
}
else queueSize[i] = 0;
for(int j = 0; j < DeviceNum; ++j)
{
nextQueueSize[i][j] = 0;
}
}
int level = 0;
// NOTE(review): the loop condition and the two queueSize updates below
// hard-code DeviceNum == 2 — TODO generalize to a loop over devices.
while (queueSize[0] + queueSize[1] > 0) {
//void *args[] = {&level, &d_adjacencyList, &d_edgesOffset, &d_edgesSize, &d_distance, &d_parent, &queueSize,
// &nextQueueSize, &d_currentQueue, &d_nextQueue};
//checkError(cuLaunchKernel(cuQueueBfs, queueSize / 1024 + 1, 1, 1,
// 1024, 1, 1, 0, 0, args, 0),
// "cannot run kernel queueBfs");
//cuCtxSynchronize();
// printf("level=%d q0=%d q1=%d\n", level, queueSize[0], queueSize[1]);
// Expand each device's current frontier.
for(int i = 0; i < DeviceNum; ++i)
{
cudaSetDevice(i);
queueBfs <<<queueSize[i] / 1024 + 1, 1024 >>> (level, d_adjacencyList2[i], d_edgesOffset2[i], d_edgesSize2[i], d_distance2[i],
d_parent2[i], queueSize[i],
nextQueueSize[i],
d_currentQueue2[i],
d_nextQueue2[i],
G.numVertices / DeviceNum, i, G.numVertices);
}
// All devices must finish before the host reads nextQueueSize and moves
// queues around.
for(int i = 0; i < DeviceNum; ++i)
{
cudaSetDevice(i);
cudaDeviceSynchronize();
}
level++;
// cudaMemcpy(d_currentQueue2[0], )
// Exchange next-level frontiers: device i's new current queue is the
// concatenation, over all source devices j, of the vertices j produced
// for i (segment starting at d_nextQueue2[j][i * numVertices]).
// nextQueueSize[j][i] presumably counts vertices produced on device j
// destined for device i — verify against the queueBfs kernel.
for(int i = 0; i < DeviceNum; ++i)
{
int offset = 0;
for(int j = 0; j < DeviceNum; ++j)
{
// cudaMemcpy(d_currentQueue2[i] + offset, &(d_nextQueue2[j][i * G.numVertices]),
// nextQueueSize[i][j] * sizeof(int),
// cudaMemcpyDefault);
// printf("copying %d\n", nextQueueSize[i][j]);
cudaMemcpyPeer(d_currentQueue2[i] + offset, i, &(d_nextQueue2[j][i * G.numVertices]), j,
nextQueueSize[j][i] * sizeof(int));
offset += nextQueueSize[j][i];
// offset += nextQueueSize[i][j];
}
}
// multiBfs <<<G.numVertices / 1024 + 1, 1024 / DeviceNum, 0, 0>>>(begin, end, G.numVertices,
// level, deviceid, d_adjacencyList2[i],
// d_edgesOffset2[i], d_edgesSize2[i], d_distance2, d_parent2,
// changed);
queueSize[0] = nextQueueSize[0][0] + nextQueueSize[1][0];
queueSize[1] = nextQueueSize[0][1] + nextQueueSize[1][1];
// NOTE(review): cudaMemset on nextQueueSize only works if it was
// allocated as device-accessible (managed/pinned-mapped) memory, yet it
// is also read directly on the host above — confirm its allocation site.
cudaMemset(nextQueueSize, 0, DeviceNum*DeviceNum*sizeof(int));
// std::swap(d_currentQueue, d_nextQueue);
}
auto end = std::chrono::steady_clock::now();
long duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
printf("Elapsed time in milliseconds : %li ms.\n", duration);
finalizeCudaBfs(distance, parent, G);
}
// void runCudaSimpleBfs(int startVertex, Graph &G, std::vector<int> &distance,
// std::vector<int> &parent) {
// initializeCudaBfs(startVertex, distance, parent, G);
// int *changed;
// checkError(cuMemAllocHost((void **) &changed, sizeof(int)), "cannot allocate changed");
// //launch kernel
// printf("Starting simple parallel bfs.\n");
// auto start = std::chrono::steady_clock::now();
// *changed = 1;
// int level = 0;
// while (*changed) {
// *changed = 0;
// for(int i = 0; i < DeviceNum; ++i)
// {
// int part = G.numVertices / DeviceNum;
// int begin = i * part;
// int end = (i + 1) * part;
// void *args[] = {&G.numVertices, &level, &d_adjacencyList[i], &d_edgesOffset[i], &d_edgesSize[i], &d_distance[i], &d_parent[i],
// &changed};
// checkError(cuLaunchKernel(cuSimpleBfs, G.numVertices / 1024 + 1, 1, 1,
// 1024, 1, 1, 0, 0, args, 0),
// "cannot run kernel simpleBfs");
// }
// for(int i = 0; i < DeviceNum; ++i)
// cuCtxSynchronize();
// level++;
// }
// auto end = std::chrono::steady_clock::now();
// long duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// printf("Elapsed time in milliseconds : %li ms.\n", duration);
// finalizeCudaBfs(distance, parent, G);
// }
// void runCudaQueueBfs(int startVertex, Graph &G, std::vector<int> &distance,
// std::vector<int> &parent) {
// initializeCudaBfs(startVertex, distance, parent, G);
// int *nextQueueSize;
// checkError(cuMemAllocHost((void **) &nextQueueSize, sizeof(int)), "cannot allocate nextQueueSize");
// //launch kernel
// printf("Starting queue parallel bfs.\n");
// auto start = std::chrono::steady_clock::now();
// int queueSize = 1;
// *nextQueueSize = 0;
// int level = 0;
// while (queueSize) {
// void *args[] = {&level, &d_adjacencyList, &d_edgesOffset, &d_edgesSize, &d_distance, &d_parent, &queueSize,
// &nextQueueSize, &d_currentQueue, &d_nextQueue};
// checkError(cuLaunchKernel(cuQueueBfs, queueSize / 1024 + 1, 1, 1,
// 1024, 1, 1, 0, 0, args, 0),
// "cannot run kernel queueBfs");
// cuCtxSynchronize();
// level++;
// queueSize = *nextQueueSize;
// *nextQueueSize = 0;
// std::swap(d_currentQueue, d_nextQueue);
// }
// auto end = std::chrono::steady_clock::now();
// long duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// printf("Elapsed time in milliseconds : %li ms.\n", duration);
// finalizeCudaBfs(distance, parent, G);
// }
// void nextLayer(int level, int queueSize) {
// void *args[] = {&level, &d_adjacencyList, &d_edgesOffset, &d_edgesSize, &d_distance, &d_parent, &queueSize,
// &d_currentQueue};
// checkError(cuLaunchKernel(cuNextLayer, queueSize / 1024 + 1, 1, 1,
// 1024, 1, 1, 0, 0, args, 0),
// "cannot run kernel cuNextLayer");
// cuCtxSynchronize();
// }
// void countDegrees(int level, int queueSize) {
// void *args[] = {&d_adjacencyList, &d_edgesOffset, &d_edgesSize, &d_parent, &queueSize,
// &d_currentQueue, &d_degrees};
// checkError(cuLaunchKernel(cuCountDegrees, queueSize / 1024 + 1, 1, 1,
// 1024, 1, 1, 0, 0, args, 0),
// "cannot run kernel cuNextLayer");
// cuCtxSynchronize();
// }
// void scanDegrees(int queueSize) {
// //run kernel so every block in d_currentQueue has prefix sums calculated
// void *args[] = {&queueSize, &d_degrees, &incrDegrees};
// checkError(cuLaunchKernel(cuScanDegrees, queueSize / 1024 + 1, 1, 1,
// 1024, 1, 1, 0, 0, args, 0), "cannot run kernel scanDegrees");
// cuCtxSynchronize();
// //count prefix sums on CPU for ends of blocks exclusive
// //already written previous block sum
// incrDegrees[0] = 0;
// for (int i = 1024; i < queueSize + 1024; i += 1024) {
// incrDegrees[i / 1024] += incrDegrees[i / 1024 - 1];
// }
// }
// void assignVerticesNextQueue(int queueSize, int nextQueueSize) {
// void *args[] = {&d_adjacencyList, &d_edgesOffset, &d_edgesSize, &d_parent, &queueSize, &d_currentQueue,
// &d_nextQueue, &d_degrees, &incrDegrees, &nextQueueSize};
// checkError(cuLaunchKernel(cuAssignVerticesNextQueue, queueSize / 1024 + 1, 1, 1,
// 1024, 1, 1, 0, 0, args, 0),
// "cannot run kernel assignVerticesNextQueue");
// cuCtxSynchronize();
// }
// void runCudaScanBfs(int startVertex, Graph &G, std::vector<int> &distance,
// std::vector<int> &parent) {
// initializeCudaBfs(startVertex, distance, parent, G);
// //launch kernel
// printf("Starting scan parallel bfs.\n");
// auto start = std::chrono::steady_clock::now();
// int queueSize = 1;
// int nextQueueSize = 0;
// int level = 0;
// while (queueSize) {
// // next layer phase
// nextLayer(level, queueSize);
// // counting degrees phase
// countDegrees(level, queueSize);
// // doing scan on degrees
// scanDegrees(queueSize);
// nextQueueSize = incrDegrees[(queueSize - 1) / 1024 + 1];
// // assigning vertices to nextQueue
// assignVerticesNextQueue(queueSize, nextQueueSize);
// level++;
// queueSize = nextQueueSize;
// std::swap(d_currentQueue, d_nextQueue);
// }
// auto end = std::chrono::steady_clock::now();
// long duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// printf("Elapsed time in milliseconds : %li ms.\n", duration);
// finalizeCudaBfs(distance, parent, G);
// }
// Entry point. Usage: <program> <startVertex> <graphFile>
// Reads the graph, runs the sequential CPU BFS as a reference, then runs the
// multi-GPU queue BFS and verifies the distances agree.
// Fix: argv[1]/argv[2] were read without checking argc (out-of-bounds read
// when arguments are missing).
int main(int argc, char **argv) {
    if (argc < 3) {
        fprintf(stderr, "usage: %s <startVertex> <graphFile>\n", argv[0]);
        return 1;
    }
    // read graph from standard input
    int startVertex = atoi(argv[1]);
    readGraphFromFile(G, argc, argv);
    printf("Number of vertices %d\n", G.numVertices);
    printf("Number of edges %d\n\n", G.numEdges);
    //vectors for results
    std::vector<int> distance(G.numVertices, std::numeric_limits<int>::max());
    std::vector<int> parent(G.numVertices, std::numeric_limits<int>::max());
    std::vector<bool> visited(G.numVertices, false);
    //run CPU sequential bfs
    runCpu(startVertex, G, distance, parent, visited);
    //save results from sequential bfs
    std::vector<int> expectedDistance(distance);
    std::vector<int> expectedParent(parent);
    initCuda2(G);
    //run CUDA simple parallel bfs
    // runCudaSimpleBfs(startVertex, G, distance, parent);
    // checkOutput(distance, expectedDistance, G);
    // runCudaSimpleBfsMulti(startVertex, G, distance, parent);
    // checkOutput(distance, expectedDistance, G);
    // //run CUDA queue parallel bfs
    runCudaQueueBfs(startVertex, G, distance, parent);
    checkOutput(distance, expectedDistance, G);
    // //run CUDA scan parallel bfs
    // runCudaScanBfs(startVertex, G, distance, parent);
    // checkOutput(distance, expectedDistance, G);
    // finalizeCuda();
    return 0;
}
// Load an undirected graph from the edge-list file argv[2] (format: first
// line "n m", then m lines "u v") and build CSR-style arrays in G:
// edgesOffset, edgesSize and a flat adjacencyList. Each input edge is added
// in both directions.
void readGraphFromFile(Graph &G, int argc, char **argv) {
printf("%s\n", argv[2]);
std::ifstream f;
// f.is_open();
// NOTE(review): ifstream::open does not throw unless exceptions are enabled
// via f.exceptions(...), so this try/catch is dead code; failure is only
// detected by the f.good() check below.
try {
f.open(argv[2]);
}
catch (std::ios_base::failure& e) {
std::cerr << e.what() <<" open file error \n";
}
//assert(fin.isopen());
int n, m;
// std::string line = "dsadsa";
if(f.good())
{
f>>n>>m;
printf("nodes num: %d\n", n);
std::vector<std::vector<int> > adjecancyLists(n);
printf("edge num: %d\n", m);
int cnt = 0;
int mmax = -1;
for (int i = 0; i < m; i++) {
int u, v;
// printf("%d\n", cnt);
f >> u >> v;
// if(v == 319 || v == 320)
// printf("%d %d\n", u, v);
// if(u > mmax) mmax = u;
// if(v > mmax) mmax = v;
// NOTE(review): u and v are not range-checked; an edge with u or v
// outside [0, n) is undefined behavior here. TODO validate input.
adjecancyLists[u].push_back(v);
adjecancyLists[v].push_back(u);
// ++cnt;
}
// printf("%d\n", mmax);
// exit(0);
// Flatten the per-vertex lists into CSR arrays.
for (int i = 0; i < n; i++) {
G.edgesOffset.push_back(G.adjacencyList.size());
G.edgesSize.push_back(adjecancyLists[i].size());
for (auto &edge: adjecancyLists[i]) {
G.adjacencyList.push_back(edge);
}
}
G.numVertices = n;
G.numEdges = G.adjacencyList.size();
printf("finish load graph\n");
}
else printf("not open %s\n", argv[2]);
}
// Build G either from stdin (first line "n m", then m edges "u v") or, when
// extra arguments are given, from a seeded random generator with
// n = atoi(argv[2]) vertices and m = atoi(argv[3]) edges.
void readGraph(Graph &G, int argc, char **argv) {
int n;
int m;
//If no arguments then read graph from stdin
bool fromStdin = argc <= 2;
if (fromStdin) {
scanf("%d %d", &n, &m);
} else {
// Fixed seed keeps the generated graph reproducible across runs.
srand(12345);
n = atoi(argv[2]);
m = atoi(argv[3]);
}
std::vector<std::vector<int> > adjecancyLists(n);
for (int i = 0; i < m; i++) {
int u, v;
if (fromStdin) {
// NOTE(review): the stdin branch adds only u->v (directed) while the
// random branch adds both directions (undirected) — confirm this
// asymmetry is intentional.
scanf("%d %d", &u, &v);
adjecancyLists[u].push_back(v);
} else {
u = rand() % n;
v = rand() % n;
adjecancyLists[u].push_back(v);
adjecancyLists[v].push_back(u);
}
}
// Flatten the per-vertex lists into CSR arrays.
for (int i = 0; i < n; i++) {
G.edgesOffset.push_back(G.adjacencyList.size());
G.edgesSize.push_back(adjecancyLists[i].size());
for (auto &edge: adjecancyLists[i]) {
G.adjacencyList.push_back(edge);
}
}
G.numVertices = n;
G.numEdges = G.adjacencyList.size();
}
// Sequential reference BFS over G's CSR arrays starting at `start`.
// Fills distance[] with hop counts and marks visited[].
void bfsCPU(int start, Graph &G, std::vector<int> &distance,
std::vector<int> &parent, std::vector<bool> &visited) {
distance[start] = 0;
parent[start] = start;
visited[start] = true;
std::queue<int> Q;
Q.push(start);
while (!Q.empty()) {
int u = Q.front();
Q.pop();
// Neighbors of u occupy adjacencyList[edgesOffset[u] .. +edgesSize[u]).
for (int i = G.edgesOffset[u]; i < G.edgesOffset[u] + G.edgesSize[u]; i++) {
int v = G.adjacencyList[i];
if (!visited[v]) {
visited[v] = true;
distance[v] = distance[u] + 1;
// NOTE(review): this stores the adjacency-list index i, not the
// parent vertex u, while parent[start] above stores a vertex —
// confirm which semantics callers expect.
parent[v] = i;
Q.push(v);
}
}
}
}
|
8,390 | //Example 3.2.1
#include <stdio.h>
// Classic hello-world: write the greeting to stdout and exit successfully.
int main(void) {
    printf("Hello, World!");
    return 0;
}
8,391 | #include "includes.h"
// Element-wise copy of an nx-by-ny array stored with stride ny
// (index ix*ny + iy). Despite the name, every in-bounds element is copied,
// not just columns. Expected launch: 2D grid covering (nx, ny).
__global__ void copy_columns(float* input, float* output, const int nx, const int ny)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    if (ix >= nx || iy >= ny)
        return;  // guard: grid may overhang the array
    const int idx = ix * ny + iy;
    output[idx] = input[idx];
}
8,392 | #include "includes.h"
// Deliberately empty kernel, usable as a stream/profiling marker launch.
__global__ void emptyMarkerKernel() {}
8,393 | #include <stdio.h>
#include <stdlib.h>
#define MYDEBUG
#ifdef MYDEBUG
#define DEBUG_PRINT printf("here: %d\n", __LINE__); fflush(stdout);
#else
#define DEBUG_PRINT
#endif
#define SIZE 10240
// b[i] = a[i] + (a[i] >> 1), i.e. the input plus its half (integer shift),
// for every i in [0, size). One element per thread, 1D launch.
__global__ void MyKernel(int *a, int *b, int size)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= size)
        return;  // grid tail guard
    b[tid] = a[tid] + (a[tid] >> 1);
}
// Demo of unified (managed) memory: fill d_a with random values, run
// MyKernel to compute d_b[i] = d_a[i] + (d_a[i] >> 1), then sum both arrays
// on the host and print the sums.
int main()
{
int i;
int size = SIZE;
int BlockSize = 256;
// Ceil-divide so the grid covers all `size` elements.
int BlockNum = (size + BlockSize - 1) / BlockSize;
int *d_a, *d_b;
int sum_a, sum_b;
sum_a = sum_b = 0;
DEBUG_PRINT
// Managed allocations: directly readable/writable from both host and device.
cudaMallocManaged((void **)&d_a, size*sizeof(int));
cudaMallocManaged((void **)&d_b, size*sizeof(int));
DEBUG_PRINT
for(i = 0; i < size; i++) {
d_a[i] = rand() % 100;
sum_a += d_a[i];
}
DEBUG_PRINT
MyKernel<<<BlockNum, BlockSize>>>(d_a, d_b, size);
// Required before the host touches d_b: the launch is asynchronous.
cudaDeviceSynchronize();
DEBUG_PRINT
for(i = 0; i < size; i++)
sum_b += d_b[i];
DEBUG_PRINT
cudaFree(d_a);
cudaFree(d_b);
DEBUG_PRINT
printf("sum_a: %d, sum_b: %d\n", sum_a, sum_b);
return 0;
}
|
8,394 | #include "includes.h"
// Subtract *val from each of the first 6 entries of arr, skipping entries
// that are <= -1 (left untouched). NOTE: the array length 6 is hard-coded.
__global__ void modifyArrayKernel(int *val, int *arr){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= 6)
        return;
    if (arr[idx] > -1)
        arr[idx] -= *val;
}
8,395 | /**
File name: bfs_cpu_status_array.cu
Author: Yuede Ji
Last update: 11:00 10-09-2015
Description: Using status array to implent CPU version of bfs.
Calculate the shortest distance from 0 to others
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//Using arrays to implement queue
char filein[] = "/home/yuede/dataset/kron_16_16.dat";// no need
char fileout[] = "/home/yuede/dataset/kron_16_16.cpu.as.result";
char file_v_e[] = "/home/yuede/dataset/kron_16_16.v_e";
char file_beg_pos[] = "/home/yuede/dataset/kron_16_16.beg.pos";
char file_csr[] = "/home/yuede/dataset/kron_16_16.csr";
/**int *beg_pos;
int *csr;
int *sa;
**/
const int v_num = 65535;
const int e_num = 2097152;
const int INF = 0x7FFFFFFF;
int beg_pos[v_num+1];
int csr[e_num];
int sa[v_num];
//load from .dat files, and store in array csr[N*N], beg_pos[N]
int csr_begin(int v, int e)
{
/**
int v, e;
FILE * fp_v_e = fopen(file_v_e, "r");
fscanf(fp_v_e, "%d%d", &v, &e);
fclose(fp_v_e);
beg_pos = (int *) malloc(v+1);
csr = (int *) malloc(e);
**/
FILE * fp_beg = fopen(file_beg_pos, "r");
int i = 0;
int p;
while(fscanf(fp_beg, "%d", &p) != EOF)
{
beg_pos[i] = p;
++i;
}
fclose(fp_beg);
i = 0;
FILE * fp_csr = fopen(file_csr, "r");
while(fscanf(fp_csr, "%d", &p) != EOF)
{
csr[i] = p;
++i;
}
fclose(fp_csr);
printf("i=%d\n", i);
return v;
}
// Status-array BFS from `root` over the global CSR graph (beg_pos/csr).
// On return, sa[i] holds the BFS level (shortest hop distance) of vertex i,
// or INF for vertices unreachable from root.
// Fix: the original ignored the `root` parameter and always seeded sa[0].
void bfs_sa(int root, int v)
{
    for(int i=0; i<v; ++i)
        sa[i] = INF;
    int count = 1;      // vertices whose level is already fixed (root counts)
    int level = 0;
    sa[root] = 0;       // was hard-coded sa[0] = 0, ignoring `root`
    bool flag;          // whether the current level contained any vertex
    while(count < v)
    {
        flag = false;
        for(int i=0; i<v; ++i)
        {
            if(sa[i] == level)  // vertex i belongs to the current frontier
            {
                flag = true;
                for(int j=beg_pos[i]; j<beg_pos[i+1]; ++j)
                {
                    // Skip neighbors already discovered at this level or earlier.
                    if(sa[csr[j]] <= level + 1)
                        continue;
                    sa[csr[j]] = level + 1;
                    ++count;
                }
            }
        }
        ++level;
        if(!flag)   // empty frontier: remaining vertices are unreachable
            break;
    }
}
// Run the CPU status-array BFS from vertex 0 on the kron_16_16 dataset and
// dump each vertex's level to `fileout`, one per line.
// Fix: fopen result was unchecked (NULL dereference on failure).
int main()
{
    csr_begin(v_num, e_num);
    bfs_sa(0, v_num);
    FILE *fp_out = fopen(fileout, "w");
    if (fp_out == NULL) {
        fprintf(stderr, "cannot open %s for writing\n", fileout);
        return 1;
    }
    for(int i=0; i<v_num; ++i)
        fprintf(fp_out, "%d\n", sa[i]);
    fclose(fp_out);
    return 0;
}
|
8,396 | #include "includes.h"
// Scale each 3-component acceleration by the diagonal mass-matrix entries
// (rmassx/rmassy/rmassz) and add the two_omega_earth coupling term between
// the x and y velocity components; z gets no coupling term.
// Launch: 1D/2D grid flattened to one index per 3-vector, id in [0, size).
__global__ void update_accel_elastic_kernel(float * accel, const float * veloc, const int size, const float two_omega_earth, const float * rmassx, const float * rmassy, const float * rmassz){
    const int id = threadIdx.x + blockIdx.x * blockDim.x
                 + blockIdx.y * (gridDim.x * blockDim.x);
    if (id >= size)
        return;
    const int base = (id) * (3);
    accel[base]     = (accel[base])     * (rmassx[id]) + (two_omega_earth) * (veloc[base + 1]);
    accel[base + 1] = (accel[base + 1]) * (rmassy[id]) - ((two_omega_earth) * (veloc[base]));
    accel[base + 2] = (accel[base + 2]) * (rmassz[id]);
}
8,397 |
#include "type.cuh"
void FindCoeffs(IPTR pop, Population *p);
// Apply linear fitness scaling to the whole population:
// scaledFitness = scaleConstA * fitness + scaleConstB for each individual,
// accumulating the total into p->scaledSumFitness.
void Scalepop(IPTR pop, Population *p)
{
/* linearly scale the population */
IPTR pj;
int i;
// Recompute scaleConstA/scaleConstB from the current fitness statistics.
FindCoeffs(pop, p);
p->scaledSumFitness = 0.0;
for(i = 0; i < p->popsize; i++){
pj = &pop[i];
pj->scaledFitness = p->scaleConstA * pj->fitness + p->scaleConstB;
p->scaledSumFitness += pj->scaledFitness;
}
}
// Find coefficients scaleConstA and scaleConstB for linear scaling
// (f_scaled = scaleConstA * f_raw + scaleConstB). The primary form keeps
// the scaled minimum non-negative; otherwise an alternate form anchored at
// the minimum is used. When the population has converged (d ~ 0) the
// identity mapping A=1, B=0 is used.
// Fix: the original divided by d *before* the near-zero convergence check,
// risking a division by (almost) zero; the check now comes first.
void FindCoeffs(IPTR pop, Population *p)
{
    double d;
    /* nonzero only if the scaled minimum stays non-negative */
    int nonNegativeMin = p->min > (p->scaleFactor * p->avg - p->max) /
                                  (p->scaleFactor - 1.0);
    d = nonNegativeMin ? (p->max - p->avg) : (p->avg - p->min);
    if (d < 0.00001 && d > -0.00001) { /* converged: avoid dividing by ~0 */
        p->scaleConstA = 1.0;
        p->scaleConstB = 0.0;
        return;
    }
    if (nonNegativeMin) {
        p->scaleConstA = (p->scaleFactor - 1.0) * p->avg / d;
        p->scaleConstB = p->avg * (p->max - (p->scaleFactor * p->avg)) / d;
    } else { /* smin would become negative on scaling */
        p->scaleConstA = p->avg / d;
        p->scaleConstB = -p->min * p->avg / d;
    }
}
|
8,398 | #include "includes.h"
char* concat(char *s1, char *s2);
// x[i] += alpha * r[i] for i in [0, size), where the scalar alpha is
// broadcast from r_squared[0] (every thread reads the same element).
__global__ void x_calculation(float * x ,float * r,float * r_squared ,int size)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size)
        return;
    const float alpha = r_squared[0];
    x[i] += alpha * r[i];
}
8,399 | # include <stdio.h>
# include <assert.h>
# include <cuda.h>
# include <time.h>
// CPU reference: add 1.0f to every element of a[0..N).
void incrementArrayOnHost(float *a, int N){
    for (int idx = 0; idx < N; ++idx)
        a[idx] += 1.f;
}
__global__ void incrementArrayOnDevice(float *a, int N){// N is used to check idx
// GPU counterpart of incrementArrayOnHost: each thread increments one
// element of a by 1.0f. Expected launch: 1D grid of 1D blocks covering N.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
/*
idx is this thread's unique global index, built from the built-ins:
blockIdx (block index in the grid), blockDim (threads per block) and
threadIdx (thread index in the block).
*/
if(idx<N){
// Guard: the grid may contain more threads than array elements, so
// threads with idx >= N do no work.
a[idx] = a[idx] + 1.f;
}
}
/*
The function type qualifier __global__ declares a function as being an executable kernel on the CUDA device, which can only be called from the host.
All kernels must be declared with a return type void
*/
// Increment a 10M-element float array on both host and device, timing each
// with clock(), then verify the device result matches the host result
// element-for-element.
// Fix: cudaThreadSynchronize() is deprecated (removed in recent CUDA
// releases) — replaced with cudaDeviceSynchronize().
int main(void){
    float *a_h, *b_h;   // a_h: incremented on CPU; b_h: receives GPU result
    float *a_d;         // device array
    int i, N = 10000000;
    size_t size = N*sizeof(float);
    a_h = (float *)malloc(size);
    b_h = (float *)malloc(size);
    cudaMalloc((void**) &a_d, size);
    // initialize the host data
    for(i=0; i<N; i++){
        a_h[i] = (float) i;
    }
    // copy data from host to device
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    clock_t start_host = clock();
    // do calculation on host
    incrementArrayOnHost(a_h, N);
    printf("Time elapsed on host: %f milliseconds\n", (double)(clock() - start_host)/(CLOCKS_PER_SEC / 1000));
    // Launch config: ceil-divide so the grid covers all N elements.
    int blockSize = 400;
    int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
    clock_t start_device = clock();
    incrementArrayOnDevice <<< nBlocks, blockSize>>>(a_d,N);
    // The launch is asynchronous: wait for completion before stopping the
    // timer and reading results (was the deprecated cudaThreadSynchronize).
    cudaDeviceSynchronize();
    printf("Time elapsed on device: %f milliseconds\n", (double)(clock() - start_device)/(CLOCKS_PER_SEC / 1000));
    // retrieve result from device and store in b_h
    cudaMemcpy(b_h, a_d, size, cudaMemcpyDeviceToHost);
    // check result
    for(i=0; i<N;i++){
        assert(a_h[i] == b_h[i]);
    }
    // clean up
    free(a_h);
    free(b_h);
    cudaFree(a_d);
    return 0;
}
// Overall comments
/*
kernel calls are asynchronous -- after a kernel launch, control immediately returns to the host CPU. The kernel will run on the CUDA device once all previous CUDA calls have finished.
The asynchronous kernel call is a wonderful way to overlap computation on the host and device. In this example, the call to incrementArrayOnHost could be placed after the call to incrementArrayOnDevice to overlap computation on the host and device to get better performance. Depending on the amount of time the kernel takes to complete, it is possible for both host and device to compute simultaneously.
*/
|
8,400 | /*!
\file TestExternalKernel.cu
\author Andrew Kerr <arkerr@gatech.edu>
\brief implements tests for external kernel launching
*/
#include <stdio.h>
// Placeholder kernel writing A[i] = i*3. The external-kernel test harness is
// expected to substitute a replacement kernel that writes i*4 instead (hence
// the "this gets overridden" note and main's i*4 expectation).
extern "C" __global__ void testExternalKernel(int *A, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
A[i] = i * 3; // this gets overridden
}
}
// Driver for the external-kernel test: launch testExternalKernel over N
// elements and check that A[i] == i*4. The in-file kernel writes i*3, so the
// test only PASSES when the launch was intercepted and replaced by an
// external kernel writing i*4 — the mismatch with the kernel body is
// intentional, not a bug.
int main(int argc, char *arg[]) {
int N = 64;
size_t bytes = sizeof(int)*N;
int *A_host, *A_device;
A_host = (int *)malloc(bytes);
cudaMalloc((void **)&A_device, bytes);
// Ceil-divided 1D launch: one thread per element, warp-sized blocks.
testExternalKernel<<< dim3((N+31) / 32, 1), dim3(32, 1) >>>(A_device, N);
// Blocking copy: also synchronizes with the kernel before reading results.
cudaMemcpy(A_host, A_device, bytes, cudaMemcpyDeviceToHost);
int errors = 0;
for (int i = 0; !errors && i < N; i++) {
if (A_host[i] != i * 4) {
++errors;
}
}
free(A_host);
cudaFree(A_device);
if (errors) {
printf("Test FAILED\n");
}
else {
printf("Test PASSED\n");
}
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.