serial_no
int64 1
24.2k
| cuda_source
stringlengths 11
9.01M
|
|---|---|
1,301
|
#include <iostream>
#include "sys/time.h"
using namespace std;
// Convert two gettimeofday() samples into an elapsed time in seconds
// (microsecond resolution). `starttime` must precede `stopstime`.
double timeInSeconds (timeval& starttime, timeval& stopstime) {
const double sec = stopstime.tv_sec - starttime.tv_sec;
const double usec = stopstime.tv_usec - starttime.tv_usec;
return 1e-6 * (1e6 * sec + usec);
}
//__device__ double* dev_vector1 = 0;
//__device__ double* dev_vector2 = 0;
//__device__ double* dev_results = 0;
// Element-wise product: res[i] = v1[i] * v2[i], one element per thread.
// Intended launch: a single block with one thread per vector element
// (only threadIdx.x is used for indexing).
__global__ void device_vector_mult (double* v1, double* v2, double* res) {
// BUG FIX: the original multiplied by a stray factor of 2, which made the
// GPU dot product twice the CPU reference computed in main().
res[threadIdx.x] = v1[threadIdx.x] * v2[threadIdx.x];
}
// In-place parallel sum of results[0..*length-1], leaving the total in
// results[0]. `length` must be a DEVICE pointer to the element count.
// Intended launch: a single block whose thread count covers the vector.
__global__ void device_vector_reduce (double* results, int* length) {
int len = *length;
while (len > 1) {
// Threads in the lower half (rounded down) each add one element from the
// upper half (offset rounded up); with odd len the middle element is
// carried forward untouched into the next iteration.
if (threadIdx.x < (len - (len % 2)) / 2) {
results[threadIdx.x] += results[threadIdx.x + (len + (len % 2)) / 2];
}
len = (len + (len % 2)) / 2;
// Barrier before the next halving step so all partial sums are visible.
__syncthreads();
}
}
// Debug stub: writes the constant 2 into res[0]; it does NOT sum `vec`.
// Kept only as a placeholder -- the real reduction is device_vector_reduce.
__global__ void device_vector_simpleAdd (double* vec, double* res) {
if (threadIdx.x == 0) {
res[0] = 2;
}
}
// Computes the dot product of two random vectors three ways: fully on the CPU,
// GPU multiply + CPU sum, and fully on the GPU (multiply + reduce kernels),
// timing each stage. argv[1] (optional) overrides the vector length
// (default 100; must fit in a single CUDA block).
int main (int argc, char** argv) {
int sizeOfVector = 100;
if (argc > 1) sizeOfVector = atoi(argv[1]);
// Declare and fill host-side arrays of doubles.
double* vector1 = new double[sizeOfVector];
double* vector2 = new double[sizeOfVector];
double* results = new double[sizeOfVector];
double* gpuresults = new double[sizeOfVector];
double* gpuAddresults = new double[sizeOfVector];
srand(42);
for (int i = 0; i < sizeOfVector; ++i) {
vector1[i] = rand() % 100;
vector2[i] = rand() % 100;
results[i] = 0;
gpuresults[i] = 0;
gpuAddresults[i] = 0;
}
timeval startTime;
timeval interTime;
timeval stopsTime;
gettimeofday(&startTime, NULL);
// CPU reference: element-wise multiply.
for (int i = 0; i < sizeOfVector; ++i) {
results[i] = vector1[i] * vector2[i];
}
gettimeofday(&interTime, NULL);
double total = 0;
// CPU reference: sum of the products.
for (int i = 0; i < sizeOfVector; ++i) {
total += results[i];
}
gettimeofday(&stopsTime, NULL);
cout << "Dot product is: " << total << endl;
cout << "Time for multiplication (seconds): " << timeInSeconds(startTime, interTime) << endl;
cout << "Time for addition (seconds): " << timeInSeconds(interTime, stopsTime) << endl;
cout << "Overall time (seconds): " << timeInSeconds(startTime, stopsTime) << endl;
// Device buffers for the GPU variants.
double* dev_vector1 = 0;
double* dev_vector2 = 0;
double* dev_results = 0;
int sizeInBytes = sizeOfVector * sizeof(double);
cudaMalloc((void**) &dev_vector1, sizeInBytes);
cudaMalloc((void**) &dev_vector2, sizeInBytes);
cudaMalloc((void**) &dev_results, sizeInBytes);
cudaMemcpy(dev_vector1, vector1, sizeInBytes, cudaMemcpyHostToDevice);
cudaMemcpy(dev_vector2, vector2, sizeInBytes, cudaMemcpyHostToDevice);
gettimeofday(&startTime, NULL);
device_vector_mult<<<1, sizeOfVector>>>(dev_vector1, dev_vector2, dev_results);
double gputotal = 0;
cudaMemcpy(gpuresults, dev_results, sizeInBytes, cudaMemcpyDeviceToHost);
gettimeofday(&interTime, NULL);
for (int i = 0; i < sizeOfVector; ++i) {
gputotal += gpuresults[i];
}
gettimeofday(&stopsTime, NULL);
cout << "GPU-mult Dot product is: " << gputotal << endl;
cout << "GPU-mult Time for multiplication (seconds): " << timeInSeconds(startTime, interTime) << endl;
cout << "GPU-mult Time for addition (seconds): " << timeInSeconds(interTime, stopsTime) << endl;
cout << "GPU-mult Overall time (seconds): " << timeInSeconds(startTime, stopsTime) << endl;
double * dev_added = 0;
cudaMalloc((void**) &dev_added, sizeof(double));
//device_vector_simpleAdd<<<1, sizeOfVector>>>(dev_results, dev_added);
// BUG FIX: the reduce kernel dereferences its `length` argument on the
// device, but the original passed &sizeOfVector -- a HOST pointer, which is
// invalid device memory. Copy the length into a device-side int instead.
int* dev_length = 0;
cudaMalloc((void**) &dev_length, sizeof(int));
cudaMemcpy(dev_length, &sizeOfVector, sizeof(int), cudaMemcpyHostToDevice);
device_vector_reduce<<<1, sizeOfVector>>>(dev_results, dev_length);
double host_added = 2;
//cudaMemcpy(&host_added, &dev_added[0], sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&host_added, &dev_results[0], sizeof(double), cudaMemcpyDeviceToHost);
cout <<"GPU-full Dot product is: " << host_added << endl;
cout << "Size of Vectors is: " << sizeOfVector << endl;
// Release device and host memory (the original leaked everything).
cudaFree(dev_vector1);
cudaFree(dev_vector2);
cudaFree(dev_results);
cudaFree(dev_added);
cudaFree(dev_length);
delete[] vector1;
delete[] vector2;
delete[] results;
delete[] gpuresults;
delete[] gpuAddresults;
return 0;
}
|
1,302
|
#include <iostream>
#define N 1024
using namespace std;
// Write the cube of each thread index into arr: arr[t] = t^3.
// Intended launch: a single block of N threads (only threadIdx.x is used).
__global__ void fun(int* arr) {
const int tid = threadIdx.x;
arr[tid] = tid * tid * tid;
}
// Fills an N-element device array with cubes of the index and prints it.
int main() {
int ha[N], *a;
// BUG FIX: the original wrote N*sizeof(N); that only worked because N is an
// int literal (sizeof(N) == sizeof(int)). Spell the element type explicitly.
cudaMalloc(&a, N*sizeof(int));
fun<<<1,N>>>(a);
// cudaMemcpy on the default stream synchronizes with the kernel launch.
cudaMemcpy(ha, a, N*sizeof(int), cudaMemcpyDeviceToHost);
for(int i=0; i<N; i++) {
cout << ha[i] << endl;
}
// Release the device buffer (the original leaked it).
cudaFree(a);
return 0;
}
|
1,303
|
#include<iostream>
using namespace std;
//test file product on GPU
// Block-wise PRODUCT reduction with 8-way unrolled preloading.
// Template parameter `blocksize` must equal blockDim.x. Each block reduces
// 8*blockDim.x consecutive elements of `a` (in place) to a single product,
// stored in b[blockIdx.x].
// NOTE(review): the unroll guard skips partial windows, and the tree
// reduction below assumes a full window was folded -- appears to require n
// to be a multiple of 8*blockDim.x; TODO confirm at the call sites.
template<unsigned int blocksize, typename T>
__global__ void prob(T* a, T* b,unsigned int n) {
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + blockDim.x*blockIdx.x*8;
T *data = a + blockDim.x*blockIdx.x*8;
// Fold 8 strided elements into one slot (improves memory throughput).
if(idx + blockDim.x*7 < n) {
a[idx] *= a[idx + blockDim.x];
a[idx] *= a[idx + blockDim.x*2];
a[idx] *= a[idx + blockDim.x*3];
a[idx] *= a[idx + blockDim.x*4];
a[idx] *= a[idx + blockDim.x*5];
a[idx] *= a[idx + blockDim.x*6];
a[idx] *= a[idx + blockDim.x*7];
}
__syncthreads();
// Tree reduction: halve the active range each step, synchronizing between
// steps. The blocksize checks are compile-time constant per instantiation.
if(blocksize >= 1024 && tid < 512) {
data[tid] *= data[tid + 512];
}
__syncthreads();
if(blocksize >= 512 && tid < 256) {
data[tid] *= data[tid + 256];
}
__syncthreads();
if(blocksize >= 256 && tid < 128) {
data[tid] *= data[tid + 128];
}
__syncthreads();
if(blocksize >= 128 && tid < 64) {
data[tid] *= data[tid + 64];
}
__syncthreads();
// Final 32 lanes: volatile accesses rely on implicit warp-synchronous
// execution -- a pre-Volta idiom; NOTE(review): needs __syncwarp() or
// shuffle intrinsics to be safe on Volta+.
if(tid < 32) {
volatile T *vmem = data;
vmem[tid] *= vmem[tid + 32];
vmem[tid] *= vmem[tid + 16];
vmem[tid] *= vmem[tid + 8];
vmem[tid] *= vmem[tid + 4];
vmem[tid] *= vmem[tid + 2];
vmem[tid] *= vmem[tid + 1];
}
// Thread 0 publishes this block's product.
if(tid == 0) {b[blockIdx.x] = data[0];}
}
// Reduces 1<<20 ones by per-block product (8x unrolled kernel), then combines
// the block results on the host and prints the answer.
int main() {
unsigned int N = 1<<20;
int SIZE = 512;
dim3 block(SIZE,1);
dim3 grid((block.x + N - 1)/block.x,1);
// BUG FIX: the original declared `float a[N]` (4 MB with N = 1<<20) plus a
// VLA `b[grid.x]` on the stack -- enough to overflow common 1-8 MB stack
// limits. Allocate on the heap instead.
float *a = new float[N];
float *b = new float[grid.x];
for (unsigned int i = 0;i < N;i++) {
a[i] = 1;
}
float *a_dev, *b_dev;
cudaMalloc((float**)&a_dev, sizeof(float)*N);
cudaMalloc((float**)&b_dev, sizeof(float)*grid.x);
// BUG FIX: the kernel is launched with grid.x/8 blocks, so only the first
// grid.x/8 entries of b_dev are written; zero the tail so the host-side
// accumulation below does not read uninitialized device memory.
cudaMemset(b_dev, 0, sizeof(float)*grid.x);
cudaMemcpy(a_dev, a, sizeof(float)*N, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
switch(SIZE) {
case 512:
prob<512><<<grid.x/8,block>>>(a_dev, b_dev, N);
break;
}
cudaMemcpy(b, b_dev, sizeof(float)*grid.x, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Combine block results. NOTE: starts at 1.0f and SUMS the per-block
// products, preserving the original program's output convention.
float ans = 1.0f;
for(unsigned int i = 0;i < grid.x;i++) {
ans += b[i];
}
cout<<ans<<endl;
// Release host and device memory (the original leaked the device buffers).
delete[] a;
delete[] b;
cudaFree(a_dev);
cudaFree(b_dev);
return 0;
}
|
1,304
|
#include <stdio.h>
#include <stdlib.h>
const int ARR_SIZE = 64;
const int ARR_BYTES = ARR_SIZE*sizeof(float);
// Square each input element: d_out[t] = d_in[t]^2, one element per thread.
// Intended launch: a single block with one thread per array element.
__global__
void cuadrado(float* d_out, float* d_in) {
const int tid = threadIdx.x;
const float value = d_in[tid];
d_out[tid] = value * value;
}
// Squares ARR_SIZE floats on the GPU and prints the results, tab-separated,
// five values per line.
int main(int argc, char **argv){
// Host- and device-side array pointers.
float *h_orig, *h_res;
float *d_in, *d_out;
// Allocate host and device buffers.
h_orig = (float *)malloc(ARR_BYTES);
h_res = (float *)malloc(ARR_BYTES);
cudaMalloc((void**) &d_in, ARR_BYTES);
cudaMalloc((void**) &d_out, ARR_BYTES);
// Fill the input with 0 .. ARR_SIZE-1.
for(int i = 0; i < ARR_SIZE; ++i){
h_orig[i]= (float)i;
}
// Copy the input to the device.
cudaMemcpy(d_in, h_orig, ARR_BYTES, cudaMemcpyHostToDevice);
// Launch the kernel: one block, one thread per element.
cuadrado<<<1,ARR_SIZE>>>(d_out,d_in);
// Fetch the result (blocking copy, so no explicit sync needed).
cudaMemcpy(h_res, d_out, ARR_BYTES, cudaMemcpyDeviceToHost);
// Print the squared values.
for(int i = 0; i < ARR_SIZE; ++i){
printf("%4.2f",h_res[i]);
printf("%c",((i%5)<4) ? '\t':'\n');
}
// Release memory and finish.
free(h_orig);
free(h_res);
cudaFree(d_in);
cudaFree(d_out);
return(0);
}
|
1,305
|
// printf("\n\n\nGPU versions:\n");
// printf("\n1.adj_diff_naive:\n");
// printf("Time cost (GPU):%.9lf s\n", adj_diff_naive(data_input, data_output_from_gpu, n));
// if(compare(data_output_from_gpu, data_output_cpu, n)==1){ printf("Passed!\n"); }
// else{ printf("Failed!\n"); }
// memset(data_output_from_gpu, 0, n*sizeof(DTYPE));
|
1,306
|
//parameters: shiftX,Y, scaleX,Y, shearX,Y = 6dimensions
// multiply angle = 7
// Forward affine-warp kernel with bilinear sampling.
// One thread per output element; `bottomSize` = {H, W, C, N} and the flat
// index order is [h, w, c, n]. Per sample n, `affine` holds 7 parameters:
// a[0..5] form a 2x3 affine matrix and a[6] is an extra rotation angle that
// is composed with it (see the matrix sketch below).
// NOTE(review): top_data[index] is accumulated with `+=`, so the output
// buffer must be zeroed by the caller before launch -- TODO confirm.
__global__ void AffineForward(const float* bottom_data,
const int* bottomSize, const float* affine, const int len, float* top_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= len) return;
// Decompose the flat index into [h,w,c,n].
int h = index % bottomSize[0];
int w = (index / bottomSize[0]) % bottomSize[1];
int c = (index / bottomSize[0] / bottomSize[1]) % bottomSize[2];
int n = index / bottomSize[0] / bottomSize[1] / bottomSize[2];
// Start of this sample's 7 affine parameters.
const float* a = affine + n*7;
// Source-coordinate computation:
// [a0 a1 0] [cos6 sin6 0] [0cos6-1sin6 0sin6+1cos6 0]
// [x y 1] = [u v 1] *[a2 a3 0]*[-sin6 cos6 0] = [u v 1] * [2cos6-3sin6 2sin6+3cos6 0]
// [a4 a5 1] [0 0 1] [4cos6-5sin6 4sin6+5cos6 1]
float nw = 2.0*((float)w/(float)bottomSize[1]-0.5); // normalized w in -1~1
float nh = 2.0*((float)h/(float)bottomSize[0]-0.5); // normalized h in -1~1
float w_new = nw*(a[0]*cos(a[6])-a[1]*sin(a[6])) + nh*(a[2]*cos(a[6])-a[3]*sin(a[6])) + (a[4]*cos(a[6])-a[5]*sin(a[6]));
float h_new = nw*(a[0]*sin(a[6])+a[1]*cos(a[6])) + nh*(a[2]*sin(a[6])+a[3]*cos(a[6])) + (a[4]*sin(a[6])+a[5]*cos(a[6]));
// Map the normalized source coordinate back to pixel space.
w_new = (w_new/2.0+0.5)*(float)bottomSize[1];
h_new = (h_new/2.0+0.5)*(float)bottomSize[0];
// Bilinear sample: visit the (up to 4) integer neighbors of (w_new, h_new),
// treating out-of-image neighbors as zero.
float v = 0.0;
for (int x = floor(w_new); x<=ceil(w_new); x++) {
for (int y = floor(h_new); y<=ceil(h_new); y++) {
if (x<0 || x>= bottomSize[1] || y < 0 || y >= bottomSize[0]){
v = 0.0;
}else{
v = bottom_data[n*bottomSize[2]*bottomSize[1]*bottomSize[0] + c*bottomSize[1]*bottomSize[0] + x*bottomSize[0] + y];
}
// Weight each neighbor by its bilinear coefficient.
top_data[index] += v * (1-abs(w_new - (float)x)) * (1-abs(h_new - (float)y));
}
}
}
// Backward pass for AffineForward: accumulates gradients w.r.t. the input
// image (bottom_diff1) and the 7 per-sample affine parameters (bottom_diff2).
// Layout matches AffineForward: bottomSize = {H, W, C, N}, index = [h,w,c,n].
// Both gradient buffers are accumulated atomically and must be zeroed by the
// caller before launch.
__global__ void AffineBackward(const float* bottom_data,
const int* bottomSize, const float* affine, const int len, const float* top_data, const float* top_diff, float* bottom_diff1, float* bottom_diff2) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= len) return;
// Decompose the flat index into [h,w,c,n].
int h = index % bottomSize[0];
int w = (index / bottomSize[0]) % bottomSize[1];
int c = (index / bottomSize[0] / bottomSize[1]) % bottomSize[2];
int n = index / bottomSize[0] / bottomSize[1] / bottomSize[2];
// Start of this sample's 7 affine parameters.
const float* a = affine + n*7;
// Hoist the rotation terms -- they are loop-invariant and were recomputed
// dozens of times in the original.
float ca = cos(a[6]);
float sa = sin(a[6]);
// Map (w,h) to normalized coords, apply the composed affine, map back.
float nw = 2.0*((float)w/(float)bottomSize[1]-0.5); //-1~1
float nh = 2.0*((float)h/(float)bottomSize[0]-0.5); //-1~1
float w_new = nw*(a[0]*ca-a[1]*sa) + nh*(a[2]*ca-a[3]*sa) + (a[4]*ca-a[5]*sa);
float h_new = nw*(a[0]*sa+a[1]*ca) + nh*(a[2]*sa+a[3]*ca) + (a[4]*sa+a[5]*ca);
w_new = (w_new/2.0+0.5)*(float)bottomSize[1];
h_new = (h_new/2.0+0.5)*(float)bottomSize[0];
float v = 0.0;
float dx = 0.0;
float dy = 0.0;
for (int x = floor(w_new); x<=ceil(w_new); x++) {
for (int y = floor(h_new); y<=ceil(h_new); y++) {
if (x<0 || x>= bottomSize[1] || y < 0 || y >= bottomSize[0]){
v = 0.0;
// BUG FIX: the original also wrote bottom_diff1 here, with an
// out-of-range (x,y) -- an out-of-bounds global-memory write.
}else{
v = bottom_data[n*bottomSize[2]*bottomSize[1]*bottomSize[0] + c*bottomSize[1]*bottomSize[0] + x*bottomSize[0] + y];
// BUG FIX: neighboring output pixels share input pixels, so plain
// `+=` raced between threads; use atomicAdd for the image gradient.
atomicAdd(bottom_diff1 + n*bottomSize[2]*bottomSize[1]*bottomSize[0] + c*bottomSize[1]*bottomSize[0] + x*bottomSize[0] + y,
top_diff[index] * (1-abs(w_new - (float)x)) * (1-abs(h_new - (float)y)));
}
// Partial derivatives of the bilinear weight w.r.t. w_new / h_new.
dx += v * (1-abs(h_new - (float)y)) * ((float)x > w_new ? 1.0:-1.0 );
dy += v * (1-abs(w_new - (float)x)) * ((float)y > h_new ? 1.0:-1.0 );
}
}
// Accumulate the 7 parameter gradients atomically (all threads of sample n
// contribute to the same 7 slots).
atomicAdd((bottom_diff2+n*7)+0, nw *(ca+sa) *dx*top_diff[index]);
atomicAdd((bottom_diff2+n*7)+2, nh *(ca+sa) *dx*top_diff[index]);
atomicAdd((bottom_diff2+n*7)+4, 1.0*(ca+sa) *dx*top_diff[index]);
atomicAdd((bottom_diff2+n*7)+1, nw *(ca-sa) *dy*top_diff[index]);
atomicAdd((bottom_diff2+n*7)+3, nh *(ca-sa) *dy*top_diff[index]);
atomicAdd((bottom_diff2+n*7)+5, 1.0*(ca-sa) *dy*top_diff[index]);
// Gradient w.r.t. the rotation angle a[6].
float ba6 = (nw*(-a[0]*sa-a[1]*ca) + nh*(-a[2]*sa-a[3]*ca) + (-a[4]*sa-a[5]*ca))*dx*top_diff[index];
ba6 += (nw*(a[0]*ca-a[1]*sa) + nh*(a[2]*ca-a[3]*sa) + (a[4]*ca-a[5]*sa))*dy*top_diff[index];
atomicAdd((bottom_diff2+n*7)+6, ba6);
}
|
1,307
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define DEBUG
#define float double
#define INDEX(fst, snd, n) ((fst) * (n) + (snd))
#define SIZE (5000)
#define TILL (100)
#define N_TILL (SIZE / TILL)
__global__ void multiple(float* matrix, float* vector, float* out) {
/*
* Matrix-vector product for a SIZE x SIZE row-major matrix (SIZE = 5000).
* Each thread accumulates a 100-element segment of one row:
* threadIdx.x in [0,50) selects the segment within the row,
* threadIdx.y in [0,20) the row within the block,
* blockIdx.x in [0,250) the block of 20 rows.
* Partial sums are combined with atomicAdd into out[row]; `out` must be
* zeroed before launch (main() uses cudaMemset).
* NOTE(review): `float` is #defined to double in this file, so this is the
* double atomicAdd overload -- requires a compute capability 6.0+ GPU.
*/
int x = threadIdx.x;
int y = threadIdx.y;
int blk = blockIdx.x;
float sum = 0;
// Walk this thread's 100-element slice of the row.
for (int i = x * 100; i < (x + 1) * 100; ++i) {
sum += matrix[INDEX(y + blk * 20, i, SIZE)] * vector[i];
}
atomicAdd(&out[y + blk * 20], (float)sum);
}
// CPU reference implementation: out[i] = sum_j matrix[i][j] * vector[j]
// for the SIZE x SIZE row-major matrix. Used to check the GPU result.
void validator(float* matrix, float* vector, float* out) {
for (int row = 0; row < SIZE; ++row) {
float acc = 0;
for (int col = 0; col < SIZE; ++col) {
acc += matrix[INDEX(row, col, SIZE)] * vector[col];
}
out[row] = acc;
}
}
// Computes A*x on the GPU (250 blocks of 50x20 threads) and compares the
// first few entries against a CPU reference. NOTE: `float` is #defined to
// double in this file, so every buffer here actually holds doubles.
int main() {
float* hA = (float*) malloc(sizeof(float) * SIZE * SIZE);
float* dA;
cudaMalloc((void**) &dA, sizeof(float) * SIZE * SIZE);
float* hx = (float*) malloc(sizeof(float) * SIZE);
float* dx;
// BUG FIX: the original allocated SIZE*SIZE elements for the vector; it
// only needs SIZE (a 40000x over-allocation at SIZE = 5000).
cudaMalloc((void**) &dx, sizeof(float) * SIZE);
float* out;
cudaMalloc((void**) &out, sizeof(float) * SIZE);
float* valout = (float*) malloc(sizeof(float) * SIZE);
// Initialize hA and hx with deterministic test data.
for (int i = 0; i < SIZE; ++i) {
for (int j = 0; j < SIZE; ++j) {
hA[INDEX(i, j, SIZE)] = i - 0.1 * j + 1;
}
hx[i] = 0.2 * i - 0.1 * sqrt(i);
}
// Zero the accumulators (the kernel uses atomicAdd into `out`).
cudaMemset(out, 0, sizeof(float)* SIZE);
memset(valout, 0, sizeof(float) * SIZE);
// Transfer inputs to the GPU.
cudaMemcpy(dA, hA, sizeof(float) * SIZE * SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(dx, hx, sizeof(float) * SIZE, cudaMemcpyHostToDevice);
dim3 threads(50, 20);
multiple<<<250, threads>>>(dA, dx, out);
// CPU reference while the GPU works; the blocking cudaMemcpy below
// synchronizes before we read the device result.
validator(hA, hx, valout);
free(hA);
free(hx);
cudaFree(dA);
cudaFree(dx);
float* hout = (float*) malloc(sizeof(float) * SIZE);
cudaMemcpy(hout, out, sizeof(float)* SIZE, cudaMemcpyDeviceToHost);
for (int i = 0; i < 10; ++i) {
printf("%f, (%f) \n", hout[i], valout[i]);
}
free(valout);
free(hout);
cudaFree(out);
}
|
1,308
|
// CUDA programming
// Exercise n. 10
#include <errno.h>
#include <cuda.h>
#include <stdio.h>
#define N_ELEMS 16
#define THREADS 4
// Prototype
__global__ void dot_prod(int *a, int *b, int *c);
__host__ void ints(int *m, int N);
__host__ void print_array(int *a, int N);
// Dot product of two N_ELEMS-element vectors of ones, computed block-wise on
// the GPU with a shared-memory partial sum per block.
int main(void)
{
int *a, *b, *c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = N_ELEMS * sizeof(int);
// Allocate space for host copies of a, b, c
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(sizeof(int));
// Setup input values
ints(a, N_ELEMS);
ints(b, N_ELEMS);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, sizeof(int));
// BUG FIX: d_c is accumulated into with atomicAdd, but cudaMalloc does not
// zero memory -- without this memset the result starts from garbage.
cudaMemset(d_c, 0, sizeof(int));
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Call the kernel on GPU
dot_prod<<< N_ELEMS/THREADS, THREADS >>>(d_a, d_b, d_c);
// Copy result back to host (blocking, so it also synchronizes the launch)
cudaMemcpy(c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
// Check the result
print_array(a, N_ELEMS);
print_array(b, N_ELEMS);
printf("%d\n", *c);
// Cleanup
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return(EXIT_SUCCESS);
}
// Dot product (on device) -- the original comment said "vector addition".
// Each block writes its per-thread products into shared memory, then thread 0
// sums the block's THREADS products and atomically accumulates into *c.
// *c must be zeroed before launch.
__global__ void dot_prod(int *a, int *b, int *c)
{
__shared__ int tmp[THREADS];
int index = blockIdx.x * blockDim.x + threadIdx.x;
tmp[threadIdx.x] = a[index] * b[index];
// All partial products must be visible before thread 0 reads them.
__syncthreads();
if(0 == threadIdx.x)
{
int sum = 0;
for(int i = 0; i < THREADS; i++)
{
sum += tmp[i];
}
atomicAdd(c, sum); // atomic operation to avoid race condition
}
}
// Initialisation: fill the first N entries of m with the value 1.
__host__ void ints(int *m, int N)
{
for (int k = 0; k < N; ++k) {
m[k] = 1;
}
}
// Print the N elements of `a`, tab-separated, followed by a newline.
__host__ void print_array(int *a, int N)
{
int i = 0;
while (i < N)
{
printf("%d\t", a[i]);
++i;
}
printf("\n");
}
|
1,309
|
#include <iostream>
#include <fstream>
#include <cmath>
#include <sys/time.h>
#define BSZ (16)
// Report any pending CUDA errors, synchronising first so that errors from
// asynchronous operations (kernel launches, async copies) that would
// otherwise go unnoticed are surfaced. `label` identifies the call site.
void checkErrors(const char *label) {
cudaError_t err;
// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
// modern equivalent and also surfaces asynchronous execution errors.
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
// cudaGetErrorString returns const char*; no cast needed.
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess) {
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
// Wall-clock time in seconds (gettimeofday), synchronising the device first
// so that pending GPU work is included in timed intervals.
double get_time() {
struct timeval tim;
// cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
cudaDeviceSynchronize();
gettimeofday(&tim, NULL);
return (double) tim.tv_sec + (tim.tv_usec / 1000000.0);
}
// GPU kernel
// Copy u into u_prev, one element per thread.
// Grid layout: 2D grid of BSZ x BSZ blocks tiling the N x N domain;
// I is the flattened row-major index of this thread's element.
__global__ void copy_array(float *u, float *u_prev, int N) {
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;
// Guard the partial tiles at the domain edge.
if (I >= N * N) { return; }
u_prev[I] = u[I];
}
// One explicit Euler step of the 2D heat equation:
// u = u_prev + alpha*dt/h^2 * (5-point Laplacian of u_prev).
// Interior threads of each tile read neighbours from a shared-memory copy;
// tile-edge threads fall back to global memory.
// NOTE(review): threads with I >= N*N return before the __syncthreads()
// below -- a divergent barrier. That is only safe when N is a multiple of
// BSZ so no thread takes the early return; confirm the launch configuration.
__global__ void update(float *u, float *u_prev, int N, float h, float dt, float alpha) {
// Setting up indices
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y * BSZ * N + blockIdx.x * BSZ + j * N + i;
if (I >= N * N) { return; }
// Stage this block's tile of u_prev in shared memory.
__shared__ float u_prev_sh[BSZ][BSZ];
u_prev_sh[i][j] = u_prev[I];
__syncthreads();
// bound_check: I is strictly inside the global domain (not on row/col 0 or N-1).
bool bound_check = ((I > N) && (I < N * N - 1 - N) && (I % N != 0) && (I % N != N - 1));
// block_check: (i,j) is strictly inside this tile, so all four neighbours
// are available in shared memory.
bool block_check = ((i > 0) && (i < BSZ - 1) && (j > 0) && (j < BSZ - 1));
// if not on block boundary do
if (block_check) {
u[I] = u_prev_sh[i][j] + alpha * dt / h / h * (u_prev_sh[i + 1][j] + u_prev_sh[i - 1][j] + u_prev_sh[i][j + 1] +
u_prev_sh[i][j - 1] - 4 * u_prev_sh[i][j]);
}
// if not on boundary
else if (bound_check)
//if (bound_check)
{
u[I] = u_prev[I] +
alpha * dt / (h * h) * (u_prev[I + 1] + u_prev[I - 1] + u_prev[I + N] + u_prev[I - N] - 4 * u_prev[I]);
}
// Boundary conditions are automatically imposed
// as we don't touch boundaries
}
// Heat-diffusion driver: builds an N x N mesh (N from argv[1]), runs `steps`
// explicit Euler iterations on the GPU, and writes the temperature field to
// temperature_shared.txt.
int main(int argc, char *argv[]) {
// BUG FIX: the original dereferenced argv[1] without checking argc.
if (argc < 2) {
std::cerr << "usage: " << argv[0] << " N" << std::endl;
return 1;
}
// Allocate in CPU
int N = atoi(argv[1]);
cudaSetDevice(0);
float xmin = 0.0f;
float xmax = 3.5f;
float ymin = 0.0f;
//float ymax = 2.0f;
float h = (xmax - xmin) / (N - 1);
float dt = 0.00001f;
float alpha = 0.645f;
float time = 0.4f;
int steps = (int) ceil(time / dt);
int I;
float *x = new float[N * N];
float *y = new float[N * N];
float *u = new float[N * N];
float *u_prev = new float[N * N];
// Generate mesh and initial condition (hot edges at i==0 and j==0).
for (int j = 0; j < N; j++) {
for (int i = 0; i < N; i++) {
I = N * j + i;
x[I] = xmin + h * i;
y[I] = ymin + h * j;
u[I] = 0.0f;
if ((i == 0) || (j == 0)) { u[I] = 200.0f; }
}
}
// Allocate in GPU
float *u_d, *u_prev_d;
cudaMalloc((void **) &u_d, N * N * sizeof(float));
cudaMalloc((void **) &u_prev_d, N * N * sizeof(float));
// Copy to GPU
cudaMemcpy(u_d, u, N * N * sizeof(float), cudaMemcpyHostToDevice);
// Time-stepping loop: copy u -> u_prev, then update u from u_prev.
dim3 dimGrid(int((N - 0.5) / BSZ) + 1, int((N - 0.5) / BSZ) + 1);
dim3 dimBlock(BSZ, BSZ);
double start = get_time();
for (int t = 0; t < steps; t++) {
copy_array <<<dimGrid, dimBlock>>>(u_d, u_prev_d, N);
update <<<dimGrid, dimBlock>>>(u_d, u_prev_d, N, h, dt, alpha);
}
double stop = get_time();
checkErrors("update");
double elapsed = stop - start;
std::cout << elapsed << std::endl;
// Copy result back to host
cudaMemcpy(u, u_d, N * N * sizeof(float), cudaMemcpyDeviceToHost);
std::ofstream temperature("temperature_shared.txt");
for (int j = 0; j < N; j++) {
for (int i = 0; i < N; i++) {
I = N * j + i;
temperature << x[I] << "\t" << y[I] << "\t" << u[I] << std::endl;
}
temperature << "\n";
}
temperature.close();
// Free device and host memory (the original leaked the host arrays).
cudaFree(u_d);
cudaFree(u_prev_d);
delete[] x;
delete[] y;
delete[] u;
delete[] u_prev;
}
|
1,310
|
#include <iostream>
#include <cuda.h>
#include <time.h>
#include <math.h>
using namespace std;
// 테스트 용이므로 일단 자료 크기는 1000으로
// 1D이니까 그냥 블럭사이즈는 512로
// EP는 슬라이스의 사이즈
//10만개부터 에러났음. 아마 랜덤 숫자 만들어내는 데, 아니면 GPU메모리 상에서 문제가 발생한 것 같음.
// 만일 화면 데이터를 정렬한다고 하면, 2560x1600 = 4,096,000 픽셀이니까 GPU메모리 상에서의 문제가
// 아니라 랜덤 숫자 만들어내는 곳에서 문제가 발생한 것일 수도...
#define DATASIZE 100000
#define BLOCK_SIZE 512
// One pass of odd-even transposition sort over `input` (length `len`).
// Pass index `i` selects the phase: even i compares pairs starting at even
// indices, odd i compares pairs starting at odd indices. The host launches
// `len` passes to guarantee a fully sorted array.
__global__ void oddevensort ( int * input, unsigned int len, int i )
{
// Global index of this thread's candidate pair (x, x+1).
unsigned int x = threadIdx.x + blockDim.x * blockIdx.x;
// Which parity of starting index this pass compares.
unsigned int parity = (i % 2 == 0) ? 0u : 1u;
// BUG FIX: the original evaluated input[x] > input[x+1] BEFORE the bounds
// check (short-circuit order), reading out of bounds for trailing threads,
// and its `x < len` guard still allowed x == len-1, making input[x+1] read
// one element past the end. Check x + 1 < len first.
if ( x + 1 < len && x % 2 == parity && input[x] > input[x+1] )
{
int temp = input[x+1];
input[x+1] = input[x];
input[x] = temp;
}
// No barrier needed: each pass is a separate kernel launch, which orders
// global-memory accesses between passes.
}
// Sorts DATASIZE random ints on the GPU with odd-even transposition sort
// (one kernel launch per pass) and prints the result ten per line.
int main()
{
// BUG FIX: the original declared two DATASIZE-element int arrays
// (~800 KB total) on the stack -- the likely cause of the crashes the
// original comments reported at 100k elements. Use the heap.
int *TestInput = new int[DATASIZE];
int *TestOutput = new int[DATASIZE];
srand(time(NULL));
for( int i = 0; i < DATASIZE; i++ )
{
TestInput[i] = rand() % 500;
}
// Device buffers.
int *devInput, *devOutput;
unsigned int MemDataSize = DATASIZE * sizeof(int);
cudaMalloc((void**)&devInput, MemDataSize );
cudaMalloc((void**)&devOutput, MemDataSize );
cudaMemset( devOutput, 0, MemDataSize );
// Copy the unsorted data to the device.
cudaMemcpy( devInput, TestInput, MemDataSize, cudaMemcpyHostToDevice);
// 1D launch configuration covering all DATASIZE elements.
dim3 dimBlocksize( BLOCK_SIZE );
dim3 dimGridsize( ceil((DATASIZE-1)/(float)BLOCK_SIZE) + 1 );
// DATASIZE alternating odd/even passes guarantee a complete sort.
for( int i=0; i<DATASIZE; i++)
{
oddevensort<<< dimGridsize, dimBlocksize >>>( devInput, DATASIZE, i );
}
// Fetch the sorted data (blocking copy synchronizes the launches).
cudaMemcpy( TestOutput, devInput, MemDataSize, cudaMemcpyDeviceToHost);
for( int i=0; i<DATASIZE; i++ )
{
cout << TestOutput[i] << ", ";
if( (i+1)%10 == 0 )
{
cout << endl;
}
}
// Release device and host memory.
cudaFree( devInput );
cudaFree( devOutput );
delete[] TestInput;
delete[] TestOutput;
return 0;
}
|
1,311
|
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <stdio.h>
#include <cuda_runtime.h>
// Element-wise vector add: C[t] = A[t] + B[t], one element per thread.
// Intended launch: a single block (only threadIdx.x is used).
__global__ void sumArraysOnGpu(float *A, float *B, float *C){
const int tid = threadIdx.x;
C[tid] = A[tid] + B[tid];
}
// Per-thread chain of `operations` dependent arithmetic operations (a GPU
// load-test kernel): cycles through six op kinds and stores the final value
// in C[threadIdx.x]. Only threadIdx.x indexes the arrays, so every block
// computes the same C[0..blockDim.x-1] range.
__global__ void mathOperationsOnGPU(float *A, float *B, float *C, int operations) {
int idx = threadIdx.x;
// BUG FIX: `result` was op-assigned (+=, -=, *=) before ever being written,
// so the output was computed from an uninitialized register.
float result = 0.0f;
for (int i = 0; i < operations; i++) {
int r = i % 6;
switch (r) {
case 0:
result += B[idx];
break;
case 1:
result -= A[idx];
break;
case 2:
result += 9;
break;
case 3:
result -= 9203.34;
break;
case 4:
result *= 0.2;
break;
case 5:
result -= (A[idx] / 1024);
break;
}
}
C[idx] = result;
}
// Fill `ip` with pseudo-random floats in [0, 25.5] (rand() & 0xFF gives
// 0..255, divided by 10). Reseeds the RNG from the current time.
void initialData(float *ip, int size){
// generate different seed for random number
time_t t;
srand((unsigned int) time (&t));
for (int k = 0; k < size; ++k){
ip[k] = (float)(rand() & 0xFF) / 10.0f;
}
}
// Fill `input` with a linear ramp: input[i] = i plus a tiny constant offset
// of size/1024000 (identical for every element).
void linearData(float *input, int size) {
const double offset = size / (1024 * 1e3);
for (int i = 0; i < size; i++) {
input[i] = i + offset;
}
}
// GPU arithmetic load test. Usage: prog <expoente> <blocks> <operations>,
// where 2^expoente is the element count, `blocks` the grid size, and
// `operations` the per-thread arithmetic chain length.
int main(int argc, char **argv) {
// BUG FIX: the original read argv[1..3] without checking argc.
if (argc < 4) {
fprintf(stderr, "usage: %s <expoente> <blocks> <operations>\n", argv[0]);
return 1;
}
int expoente = atoi(argv[1]); // 2^expoente = number of elements
int blocks = atoi(argv[2]); // number of blocks to launch
int operations = atoi(argv[3]); // math operations per thread
srand(time(NULL));
// BUG FIX: the original computed nBytes = (2 << (expoente+1))/sizeof(float)
// and then nElem = nBytes/sizeof(float) -- dividing by sizeof(float) twice,
// so only 2^(expoente-2) elements were allocated instead of 2^expoente.
int nElem = 1 << expoente;
size_t nBytes = (size_t)nElem * sizeof(float);
float *h_A, *h_B, *h_C, *result;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
h_C = (float *)malloc(nBytes);
result = (float *)malloc(nBytes);
initialData(h_A, nElem);
linearData(h_B, nElem);
printf("Quantidade de elementos: %d \n Quantidade de MB: %lu MB, Quantidade de operações: %d\n\n", nElem, (nBytes / (1024*1024)), operations);
float *d_A, *d_B, *d_C;
cudaMalloc((float**)&d_A, nBytes);
cudaMalloc((float**)&d_B, nBytes);
cudaMalloc((float**)&d_C, nBytes);
// Use cudaMemcpy to transfer the data from the host memory to the GPU global memory with the
// parameter cudaMemcpyHostToDevice specifying the transfer direction.
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
// NOTE: the kernel indexes with threadIdx.x only, so it touches at most the
// first 1024 elements regardless of `blocks`; nElem must be >= 1024.
mathOperationsOnGPU<<<blocks, 1024>>>(d_A, d_B, d_C, operations);
// sumArraysOnGpu<<<1, nElem>>>(d_A, d_B, d_C);
cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost);
free(h_A);
free(h_B);
free(h_C);
free(result);
// use cudaFree to release the memory used on the GPU
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaDeviceReset();
return (0);
}
|
1,312
|
//#include <iostream>
//#include <fstream>
//#include <iomanip>
//#include <string>
//
//#include <cmath>
//#include <cstdio>
//
//#include <cuda_runtime.h>
//#include <device_launch_parameters.h>
//
////using namespace std;
//using std::ifstream;
//using std::string;
//using std::cout;
//using std::endl;
//using std::ios;
//using std::setiosflags;
//using std::setprecision;
//
////#define length 8
//#define PI 3.14159265
//#define length 4
//#define block_len 16
//
//cudaError_t dctWithCuda_1(const double *d, double *D);
//
//cudaError_t dctWithCuda_2(const double *f, double *F);
//
///*__global__ void dct(float *f, float *F){
// int tidy = blockIdx.x*blockDim.x + threadIdx.x;
// int tidx = blockIdx.y*blockDim.y + threadIdx.y;
// int index = tidx*len + tidy;
// float tmp;
// float beta,alfa;
// if(tidx == 0)
// beta = sqrt(1.0/length);
// else
// beta = sqrt(2.0/length);
// if(tidy == 0)
// alfa = sqrt(1.0/length);
// else
// alfa = sqrt(2.0/length);
// if(tidx<length && tidy<length){
// for(i=0; i<length; i++){
// int x = i/length;
// int y = i%length;
// tmp+=((int)data[i])*cos((2*x+1)*tidx*PI/(2.0*length))*
// cos((2*y+1)*tidy*PI/(2.0*length));
// }
// F[index]=(float)alfa*beta*tmp;
// }
// }
//*/
//
//__global__ void dct_1(const double *f, double *F) {
// int bid = blockIdx.x;
// //int tid = threadIdx.x;
// int i, j;
// //double data[length]={0.0};
// double tmp;
// //printf("length = %d\n", length);
// if (bid < length)
// {
// //__shared__
// double data[length];
// for (i = 0; i < length; i++)
// {
// data[i] = f[bid * length + i]; //load row data from f.
// }
// __syncthreads();
// for (i = 0; i < length; i++)
// {
// if (i == 0)
// {
// tmp = (double) (1.0 / sqrt(1.0 * length));
// F[bid * length + i] = 0.0; //why use F[bid]? Do transpose at the same time.
// for (j = 0; j < length; j++)
// F[bid * length + i] += data[j];
// F[bid * length] *= tmp;
// }
// else
// {
// tmp = (double) (sqrt(2.0 / (1.0 * length)));
// for (i = 1; i < length; i++)
// {
// F[bid * length + i] = 0.0;
// for (j = 0; j < length; j++)
// {
// F[bid * length + i] +=
// (double) (data[j] * cos((2 * j + 1) * i * PI / (2 * length)));
// }
// F[bid * length + i] *= tmp;
// }
// }
// }
//// __syncthreads();
//// if (bid == 0)
//// {
//// for (int k = 0; k < length; k++)
//// {
//// for (int l = 0; l < length; l++)
//// {
//// printf("%lf\t", F[k * length + l]);
//// }
//// printf("\n");
//// }
//// printf("\n");
//// }
//
// __syncthreads();
// for (i = 0; i < length; i++)
// {
// data[i] = F[i * length + bid];
// }
// __syncthreads();
// for (i = 0; i < length; i++)
// {
// if (i == 0)
// {
// tmp = (double) (1.0 / sqrt(1.0 * length));
// F[bid] = 0;
// for (j = 0; j < length; j++)
// F[bid] += data[j];
// F[bid] *= tmp;
// }
// else
// {
// tmp = (double) (sqrt(2.0 / (1.0 * length)));
// for (i = 1; i < length; i++)
// {
// F[i * length + bid] = 0;
// for (j = 0; j < length; j++)
// {
// F[i * length + bid] +=
// (double) (data[j] * cos((2 * j + 1) * i * PI / (2 * length)));
// }
// F[i * length + bid] *= tmp;
// }
// }
// }
// __syncthreads();
// }
//}
//
//__global__ void dct_2(const double *f, double *F) {
// int tidy = blockIdx.x * blockDim.x + threadIdx.x;
// int tidx = blockIdx.y * blockDim.y + threadIdx.y;
// int index = tidx * length + tidy;
// int i;
// double tmp;
// double beta, alfa;
// if (tidx == 0)
// beta = sqrt(1.0 / length);
// else
// beta = sqrt(2.0 / length);
// if (tidy == 0)
// alfa = sqrt(1.0 / length);
// else
// alfa = sqrt(2.0 / length);
// if (tidx < length && tidy < length) {
// for (i = 0; i < length * length; i++) {
// int x = i / length;
// int y = i % length;
// tmp += ((double) f[i])
// * cos((2 * x + 1) * tidx * PI / (2.0 * length))
// * cos((2 * y + 1) * tidy * PI / (2.0 * length));
// }
// F[index] = (double) alfa * beta * tmp;
// }
//}
//
//int main() {
// ifstream infile("/home/zhujian/cuda-workspace/dct_10.16/gradient.txt");
// int i = 0;
// string line;
// double f[length * length] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
// 14, 15 };
// double F[length * length] = { 0.0 };
// while(i<length*length){
// if(getline(infile, line)){
//
// f[i] = atof(line.c_str());
// cout<<"f[i]: "<<f[i]<<endl;
// }
// i++;
// }
// cout << "before" << endl;
// for (i = 0; i < length * length; i++)
// {
// cout << f[i] << " ";
// if ((i + 1) % length == 0)
// cout << endl;
// }
// cout << endl;
//
// for (i = 0; i < length * length; i++)
// {
// cout << F[i] << " ";
// if ((i + 1) % length == 0)
// cout << endl;
// }
// cout << endl;
//
//
// /*
// * execute dct_1
// */
//
// cudaError_t cudaStatus = dctWithCuda_1(f, F);
// if (cudaStatus != cudaSuccess)
// {
// fprintf(stderr, "dctWithCuda_1 failed!");
// return 1;
// }
//
// cout << "after" << endl;
// for (i = 0; i < length * length; i++)
// {
// cout << setiosflags(ios::right) << f[i] << "\t";
// if ((i + 1) % length == 0)
// cout << endl;
// }
//
// cout << endl;
// for (i = 0; i < length * length; i++)
// {
//
// /* GPU can't calculate floating number precisely
// * 0 will be a very small floating number.
// * so print this numbers with 7 digits after decimal point
// */
// cout << setiosflags(ios::right)
// << setiosflags(ios::fixed) << setprecision(7)
// << F[i] << "\t";
// if ((i + 1) % length == 0)
// cout << endl;
// }
// return 0;
//
//}
//
//cudaError_t dctWithCuda_1(const double *d, double *D) {
// double *dev_d = 0;
// double *dev_D = 0;
// cudaError_t cudaStatus;
//
// cudaStatus = cudaSetDevice(0);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr,
// "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**) &dev_d, length * length * sizeof(double));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**) &dev_D, length * length * sizeof(double));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// //copy input vectors from host memory to GPU buffers.
// cudaStatus = cudaMemcpy(dev_d, d, length * length * sizeof(double),
// cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy-- failed");
// goto Error;
// }
// //launch a kernel on the GPU
// dct_1<<<length, 1>>>(dev_d, dev_D);
//
// cudaStatus = cudaThreadSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr,
// "cudaThreadSynchronize returned error code %d after launching addKernel!\n",
// cudaStatus);
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(D, dev_D, length * length * sizeof(double),
// cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
// Error: cudaFree(dev_d);
// cudaFree(dev_D);
// return cudaStatus;
//}
//
//cudaError_t dctWithCuda_2(const double *d, double *D) {
// double *dev_d = 0;
// double *dev_D = 0;
// cudaError_t cudaStatus;
//
// cudaStatus = cudaSetDevice(0);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr,
// "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**) &dev_d, length * sizeof(double));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**) &dev_D, length * sizeof(double));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// //copy input vectors from host memory to GPU buffers.
// cudaStatus = cudaMemcpy(dev_d, d, length * sizeof(double),
// cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed");
// goto Error;
// }
//
// //launch a kernel on the GPU
// dct_2<<<1, (length / block_len) * (length / block_len),
// block_len * block_len>>>(dev_d, dev_D);
//
// cudaStatus = cudaThreadSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr,
// "cudaThreadSynchronize returned error code %d after launching addKernel!\n",
// cudaStatus);
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(D, dev_D, length * length * sizeof(double),
// cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
// Error: cudaFree(dev_d);
// cudaFree(dev_D);
//
// return cudaStatus;
//}
//
|
1,313
|
#include "includes.h"
// Accumulate power products from interleaved samples. Each float4 appears to
// pack two complex values: (x, y) and (z, w) — confirm against the producer.
// One thread per element; the launch configuration must cover the buffer
// exactly (no bounds check here).
__global__ void PowerInterleaved(float4 *src, float4 *dest) {
    const size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    // Stage both elements in registers: one load and one store per array.
    const float4 s = src[idx];
    float4 acc = dest[idx];
    // Cross pols
    acc.x += s.x * s.x + s.y * s.y;
    acc.y += s.z * s.z + s.w * s.w;
    // Parallel pols
    acc.z += s.x * s.z + s.y * s.w;
    acc.w += s.y * s.z - s.x * s.w;
    dest[idx] = acc;
}
|
1,314
|
#include "includes.h"
// Matrix-vector product: c = a * b, where a is mat_rows x mat_cols
// (row-major) and b holds mat_cols entries. One thread per row, advanced
// with a grid-stride loop so any launch configuration covers all rows.
__global__ void mat_mult_kernel(int *a, int *b, int *c, int mat_rows, int mat_cols) {
    const int stride = blockDim.x * gridDim.x;
    for (int row = threadIdx.x + blockIdx.x * blockDim.x; row < mat_rows; row += stride) {
        int acc = 0;
        for (int col = 0; col < mat_cols; ++col) {
            acc += a[row * mat_cols + col] * b[col];
        }
        c[row] = acc;
    }
}
|
1,315
|
// This example is taken from https://devblogs.nvidia.com/even-easier-introduction-cuda/
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
// get_walltime function for time measurement
// Store the current wall-clock time (seconds since the epoch, microsecond
// resolution) into *wcTime. Always returns 0.0; the measurement is
// delivered through the out-parameter.
double get_walltime_(double* wcTime) {
    struct timeval now;
    gettimeofday(&now, NULL);
    *wcTime = (double)now.tv_sec + (double)now.tv_usec / 1000000.0;
    return 0.0;
}
// Thin wrapper that discards get_walltime_'s (always zero) return value.
// The trailing-underscore variant presumably exists for Fortran linkage —
// confirm before removing either form.
void get_walltime(double* wcTime) {
get_walltime_(wcTime);
}
// CUDA Kernel function to add the elements of two arrays
// CUDA kernel: element-wise y[i] = x[i] + y[i] over n floats.
// Written as a grid-stride loop, so any <<<blocks, threads>>> configuration
// (including a single thread, useful for debugging) covers the array.
__global__
void add(int n, float *x, float *y)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step)
        y[i] += x[i];
}
// Driver: allocates two 1M-element arrays in unified memory, adds them on
// the GPU, verifies every element equals 3.0f, and reports wall time and
// Mflops. Returns 0 on success, 1 on any CUDA failure.
int main(void)
{
  int N = 1<<20;
  float *x, *y;
  double delta, finish, start;
  double flops, nd;
  // Allocate Unified Memory – accessible from CPU or GPU. Check the return
  // codes: a failed allocation would otherwise make the host-side
  // initialization below dereference an invalid pointer.
  if (cudaMallocManaged(&x, N*sizeof(float)) != cudaSuccess ||
      cudaMallocManaged(&y, N*sizeof(float)) != cudaSuccess) {
    std::cerr << "cudaMallocManaged failed" << std::endl;
    return 1;
  }
  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  // Run kernel on 1M elements on the GPU
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;  // ceil(N / blockSize)
  // start time measurement
  get_walltime(&start);
  // execute the CUDA kernel function
  add<<<numBlocks, blockSize>>>(N, x, y);
  // Launch-configuration errors only surface via cudaGetLastError().
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    std::cerr << "kernel launch failed: " << cudaGetErrorString(err) << std::endl;
    return 1;
  }
  // Wait for GPU to finish before accessing on host. The launch is
  // asynchronous, so the timer must stop after this synchronization —
  // stopping right after the launch would only measure the enqueue cost.
  err = cudaDeviceSynchronize();
  if (err != cudaSuccess) {
    std::cerr << "kernel execution failed: " << cudaGetErrorString(err) << std::endl;
    return 1;
  }
  // stop time measurement
  get_walltime(&finish);
  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;
  // calculating time delta and Mflops (one add per element)
  delta = (finish - start);
  nd = (double) N;
  flops = nd/(delta * 1000000.);
  std::cout << ">>>>> finish: " << finish << std::endl;
  std::cout << ">>>>> delta: " << delta << std::endl;
  std::cout << ">>>>> Mflops: " << flops << std::endl;
  // Free memory
  cudaFree(x);
  cudaFree(y);
  return 0;
}
|
1,316
|
#include <stdio.h>
// Element-wise vector add: c[i] = a[i] + b[i] for every i < *d_n.
// One thread per element; threads past the end announce themselves and
// exit (guard-clause form).
__global__ void parallel_vector_add(int* d_a, int* d_b, int* d_c, int* d_n)
{
    const int idx = (blockIdx.x*blockDim.x)+threadIdx.x;
    if (idx >= *d_n)
    {
        printf("I am thread #%d, and doing nothing.\n", idx);
        return;
    }
    printf("I am thread #%d, and about to compute c[%d].\n", idx, idx);
    d_c[idx] = d_a[idx]+d_b[idx];
}
// Reads n from stdin, builds a[i]=i and b[i]=n-i on the host, adds them on
// the GPU (every c[i] should equal n), and prints the result vector.
int main()
{
    int n;
    // Reject malformed or non-positive input: n sizes every allocation below.
    if (scanf("%d", &n) != 1 || n <= 0)
    {
        return 1;
    }
    //declare input and output on host
    int h_a[n];
    int h_b[n];
    for(int i=0; i<n; i++)
    {
        h_a[i] = i;
        h_b[i] = n-i;
    }
    int h_c[n];
    // PART I: Copy data from host to device
    int* d_a, *d_b, *d_c, *d_n;
    cudaMalloc((void **) &d_a, n*sizeof(int));
    cudaMalloc((void **) &d_b, n*sizeof(int));
    cudaMalloc((void **) &d_c, n*sizeof(int));
    cudaMalloc((void **) &d_n, sizeof(int));
    cudaMemcpy(d_a, &h_a, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &h_b, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
    // PART II: Kernel launch. Use ceil(n/10) blocks of 10 threads: the old
    // n/10 grid silently skipped the tail elements when n was not a
    // multiple of 10, and launched zero blocks for n < 10. The kernel's
    // bounds check discards the surplus threads of the last block.
    parallel_vector_add<<<(n + 9)/10, 10>>>(d_a, d_b, d_c, d_n);
    cudaDeviceSynchronize();
    // PART III: Copy data from device back to host, and free all data allocated on device
    cudaMemcpy(&h_c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_n);  // previously leaked
    for(int i=0; i<n; i++)
        printf("%d ", h_c[i]);
    return 0;
}
|
1,317
|
#include "includes.h"
// In-place transform of row (idx-1) of an N-column array flattened into x:
// x[j] = sqrt(|x[j]| * W0) for each element j of that row.
// Grid-stride loop, so any launch configuration covers the row.
// NOTE(review): idx is treated as 1-based — confirm against callers.
__global__ void Sqrt( float * x, size_t idx, size_t N, float W0)
{
    const size_t row = (idx - 1) * N;  // hoisted loop-invariant row offset
    // size_t counter (was int): safe against N > INT_MAX and matches N's type.
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
    {
        // Single-precision fabsf/sqrtf: the previous sqrt/abs forms promoted
        // through double (and abs() on a float risks selecting the integer
        // overload), which is slower and a correctness hazard in a float kernel.
        x[row + i] = sqrtf(fabsf(x[row + i]) * W0);
    }
}
|
1,318
|
#include<cuda.h>
#include<cuda_runtime.h>
#include<math.h>
#include<stdio.h>
#include<time.h>
// c[id] = a[id] + b[id] for the first n elements; surplus threads of the
// last block are masked out by the guard clause.
__global__ void vecAdd(double *a,double *b,double *c,int n)
{
    const int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
// Driver: builds two 50-element vectors (both equal to 0..49), adds them on
// the GPU, then reports the sum, mean, and standard deviation of the result
// plus the elapsed wall time.
int main()
{
    srand(time(NULL));
    double *h_a,*h_b,*h_c;
    double *d_a,*d_b,*d_c;
    int n=50;
    int i=0;
    clock_t t;
    size_t bytes=n*sizeof(double);
    h_a=(double*)malloc(bytes);
    h_b=(double*)malloc(bytes);
    h_c=(double*)malloc(bytes);
    cudaMalloc(&d_a,bytes);
    cudaMalloc(&d_b,bytes);
    cudaMalloc(&d_c,bytes);
    // Timed region begins here; note it includes host init and transfers,
    // not just the kernel.
    t=clock();
    for(i=0;i<n;i++)
    {
        h_a[i]=i;
        h_b[i]=i;
    }
    cudaMemcpy(d_a,h_a,bytes,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,h_b,bytes,cudaMemcpyHostToDevice);
    int blksize,gridsize;
    blksize=50;
    gridsize=(int)ceil((float)n/blksize);  // ceil-div so the grid covers n
    vecAdd<<<gridsize,blksize>>>(d_a,d_b,d_c,n);
    // This blocking copy also synchronizes with the kernel, so h_c is valid.
    cudaMemcpy(h_c,d_c,bytes,cudaMemcpyDeviceToHost);
    t=clock()-t;
    double time_taken=((double)t)/CLOCKS_PER_SEC;
    double sum=0;
    for(i=0;i<n;i++)
        sum+=h_c[i];
    double mean=sum/n;
    double temp=0;
    for(i=0;i<n;i++)
    {
        temp+=(h_c[i]-mean)*(h_c[i]-mean);
    }
    // Bug fix: the previous code printed the variance (temp/n) under the
    // label "SD". The standard deviation is the square root of the variance.
    double sd=sqrt(temp/n);
    printf("\nAddition=%f",sum);
    printf("\nAverage=%f",mean);
    printf("\nSD=%f \n",sd);
    printf("\n time taken to calculate max=%f",time_taken);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return(0);
}
|
1,319
|
// Apply the logistic sigmoid 1/(1 + e^-x) in place to `length` floats.
// Grid-stride loop; uses the fast __expf intrinsic (reduced precision).
extern "C"
__global__
void sigmoid(float *activation, unsigned int length)
{
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < length;
         idx += stride)
    {
        const float e = __expf(-activation[idx]);
        activation[idx] = 1.0f / (1.0f + e);
    }
}
|
1,320
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
// For each element i of the array pair: d1[i] += 1, then d[i] += d1[i].
// Rewritten as a grid-stride loop: the previous explicit block/thread
// partitioning silently dropped the tail elements whenever size was not
// divisible by gridDim.x * blockDim.x (and per-block whenever blockDim.x
// did not divide the block's share). The grid-stride form covers every
// index for any launch configuration and produces the same per-element
// result when the sizes do divide evenly (as in this file's main()).
__global__ void kernel(float* d,float* d1,int size){
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += stride) {
        d1[i] += 1.0f;
        d[i] += d1[i];
    }
}
// Seconds elapsed between tv0 and tv1, as a float.
static float elapsed(struct timeval tv0,struct timeval tv1){
    const float secs  = (float)(tv1.tv_sec - tv0.tv_sec);
    const float usecs = (float)(tv1.tv_usec - tv0.tv_usec);
    return secs + usecs * 0.000001f;
}
// Benchmark of zero-copy (mapped pinned host) memory: d aliases a
// cudaHostRegister'ed host buffer, d1 is ordinary device memory. Each of
// the `ite` kernel launches performs d1[i] += 1; d[i] += d1[i], so after k
// launches d[i] == 1+2+...+k == k*(k+1)/2, which the verification loop
// checks with k == ite. Prints the total wall time on success.
int main(){
struct timeval t0,t1;
gettimeofday(&t0,NULL);
int res,length,ite;
ite = 10;
// Mapped pinned memory must be enabled before any other CUDA call.
res = cudaSetDeviceFlags(cudaDeviceMapHost);
printf("cudaSetDeviceFlags(%d)\n",res);
float *h;
float *d,*d1;
length = 500000000;
// res = cudaHostAlloc((void**)&h,sizeof(float)*length,cudaHostAllocMapped|cudaHostAllocPortable);
// valloc gives a page-aligned buffer suitable for cudaHostRegister.
h = (float*)valloc(sizeof(float)*length);
res = cudaHostRegister(h,sizeof(float)*length,cudaHostRegisterMapped);
printf("cudaHostAlloc(%d)\n",res);
// d is the device-side alias of the registered host buffer.
res = cudaHostGetDevicePointer((void**)&d,h,0);
printf("cudaHostGetDevicePointer(%d)\n",res);
printf("device address : %p\n",d);
// NOTE(review): h is re-pointed at a fresh malloc here, so the registered
// valloc buffer (still reachable through d) can no longer be unregistered
// or freed. It appears to be an intentional staging buffer for the copies
// below — confirm.
h = (float*)malloc(sizeof(float)*length);
for(int i = 0 ; i < length ; i ++){
h[i] = 0.0f;
}
dim3 threads(1000,1,1);
dim3 blocks(1000,1,1);
// Zero both the mapped region (through d) and the device array d1.
res = cudaMemcpy(d,h,sizeof(float)*length,cudaMemcpyHostToDevice);
printf("cudaMemcpyHostToDevice(%d)\n",res);
res = cudaMalloc((void**)&d1,sizeof(float)*length);
printf("cudaMalloc(%d) : Address %p\n",res,d1);
res = cudaMemcpy(d1,h,sizeof(float)*length,cudaMemcpyHostToDevice);
printf("cudaMemcpyHostToDevice(%d)\n",res);
// All launches go to the default stream, so the iterations execute in
// order without explicit synchronization between them.
for(int i = 0 ; i < ite ; i ++){
kernel<<<blocks,threads>>>(d,d1,length);
}
// This blocking copy also synchronizes with the preceding kernels.
res = cudaMemcpy(h,d,sizeof(float)*length,cudaMemcpyDeviceToHost);
printf("cudaMemcpyDeviceToHost(%d)\n",res);
// Every element should hold the triangular number for `ite` iterations.
for(int i = 0 ; i < length ; i ++){
if(h[i] != ((ite+1)*ite)/2 ){
printf("Result test : Failed\n");
printf("h[%d] == %f\n",i,h[i]);
return -1;
}
}
printf("Result test PASS\n");
gettimeofday(&t1,NULL);
printf("TIME RESULT : %f[sec](MAP)\n",elapsed(t0,t1));
return 0;
}
|
1,321
|
//#include <hayai/hayai.hpp>
//
//#include "concurrent/containers/hash_tables/chaining.cuh"
//
//#include "hash_map-fixture.cu"
//
//using Chaining = gpu::concurrent::chaining<key_type, mapped_type, gpu::hash<key_type>>;
//using ChainingInsertionFixture = HashMapInsertionFixture<Chaining>;
//using ChainingGetFixture = HashMapGetFixture<Chaining>;
//using ChainingGetUnsuccessfulFixture = HashMapGetUnsuccessfulFixture<Chaining>;
//
//BENCHMARK_F(ChainingInsertionFixture, Chaining, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// insert();
//}
//
//BENCHMARK_F(ChainingGetFixture, Chaining, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// get();
//}
//
//BENCHMARK_F(ChainingGetUnsuccessfulFixture, Chaining, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// unsuccessful_get();
//}
|
1,322
|
/// Assignment 06: Local Register Memory
///
/// Author: Justin Renga
/// Two Kernels -- Same Operation
///
/// Operation: Take an integer (randomly generated) from two input arrays,
/// take their modulo (input1 % input2) and store the result.
///
/// Kernel 1: Use the global memory to perform the operation (using local memory as an
/// intermediate.
/// Kernel 2: Transfer the data from global memory to local memory, perform the operation,
/// then transfer back to global memory
// Bibliography (source-list):
// [1] register.cu
// [2] http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html
// [3] https://devblogs.nvidia.com/how-implement-performance-metrics-cuda-cc/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
// Declare constant definitions here
#define UINT_SIZE sizeof(unsigned int)
#define INITIAL_DATA 0
#define THREAD_MIN 64
#define THREAD_MAX 4096
// Declare device constant memory here
__constant__ static unsigned int ADDITIVE_VALUES[16];
// Declare global host data here:
unsigned int initializedRNG;
// ---------------------------------------- DEVICE OPERATIONS -----------------------------------------
/// @brief GPU kernel computing output = input1 % input2 element-wise, reading
/// and writing global memory directly (no explicit register staging).
/// Benchmarking counterpart of localModularDivide.
///
/// Launch with a 1-D grid sized so blockCount * threadsPerBlock equals the
/// array length: there is no bounds check.
///
/// @param [ in] input1 Dividends
/// @param [ in] input2 Divisors
/// @param [out] output Remainders, one per thread
__global__ void globalModularDivide(const unsigned int* const input1,
                                    const unsigned int* const input2,
                                    unsigned int* const output)
{
    // Flat global index of this thread.
    const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    // Operate straight on global memory.
    output[idx] = input1[idx] % input2[idx];
}
/// @brief GPU kernel computing output = input1 % input2 element-wise, but
/// explicitly staging the operands through local registers before writing
/// the result back to global memory. Benchmarking counterpart of
/// globalModularDivide.
///
/// Launch with a 1-D grid sized so blockCount * threadsPerBlock equals the
/// array length: there is no bounds check.
///
/// @param [ in] input1 Dividends
/// @param [ in] input2 Divisors
/// @param [out] output Remainders, one per thread
__global__ void localModularDivide(const unsigned int* const input1,
                                   const unsigned int* const input2,
                                   unsigned int* const output)
{
    // Flat global index of this thread.
    const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    // Stage operands in registers, compute, then perform the single store.
    const unsigned int lhs = input1[idx];
    const unsigned int rhs = input2[idx];
    output[idx] = lhs % rhs;
}
/// @brief GPU kernel that adds an alternating-sign sum of the 16 constants in
/// ADDITIVE_VALUES to every element, then writes each element back reversed
/// within its block.
///
/// Bug fix: the shared-memory tile was previously indexed with the *global*
/// thread index, and the final reversal read slots the block never wrote
/// whenever more than one block was launched — shared memory is private to a
/// block, so those reads returned uninitialized data. The tile is now indexed
/// with threadIdx.x and the reversal is block-local; with a single block this
/// matches the old behaviour exactly.
///
/// @param [inout] deviceData   The data to transform (one element per thread)
/// @param [ in] elementCount   Total element count; no longer needed by the
///                             block-local reversal but kept for interface
///                             compatibility with existing callers
__global__ void add_values_shared(unsigned int* deviceData, const unsigned int elementCount)
{
    // Per-block scratch tile (blockDim.x <= THREAD_MAX by construction).
    __shared__ unsigned int sharedMemory[THREAD_MAX];
    // Global index selects the element; the local index selects the tile slot.
    const unsigned int globalIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
    const unsigned int localIndex = threadIdx.x;
    (void)elementCount;  // retained parameter, see doc comment
    // Stage this block's slice of the data in shared memory.
    sharedMemory[localIndex] = deviceData[globalIndex];
    // All slots must be populated before anyone reads a neighbour's slot.
    __syncthreads();
    // Even global indices add the constants, odd ones subtract them
    // (powf(-1, parity) evaluates to +1 or -1).
    const unsigned int exponentPrimer = globalIndex % 2;
    for (unsigned int i = 0; i < 16; ++i)
    {
        const unsigned int currentConstant = ADDITIVE_VALUES[i];
        const float value = powf(-1, exponentPrimer) * currentConstant;
        sharedMemory[localIndex] += value;
    }
    // Every slot must be updated before any thread reads its mirror slot.
    __syncthreads();
    // Write back reversed within the block.
    deviceData[globalIndex] = sharedMemory[blockDim.x - localIndex - 1];
}
// ----------------------------------------- HOST OPERATIONS -----------------------------------------
// @brief Seed the C random number generator exactly once per process;
// subsequent calls are no-ops (tracked through the initializedRNG flag).
__host__ void initializeRandomNumbers()
{
    // Guard clause: already seeded, nothing to do.
    if (initializedRNG != 0)
    {
        return;
    }
    srand(time(NULL));
    initializedRNG = 1;
}
// @brief Fill `data` with `elementCount` pseudo-random values drawn
// uniformly from [0, max). Lazily seeds the RNG on first use.
//
// @param [inout] data         Destination array (holds >= elementCount values)
// @param [ in] elementCount   Number of values to generate
// @param [ in] max            Exclusive upper bound for each value
__host__ void generateRandomNumbers( unsigned int* data,
                                     const unsigned int elementCount,
                                     const unsigned int max)
{
    // Seed the RNG if nobody has done so yet.
    if (initializedRNG == 0)
    {
        initializeRandomNumbers();
    }
    // Populate each slot with a value in [0, max).
    unsigned int index = 0;
    while (index < elementCount)
    {
        data[index] = rand() % max;
        ++index;
    }
}
// @brief Run and time the three kernel variants (global-memory,
// local-register, and shared-memory) for one (blockCount, threadCount)
// configuration and print the per-kernel durations in milliseconds.
// Allocates, uses, and frees all host and device buffers internally.
//
// NOTE(review): CUDA return codes and cudaGetLastError() results are fetched
// but never inspected, so failures pass silently; cudaThreadSynchronize() is
// the deprecated spelling of cudaDeviceSynchronize() — confirm the targeted
// toolkit version before modernizing.
__host__ void run_gpu_algorithm(int blockCount, int threadCount)
{
// Step 1: Compute the size of the device array based on the block and thread/per block counts
unsigned int elementCount = threadCount * blockCount;
unsigned int deviceSize = UINT_SIZE * elementCount;
// Step 2: Allocate the necessary host memory (two input arrays and an output array
// (use malloc for the input, and calloc for the output since we want to modify
// the contents of the input PRIOR to executing the GPU kernels, but we want to
// initialize the output to 0 before copying the device output over)
unsigned int* hostInput1 = (unsigned int*) malloc(deviceSize);
unsigned int* hostInput2 = (unsigned int*) malloc(deviceSize);
unsigned int* hostOutput = (unsigned int*) calloc(elementCount, UINT_SIZE);
// Step 3: Populate the input arrays with random data, using the device size as the maximum value
// (the device size is used purely as a convenience number, and to ensure that the maximum
// value has the potential to change between each run)
generateRandomNumbers(hostInput1, elementCount, deviceSize);
generateRandomNumbers(hostInput2, elementCount, deviceSize);
// Step 4: Allocate the GPU memory arrays
unsigned int* deviceInput1;
unsigned int* deviceInput2;
unsigned int* deviceOutput;
cudaMalloc((void**)&deviceInput1, deviceSize);
cudaMalloc((void**)&deviceInput2, deviceSize);
cudaMalloc((void**)&deviceOutput, deviceSize);
// Step 5: Populate the GPU input with the host input data
cudaMemcpy(deviceInput1, hostInput1, deviceSize, cudaMemcpyHostToDevice);
cudaMemcpy(deviceInput2, hostInput2, deviceSize, cudaMemcpyHostToDevice);
// Step 6: Set the GPU output with all zeros
cudaMemset(deviceOutput, INITIAL_DATA, deviceSize);
// Step 7: Initialize the CUDA event start/stop timers for benchmarking
cudaEvent_t stopLocalEvent;
cudaEvent_t stopGlobalEvent;
cudaEvent_t stopSharedEvent;
cudaEvent_t startLocalEvent;
cudaEvent_t startGlobalEvent;
cudaEvent_t startSharedEvent;
cudaEventCreate(&stopLocalEvent);
cudaEventCreate(&stopGlobalEvent);
cudaEventCreate(&stopSharedEvent);
cudaEventCreate(&startLocalEvent);
cudaEventCreate(&startGlobalEvent);
cudaEventCreate(&startSharedEvent);
// Step 8: Invoke the global algorithm kernel with recording enabled
// (events bracket the launch; elapsed time is read after the stop event
// completes in Step 10)
cudaEventRecord(startGlobalEvent);
globalModularDivide<<<blockCount, threadCount>>>(deviceInput1, deviceInput2, deviceOutput);
cudaEventRecord(stopGlobalEvent);
cudaThreadSynchronize();
cudaGetLastError();
// Step 9: Retrieve the output from the global algorithm kernel
cudaMemcpy(hostOutput, deviceOutput, deviceSize, cudaMemcpyDeviceToHost);
// Step 10: Obtain the ms duration for the global algorithm
cudaEventSynchronize(stopGlobalEvent);
float globalTimeMS = 0.0f;
cudaEventElapsedTime(&globalTimeMS, startGlobalEvent, stopGlobalEvent);
// Step 11: Invoke the local algorithm kernel with recording enabled
cudaEventRecord(startLocalEvent);
localModularDivide<<<blockCount, threadCount>>>(deviceInput1, deviceInput2, deviceOutput);
cudaEventRecord(stopLocalEvent);
cudaThreadSynchronize();
cudaGetLastError();
// Step 12: Retrieve the output from the local algorithm kernel
cudaMemcpy(hostOutput, deviceOutput, deviceSize, cudaMemcpyDeviceToHost);
// Step 13: Obtain the ms duration for the local algorithm
cudaEventSynchronize(stopLocalEvent);
float localTimeMS = 0.0f;
cudaEventElapsedTime(&localTimeMS, startLocalEvent, stopLocalEvent);
// Step 14: Upload the constant memory values to the device:
// NOTE(review): constantMemory is allocated and filled with elementCount
// values, but only the first 16 are uploaded to ADDITIVE_VALUES — looks
// like intentional oversizing for convenience; confirm.
unsigned int* constantMemory = (unsigned int*) malloc(deviceSize);
generateRandomNumbers(constantMemory, elementCount, deviceSize);
cudaMemcpyToSymbol(ADDITIVE_VALUES, constantMemory, UINT_SIZE * 16);
// Step 15: Invoke the shared algorithm kernel with recording enabled
// (operates in place on the output of the previous kernel)
cudaEventRecord(startSharedEvent);
add_values_shared<<<blockCount, threadCount>>>(deviceOutput, elementCount);
cudaEventRecord(stopSharedEvent);
cudaThreadSynchronize();
cudaGetLastError();
// Step 16: Retrieve the output from the shared algorithm kernel
cudaMemcpy(hostOutput, deviceOutput, deviceSize, cudaMemcpyDeviceToHost);
// Step 17: Obtain the ms duration for the shared algorithm
cudaEventSynchronize(stopSharedEvent);
float sharedTimeMS = 0.0f;
cudaEventElapsedTime(&sharedTimeMS, startSharedEvent, stopSharedEvent);
// Step 18: Display the results of the two operations
printf("Block Count: %d\t Threads Per Block: %d\t", blockCount, threadCount);
printf("Global Duration: %2f ms\t", globalTimeMS);
printf("Shared Duration: %2f ms\t", sharedTimeMS);
printf("Local Duration: %2f ms\n", localTimeMS );
// Step 19: Free device memory:
cudaFree(deviceInput1);
cudaFree(deviceInput2);
cudaFree(deviceOutput);
// Step 20: Free host memory
free(hostInput1);
free(hostInput2);
free(hostOutput);
// Step 21: Free constant memory
free(constantMemory);
}
/// @brief determine if the provided number is a power of two
///
/// @param [in] number The number to validate
///
/// @return True if the provided number is a positive power of two, false
///         otherwise (including 0 and all negative values)
__host__ bool isPowerOfTwo(const int number)
{
    // A positive power of two has exactly one bit set, so clearing its
    // lowest set bit (number & (number - 1)) yields zero. This replaces the
    // previous 32-iteration bit scan and also fixes two edge cases of that
    // loop: 0 and INT_MIN were both reported as powers of two. All existing
    // callers reject values <= 0 before calling, so observable behaviour at
    // the call sites is unchanged.
    return number > 0 && (number & (number - 1)) == 0;
}
// @brief Display the proper program usage on stdout. Called from main()
// whenever argument validation fails; the caller is responsible for
// actually terminating the process.
__host__ void showUsage()
{
printf("Invalid arguments provided. Please see the usage below:\n");
printf(" module_6_jrenga2.exe <bc> <tpb>\n");
printf(" bc - The maximum number of blocks to run with. Must be a positive integer and a power of two.\n");
printf(" tpb - The maximum number of threads per blocks. Must be a positive integer and a power of two.\n");
printf("NOTE: The maximum number of threads (bc * tpb) must be greater than %d \n", THREAD_MIN);
printf(" and less than %d.\n", THREAD_MAX);
printf(" ** TERMINATING **\n");
}
// @brief Main Entry-Point: validates the <bc> <tpb> arguments, then
// benchmarks every power-of-two (blocks, threads-per-block) configuration
// up to the requested maxima.
int main(int argc, char* argv[])
{
    // Expect exactly two user-supplied arguments.
    if (argc != 3)
    {
        showUsage();
        return EXIT_FAILURE;
    }
    // Parse both arguments; atoi yields 0 on garbage, which the
    // positivity / power-of-two checks reject.
    const int numBlocks = atoi(argv[1]);
    if (numBlocks <= 0 || !isPowerOfTwo(numBlocks))
    {
        showUsage();
        return EXIT_FAILURE;
    }
    const int numThreads = atoi(argv[2]);
    if (numThreads <= 0 || !isPowerOfTwo(numThreads))
    {
        showUsage();
        return EXIT_FAILURE;
    }
    // The total thread count must land inside [THREAD_MIN, THREAD_MAX].
    const int totalThreads = numBlocks * numThreads;
    if (totalThreads < THREAD_MIN || totalThreads > THREAD_MAX)
    {
        showUsage();
        return EXIT_FAILURE;
    }
    // Reset the RNG guard, then seed the generator once up front.
    initializedRNG = false;
    initializeRandomNumbers();
    // Sweep every power-of-two configuration up to the requested maxima.
    for (unsigned int blockCount = 1; blockCount <= numBlocks; blockCount <<= 1)
    {
        for (unsigned int threadCount = 1; threadCount <= numThreads; threadCount <<= 1)
        {
            run_gpu_algorithm(blockCount, threadCount);
        }
    }
    return EXIT_SUCCESS;
}
|
1,323
|
#include "noise_module_base.cuh"
// A table of 256 random normalized vectors. Each row is an (x, y, z, 0)
// coordinate. The 0 is used as padding so we can use bit shifts to index
// any row in the table. These vectors have an even statistical
// distribution, which improves the quality of the coherent noise
// generated by these vectors. For more information, see "GPU Gems",
// Chapter 5 - Implementing Improved Perlin Noise by Ken Perlin,
// specifically page 76.
__constant__ double device_g_randomVectors[256 * 4] = {
-0.763874, -0.596439, -0.246489, 0.0,
0.396055, 0.904518, -0.158073, 0.0,
-0.499004, -0.8665, -0.0131631, 0.0,
0.468724, -0.824756, 0.316346, 0.0,
0.829598, 0.43195, 0.353816, 0.0,
-0.454473, 0.629497, -0.630228, 0.0,
-0.162349, -0.869962, -0.465628, 0.0,
0.932805, 0.253451, 0.256198, 0.0,
-0.345419, 0.927299, -0.144227, 0.0,
-0.715026, -0.293698, -0.634413, 0.0,
-0.245997, 0.717467, -0.651711, 0.0,
-0.967409, -0.250435, -0.037451, 0.0,
0.901729, 0.397108, -0.170852, 0.0,
0.892657, -0.0720622, -0.444938, 0.0,
0.0260084, -0.0361701, 0.999007, 0.0,
0.949107, -0.19486, 0.247439, 0.0,
0.471803, -0.807064, -0.355036, 0.0,
0.879737, 0.141845, 0.453809, 0.0,
0.570747, 0.696415, 0.435033, 0.0,
-0.141751, -0.988233, -0.0574584, 0.0,
-0.58219, -0.0303005, 0.812488, 0.0,
-0.60922, 0.239482, -0.755975, 0.0,
0.299394, -0.197066, -0.933557, 0.0,
-0.851615, -0.220702, -0.47544, 0.0,
0.848886, 0.341829, -0.403169, 0.0,
-0.156129, -0.687241, 0.709453, 0.0,
-0.665651, 0.626724, 0.405124, 0.0,
0.595914, -0.674582, 0.43569, 0.0,
0.171025, -0.509292, 0.843428, 0.0,
0.78605, 0.536414, -0.307222, 0.0,
0.18905, -0.791613, 0.581042, 0.0,
-0.294916, 0.844994, 0.446105, 0.0,
0.342031, -0.58736, -0.7335, 0.0,
0.57155, 0.7869, 0.232635, 0.0,
0.885026, -0.408223, 0.223791, 0.0,
-0.789518, 0.571645, 0.223347, 0.0,
0.774571, 0.31566, 0.548087, 0.0,
-0.79695, -0.0433603, -0.602487, 0.0,
-0.142425, -0.473249, -0.869339, 0.0,
-0.0698838, 0.170442, 0.982886, 0.0,
0.687815, -0.484748, 0.540306, 0.0,
0.543703, -0.534446, -0.647112, 0.0,
0.97186, 0.184391, -0.146588, 0.0,
0.707084, 0.485713, -0.513921, 0.0,
0.942302, 0.331945, 0.043348, 0.0,
0.499084, 0.599922, 0.625307, 0.0,
-0.289203, 0.211107, 0.9337, 0.0,
0.412433, -0.71667, -0.56239, 0.0,
0.87721, -0.082816, 0.47291, 0.0,
-0.420685, -0.214278, 0.881538, 0.0,
0.752558, -0.0391579, 0.657361, 0.0,
0.0765725, -0.996789, 0.0234082, 0.0,
-0.544312, -0.309435, -0.779727, 0.0,
-0.455358, -0.415572, 0.787368, 0.0,
-0.874586, 0.483746, 0.0330131, 0.0,
0.245172, -0.0838623, 0.965846, 0.0,
0.382293, -0.432813, 0.81641, 0.0,
-0.287735, -0.905514, 0.311853, 0.0,
-0.667704, 0.704955, -0.239186, 0.0,
0.717885, -0.464002, -0.518983, 0.0,
0.976342, -0.214895, 0.0240053, 0.0,
-0.0733096, -0.921136, 0.382276, 0.0,
-0.986284, 0.151224, -0.0661379, 0.0,
-0.899319, -0.429671, 0.0812908, 0.0,
0.652102, -0.724625, 0.222893, 0.0,
0.203761, 0.458023, -0.865272, 0.0,
-0.030396, 0.698724, -0.714745, 0.0,
-0.460232, 0.839138, 0.289887, 0.0,
-0.0898602, 0.837894, 0.538386, 0.0,
-0.731595, 0.0793784, 0.677102, 0.0,
-0.447236, -0.788397, 0.422386, 0.0,
0.186481, 0.645855, -0.740335, 0.0,
-0.259006, 0.935463, 0.240467, 0.0,
0.445839, 0.819655, -0.359712, 0.0,
0.349962, 0.755022, -0.554499, 0.0,
-0.997078, -0.0359577, 0.0673977, 0.0,
-0.431163, -0.147516, -0.890133, 0.0,
0.299648, -0.63914, 0.708316, 0.0,
0.397043, 0.566526, -0.722084, 0.0,
-0.502489, 0.438308, -0.745246, 0.0,
0.0687235, 0.354097, 0.93268, 0.0,
-0.0476651, -0.462597, 0.885286, 0.0,
-0.221934, 0.900739, -0.373383, 0.0,
-0.956107, -0.225676, 0.186893, 0.0,
-0.187627, 0.391487, -0.900852, 0.0,
-0.224209, -0.315405, 0.92209, 0.0,
-0.730807, -0.537068, 0.421283, 0.0,
-0.0353135, -0.816748, 0.575913, 0.0,
-0.941391, 0.176991, -0.287153, 0.0,
-0.154174, 0.390458, 0.90762, 0.0,
-0.283847, 0.533842, 0.796519, 0.0,
-0.482737, -0.850448, 0.209052, 0.0,
-0.649175, 0.477748, 0.591886, 0.0,
0.885373, -0.405387, -0.227543, 0.0,
-0.147261, 0.181623, -0.972279, 0.0,
0.0959236, -0.115847, -0.988624, 0.0,
-0.89724, -0.191348, 0.397928, 0.0,
0.903553, -0.428461, -0.00350461, 0.0,
0.849072, -0.295807, -0.437693, 0.0,
0.65551, 0.741754, -0.141804, 0.0,
0.61598, -0.178669, 0.767232, 0.0,
0.0112967, 0.932256, -0.361623, 0.0,
-0.793031, 0.258012, 0.551845, 0.0,
0.421933, 0.454311, 0.784585, 0.0,
-0.319993, 0.0401618, -0.946568, 0.0,
-0.81571, 0.551307, -0.175151, 0.0,
-0.377644, 0.00322313, 0.925945, 0.0,
0.129759, -0.666581, -0.734052, 0.0,
0.601901, -0.654237, -0.457919, 0.0,
-0.927463, -0.0343576, -0.372334, 0.0,
-0.438663, -0.868301, -0.231578, 0.0,
-0.648845, -0.749138, -0.133387, 0.0,
0.507393, -0.588294, 0.629653, 0.0,
0.726958, 0.623665, 0.287358, 0.0,
0.411159, 0.367614, -0.834151, 0.0,
0.806333, 0.585117, -0.0864016, 0.0,
0.263935, -0.880876, 0.392932, 0.0,
0.421546, -0.201336, 0.884174, 0.0,
-0.683198, -0.569557, -0.456996, 0.0,
-0.117116, -0.0406654, -0.992285, 0.0,
-0.643679, -0.109196, -0.757465, 0.0,
-0.561559, -0.62989, 0.536554, 0.0,
0.0628422, 0.104677, -0.992519, 0.0,
0.480759, -0.2867, -0.828658, 0.0,
-0.228559, -0.228965, -0.946222, 0.0,
-0.10194, -0.65706, -0.746914, 0.0,
0.0689193, -0.678236, 0.731605, 0.0,
0.401019, -0.754026, 0.52022, 0.0,
-0.742141, 0.547083, -0.387203, 0.0,
-0.00210603, -0.796417, -0.604745, 0.0,
0.296725, -0.409909, -0.862513, 0.0,
-0.260932, -0.798201, 0.542945, 0.0,
-0.641628, 0.742379, 0.192838, 0.0,
-0.186009, -0.101514, 0.97729, 0.0,
0.106711, -0.962067, 0.251079, 0.0,
-0.743499, 0.30988, -0.592607, 0.0,
-0.795853, -0.605066, -0.0226607, 0.0,
-0.828661, -0.419471, -0.370628, 0.0,
0.0847218, -0.489815, -0.8677, 0.0,
-0.381405, 0.788019, -0.483276, 0.0,
0.282042, -0.953394, 0.107205, 0.0,
0.530774, 0.847413, 0.0130696, 0.0,
0.0515397, 0.922524, 0.382484, 0.0,
-0.631467, -0.709046, 0.313852, 0.0,
0.688248, 0.517273, 0.508668, 0.0,
0.646689, -0.333782, -0.685845, 0.0,
-0.932528, -0.247532, -0.262906, 0.0,
0.630609, 0.68757, -0.359973, 0.0,
0.577805, -0.394189, 0.714673, 0.0,
-0.887833, -0.437301, -0.14325, 0.0,
0.690982, 0.174003, 0.701617, 0.0,
-0.866701, 0.0118182, 0.498689, 0.0,
-0.482876, 0.727143, 0.487949, 0.0,
-0.577567, 0.682593, -0.447752, 0.0,
0.373768, 0.0982991, 0.922299, 0.0,
0.170744, 0.964243, -0.202687, 0.0,
0.993654, -0.035791, -0.106632, 0.0,
0.587065, 0.4143, -0.695493, 0.0,
-0.396509, 0.26509, -0.878924, 0.0,
-0.0866853, 0.83553, -0.542563, 0.0,
0.923193, 0.133398, -0.360443, 0.0,
0.00379108, -0.258618, 0.965972, 0.0,
0.239144, 0.245154, -0.939526, 0.0,
0.758731, -0.555871, 0.33961, 0.0,
0.295355, 0.309513, 0.903862, 0.0,
0.0531222, -0.91003, -0.411124, 0.0,
0.270452, 0.0229439, -0.96246, 0.0,
0.563634, 0.0324352, 0.825387, 0.0,
0.156326, 0.147392, 0.976646, 0.0,
-0.0410141, 0.981824, 0.185309, 0.0,
-0.385562, -0.576343, -0.720535, 0.0,
0.388281, 0.904441, 0.176702, 0.0,
0.945561, -0.192859, -0.262146, 0.0,
0.844504, 0.520193, 0.127325, 0.0,
0.0330893, 0.999121, -0.0257505, 0.0,
-0.592616, -0.482475, -0.644999, 0.0,
0.539471, 0.631024, -0.557476, 0.0,
0.655851, -0.027319, -0.754396, 0.0,
0.274465, 0.887659, 0.369772, 0.0,
-0.123419, 0.975177, -0.183842, 0.0,
-0.223429, 0.708045, 0.66989, 0.0,
-0.908654, 0.196302, 0.368528, 0.0,
-0.95759, -0.00863708, 0.288005, 0.0,
0.960535, 0.030592, 0.276472, 0.0,
-0.413146, 0.907537, 0.0754161, 0.0,
-0.847992, 0.350849, -0.397259, 0.0,
0.614736, 0.395841, 0.68221, 0.0,
-0.503504, -0.666128, -0.550234, 0.0,
-0.268833, -0.738524, -0.618314, 0.0,
0.792737, -0.60001, -0.107502, 0.0,
-0.637582, 0.508144, -0.579032, 0.0,
0.750105, 0.282165, -0.598101, 0.0,
-0.351199, -0.392294, -0.850155, 0.0,
0.250126, -0.960993, -0.118025, 0.0,
-0.732341, 0.680909, -0.0063274, 0.0,
-0.760674, -0.141009, 0.633634, 0.0,
0.222823, -0.304012, 0.926243, 0.0,
0.209178, 0.505671, 0.836984, 0.0,
0.757914, -0.56629, -0.323857, 0.0,
-0.782926, -0.339196, 0.52151, 0.0,
-0.462952, 0.585565, 0.665424, 0.0,
0.61879, 0.194119, -0.761194, 0.0,
0.741388, -0.276743, 0.611357, 0.0,
0.707571, 0.702621, 0.0752872, 0.0,
0.156562, 0.819977, 0.550569, 0.0,
-0.793606, 0.440216, 0.42, 0.0,
0.234547, 0.885309, -0.401517, 0.0,
0.132598, 0.80115, -0.58359, 0.0,
-0.377899, -0.639179, 0.669808, 0.0,
-0.865993, -0.396465, 0.304748, 0.0,
-0.624815, -0.44283, 0.643046, 0.0,
-0.485705, 0.825614, -0.287146, 0.0,
-0.971788, 0.175535, 0.157529, 0.0,
-0.456027, 0.392629, 0.798675, 0.0,
-0.0104443, 0.521623, -0.853112, 0.0,
-0.660575, -0.74519, 0.091282, 0.0,
-0.0157698, -0.307475, -0.951425, 0.0,
-0.603467, -0.250192, 0.757121, 0.0,
0.506876, 0.25006, 0.824952, 0.0,
0.255404, 0.966794, 0.00884498, 0.0,
0.466764, -0.874228, -0.133625, 0.0,
0.475077, -0.0682351, -0.877295, 0.0,
-0.224967, -0.938972, -0.260233, 0.0,
-0.377929, -0.814757, -0.439705, 0.0,
-0.305847, 0.542333, -0.782517, 0.0,
0.26658, -0.902905, -0.337191, 0.0,
0.0275773, 0.322158, -0.946284, 0.0,
0.0185422, 0.716349, 0.697496, 0.0,
-0.20483, 0.978416, 0.0273371, 0.0,
-0.898276, 0.373969, 0.230752, 0.0,
-0.00909378, 0.546594, 0.837349, 0.0,
0.6602, -0.751089, 0.000959236, 0.0,
0.855301, -0.303056, 0.420259, 0.0,
0.797138, 0.0623013, -0.600574, 0.0,
0.48947, -0.866813, 0.0951509, 0.0,
0.251142, 0.674531, 0.694216, 0.0,
-0.578422, -0.737373, -0.348867, 0.0,
-0.254689, -0.514807, 0.818601, 0.0,
0.374972, 0.761612, 0.528529, 0.0,
0.640303, -0.734271, -0.225517, 0.0,
-0.638076, 0.285527, 0.715075, 0.0,
0.772956, -0.15984, -0.613995, 0.0,
0.798217, -0.590628, 0.118356, 0.0,
-0.986276, -0.0578337, -0.154644, 0.0,
-0.312988, -0.94549, 0.0899272, 0.0,
-0.497338, 0.178325, 0.849032, 0.0,
-0.101136, -0.981014, 0.165477, 0.0,
-0.521688, 0.0553434, -0.851339, 0.0,
-0.786182, -0.583814, 0.202678, 0.0,
-0.565191, 0.821858, -0.0714658, 0.0,
0.437895, 0.152598, -0.885981, 0.0,
-0.92394, 0.353436, -0.14635, 0.0,
0.212189, -0.815162, -0.538969, 0.0,
-0.859262, 0.143405, -0.491024, 0.0,
0.991353, 0.112814, 0.0670273, 0.0,
0.0337884, -0.979891, -0.196654, 0.0
};
#ifdef __CUDA_ARCH__
#else
const double host_g_randomVectors[256 * 4] = {
-0.763874, -0.596439, -0.246489, 0.0,
0.396055, 0.904518, -0.158073, 0.0,
-0.499004, -0.8665, -0.0131631, 0.0,
0.468724, -0.824756, 0.316346, 0.0,
0.829598, 0.43195, 0.353816, 0.0,
-0.454473, 0.629497, -0.630228, 0.0,
-0.162349, -0.869962, -0.465628, 0.0,
0.932805, 0.253451, 0.256198, 0.0,
-0.345419, 0.927299, -0.144227, 0.0,
-0.715026, -0.293698, -0.634413, 0.0,
-0.245997, 0.717467, -0.651711, 0.0,
-0.967409, -0.250435, -0.037451, 0.0,
0.901729, 0.397108, -0.170852, 0.0,
0.892657, -0.0720622, -0.444938, 0.0,
0.0260084, -0.0361701, 0.999007, 0.0,
0.949107, -0.19486, 0.247439, 0.0,
0.471803, -0.807064, -0.355036, 0.0,
0.879737, 0.141845, 0.453809, 0.0,
0.570747, 0.696415, 0.435033, 0.0,
-0.141751, -0.988233, -0.0574584, 0.0,
-0.58219, -0.0303005, 0.812488, 0.0,
-0.60922, 0.239482, -0.755975, 0.0,
0.299394, -0.197066, -0.933557, 0.0,
-0.851615, -0.220702, -0.47544, 0.0,
0.848886, 0.341829, -0.403169, 0.0,
-0.156129, -0.687241, 0.709453, 0.0,
-0.665651, 0.626724, 0.405124, 0.0,
0.595914, -0.674582, 0.43569, 0.0,
0.171025, -0.509292, 0.843428, 0.0,
0.78605, 0.536414, -0.307222, 0.0,
0.18905, -0.791613, 0.581042, 0.0,
-0.294916, 0.844994, 0.446105, 0.0,
0.342031, -0.58736, -0.7335, 0.0,
0.57155, 0.7869, 0.232635, 0.0,
0.885026, -0.408223, 0.223791, 0.0,
-0.789518, 0.571645, 0.223347, 0.0,
0.774571, 0.31566, 0.548087, 0.0,
-0.79695, -0.0433603, -0.602487, 0.0,
-0.142425, -0.473249, -0.869339, 0.0,
-0.0698838, 0.170442, 0.982886, 0.0,
0.687815, -0.484748, 0.540306, 0.0,
0.543703, -0.534446, -0.647112, 0.0,
0.97186, 0.184391, -0.146588, 0.0,
0.707084, 0.485713, -0.513921, 0.0,
0.942302, 0.331945, 0.043348, 0.0,
0.499084, 0.599922, 0.625307, 0.0,
-0.289203, 0.211107, 0.9337, 0.0,
0.412433, -0.71667, -0.56239, 0.0,
0.87721, -0.082816, 0.47291, 0.0,
-0.420685, -0.214278, 0.881538, 0.0,
0.752558, -0.0391579, 0.657361, 0.0,
0.0765725, -0.996789, 0.0234082, 0.0,
-0.544312, -0.309435, -0.779727, 0.0,
-0.455358, -0.415572, 0.787368, 0.0,
-0.874586, 0.483746, 0.0330131, 0.0,
0.245172, -0.0838623, 0.965846, 0.0,
0.382293, -0.432813, 0.81641, 0.0,
-0.287735, -0.905514, 0.311853, 0.0,
-0.667704, 0.704955, -0.239186, 0.0,
0.717885, -0.464002, -0.518983, 0.0,
0.976342, -0.214895, 0.0240053, 0.0,
-0.0733096, -0.921136, 0.382276, 0.0,
-0.986284, 0.151224, -0.0661379, 0.0,
-0.899319, -0.429671, 0.0812908, 0.0,
0.652102, -0.724625, 0.222893, 0.0,
0.203761, 0.458023, -0.865272, 0.0,
-0.030396, 0.698724, -0.714745, 0.0,
-0.460232, 0.839138, 0.289887, 0.0,
-0.0898602, 0.837894, 0.538386, 0.0,
-0.731595, 0.0793784, 0.677102, 0.0,
-0.447236, -0.788397, 0.422386, 0.0,
0.186481, 0.645855, -0.740335, 0.0,
-0.259006, 0.935463, 0.240467, 0.0,
0.445839, 0.819655, -0.359712, 0.0,
0.349962, 0.755022, -0.554499, 0.0,
-0.997078, -0.0359577, 0.0673977, 0.0,
-0.431163, -0.147516, -0.890133, 0.0,
0.299648, -0.63914, 0.708316, 0.0,
0.397043, 0.566526, -0.722084, 0.0,
-0.502489, 0.438308, -0.745246, 0.0,
0.0687235, 0.354097, 0.93268, 0.0,
-0.0476651, -0.462597, 0.885286, 0.0,
-0.221934, 0.900739, -0.373383, 0.0,
-0.956107, -0.225676, 0.186893, 0.0,
-0.187627, 0.391487, -0.900852, 0.0,
-0.224209, -0.315405, 0.92209, 0.0,
-0.730807, -0.537068, 0.421283, 0.0,
-0.0353135, -0.816748, 0.575913, 0.0,
-0.941391, 0.176991, -0.287153, 0.0,
-0.154174, 0.390458, 0.90762, 0.0,
-0.283847, 0.533842, 0.796519, 0.0,
-0.482737, -0.850448, 0.209052, 0.0,
-0.649175, 0.477748, 0.591886, 0.0,
0.885373, -0.405387, -0.227543, 0.0,
-0.147261, 0.181623, -0.972279, 0.0,
0.0959236, -0.115847, -0.988624, 0.0,
-0.89724, -0.191348, 0.397928, 0.0,
0.903553, -0.428461, -0.00350461, 0.0,
0.849072, -0.295807, -0.437693, 0.0,
0.65551, 0.741754, -0.141804, 0.0,
0.61598, -0.178669, 0.767232, 0.0,
0.0112967, 0.932256, -0.361623, 0.0,
-0.793031, 0.258012, 0.551845, 0.0,
0.421933, 0.454311, 0.784585, 0.0,
-0.319993, 0.0401618, -0.946568, 0.0,
-0.81571, 0.551307, -0.175151, 0.0,
-0.377644, 0.00322313, 0.925945, 0.0,
0.129759, -0.666581, -0.734052, 0.0,
0.601901, -0.654237, -0.457919, 0.0,
-0.927463, -0.0343576, -0.372334, 0.0,
-0.438663, -0.868301, -0.231578, 0.0,
-0.648845, -0.749138, -0.133387, 0.0,
0.507393, -0.588294, 0.629653, 0.0,
0.726958, 0.623665, 0.287358, 0.0,
0.411159, 0.367614, -0.834151, 0.0,
0.806333, 0.585117, -0.0864016, 0.0,
0.263935, -0.880876, 0.392932, 0.0,
0.421546, -0.201336, 0.884174, 0.0,
-0.683198, -0.569557, -0.456996, 0.0,
-0.117116, -0.0406654, -0.992285, 0.0,
-0.643679, -0.109196, -0.757465, 0.0,
-0.561559, -0.62989, 0.536554, 0.0,
0.0628422, 0.104677, -0.992519, 0.0,
0.480759, -0.2867, -0.828658, 0.0,
-0.228559, -0.228965, -0.946222, 0.0,
-0.10194, -0.65706, -0.746914, 0.0,
0.0689193, -0.678236, 0.731605, 0.0,
0.401019, -0.754026, 0.52022, 0.0,
-0.742141, 0.547083, -0.387203, 0.0,
-0.00210603, -0.796417, -0.604745, 0.0,
0.296725, -0.409909, -0.862513, 0.0,
-0.260932, -0.798201, 0.542945, 0.0,
-0.641628, 0.742379, 0.192838, 0.0,
-0.186009, -0.101514, 0.97729, 0.0,
0.106711, -0.962067, 0.251079, 0.0,
-0.743499, 0.30988, -0.592607, 0.0,
-0.795853, -0.605066, -0.0226607, 0.0,
-0.828661, -0.419471, -0.370628, 0.0,
0.0847218, -0.489815, -0.8677, 0.0,
-0.381405, 0.788019, -0.483276, 0.0,
0.282042, -0.953394, 0.107205, 0.0,
0.530774, 0.847413, 0.0130696, 0.0,
0.0515397, 0.922524, 0.382484, 0.0,
-0.631467, -0.709046, 0.313852, 0.0,
0.688248, 0.517273, 0.508668, 0.0,
0.646689, -0.333782, -0.685845, 0.0,
-0.932528, -0.247532, -0.262906, 0.0,
0.630609, 0.68757, -0.359973, 0.0,
0.577805, -0.394189, 0.714673, 0.0,
-0.887833, -0.437301, -0.14325, 0.0,
0.690982, 0.174003, 0.701617, 0.0,
-0.866701, 0.0118182, 0.498689, 0.0,
-0.482876, 0.727143, 0.487949, 0.0,
-0.577567, 0.682593, -0.447752, 0.0,
0.373768, 0.0982991, 0.922299, 0.0,
0.170744, 0.964243, -0.202687, 0.0,
0.993654, -0.035791, -0.106632, 0.0,
0.587065, 0.4143, -0.695493, 0.0,
-0.396509, 0.26509, -0.878924, 0.0,
-0.0866853, 0.83553, -0.542563, 0.0,
0.923193, 0.133398, -0.360443, 0.0,
0.00379108, -0.258618, 0.965972, 0.0,
0.239144, 0.245154, -0.939526, 0.0,
0.758731, -0.555871, 0.33961, 0.0,
0.295355, 0.309513, 0.903862, 0.0,
0.0531222, -0.91003, -0.411124, 0.0,
0.270452, 0.0229439, -0.96246, 0.0,
0.563634, 0.0324352, 0.825387, 0.0,
0.156326, 0.147392, 0.976646, 0.0,
-0.0410141, 0.981824, 0.185309, 0.0,
-0.385562, -0.576343, -0.720535, 0.0,
0.388281, 0.904441, 0.176702, 0.0,
0.945561, -0.192859, -0.262146, 0.0,
0.844504, 0.520193, 0.127325, 0.0,
0.0330893, 0.999121, -0.0257505, 0.0,
-0.592616, -0.482475, -0.644999, 0.0,
0.539471, 0.631024, -0.557476, 0.0,
0.655851, -0.027319, -0.754396, 0.0,
0.274465, 0.887659, 0.369772, 0.0,
-0.123419, 0.975177, -0.183842, 0.0,
-0.223429, 0.708045, 0.66989, 0.0,
-0.908654, 0.196302, 0.368528, 0.0,
-0.95759, -0.00863708, 0.288005, 0.0,
0.960535, 0.030592, 0.276472, 0.0,
-0.413146, 0.907537, 0.0754161, 0.0,
-0.847992, 0.350849, -0.397259, 0.0,
0.614736, 0.395841, 0.68221, 0.0,
-0.503504, -0.666128, -0.550234, 0.0,
-0.268833, -0.738524, -0.618314, 0.0,
0.792737, -0.60001, -0.107502, 0.0,
-0.637582, 0.508144, -0.579032, 0.0,
0.750105, 0.282165, -0.598101, 0.0,
-0.351199, -0.392294, -0.850155, 0.0,
0.250126, -0.960993, -0.118025, 0.0,
-0.732341, 0.680909, -0.0063274, 0.0,
-0.760674, -0.141009, 0.633634, 0.0,
0.222823, -0.304012, 0.926243, 0.0,
0.209178, 0.505671, 0.836984, 0.0,
0.757914, -0.56629, -0.323857, 0.0,
-0.782926, -0.339196, 0.52151, 0.0,
-0.462952, 0.585565, 0.665424, 0.0,
0.61879, 0.194119, -0.761194, 0.0,
0.741388, -0.276743, 0.611357, 0.0,
0.707571, 0.702621, 0.0752872, 0.0,
0.156562, 0.819977, 0.550569, 0.0,
-0.793606, 0.440216, 0.42, 0.0,
0.234547, 0.885309, -0.401517, 0.0,
0.132598, 0.80115, -0.58359, 0.0,
-0.377899, -0.639179, 0.669808, 0.0,
-0.865993, -0.396465, 0.304748, 0.0,
-0.624815, -0.44283, 0.643046, 0.0,
-0.485705, 0.825614, -0.287146, 0.0,
-0.971788, 0.175535, 0.157529, 0.0,
-0.456027, 0.392629, 0.798675, 0.0,
-0.0104443, 0.521623, -0.853112, 0.0,
-0.660575, -0.74519, 0.091282, 0.0,
-0.0157698, -0.307475, -0.951425, 0.0,
-0.603467, -0.250192, 0.757121, 0.0,
0.506876, 0.25006, 0.824952, 0.0,
0.255404, 0.966794, 0.00884498, 0.0,
0.466764, -0.874228, -0.133625, 0.0,
0.475077, -0.0682351, -0.877295, 0.0,
-0.224967, -0.938972, -0.260233, 0.0,
-0.377929, -0.814757, -0.439705, 0.0,
-0.305847, 0.542333, -0.782517, 0.0,
0.26658, -0.902905, -0.337191, 0.0,
0.0275773, 0.322158, -0.946284, 0.0,
0.0185422, 0.716349, 0.697496, 0.0,
-0.20483, 0.978416, 0.0273371, 0.0,
-0.898276, 0.373969, 0.230752, 0.0,
-0.00909378, 0.546594, 0.837349, 0.0,
0.6602, -0.751089, 0.000959236, 0.0,
0.855301, -0.303056, 0.420259, 0.0,
0.797138, 0.0623013, -0.600574, 0.0,
0.48947, -0.866813, 0.0951509, 0.0,
0.251142, 0.674531, 0.694216, 0.0,
-0.578422, -0.737373, -0.348867, 0.0,
-0.254689, -0.514807, 0.818601, 0.0,
0.374972, 0.761612, 0.528529, 0.0,
0.640303, -0.734271, -0.225517, 0.0,
-0.638076, 0.285527, 0.715075, 0.0,
0.772956, -0.15984, -0.613995, 0.0,
0.798217, -0.590628, 0.118356, 0.0,
-0.986276, -0.0578337, -0.154644, 0.0,
-0.312988, -0.94549, 0.0899272, 0.0,
-0.497338, 0.178325, 0.849032, 0.0,
-0.101136, -0.981014, 0.165477, 0.0,
-0.521688, 0.0553434, -0.851339, 0.0,
-0.786182, -0.583814, 0.202678, 0.0,
-0.565191, 0.821858, -0.0714658, 0.0,
0.437895, 0.152598, -0.885981, 0.0,
-0.92394, 0.353436, -0.14635, 0.0,
0.212189, -0.815162, -0.538969, 0.0,
-0.859262, 0.143405, -0.491024, 0.0,
0.991353, 0.112814, 0.0670273, 0.0,
0.0337884, -0.979891, -0.196654, 0.0
};
#endif
// Accessor for the flattened 256x4 random-vector table: returns component i,
// reading the __device__ copy when compiled for the GPU (__CUDA_ARCH__ set)
// and the host copy otherwise. i indexes individual doubles, not vectors;
// every 4th slot is a 0.0 pad. Caller must keep i within the table bounds.
double NoiseModule::g_randomVectors(int i) {
#ifdef __CUDA_ARCH__
return device_g_randomVectors[i];
#else
return host_g_randomVectors[i];
#endif
}
|
1,324
|
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 128 // 2^7
#define BLOCKS 1024 // 2^10
#define NUM_VALS THREADS*BLOCKS
// Report the wall-clock duration between two clock() samples in seconds.
void print_elapsed(clock_t start, clock_t stop)
{
  const double seconds = (double)(stop - start) / CLOCKS_PER_SEC;
  printf("Elapsed time: %fs\n", seconds);
}
// Read NUM_VALS ints from reverse_dataset.txt, sort them on the GPU with
// thrust::sort, print the first 20 sorted values, and report the CUDA-event
// timing of the sort. Returns nonzero on I/O failure.
int main(int argc, char const *argv[])
{
  clock_t start, stop;
  int *val = (int*)malloc(NUM_VALS * sizeof(int));
  if (val == NULL) {
    fprintf(stderr, "allocation failed\n");
    return 1;
  }
  thrust::device_vector<int> values(NUM_VALS);
  FILE *f = fopen("reverse_dataset.txt", "r");
  // BUG FIX: a missing input file previously crashed inside fscanf
  if (f == NULL) {
    fprintf(stderr, "cannot open reverse_dataset.txt\n");
    free(val);
    return 1;
  }
  for (int i = 0; i < NUM_VALS; i++) {
    // BUG FIX: detect short/garbled input instead of reading garbage
    if (fscanf(f, "%d\n", &val[i]) != 1) {
      fprintf(stderr, "bad input at entry %d\n", i);
      fclose(f);
      free(val);
      return 1;
    }
  }
  fclose(f);
  // one bulk host-to-device transfer instead of NUM_VALS element-wise copies
  thrust::copy(val, val + NUM_VALS, values.begin());
  printf("Hello\n");
  cudaEvent_t estart, estop;
  cudaEventCreate( &estart );
  cudaEventCreate( &estop );
  start = clock();
  cudaEventRecord( estart, 0 );
  thrust::sort(values.begin(), values.end());
  cudaEventRecord( estop, 0 );
  cudaEventSynchronize( estop );
  stop = clock();
  float elapsedTime;
  cudaEventElapsedTime( &elapsedTime, estart, estop );
  // copy the sorted data back and show the first 20 entries
  thrust::copy(values.begin(), values.end(), val);
  for (int i = 0; i < 20; ++i)
  {
    printf("%d\n", val[i]);
  }
  printf("Elapsed time: %f\n", elapsedTime);
  // release timing events and host buffer (previously leaked)
  cudaEventDestroy(estart);
  cudaEventDestroy(estop);
  free(val);
  return 0;
}
|
1,325
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <stdbool.h>
static int grid_array[5]={5,9,16,23,30};
static int block_array[5]={2,3,5,10,12};
static FILE *pointerToFile;
// Compute the weighted 5x5 neighborhood sum for lattice site `current`
// (grid coordinates xAxes,yAxes on a toroidal n x n lattice) from
// readingArray, and write the resulting spin (+1/-1, or unchanged on a
// zero sum) into writingArray. Out-of-range sites are ignored, so padded
// launches are safe.
__device__
static void calculate(int *readingArray, int* writingArray, double *weights, int n ,int current,int xAxes, int yAxes){
double Sum = 0;
if(current < n*n)
{
// loop through all the points of the 5x5 window that affect this spin
for(int p=-2;p<3;p++){
for(int q=-2;q<3;q++){
Sum += weights[(p+2)*5+(q+2)] * readingArray[((p + yAxes + n) % n) * n + ( q + xAxes + n) % n];
// the "+ n ... % n" indexing wraps coordinates around the lattice edges
}
}
// decide the spin's new value; the +/-0.00001 band absorbs floating-point
// noise around an exact zero
if(Sum > 0.00001)
writingArray[current] = 1;
else if(Sum < -0.00001)
writingArray[current] = -1;
else // effectively zero: the spin keeps its previous value
writingArray[current] = readingArray[current];
}
}
// cuda function to parallelize the spin calculation
__global__ void spinCalculation(int n, double * gpuWeights,int *gpuG,int *gpuGTemp,int i,int block,int looper) {
// variable to hold the sum of the weights
int current = blockIdx.x * block * block + threadIdx.x; // calculation of the current index
int xAxes;
int yAxes;
for(int q=0;q<looper;q++)
{
// switch the i%2 which is the current number of iretarion
// so periodically we will be writing to gpuGTemp and then to gpuG
switch (i%2) {
case 0:
xAxes=(current*looper+q)%n;
yAxes=(current*looper+q)/n;
calculate(gpuG,gpuGTemp,gpuWeights,n,current*looper+q,xAxes,yAxes);
break;
// here everything is the same with the difference that is reading from the gpuGTemp array
// and write to the gpuG
case 1:
xAxes=(current*looper+q)%n;
yAxes=(current*looper+q)/n;
calculate(gpuGTemp,gpuG,gpuWeights,n,current*looper+q,xAxes,yAxes);
break;
}
}
}
// Read n*n ints of lattice data from an already-opened binary file into
// `array`, then close the file. Exits the process on a missing file or a
// short read (previously a short read went undetected).
void takeBinData(int *array, FILE *file,int n){
    if (file==NULL)
    {
        printf("error opening file");
        exit(1);
    }
    size_t got = fread(array, sizeof(int), (size_t)n * n, file);
    if (got != (size_t)n * n) {
        // BUG FIX: fread's return value was previously ignored
        fprintf(stderr, "short read: got %zu of %d items\n", got, n * n);
        fclose(file);
        exit(1);
    }
    fclose(file);
}
// Run k Ising sweeps on an n x n lattice with a (grid*grid) x (block*block)
// launch. G supplies the input spins and receives the final state; w is the
// flattened 5x5 weight stencil. The two device buffers ping-pong between
// iterations (see spinCalculation), so after an odd k the result is in
// gpuTempG, after an even k in gpuG.
void ising (int *G, double *w, int k, int n,int grid ,int block)
{
// sites handled per thread (+1 so the padded launch always covers n*n;
// out-of-range sites are guarded inside calculate)
int looper= n*n/(grid*grid*block*block) + 1;
double *weights;
cudaMalloc(&weights,sizeof(double)*25);
cudaMemcpy(weights,w,25*sizeof(double),cudaMemcpyHostToDevice);
int *tempG=(int *) malloc(sizeof(int)*n*n);
memcpy(tempG,G,n*n*sizeof(int));
int *gpuTempG;
cudaMalloc(&gpuTempG,n*n*sizeof(int));
int *gpuG;
cudaMalloc(&gpuG,n*n*sizeof(int));
cudaMemcpy(gpuTempG,tempG,n*n*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(gpuG,G,n*n*sizeof(int),cudaMemcpyHostToDevice);
for(int i=0;i<k;i++){
spinCalculation<<<grid*grid,block*block>>>(n,weights,gpuG,gpuTempG,i,block,looper);
cudaDeviceSynchronize();
}
// pick the buffer that holds the newest state (see ping-pong note above)
if(k%2==1){
cudaMemcpy(G,gpuTempG,n*n*sizeof(int),cudaMemcpyDeviceToHost);
}
else{
cudaMemcpy(G,gpuG,n*n*sizeof(int),cudaMemcpyDeviceToHost);
}
// BUG FIX: the device weights buffer was previously leaked on every call
cudaFree(weights);
cudaFree(gpuG);
cudaFree(gpuTempG);
free(tempG);
}
// Compare the computed lattice G against the reference expectedState for an
// n x n grid, report whether the k-iteration run matched, and print the
// number of mismatching sites.
void checkCorrectness(int *G, int *expectedState,int n,int k){
    int counter = 0;
    for (int idx = 0; idx < n * n; idx++) {
        if (G[idx] != expectedState[idx]) {
            counter++;
        }
    }
    if (counter == 0) {
        printf("ising for k=%d is correct\n",k );
    } else {
        printf("ising for k=%d is wrong\n",k );
    }
    printf("%d\n",counter );
}
// Sweep over all grid/block configuration pairs, timing k=1, 4 and 11 Ising
// runs on a 517x517 lattice and checking each result against the reference
// conf-*.bin snapshots.
int main(){
int n=517;
int grid,block;
for(int i=0;i<5;i++){
for(int j=0;j<5;j++)
{
grid=grid_array[i];
block=block_array[j];
int *initialG=(int *) malloc(sizeof(int)*n*n);
int *G=(int *)malloc(sizeof(int)*n*n);
int *expectedState=(int *)malloc(sizeof(int)*n*n);
FILE *file;
file= fopen("conf-init.bin","rb");
takeBinData(initialG,file,n);
memcpy(G,initialG,sizeof(int)*n*n);
// 5x5 weight stencil with a zero self-weight at the center
double weights[] = {0.004, 0.016, 0.026, 0.016, 0.004,
0.016, 0.071, 0.117, 0.071, 0.016,
0.026, 0.117, 0, 0.117, 0.026,
0.016, 0.071, 0.117, 0.071, 0.016,
0.004, 0.016, 0.026, 0.016, 0.004};
clock_t start,end;
start=clock();
ising(G,weights,1,n,grid,block);
end=clock();
printf("%lf\n",((double)(end-start))/CLOCKS_PER_SEC);
file=fopen("conf-1.bin","rb");
takeBinData(expectedState,file,n);
checkCorrectness(G,expectedState,n,1);
memcpy(G,initialG,sizeof(int)*n*n);
start=clock();
ising(G,weights,4,n,grid,block);
end=clock();
printf("%lf\n",((double)(end-start))/CLOCKS_PER_SEC );
file =fopen("conf-4.bin","rb");
takeBinData(expectedState,file,n);
checkCorrectness(G,expectedState,n,4);
memcpy(G,initialG,sizeof(int)*n*n);
start=clock();
ising(G,weights,11,n,grid,block);
end=clock();
printf("%lf\n",((double)(end-start))/CLOCKS_PER_SEC );
file =fopen("conf-11.bin","rb");
takeBinData(expectedState,file,n);
checkCorrectness(G,expectedState,n,11);
// BUG FIX: these buffers were previously leaked on every one of the
// 25 configuration iterations
free(initialG);
free(G);
free(expectedState);
}
}
return 0;
}
|
1,326
|
#include <stdio.h>
#define N 64
// GPU kernel: C = A*B for N x N int matrices, one thread per output element.
// Threads mapped outside the matrix exit immediately.
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= N || col >= N)
        return;
    int acc = 0;
    for (int k = 0; k < N; ++k)
        acc += a[row * N + k] * b[k * N + col];
    c[row * N + col] = acc;
}
// Reference CPU implementation of C = A*B for N x N int matrices,
// used to verify the GPU kernel's output.
void matrixMulCPU( int * a, int * b, int * c )
{
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col) {
            int acc = 0;
            for (int k = 0; k < N; ++k)
                acc += a[row * N + k] * b[k * N + col];
            c[row * N + col] = acc;
        }
    }
}
// Multiply two N x N matrices on the GPU and on the CPU, then compare the
// results element-by-element. Uses unified memory so both sides can touch
// the same buffers.
int main()
{
  int *a, *b, *c_cpu, *c_gpu;
  int size = N * N * sizeof (int); // bytes in an N x N int matrix
  // Allocate unified memory visible to both host and device
  cudaMallocManaged (&a, size);
  cudaMallocManaged (&b, size);
  cudaMallocManaged (&c_cpu, size);
  cudaMallocManaged (&c_gpu, size);
  // Initialize inputs; zero both result buffers
  for( int row = 0; row < N; ++row )
    for( int col = 0; col < N; ++col )
    {
      a[row*N + col] = row;
      b[row*N + col] = col+2;
      c_cpu[row*N + col] = 0;
      c_gpu[row*N + col] = 0;
    }
  dim3 threads_per_block (16, 16, 1); // a 16 x 16 block of threads
  // FIX: ceil-division covers N exactly; the old (N/16)+1 launched a full
  // extra block row/column whenever N was a multiple of 16 (as it is here)
  dim3 number_of_blocks ((N + threads_per_block.x - 1) / threads_per_block.x,
                         (N + threads_per_block.y - 1) / threads_per_block.y, 1);
  matrixMulGPU <<< number_of_blocks, threads_per_block >>> ( a, b, c_gpu );
  cudaDeviceSynchronize(); // wait for the GPU result before comparing
  // CPU reference for verification
  matrixMulCPU( a, b, c_cpu );
  // Compare the two answers to make sure they are equal
  bool error = false;
  for( int row = 0; row < N && !error; ++row )
    for( int col = 0; col < N && !error; ++col )
      if (c_cpu[row * N + col] != c_gpu[row * N + col])
      {
        printf("FOUND ERROR at c[%d][%d]\n", row, col);
        error = true;
        break;
      }
  if (!error)
    printf("Success!\n");
  // Free all our allocated memory
  cudaFree(a); cudaFree(b);
  cudaFree( c_cpu ); cudaFree( c_gpu );
}
|
1,327
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
using namespace std;
// In-place tree reduction of 2*blockDim.x ints in global memory; the total
// ends up in d[0]. Launch with a single block of count/2 threads.
__global__ void sumSingleBlock(int* d) {
  int tid = threadIdx.x;
  // number of participating threads halves on each iteration
  for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>= 1, stepSize <<= 1) {
    // thread must be allowed to write
    if(tid < tc) {
      int pa = tid * stepSize * 2;
      int pb = pa + stepSize;
      d[pa] += d[pb];
    }
    // BUG FIX: barrier placed outside the divergent branch so every pass
    // finishes its adds before the next pass reads the partial sums
    // (the original had no barrier at all and raced)
    __syncthreads();
  }
}
// Shared-memory variant: stage 2*blockDim.x inputs in dynamic shared memory
// (pass the byte size as the third launch parameter), reduce there, and
// write the total back to d[0].
__global__ void sumSingleBlock2(int* d) {
  extern __shared__ int dcopy[];
  int tid = threadIdx.x;
  dcopy[tid * 2] = d[tid * 2];
  dcopy[tid * 2 + 1] = d[tid * 2 + 1];
  // BUG FIX: all staging writes must land before any thread starts reducing
  __syncthreads();
  // number of participating threads halves on each iteration
  for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>= 1, stepSize <<= 1) {
    if(tid < tc) {
      int pa = tid * stepSize * 2;
      int pb = pa + stepSize;
      dcopy[pa] += dcopy[pb];
    }
    // BUG FIX: barrier between passes, outside the divergent branch, so each
    // pass reads fully-written partial sums
    __syncthreads();
  }
  if(tid == 0) {
    d[0] = dcopy[0];
  }
}
// Build the sequence 1..count on the host, sum it on the device with a
// single-block reduction, and print the result (expected 256*257/2 = 32896).
int main() {
  cudaError_t status;
  const int count = 256;
  const int size = count * sizeof(int);
  int* h = new int[count];
  for (int i = 0; i < count; ++i) {
    h[i] = i + 1;
  }
  int* d;
  status = cudaMalloc(&d, size);
  status = cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
  // count/2 threads: each adds one pair in the first reduction pass
  sumSingleBlock<<<1, count /2, size>>>(d);
  int result;
  // BUG FIX: this copy reads the device result back, so the direction must
  // be cudaMemcpyDeviceToHost (it was cudaMemcpyHostToDevice, which left
  // `result` uninitialized)
  status = cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
  cout << "Sum is " << result << endl;
  // BUG FIX: release the device buffer (previously leaked)
  cudaFree(d);
  delete [] h;
  return 0;
}
|
1,328
|
#include <stdio.h>
#include <cuda.h>
#include <string.h>
#include <time.h>
// Parallel Computing Lab 3
// Author: Andrew Huang
// forward declare
void deviceProperties(void);
long getMax(long * a, long);
#define THREADS_PER_BLOCK 1024 // 3.x
// Enumerate the CUDA devices visible to the runtime and print memory-related
// properties plus the derived peak bandwidth for each.
void deviceProperties(void){
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    for (int dev = 0; dev < deviceCount; dev++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        printf("Device Number: %d\n", dev);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        // peak BW = 2 (DDR) * clock * bus width in bytes, scaled to GB/s
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
    }
}
// Block-level max reduction: each block loads THREADS_PER_BLOCK values into
// shared memory and writes its maximum to result[blockIdx.x].
// NOTE(review): out-of-range lanes use -INT_MAX as the neutral element; fine
// for this program's non-negative inputs, but LONG_MIN would be the fully
// general choice.
__global__ void getMaxCUDA(long arr[], long size, long result[]){
__shared__ long arr_all[THREADS_PER_BLOCK];
long gid = blockIdx.x * blockDim.x + threadIdx.x;
arr_all[threadIdx.x] = -INT_MAX;
if (gid < size){
arr_all[threadIdx.x] = arr[gid]; // bounds
}
__syncthreads();
// BUG FIX: the stride must start at blockDim.x/2, not blockIdx.x/2 -- the
// original made every block emit its first element instead of its maximum.
for (long s = blockDim.x/2; s > 0; s = s/2){
if (threadIdx.x < s) {
arr_all[threadIdx.x] = max(arr_all[threadIdx.x], arr_all[threadIdx.x + s]);
}
// barrier after each pass so the next pass reads completed partials
__syncthreads();
}
if (threadIdx.x == 0)result[blockIdx.x] = arr_all[0];
}
// Host driver: returns the maximum of arr[0..size-1] via repeated GPU block
// reductions. The input is zero-padded up to a multiple of
// THREADS_PER_BLOCK (safe here because this program's values are
// non-negative; a fully general version would pad with LONG_MIN).
long getMax(long arr[], long size){
long new_size;
if (size % THREADS_PER_BLOCK != 0) {
new_size = (size / THREADS_PER_BLOCK + 1) * THREADS_PER_BLOCK;
} else {
new_size = size;
}
long *padded = (long *) malloc(sizeof(long) * new_size);
for (long i = 0; i < new_size; i++){
padded[i] = (i < size) ? arr[i] : 0;
}
long block_count = new_size / THREADS_PER_BLOCK;
long *d_in;
long *d_out;
cudaMalloc((void **) &d_in, sizeof(long) * new_size);
cudaMemcpy((void *) d_in, (void *) padded, sizeof(long) * new_size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_out, sizeof(long) * block_count); // per-block maxima
// BUG FIX: the original aliased input and output after the first pass
// (arr_copy = result), making later passes read the buffer they were
// writing and then cudaFree the same pointer twice. Ping-pong two
// distinct buffers instead; both are large enough for every pass.
do {
block_count = ceil((float)new_size / (float)THREADS_PER_BLOCK);
getMaxCUDA<<<block_count, THREADS_PER_BLOCK>>>(d_in, new_size, d_out);
new_size = block_count;
long *tmp = d_in;
d_in = d_out;
d_out = tmp;
} while (block_count > 1);
// after the final swap the last pass's output sits in d_in[0]
long res;
cudaMemcpy((void *)&res, (void *)d_in, sizeof(long), cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
free(padded);
return res;
}
// Fill an array with n random values in [0, n) and print its maximum,
// computed on the GPU via getMax.
int main(int argc, char * argv[]){
if (argc!= 2){
printf("Usage: maxgpu N\n");
// BUG FIX: terminate the message with a newline
printf("where N is the size of the array\n");
exit(1);
}
int n; // number of integers and size
long *arr; // array
n = atoi(argv[1]);
// atoi returns 0 on garbage input; reject non-positive sizes up front
if (n <= 0){
printf("N must be a positive integer\n");
exit(1);
}
arr = (long *)malloc(sizeof(long) * n);
if (!arr){
printf("failed to allocate array\n");
exit(1);
}
srand(time(NULL));
for (long i = 0; i < n; i ++){
arr[i] = rand() % n;
}
// cuda time
//deviceProperties();
long res = getMax(arr, n);
printf("The maximum number in the array is: %ld\n", res);
free(arr);
return 0;
}
|
1,329
|
#include <stdio.h>
// Escape-time Mandelbrot iteration for point (xPos, yPos), starting from
// z = (xPos, yPos) with up to `crunch` iterations. Returns the remaining
// iteration budget: 0 means the budget was exhausted without the orbit
// leaving |z|^2 < 4 (point presumed inside the set); a positive value means
// the orbit escaped with that many iterations left. Note --i decrements
// before the escape test, so the budget is consumed even on the final check.
template<class T>
__device__ inline int CalcMandelbrot(const T xPos, const T yPos, const int crunch)
{
T y = yPos;
T x = xPos;
T yy = y * y;
T xx = x * x;
int i = crunch;
while (--i && (xx + yy < T(4.0))) {
// z <- z^2 + c, with c = (xPos, yPos); squares cached from last pass
y = x * y * T(2.0) + yPos;
x = xx - yy + xPos;
yy = y * y;
xx = x * x;
}
return i;
}
// The Mandelbrot CUDA GPU thread function
extern "C" __global__ void Mandelbrot0_sm10(int *dst, const int imageW, const int imageH, const int crunch,
const float xOff, const float yOff, const float scale)
{
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Calculate the location
const float xPos = xOff + (float)ix * scale;
const float yPos = yOff - (float)iy * scale;
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrot<float>(xPos, yPos, crunch);
m = m > 0 ? crunch - m : crunch;
// Output the pixel
int pixel = imageW * iy + ix;
dst[pixel] = m;
}
}
|
1,330
|
#include "includes.h"
// Element-wise scalar add: target[i] = m[i] + val for the first `len`
// elements; out-of-range threads exit immediately.
__global__ void _ele_add(float *m, float *target, float val, int len){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= len)
        return;
    target[idx] = val + m[idx];
}
|
1,331
|
#include <cuda_runtime.h>
#include<stdio.h>
// For each idx < N, folds a[idx] + b[idx] + 1 into c[idx] 1000 times, i.e.
// c[idx] ends at (initial c[idx]) + 1000*(a[idx] + b[idx] + 1).
// NOTE(review): the result therefore depends on c's initial device contents,
// and the launcher in this file never initializes the c buffer -- confirm
// whether zero-initialization was intended.
__global__ void add(int *a, int *b, int *c, int N)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < N)
// the bare `if` guards this entire for statement (no braces needed)
for(int i=0;i<1000;i++){
c[idx] = a[idx] + b[idx]+1+c[idx];
}
}
// Adds two constant vectors 1000 times on the GPU, accumulating into c,
// and prints the first 10 results (expected 1000*(1+2+1) = 4000 each).
__host__
int main(){
const int N=10000;
int *a,*b,*c,*da,*db,*dc;
a=new int[N];
b=new int[N];
c=new int[N];
for(int i=0;i<N;i++){
a[i]=1;
b[i]=2;
}
cudaMalloc(&da,sizeof(int)*N);
cudaMalloc(&db,sizeof(int)*N);
cudaMalloc(&dc,sizeof(int)*N);
cudaMemcpy(da,a,sizeof(int)*N,cudaMemcpyHostToDevice);
cudaMemcpy(db,b,sizeof(int)*N,cudaMemcpyHostToDevice);
// BUG FIX: the kernel accumulates into c, so the device buffer must start
// at zero -- it was previously read uninitialized
cudaMemset(dc,0,sizeof(int)*N);
int blockNum=(N+1023)/1024; // ceil-div so the grid covers all N elements
add<<<blockNum,1024>>>(da,db,dc,N);
cudaMemcpy(c,dc,sizeof(int)*N,cudaMemcpyDeviceToHost);
for(int i=0;i<10;i++){
printf("%d\n",c[i]);
}
// BUG FIX: release device and host buffers (previously leaked)
cudaFree(da);
cudaFree(db);
cudaFree(dc);
delete[] a;
delete[] b;
delete[] c;
return 0;
}
|
1,332
|
/*
Ye Wang
CPEG655
lab2 problem 2.a
*/
#include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <sys/time.h>
__global__ void
matrixMul_2a(int TILE_SIZE, int BLOCK_SIZE, float *C, float *A, float *B, int N);
void mm(float * C, float * A, float * B, int N);
float GetRand(int seed);
void randomInit(float *data, int size, float val);
void constantInit(float *data, int size, float val);
int matrixMultiply(int tile_size, int block_size, dim3 &dimsA, dim3 &dimsB);
// Entry point: run the tiled matrix multiply once with the configuration
// (block=16, tile=1) selected by the commented-out parameter sweep below.
int main(int argc, char **argv)
{
    int N = 1024;
    dim3 dimsA(N, N);
    dim3 dimsB(N, N);
    // sweep previously used to find the best NB/NT:
    // int block_size = 1;
    // for (int i = 0; i < 4; i++, block_size *= 2) {
    //     int tile_size = 1;
    //     for (int j = 0; j < 5; j++, tile_size *= 2)
    //         matrixMultiply(tile_size, block_size, dimsA, dimsB);
    // }
    int block_size = 16;
    int tile_size = 1;
    matrixMultiply(tile_size, block_size, dimsA, dimsB);
    return 0;
}
//BLOCK_SIZE=width / GridDim(number of blocks in a dimention)
// Naive single-block kernel: each thread computes one element of C = A*B
// using its (threadIdx.x, threadIdx.y) as the output column/row.
// BUG FIX: the dot-product loop previously stopped at N-1 and dropped the
// last term (the sibling kernels in this file all iterate i < N).
__global__ void
matrixMul_1a(float *C, float *A, float *B, int N)
{
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
float sum = 0.f;
for (int n=0; n<N; n++){
sum += A[ty*N+n]*B[n*N+tx];
}
C[ty*N+tx] = sum;
}
// Multi-block kernel: one thread per element of C = A*B, with the global
// row/column derived from a BLOCK_SIZE x BLOCK_SIZE thread tile.
// Assumes the grid exactly covers the N x N output (no bounds guard).
__global__ void
matrixMul_1b(int BLOCK_SIZE, float *C, float *A, float *B, int N)
{
    // global output column and row for this thread
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    float acc = 0;
    // dot product of row `row` of A with column `col` of B
    for (int k = 0; k < N; k++)
    {
        acc += A[row * N + k] * B[k * N + col];
    }
    C[N * row + col] = acc;
}
// Tiled kernel: each thread computes a TILE_SIZE x TILE_SIZE patch of
// C = A*B. Astart/Bstart are the patch's starting column (x) and row (y);
// assumes N = gridDim * BLOCK_SIZE * TILE_SIZE exactly (no bounds guard).
__global__ void
matrixMul_2a(int TILE_SIZE, int BLOCK_SIZE, float *C, float *A, float *B, int N)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// starting column (Astart) and row (Bstart) of this thread's patch of C
int Astart = threadIdx.x*TILE_SIZE+bx*BLOCK_SIZE*TILE_SIZE;
int Bstart = threadIdx.y*TILE_SIZE+by*BLOCK_SIZE*TILE_SIZE;
int tx=Astart;
int ty=Bstart;
float Csub = 0;
// ty walks the patch's rows, tx its columns
for(int k=0;k<TILE_SIZE;k++,ty++)
{
tx=Astart;
for(int j=0;j<TILE_SIZE;j++, tx++)
{
Csub = 0;
// dot product of row ty of A with column tx of B
for (int i= 0; i < N; i++)
{
Csub +=A[ty*N+i]*B[tx+N*i];// a*b;
}
C[N * ty + tx] =Csub;
}
}
}
// Same tiled kernel as matrixMul_2a, but with the inner dot-product loop
// unrolled 8x by the compiler via #pragma unroll.
__global__ void
matrixMul_2b_pragma(int TILE_SIZE, int BLOCK_SIZE, float *C, float *A, float *B, int N)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// starting column/row of this thread's TILE_SIZE x TILE_SIZE patch of C
int Astart = threadIdx.x*TILE_SIZE+bx*BLOCK_SIZE*TILE_SIZE;
int Bstart = threadIdx.y*TILE_SIZE+by*BLOCK_SIZE*TILE_SIZE;
int tx=Astart;
int ty=Bstart;
float Csub = 0;
for(int k=0;k<TILE_SIZE;k++,ty++)
{
tx=Astart;
for(int j=0;j<TILE_SIZE;j++, tx++)
{
Csub = 0;
// ask the compiler to unroll the dot product by a factor of 8
#pragma unroll 8
for (int i= 0; i < N; i++)
{
Csub +=A[ty*N+i]*B[tx+N*i];// a*b;
}
C[N * ty + tx] =Csub;
}
}
}
// Same tiled kernel as matrixMul_2a with the dot product manually unrolled
// by 8. PRECONDITION: N must be a multiple of 8 -- there is no remainder
// handling in the unrolled loop.
__global__ void
matrixMul_2b_manual(int TILE_SIZE, int BLOCK_SIZE, float *C, float *A, float *B, int N)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// starting column/row of this thread's TILE_SIZE x TILE_SIZE patch of C
int Astart = threadIdx.x*TILE_SIZE+bx*BLOCK_SIZE*TILE_SIZE;
int Bstart = threadIdx.y*TILE_SIZE+by*BLOCK_SIZE*TILE_SIZE;
int tx=Astart;
int ty=Bstart;
float Csub = 0;
for(int k=0;k<TILE_SIZE;k++,ty++)
{
tx=Astart;
for(int j=0;j<TILE_SIZE;j++, tx++)
{
Csub = 0;
for (int i= 0; i < N; i+=8)
{
// px walks row ty of A; py walks column tx of B (stride N per step)
int px=ty*N+i;
int py=tx+N*i;
Csub +=A[px]*B[py];
Csub +=A[px+1]*B[py+N];
Csub +=A[px+2]*B[py+2*N];
Csub +=A[px+3]*B[py+3*N];
Csub +=A[px+4]*B[py+4*N];
Csub +=A[px+5]*B[py+5*N];
Csub +=A[px+6]*B[py+6*N];
Csub +=A[px+7]*B[py+7*N];
}
C[N * ty + tx] =Csub;
}
}
}
// Reference host implementation: C = A*B for row-major N x N float
// matrices, used to verify the GPU kernels.
void mm(float * C, float * A, float * B, int N)
{
    for (int col = 0; col < N; col++) {
        for (int row = 0; row < N; row++) {
            float acc = 0;
            // dot product of row `row` of A with column `col` of B
            for (int k = 0; k < N; k++)
                acc += A[row*N + k] * B[k*N + col];
            C[row*N + col] = acc;
        }
    }
}
// Pseudo-random float in [0, ~976.5): reseeds rand() on every call from the
// current microsecond count (mod 17) plus `seed`, so results are
// nondeterministic across runs and calls within the same microsecond tick
// with the same seed will repeat.
float GetRand(int seed)
{
struct timeval tv;
gettimeofday(&tv,NULL);
srand(tv.tv_usec%17+seed);
return((rand()% 1000) / 1.02);
}
// Fill data[0..size-1] with pseudo-random values from GetRand.
// NOTE(review): the `val` parameter is unused here (cf. constantInit's
// matching signature) -- confirm whether it was meant to scale the values.
void randomInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = GetRand(i);
}
}
// Fill the first `size` floats of `data` with the constant `val`.
void constantInit(float *data, int size, float val)
{
    for (int idx = 0; idx != size; ++idx)
        data[idx] = val;
}
// Allocate random dimsA x dimsB input matrices, run matrixMul_2a nIter times
// with the given tile/block configuration, report GFlop/s from CUDA-event
// timing, and verify the GPU result against the CPU reference mm().
// Returns true (as int) on a verified result, false otherwise.
// PRECONDITION: matrix dimensions must be divisible by block_size*tile_size
// (the grid below performs exact integer division).
int matrixMultiply(int tile_size, int block_size, dim3 &dimsA, dim3 &dimsB)
{
printf("START: Tile[%d,%d],Block[%d,%d], Matrix[%d,%d]\n",tile_size,tile_size,block_size,block_size,dimsB.x,dimsA.y);
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory with pseudo-random values
randomInit(h_A, size_A, 2.1f);
randomInit(h_B, size_B, 1.f);
unsigned int size_C = dimsB.x * dimsA.y;
// Allocate device memory
float *d_A, *d_B, *d_C;
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
float *test_C = (float *) malloc(mem_size_C);
constantInit(test_C, size_C, 0.f);
constantInit(h_C, size_C, 0.f);
cudaMalloc((void **) &d_A, mem_size_A);
cudaMalloc((void **) &d_B, mem_size_B);
cudaMalloc((void **) &d_C, mem_size_C);
cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
cudaMemcpy(d_C, h_C, mem_size_C, cudaMemcpyHostToDevice);
// Setup execution parameters: each thread covers a tile_size^2 patch,
// so the grid shrinks by block_size*tile_size per dimension
dim3 threads(block_size, block_size);
dim3 grid(dimsA.y/(block_size*tile_size), dimsB.x/(block_size*tile_size));
// make sure all transfers are done before timing starts
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
cudaEventCreate(&start);
cudaEvent_t stop;
cudaEventCreate(&stop);
// Record the start event
cudaEventRecord(start, NULL);
// Execute the kernel nIter times and average the timing below
int nIter = 2;
for (int j = 0; j < nIter; j++)
{
matrixMul_2a<<< grid, threads >>>(tile_size, block_size, d_C, d_A, d_B, dimsA.x);
}
// Record the stop event and wait for all launches to finish
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
// Compute and print the performance (2*N^3 flops per multiply)
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf( "REPORT:\n Performance= %.2f GFlop/s\n Time= %.3f msec\n Size= %.0f Ops\n WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy the result from device to host
cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
// CPU reference result for verification
mm(test_C,h_A,h_B,dimsA.x);
int verify=1;
// relative-error comparison at 1e-6 tolerance
// NOTE(review): the division by test_C[i] yields inf/nan when the
// reference element is 0 (exact-equality short-circuits only the ==0 vs
// ==0 case) -- confirm inputs keep test_C nonzero
for (int i=0;i<mem_size_C/4;i++)
{
if(h_C[i]!=test_C[i]&&(fabs(h_C[i]-test_C[i])/test_C[i])>1E-6){
printf("Matrix[A:%d,B:%d,C:%d] C[%d]=%f, Expect= %f\n",mem_size_A,mem_size_B,mem_size_C,i,h_C[i],test_C[i]);
verify=0;
break;
}
}
free(h_A);
free(test_C);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// tear down the CUDA context (note: affects any later CUDA work)
cudaDeviceReset();
if (verify) {
printf("SUCCESS!\n\n");
return true;
}else{
printf("WRONG RESULT!\n\n");
return false;
}
}
|
1,333
|
#include "includes.h"
// Pointwise update of a 2-channel field z over an nx x ny grid:
// z <- (z0 + tau*z) / (1 + tau*|z|), where |z| is the per-pixel 2-norm of
// the channel pair. One thread per pixel; out-of-range threads do nothing.
__global__ void zupdate(float *z, float *z0, float tau, int nx, int ny)
{
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= nx || py >= ny)
        return;
    int idx = px + py * nx;
    float za = z[2 * idx + 0];
    float zb = z[2 * idx + 1];
    // shared scaling factor for both channels
    float t = 1 / (1 + tau * sqrtf(za * za + zb * zb));
    z[2 * idx + 0] = (z0[2 * idx + 0] + tau * za) * t;
    z[2 * idx + 1] = (z0[2 * idx + 1] + tau * zb) * t;
}
|
1,334
|
// 因为我自己笔记本的显卡计算能力为2.1,所以这段代码其实是无法工作的,原因在于 cudaMallocManaged 这个函数是用不了的
#include<stdio.h>
#define N 64
#define TPB 32
// Map index i in [0, n) onto n evenly spaced floats covering [0, 1].
float scale(int i, int n) {
    return (float)i / (n - 1);
}
// Euclidean distance between two scalars: sqrt of the squared difference,
// which equals the absolute difference for in-range values.
__device__
float distance(float x1,float x2){
return sqrt((x2-x1)*(x2-x1));
}
// Per-thread: d_out[i] = distance(d_in[i], ref). There is no bounds guard,
// so the launch grid must cover the arrays exactly (in this file N is a
// multiple of TPB). The printf calls are debug output and serialize the
// kernel heavily.
__global__
void distanceKernel(float *d_out,float *d_in,float ref){
const int i = blockIdx.x*blockDim.x+threadIdx.x;
const float x=d_in[i];
d_out[i]=distance(x,ref);
printf("i=%2d: dist from %f to %f is %f.\n", i,ref,x,d_out[i]);
printf("Hello!\n");
}
// Fill a unified-memory array with N evenly spaced values, compute each
// element's distance from 0.5 on the GPU, and print the results from the
// kernel itself.
int main(void)
{
const float ref=0.5f;
float *in =0;
float *out = 0;
// BUG FIX: check the allocations -- the file's own header comment notes
// cudaMallocManaged is unsupported on some GPUs, and the original would
// then dereference null pointers
if (cudaMallocManaged(&in,N*sizeof(float)) != cudaSuccess ||
    cudaMallocManaged(&out,N*sizeof(float)) != cudaSuccess) {
printf("cudaMallocManaged failed\n");
return 1;
}
for (int i = 0; i<N; ++i) {
in[i]=scale(i,N);
}
// N is a multiple of TPB, so the grid covers the arrays exactly
// (distanceKernel has no bounds guard)
distanceKernel<<<N/TPB,TPB>>>(out,in,ref);
cudaDeviceSynchronize();
cudaFree(in);
cudaFree(out);
return 0;
}
|
1,335
|
#include <stdio.h>
// Trivial kernel: each launched thread prints a greeting via device printf.
// Output is buffered on the device and flushed at the host's next
// synchronizing call.
__global__ void helloFromGPU(void){
printf("Hello from GPU!\n");
}
int main(void){
	printf("Hello! from CPU\n");
	// Launch 10 threads in one block; each prints a greeting.
	helloFromGPU <<< 1,10 >>>();
	// Block until the kernel finishes so the device-side printf buffer is
	// reliably flushed before the context is torn down.
	cudaDeviceSynchronize();
	cudaDeviceReset();
	return 0;
}
|
1,336
|
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <math.h>
#include <assert.h>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
//#include "cutil.h"
using namespace std;
//////////////////////////////////////////////////////////////////////////
//this version calculates the aperture mass at the galaxy positions.
//////////////////////////////////////////////////////////////////////////
void checkCUDAerror(const char *msg);
int checkDeviceSpecs(int number_of_galaxies, int grid_size);
/////////////////////////////////////////////////////////////////////
// The kernel: calculates the aperture mass, noise and SNR
/////////////////////////////////////////////////////////////////////
// Aperture-mass kernel: one thread per pixel of a grid_size x grid_size map.
// For each pixel it sums the tangential shear of all galaxies within
// 1.5*theta_max, weighted by the filter Q(x), and writes the aperture mass,
// its variance estimate and the S/N into the three output maps.
__global__ void mApKernel(float* rgamma1, float* rgamma2, float* ra, float* dec, float* mAp_rgamma, float* var_rgamma, float* SN_rgamma, int tot_gals, float theta_max, int grid_size, float ra_pixsize, float dec_pixsize, float min_ra, float min_dec)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	// The launch rounds the grid up to a whole number of blocks, so trailing
	// threads fall outside the map. Without this guard they wrote past the
	// end of the output arrays (which hold grid_size^2 floats).
	if (idx >= grid_size * grid_size) return;
	// Integer pixel coordinates, then converted to arcminutes. Integer
	// arithmetic avoids the float round-off the original risked for large idx.
	int tempra = idx / grid_size;
	int tempdec = idx - grid_size * tempra;
	float this_ra = min_ra + tempra * ra_pixsize;
	float this_dec = min_dec + tempdec * dec_pixsize;
	// include any tails outside the halo radius to which the filter is tuned
	int kernel_radius = 1.5f * theta_max;
	float ang = 0.0f;
	float xc = 0.15f; // a constant of the filter function Q.
	float x = 0, Q = 0;
	float rgammaMap = 0;
	float rgammaVar = 0;
	float radiff, decdiff, dist;
	float npoints = 0;
	for (int i = 0; i < tot_gals; i++) {
		radiff = this_ra - ra[i];
		if (fabsf(radiff) > kernel_radius) continue;
		decdiff = this_dec - dec[i];
		if (fabsf(decdiff) > kernel_radius || (radiff == 0 && decdiff == 0)) continue;
		dist = sqrtf(radiff * radiff + decdiff * decdiff);
		if (dist > kernel_radius) continue;
		// Position angle of the galaxy, mapped onto the full (-pi, pi] range.
		if (radiff == 0 && decdiff > 0) ang = M_PI / 2.0;
		else if (radiff == 0 && decdiff < 0) ang = -1.0 * M_PI / 2.0;
		else if (radiff > 0) ang = atanf(decdiff / radiff);
		else if (radiff < 0 && decdiff > 0) ang = atanf(decdiff / radiff) + M_PI;
		else if (radiff < 0 && decdiff < 0) ang = atanf(decdiff / radiff) - M_PI;
		x = dist / theta_max;
		// Filter function: tanh profile with soft exponential cut-offs.
		Q = (1.0 / (1.0 + exp(6.0 - 150.0 * x) + exp(-47.0 + 50.0 * x))) * (tanh(x / xc) / (x / xc));
		// Tangential shear contribution at this pixel.
		rgammaMap += Q * (-1 * (rgamma1[i] * cos(2 * ang) + rgamma2[i] * sin(2 * ang)));
		rgammaVar += Q * Q * (rgamma1[i] * rgamma1[i] + rgamma2[i] * rgamma2[i]);
		npoints++;
	}
	// Pixels with no galaxies in range: the original divided by zero here
	// and emitted NaNs; write zeros instead.
	if (npoints == 0) {
		mAp_rgamma[idx] = 0.0f;
		var_rgamma[idx] = 0.0f;
		SN_rgamma[idx] = 0.0f;
		return;
	}
	// normalise by the number of galaxies summed over.
	mAp_rgamma[idx] = rgammaMap / npoints;
	var_rgamma[idx] = rgammaVar / (2.0 * npoints * npoints);
	SN_rgamma[idx] = sqrtf(2) * rgammaMap / sqrtf(rgammaVar);
}
////////////////////////////////////////////////////////////////
// setting up the aperture mass calculation
//////////////////////////////////////////////////////////////
// Driver: reads a galaxy catalogue (ra, dec, gamma1, gamma2), runs the
// aperture-mass kernel on a grid_size x grid_size map, and writes
// (ra, dec, mAp, var, S/N) rows to the output file.
int main(int argc, char **argv){
char* input_filename; char* output_filename;
int number_of_galaxies, grid_size;
float filter_rad, min_ra, max_ra, min_dec, max_dec;
// NOTE(review): if fewer than 10 arguments are supplied, every variable
// below is used uninitialised — confirm callers always pass all 9 args.
if (argc>1)
{
input_filename = argv[1];
output_filename = argv[2];
number_of_galaxies = atoi(argv[3]);
filter_rad = atof(argv[4]);
grid_size = atoi(argv[5]);
min_ra = atof(argv[6]);
max_ra = atof(argv[7]);
min_dec = atof(argv[8]);
max_dec = atof(argv[9]);
}
// one kernel invocation per map pixel
int ncalc = grid_size*grid_size;
float ra_pixsize = (max_ra - min_ra)/float(grid_size);
float dec_pixsize = (max_dec - min_dec)/float(grid_size);
// CPU memory
size_t sizeneeded = number_of_galaxies*sizeof(float);
float *h_rgamma1 = 0;
float *h_rgamma2 = 0;
float *h_ra = 0;
float *h_dec = 0;
h_rgamma1 = (float*) malloc(sizeneeded);
h_rgamma2 = (float*) malloc(sizeneeded);
h_ra = (float*) malloc(sizeneeded);
h_dec = (float*) malloc(sizeneeded);
// Read whitespace-separated columns: x y g1 g2, one galaxy per line.
// NOTE(review): nothing stops i exceeding number_of_galaxies if the file
// has more rows than advertised — buffer overflow risk; confirm inputs.
ifstream infile;
infile.open(input_filename);
int i=0;
float x, y, g1, g2;
while(1)
{
infile>>x>>y>>g1>>g2;
h_ra[i] = x;
h_dec[i] = y;
h_rgamma1[i] = g1;
h_rgamma2[i] = g2;
i += 1;
if(!infile.good()) break;
}
// check whether the device has the capacity to do this calculation.
// this is taken from the SDK function deviceQuery
int max_threads = checkDeviceSpecs(number_of_galaxies, ncalc);
/// first, I need to test whether the device is busy. If so, it can wait a little while.
// Probe with a tiny allocation; retry every 10 s until it succeeds.
while(1){
size_t testsize = 1*sizeof(float);
float *d_test;
cudaMalloc(&d_test, testsize);
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err){
printf("gotta wait for a bit!: %s\n", cudaGetErrorString( err) );
sleep(10);
}
else break;
}
// GPU memory for input
float *d_rgamma1, *d_rgamma2, *d_ra, *d_dec;
cudaMalloc(&d_rgamma1, sizeneeded);
cudaMalloc(&d_rgamma2, sizeneeded);
cudaMalloc(&d_ra, sizeneeded);
cudaMalloc(&d_dec, sizeneeded);
// set up vectors for host and device for output.
size_t sizeneeded_out = ncalc*sizeof(float);
float *h_mAp_rgamma,*d_mAp_rgamma, *h_var_rgamma, *d_var_rgamma, *h_SN_rgamma, *d_SN_rgamma;
h_mAp_rgamma = (float*)malloc(sizeneeded_out);
cudaMalloc(&d_mAp_rgamma, sizeneeded_out);
h_var_rgamma = (float*)malloc(sizeneeded_out);
cudaMalloc(&d_var_rgamma, sizeneeded_out);
h_SN_rgamma = (float*)malloc(sizeneeded_out);
cudaMalloc(&d_SN_rgamma, sizeneeded_out);
//copy vectors from host to device memory
// NOTE(review): the three output-buffer copies below upload uninitialised
// host memory; the kernel overwrites every element anyway.
cudaMemcpy(d_rgamma1, h_rgamma1, sizeneeded, cudaMemcpyHostToDevice);
cudaMemcpy(d_rgamma2, h_rgamma2, sizeneeded, cudaMemcpyHostToDevice);
cudaMemcpy(d_ra, h_ra, sizeneeded, cudaMemcpyHostToDevice);
cudaMemcpy(d_dec, h_dec, sizeneeded, cudaMemcpyHostToDevice);
cudaMemcpy(d_mAp_rgamma, h_mAp_rgamma, sizeneeded_out, cudaMemcpyHostToDevice);
cudaMemcpy(d_var_rgamma, h_var_rgamma, sizeneeded_out, cudaMemcpyHostToDevice);
cudaMemcpy(d_SN_rgamma, h_SN_rgamma, sizeneeded_out, cudaMemcpyHostToDevice);
//check memory is alright
// NOTE(review): these null checks happen AFTER the buffers were already
// written to / copied — they should precede first use.
if (0==h_rgamma1 || 0==h_rgamma2 || 0==h_ra || 0==h_dec || 0==h_mAp_rgamma || 0==h_var_rgamma || 0==h_SN_rgamma) printf("can't allocate memory on host \n");
if (0==d_rgamma1 || 0==d_rgamma2 || 0==d_ra || 0==d_dec || 0==d_mAp_rgamma || 0==d_var_rgamma || 0==d_SN_rgamma ) printf("can't allocate memory on device \n");
checkCUDAerror("memory");
// set up kernel params
int threadsPerBlock = max_threads;
int blocksPerGrid = int(ceil( ncalc / float(max_threads)) ); // need grid_size*grid_size threads total
printf(" theads per block: %d and blocks per grid: %d for a total of: %d\n", threadsPerBlock, blocksPerGrid, threadsPerBlock*blocksPerGrid);
mApKernel<<<blocksPerGrid, threadsPerBlock >>>(d_rgamma1, d_rgamma2, d_ra, d_dec, d_mAp_rgamma,d_var_rgamma, d_SN_rgamma, number_of_galaxies, filter_rad, grid_size, ra_pixsize, dec_pixsize, min_ra, min_dec);
checkCUDAerror("kernel");
//get the output_mAp back off the device
cudaMemcpy(h_mAp_rgamma, d_mAp_rgamma, sizeneeded_out, cudaMemcpyDeviceToHost);
cudaMemcpy(h_var_rgamma, d_var_rgamma, sizeneeded_out, cudaMemcpyDeviceToHost);
cudaMemcpy(h_SN_rgamma, d_SN_rgamma, sizeneeded_out, cudaMemcpyDeviceToHost);
// finally, write out to the output file!
FILE *output_file;
double sq2=sqrt(2.0);
output_file = fopen(output_filename, "w");
fprintf(output_file, " # ra dec mAp Var S/N \n");
// Recompute each pixel's sky coordinates exactly as the kernel does.
float this_ra, this_dec;
int tempra, tempdec;
for(int ii=0 ; ii<ncalc; ii++){
tempra = ii/grid_size;
tempdec = ii - grid_size*tempra;
this_ra = min_ra + tempra*ra_pixsize;
this_dec = min_dec + tempdec*dec_pixsize;
fprintf(output_file, "%f %f %f %f %f \n", this_ra, this_dec, h_mAp_rgamma[ii], h_var_rgamma[ii], h_SN_rgamma[ii] ) ;
}
fclose(output_file);
printf("successfuly completed!\n");
}
//////////////////////////////////////////////////////////////
// simple function to check for errors.
//////////////////////////////////////////////////////////////
// Abort with a diagnostic if the most recent CUDA API call or kernel
// launch left an error behind; `msg` labels the call site.
void checkCUDAerror(const char *msg)
{
	cudaError_t status = cudaGetLastError();
	if (status != cudaSuccess)
	{
		fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
		exit(EXIT_FAILURE);
	}
}
///////////////////////////////////////////////////////////////////////////////////////
// function to check whether GPU device has the specs to perform the calculation.
// adapted from cuda SDK deviceQuery example.
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// function to check whether GPU device has the specs to perform the calculation.
// adapted from cuda SDK deviceQuery example.
// Returns maxThreadsPerBlock of the last device examined (0 if no device).
///////////////////////////////////////////////////////////////////////////////////////
int checkDeviceSpecs( int number_of_galaxies, int ncalc){
	// bytes needed: gamma1, gamma2, ra, dec inputs plus mAp, var, SN outputs.
	// size_t avoids the int overflow the original hit for large inputs.
	size_t gpu_mem_needed = (size_t)number_of_galaxies * sizeof(float) * 4 + (size_t)ncalc * sizeof(float) * 3;
	printf("Requirements: %d calculations and %zu bytes memory on the GPU \n\n", ncalc, gpu_mem_needed);
	int threadsPerBlock=0;
	// now get the info from the device.
	int deviceCount = 0;
	cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
	if (error_id != cudaSuccess) {
		printf( "cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id) );
	}
	// This function call returns 0 if there are no CUDA capable devices.
	if (deviceCount == 0)
		printf("There is no device supporting CUDA\n");
	else
		printf("Found %d CUDA Capable device(s)\n", deviceCount);
	for (int dev = 0; dev < deviceCount; ++dev) {
		cudaDeviceProp deviceProp;
		cudaGetDeviceProperties(&deviceProp, dev);
		printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
		printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n",
			(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
		printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
		// does this device have enough capacity for the calculation?
		// check memory
		if((size_t) deviceProp.totalGlobalMem < gpu_mem_needed) {
			printf(" FAILURE: Not eneough memeory on device for this calculation! \n");
			exit(1);
		}
		else
		{
			printf("Hurrah! This device has enough memory to perform this calculation\n");
			// maximal efficiency exists if we use max # threads per block.
			threadsPerBlock = deviceProp.maxThreadsPerBlock;
			// need grid_size*grid_size threads total. The original divided
			// two ints before calling ceil, silently rounding down.
			int blocksPerGrid = int(ceil(ncalc / float(threadsPerBlock)));
			// Compare the required block count against the grid-dimension
			// limit (the original mistakenly tested maxThreadsDim[1]*[2]).
			if( deviceProp.maxGridSize[0] < blocksPerGrid) {
				printf("FAILURE: Not enough threads on the device to do this calculation!\n");
				exit(1);
			}
			else
			{
				printf("Hurrah! This device supports enough threads to do this calculation\n");
			}
		}
	}// loop over devices
	return threadsPerBlock;
}
|
1,337
|
#include "includes.h"
// Hysteresis thresholds for Canny edge linking.
#define UPPERTHRESHOLD 90
#define LOWERTHRESHOLD 30
// Sobel kernels for the horizontal (G_x) and vertical (G_y) gradients.
const float G_x[3 * 3] = {
	-1, 0, 1,
	-2, 0, 2,
	-1, 0, 1
};
const float G_y[3 * 3] = {
	1, 2, 1,
	0, 0, 0,
	-1, -2, -1
};
// 5x5 Gaussian smoothing kernel (1/159 normalisation). The original had
// 2/159 as the last entry of the middle row, breaking the kernel's
// symmetry; the correct value is 5/159.
const float gaussian[5 * 5] = {
	2.f/159, 4.f/159, 5.f/159, 4.f/159, 2.f/159,
	4.f/159, 9.f/159, 12.f/159, 9.f/159, 4.f/159,
	5.f/159, 12.f/159, 15.f/159, 12.f/159, 5.f/159,
	4.f/159, 9.f/159, 12.f/159, 9.f/159, 4.f/159,
	2.f/159, 4.f/159, 5.f/159, 4.f/159, 2.f/159
};
// Non-maximum suppression for Canny: `in` holds the quantised gradient
// direction (0/45/90/135 degrees) per pixel, `out` holds the gradient
// magnitude and is zeroed in place where the pixel is not a local maximum
// along its gradient direction. One thread per pixel (2D launch).
__global__ void nonMaxSuppression(int N, int width, int height, unsigned char * in, unsigned char * out) {
	int D = 1;
	int x = (blockIdx.x * blockDim.x) + threadIdx.x;
	int y = (blockIdx.y * blockDim.y) + threadIdx.y;
	if (x >= width || y >= height) {
		return;
	}
	// Border pixels lack a full neighbourhood: the original indexed out of
	// bounds there (e.g. (y - D) at y == 0), which is undefined behaviour.
	// Leave border pixels untouched instead.
	if (x < D || x >= width - D || y < D || y >= height - D) {
		return;
	}
	int angle = in[y * width + x];
	switch(angle) {
	case 0:
		// compare against the two neighbours one row above/below
		if (out[y * width + x] < out[(y + D) * width + x] || out[y * width + x] < out[(y - D) * width + x]) {
			out[y * width + x] = 0;
		}
		break;
	case 45:
		// diagonal neighbours (down-left / up-right)
		if (out[y * width + x] < out[(y + D) * width + x - D] || out[y * width + x] < out[(y - D) * width + x + D]) {
			out[y * width + x] = 0;
		}
		break;
	case 90:
		// left/right neighbours
		if (out[y * width + x] < out[y * width + x + D] || out[y * width + x] < out[y * width + x - D]) {
			out[y * width + x] = 0;
		}
		break;
	case 135:
		// diagonal neighbours (down-right / up-left)
		if (out[y * width + x] < out[(y + D) * width + x + D] || out[y * width + x] < out[(y - D) * width + x - D]) {
			out[y * width + x] = 0;
		}
		break;
	default:
		break;
	}
}
|
1,338
|
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <cstdlib>
#include <stdint.h>
#include <iostream>
#include <sys/time.h>
// Benchmark: time 1000 thrust::sort calls on `size` random uint32s, and
// separately time the host-to-device transfer.
int main(int argc, char **argv) {
	struct timeval tv1, tv2;
	struct timeval trans_t1, trans_t2;
	if (argc != 2) {
		std::cout << "Incorrect args: needs size";
		exit(-1);
	}
	// Parse arguments
	int size = atoi(argv[1]);
	//generate random vector of 32bit unsigned int
	thrust::host_vector<uint32_t> h_vec(size);
	thrust::generate(h_vec.begin(), h_vec.end(), rand);
	// (the original also allocated an unused device vector d_res here)
	// copy host to device, timing the transfer separately
	gettimeofday(&trans_t1, NULL);
	thrust::device_vector<uint32_t> d_vec = h_vec;
	gettimeofday(&trans_t2, NULL);
	// NOTE(review): after the first iteration the vector is already sorted,
	// so iterations 2..1000 sort sorted data — confirm that is intended.
	gettimeofday(&tv1, NULL);
	for (int i = 0; i < 1000; ++i) {
		thrust::sort(d_vec.begin(), d_vec.end());
	}
	gettimeofday(&tv2, NULL);
	std::cout << "done \n" ;
	double secs =
		(double) (tv2.tv_usec - tv1.tv_usec) /1000000 +
		(double) (tv2.tv_sec - tv1.tv_sec);
	double trans_secs =
		(double) (trans_t2.tv_usec - trans_t1.tv_usec) /1000000 +
		(double) (trans_t2.tv_sec - trans_t1.tv_sec);
	std::cout << "ELEMENTS_PROCESSED: " << size << "\n" ;
	std::cout << "SELFTIMED: " << secs << "\n" ;
	std::cout << "TRANSFER_TO_DEVICE: " << trans_secs << "\n";
	std::cout << "BYTES_TO_DEVICE: " << size * sizeof(uint32_t) << "\n";
	std::cout << "BYTES_FROM_DEVICE: " << size * sizeof(uint32_t) << "\n";
	return 0;
}
|
1,339
|
/*
CSC691 GPU programming
Project 4: Multi-Pi using multiple streams
Jiajie Xiao
Nov 19, 2017
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define CHUNK 100000
// Digit histogram: each block builds a shared-memory histogram of the
// characters '0'..'9' in input[0..len), then folds it into the global
// `hist` with one atomicAdd per bin.
__global__ void partialHist(char *input, int len, int *hist)
{
	__shared__ int partial_sum[10];
	int gid = threadIdx.x + blockDim.x * blockIdx.x;

	// thread 0 zeroes the shared histogram before anyone updates it
	if (threadIdx.x == 0)
	{
		for (int d = 0; d < 10; d++)
			partial_sum[d] = 0;
	}
	__syncthreads();

	// Bound-check BEFORE touching input (the original read input[i] before
	// the check — an out-of-range access for trailing threads), and keep
	// every __syncthreads() outside the divergent branch so all threads of
	// the block reach each barrier.
	if (gid < len)
	{
		int digit = input[gid] - '0';
		atomicAdd(&partial_sum[digit], 1);
	}
	__syncthreads();

	// thread 0 merges the block's histogram into the global one
	if (threadIdx.x == 0)
	{
		for (int d = 0; d < 10; d++)
			atomicAdd(&hist[d], partial_sum[d]);
	}
}
// Reads digits of pi from a file in CHUNK-sized pieces and histograms them
// on the GPU using two CUDA streams; optionally stops after `numTruncated`
// digits. Appends the final histogram to hist.txt.
int main(int argc, char **argv)
{
FILE *inputFile, *outputFile;
int numTruncated = -1;
if (argc < 2)
{
printf("An input file name is required.");
return -1;
}
else if (argc >2)
{
numTruncated = atoi(argv[2]);
if (numTruncated<1)
{
printf("Please type positive number of digits to be evaluated.\n");
return -1;
}
}
// NOTE(review): fopen may return NULL; ferror(NULL) below would crash —
// add a NULL check before calling ferror.
inputFile = fopen(argv[1],"r");
if (ferror(inputFile))
{
perror("Error: ");
return -1;
}
else
{
int i, stream_idx;
char *buf;
// paged-locked allocation is required when inolving streams (for pinned data that is stored in memory all the time)
//cudaHostAlloc((void**)&buf, CHUNK*sizeof(char), cudaHostAllocDefault);
cudaMallocHost((void**)&buf, CHUNK*sizeof(char));
// histc: running digit histogram on the host (pinned).
int *histc;
cudaHostAlloc((void**)&histc, 10*sizeof(int), cudaHostAllocDefault);
for(int i=0;i<10;i++) histc[i] = 0;
int *histc_temp; // used for hist merging among streams and devices
cudaHostAlloc((void**)&histc_temp, 10*sizeof(int), cudaHostAllocDefault);
size_t nread;
cudaEvent_t start_gpu, stop_gpu;
cudaEventCreate(&start_gpu);
cudaEventCreate(&stop_gpu);
cudaEventRecord(start_gpu);
// Create two streams
int num_streams = 2;
cudaStream_t streams[num_streams];
for (int stream_idx=0; stream_idx < num_streams; stream_idx++)
cudaStreamCreate(&(streams[stream_idx]));
// One half-chunk buffer and one device histogram per stream.
char *dev_buf0, *dev_buf1;
int *dev_histc0, *dev_histc1;
cudaMalloc((void**)&dev_buf0, (int)(CHUNK/num_streams) * sizeof(char));
cudaMalloc((void**)&dev_buf1, (int)(CHUNK/num_streams) * sizeof(char));
cudaMalloc((void**)&dev_histc0, 10 * sizeof(int));
cudaMalloc((void**)&dev_histc1, 10 * sizeof(int));
cudaError_t err1, err2;
// `last` flags the chunk that reaches the numTruncated limit.
int NumDigitsHistogramed = 0, Finished = 0, last;
while((nread = fread(buf, 1, CHUNK * sizeof(char), inputFile)) > 0 && Finished !=1)
{
printf("# Element loaded in CHUNK: %d\t%d\t%d\n",(int)nread, (int)(CHUNK * sizeof(char)), (int)sizeof(buf));
if (numTruncated == -1 || NumDigitsHistogramed + (int)nread < numTruncated)
last = 0;
else
last = 1;
// Compute partial hist
// NOTE(review): the device histograms are seeded with the current host
// totals AND the results are added back into histc below, so counts are
// double-accumulated across chunks — verify against expected output.
cudaMemcpy(dev_histc0, histc, 10 * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_histc1, histc, 10 * sizeof(int), cudaMemcpyHostToDevice);
// Each stream processes half the chunk.
cudaMemcpyAsync(dev_buf0, buf, (int)(CHUNK/num_streams) * sizeof(char), cudaMemcpyHostToDevice, streams[0]);
cudaMemcpyAsync(dev_buf1, buf + (int)(CHUNK/num_streams), (int)(CHUNK/num_streams) * sizeof(char), cudaMemcpyHostToDevice, streams[1]);
//cudaMemcpyAsync(dev_buf1, buf, (int) (1.0/num_streams * CHUNK) * sizeof(char), cudaMemcpyHostToDevice, streams[1]);
// partial hist execution
if(last)
{
partialHist <<< 100,1000, 0, streams[0] >>> (dev_buf0, (numTruncated - NumDigitsHistogramed)/num_streams, dev_histc0);
err1 = cudaGetLastError();
partialHist <<< 100,1000, 0, streams[1] >>> (dev_buf1, (numTruncated - NumDigitsHistogramed)/num_streams, dev_histc1);
err2 = cudaGetLastError();
}
else
{
partialHist <<< 100,1000, 0, streams[0] >>> (dev_buf0, (int) nread/num_streams, dev_histc0);
err1 = cudaGetLastError();
partialHist <<< 100,1000, 0, streams[1] >>> (dev_buf1, (int) nread/num_streams, dev_histc1);
err2 = cudaGetLastError();
}
// NOTE(review): '&&' only reports when BOTH launches fail; '||' was
// probably intended.
if (err1 != cudaSuccess && err2 !=cudaSuccess)
{
printf("Error: stream1 %s\n stream2 %s\n", cudaGetErrorString(err1),cudaGetErrorString(err2));
return -1;
}
// Synchronization
for (stream_idx=0;stream_idx<num_streams;stream_idx++)
{
cudaStreamSynchronize(streams[stream_idx]);
printf("Stream Synchronized (#) %d.\n", stream_idx);
}
cudaDeviceSynchronize();
// Merge partial hist
//cudaMemcpy(histc_temp, dev_histc0, 10 * sizeof(int), cudaMemcpyDeviceToHost);
// NOTE(review): histc_temp is read in the loop below BEFORE
// cudaStreamSynchronize — the async copy may not have completed yet;
// the sync should come before the read (same for stream 1).
cudaMemcpyAsync(histc_temp, dev_histc0, 10 * sizeof(int), cudaMemcpyDeviceToHost,streams[0]);
for (i=0;i<10;i++)
{
histc[i]+=histc_temp[i];
//printf("%d\t%d\n",i,histc_temp[i]);
}
cudaStreamSynchronize(streams[0]);
cudaMemcpyAsync(histc_temp, dev_histc1, 10 * sizeof(int), cudaMemcpyDeviceToHost,streams[1]);
for (i=0;i<10;i++)
{
histc[i]+=histc_temp[i];
//printf("%d\t%d\n",i,histc_temp[i]);
}
cudaStreamSynchronize(streams[1]);
// // cupy memory from gpu 1 to gpu0 using synchronized one instead of the asynchronized one
// cudaMemcpyPeer(histc_temp, 0, dev_histc2, 1, 10 * sizeof(int));
// for (i=0;i<10;i++)
// histc[i]+=histc_temp[i];
// cudaMemcpyPeer(histc_temp, 0, dev_histc3, 1, 10 * sizeof(int));
// for (i=0;i<10;i++)
// histc[i]+=histc_temp[i];
if(last)
{
NumDigitsHistogramed = numTruncated;
Finished = 1;
}
else
{
NumDigitsHistogramed += (int)nread;
}
}
cudaEventRecord(stop_gpu);
cudaStreamDestroy(streams[0]);
cudaStreamDestroy(streams[1]);
cudaFree(dev_histc0);
cudaFree(dev_histc1);
cudaFree(dev_buf0);
cudaFree(dev_buf1);
cudaEventSynchronize(stop_gpu);
float milliseconds_gpu = 0;
cudaEventElapsedTime(&milliseconds_gpu, start_gpu, stop_gpu);
fclose(inputFile);
printf("The histograming calculation time (ms): %f\n", milliseconds_gpu);
outputFile = fopen("hist.txt", "a");
// NOTE(review): this checks inputFile (already closed) instead of
// outputFile — likely a copy/paste slip.
if (ferror(inputFile))
{
perror("Error: ");
return -1;
}
else
{
fprintf(outputFile, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", NumDigitsHistogramed, histc[0], histc[1], histc[2], histc[3], histc[4], histc[5], histc[6], histc[7], histc[8], histc[9] );
fclose(outputFile);
}
}
return 0;
}
|
1,340
|
// Empty placeholder kernel exported with C linkage so its symbol name is
// unmangled (e.g. for lookup by name via the driver API).
extern "C" {
__global__ void dummy()
{
}
}
|
1,341
|
#include <stdio.h>
#include <math.h>
// Algoritmo Criba de Eratóstenes
// Sieve of Eratosthenes: counts the primes below `max` on the CPU, timing
// the sieve with CUDA events and printing the count and elapsed time.
void primos(unsigned long max)
{
	unsigned long i, j, c = 0;
	max++;
	char *arr = new char[max]; // arr[i] != 0 marks i as composite
	cudaEvent_t inicio, fin;
	// initialised so the final printf is defined even when max < 2
	float tiempo = 0.0f;
	cudaEventCreate( &inicio );
	cudaEventCreate( &fin );
	cudaEventRecord( inicio, 0 );
	if (max >= 2)
	{
		for (i=0; i<max; i++)
			arr[i] = 0;
		arr[0] = 1;
		arr[1] = 1;
		unsigned long raiz = sqrt(max);
		// strike out all even numbers above 2
		for (j=4; j<max; j+=2)
			arr[j] = 1;
		for (i=3; i<=raiz; i+=2) // odd candidates
			if (arr[i] == 0)
				for (j=i*i; j<max; j+=i)
					arr[j] = 1;
		cudaEventRecord( fin, 0 );
		cudaEventSynchronize( fin );
		cudaEventElapsedTime( &tiempo, inicio, fin );
		for (i=0; i<max; i++)
			if (arr[i] == 0)
			{
				// printf("%ld ", i);
				c++;
			}
		printf("\n total:%ld\n", c);
	}
	// memory obtained with new[] must be released with delete[]; the
	// original called free(), which is undefined behaviour.
	delete[] arr;
	cudaEventDestroy( inicio );
	cudaEventDestroy( fin );
	printf("tiempo total en ms: %f\n", tiempo);
}
int main(int argc, char *argv[])
{
	// Count the primes below 1e8. Return 0 on success: the original
	// returned 1, which shells interpret as failure.
	primos(100000000);
	return 0;
}
|
1,342
|
// sudo nvprof --print-gpu-trace --log-file test.txt ./sum_reduction_simple_opt3
// Prints log in txt file
#include<iostream>
#include<vector>
// Shared-memory budget per block in bytes (256 doubles).
const int sharedMem = 256*sizeof(double);
// Block-level sum reduction. Each thread first adds two global elements,
// then the block tree-reduces in shared memory; thread 0 writes the
// block's partial sum to out[blockIdx.x].
// Preconditions: blockDim.x == 256 (power of two), and the launch covers
// the input with gridDim.x == N / (2 * blockDim.x).
__global__ void redSum(double *a, double *out){
	// One double per thread. The original declared this array with
	// sharedMem (a BYTE count, 2048) elements — 8x more than used.
	__shared__ double red_mat[256];
	auto i = (blockDim.x*2)*blockIdx.x + threadIdx.x;
	red_mat[threadIdx.x] = a[i]+a[i+blockDim.x];
	__syncthreads();
	for(auto k = blockDim.x/2; k > 0; k/=2){
		if(threadIdx.x < k){
			red_mat[threadIdx.x] += red_mat[threadIdx.x+k];
		}
		// Barrier must be INSIDE the loop: the original synchronised only
		// after the loop, so reads raced writes from the previous round.
		__syncthreads();
	}
	if(threadIdx.x == 0){
		out[blockIdx.x] = red_mat[0];
	}
}
// Sum 0, 2, 4, ..., 2*(N-1) on the GPU with a block-wise reduction, then
// finish the reduction on the host and print the grand total.
int main(){
	int N = 32768;
	size_t size = N *sizeof(double);
	std::vector<double> h_a(N);
	std::vector<double> h_out(N, 0.0);
	for(auto i = 0; i < N; i++){
		h_a[i] = 2*i;
	}
	double *d_a, *d_out;
	cudaMalloc(&d_a, size);
	cudaMalloc(&d_out, size);
	cudaMemcpy(d_a, h_a.data(), size, cudaMemcpyHostToDevice);
	int threadsPerBlock = 256; // Launching half the number of threads
	int blocksPerGrid = N/threadsPerBlock/2;
	redSum<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_out);
	cudaMemcpy(h_out.data(), d_out, size, cudaMemcpyDeviceToHost);
	// Each block produced one partial sum; add them up on the host.
	// (The original printed only h_out[0], i.e. block 0's partial sum.)
	double total = 0.0;
	for(int b = 0; b < blocksPerGrid; b++){
		total += h_out[b];
	}
	std::cout << total << std::endl;
	cudaFree(d_a);
	cudaFree(d_out);
	return 0;
}
|
1,343
|
/* filename : pipelined_merge_sort.cu
* author : Tiane Zhu
* date : Mar 26, 2017
*
*
*/
/////
// Input : For each node v of a binary tree,
// a sorted list L_s[v] such that
// v is full whenever s >= 3 * alt(v)
////
// Output : For each node v,
// a sorted list L_{s+1}[v] such that
// v is full whenever s >= 3 * alt(v) - 1
////
// Algorithm at stage (s + 1)
// begin
// for (all active nodes v) pardo
// 1. Let u and w be the children of v.
// Set L_{s+1}'[u] = Sample(L_s[u])
// and L_{s+1}'[w] = Sample(L_s[w])
// 2. Merge L_{s+1}'[u] and L_{s+1}'[w] into
// SORTED list L_{s+1}[v]
// end
////
|
1,344
|
#include "includes.h"
// Load one row-strip of the toroidal n x n grid G, plus a 2-cell halo in
// every direction, into the shared buffer `buff` (row stride blockDim.x+4).
// The cell this thread handles is (blockIdx.x + off1*gridDim.x,
// threadIdx.x + off2*blockDim.x); wrap-around indexing (n+i)%n gives
// periodic boundaries. Interior threads load only their own column strip;
// the first/last thread of each block additionally load the left/right
// halo corners.
__device__ void set_shared(int *buff, int* G,int off1 , int off2,int n)
{
int m = blockIdx.x+off1*gridDim.x;
int l = threadIdx.x+off2*blockDim.x;
// index of the last active thread in this block's row segment
int maxx = blockDim.x-1;
if(m<n && l<n)
{
//If we reach the last element check if n is less than the number of threads
//or if it's the last element of the current row/block
if(l==n-1){
if(blockDim.x > n)
maxx = n-1;
else if(n/blockDim.x==off2)
maxx = (n-1)%blockDim.x;
}
//The first element of each block will write the left corner
//and the last element the right one
if((threadIdx.x ==0 || threadIdx.x==maxx) && maxx!=0)
{
// ad shifts the 3-column window: -2 for the left edge, 0 for the right
int ad;
//Check if it's the first or the last thread of the block
if(threadIdx.x==0)
ad = -2;
else
ad = 0;
// copy a 5-row x 3-column patch around (m, l) with periodic wrap
for (int i = m-2; i <= m+2 ; i++)
{
for (int j = l+ad; j <= l+ad+2; j++)
{
int h1 = i - m;
int h2 = j - l;
int b_ind_x = 2+h1;
int b_ind_y = threadIdx.x+2+h2;
int g_ind_x = (n+i)%n;
int g_ind_y = (n+j)%n;
buff[b_ind_x*(blockDim.x+4)+b_ind_y] = G[g_ind_x*n+g_ind_y];
}
}
}
//Special case for maxx==0:it means the first element is the last one too
//it is necessary to write both sides in share memory
else if(threadIdx.x==maxx && maxx==0)
{
// copy the full 5x5 patch centred on (m, l)
for (int i = m-2; i <= m+2 ; i++)
{
for (int j = l-2; j <= l+2; j++)
{
int h1 = i - m;
int h2 = j - l;
int b_ind_x = 2+h1;
int b_ind_y = threadIdx.x+2+h2;
int g_ind_x = (n+i)%n;
int g_ind_y = (n+j)%n;
buff[b_ind_x*(blockDim.x+4)+b_ind_y] = G[g_ind_x*n+g_ind_y];
}
}
}
//write only the values above and bellow you
else
{
for (int i = m-2; i <= m+2 ; i++)
{
int h1 = i - m;
int b_ind_x = 2+h1;
int b_ind_y = threadIdx.x+2;
int g_ind_x = (n+i)%n;
int g_ind_y = (n+l)%n;
buff[b_ind_x*(blockDim.x+4)+b_ind_y] = G[g_ind_x*n+g_ind_y];
}
}
}
}
// Convolve the k x k weight matrix w over the n x n sign grid G (via the
// shared-memory tile loaded by set_shared) and write the updated signs to
// `temp`. *flag accumulates the number of cells whose sign changed.
// it_b/it_t let one launch cover a grid larger than gridDim.x/blockDim.x.
__global__ void gpu_update_sign(int *G, double *w , int k , int n ,int *temp, int *flag,int it_b ,int it_t)
{
	// per-thread count of sign flips, merged into *flag once at the end
	int buf=0;
	__shared__ int buff[5140];
	for (int off1 = 0; off1 < it_b; off1++)
	{
		for(int off2 = 0; off2<it_t;off2++){
			//set share memory in every iteration
			set_shared(buff, G, off1 , off2, n);
			int result;
			double sum = 0.0;
			//Find the indexes
			int x = blockIdx.x+off1*gridDim.x;
			int y = threadIdx.x+off2*blockDim.x;
			//Sync thread to be sure the share memory is ok
			__syncthreads();
			if(x<n && y<n){
				//Calculate the weighted sum over the k x k neighbourhood
				for (int i = 0; i < k; i++){
					for (int j = 0; j < k; j++){
						sum += ((double)buff[i*(blockDim.x+4)+(threadIdx.x+j)])*w[i*k+j];
					}
				}
				//Evaluate it: sign of the sum with a small dead zone
				if ( sum > 1e-6){
					result = 1;
					if (result != buff[2*(blockDim.x+4)+threadIdx.x+2])
						buf++;
				}
				else if( sum < -(1e-6)){
					result = -1;
					if (result != buff[2*(blockDim.x+4)+threadIdx.x+2])
						buf++;
				}
				else
					result = buff[2*(blockDim.x+4)+threadIdx.x+2];
				//write to final array
				temp[x*n+y] =result;
			}
			//For stability of share memory setting
			__syncthreads();
		}
		//For stability of share memory setting
		__syncthreads();
	}
	// Merge the per-thread flip count atomically: the original did
	// `*flag += buf`, a read-modify-write race across all threads that
	// loses updates.
	atomicAdd(flag, buf);
}
|
1,345
|
// Sum the n ints of `array` into *total. Each thread accumulates a
// grid-stride partial sum; the block tree-reduces in shared memory and
// thread 0 folds the block result into *total with one atomicAdd.
// Requires blockDim.x to be a power of two and <= 256.
__global__ void sum_array(const int * array, int * total, unsigned int n) {
	__shared__ int partial_res[256];

	const unsigned int stride = gridDim.x * blockDim.x;
	int acc = 0;
	for (unsigned int pos = threadIdx.x + blockIdx.x * blockDim.x; pos < n; pos += stride) {
		acc += array[pos];
	}
	partial_res[threadIdx.x] = acc;
	__syncthreads();

	// tree reduction in shared memory
	for (unsigned int half = blockDim.x / 2; half != 0; half /= 2) {
		if (threadIdx.x < half) {
			partial_res[threadIdx.x] += partial_res[threadIdx.x + half];
		}
		__syncthreads();
	}
	if (threadIdx.x == 0) {
		atomicAdd(total, partial_res[0]);
	}
}
|
1,346
|
#include <iostream>
// Integer division b / a. With a == 0 this faults — main() invokes it
// that way, so the division-by-zero appears to be the point of the demo.
int crash(int b, int a);
int crash(int b, int a) {
	const int quotient = b / a;
	return quotient;
}
int main(int argc, char *argv[]) {
	// Deliberately divide by zero (a == 0) to crash the process — this
	// program appears to exist purely to demonstrate the fault.
	crash(5, 0);
}
|
1,347
|
//#include <omp.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include <sys/mman.h>
#include <sys/stat.h>
#include <iostream>
#include <fcntl.h>
#include <cmath>
using namespace std;
__device__ __managed__ float *x, *y, *z, gpuTotal;
__device__ __managed__ int **indices, *lens;
__device__ __managed__ float xStep, yStep, zStep;
__device__ __managed__ float counter;
// Near-field term: one thread per cell of the 100^3 grid (10^3 blocks of
// 10^3 threads). Sums 1/r over all point pairs between this cell and its
// 27-cell neighbourhood using the exact point coordinates, and folds the
// result into the managed global gpuTotal.
__global__ void calcNearby(){
	int _x=10*blockIdx.x+threadIdx.x;
	int _y=10*blockIdx.y+threadIdx.y;
	int _z=10*blockIdx.z+threadIdx.z;
	int i = 10000*_x+100*_y+_z;
	float result = 0;
	float dx, dy, dz;
	for(int j=-1;j<=1;j++){
		for(int k=-1;k<=1;k++){
			for(int l=-1;l<=1;l++){
				// skip neighbours outside the grid
				if(_x+j<0 || _y+k<0 || _z+l<0) continue;
				if(_x+j>99 || _y+k>99 || _z+l>99) continue;
				int h=10000*(_x+j)+100*(_y+k)+(_z+l);
				// m ranges over the NEIGHBOUR cell's point count lens[h];
				// the original used lens[i], reading indices[h] out of
				// bounds whenever the neighbour holds fewer points.
				for(int m=0;m<lens[h];m++){
					for(int n=0;n<lens[i];n++){
						//if(indices[h][m]==indices[i][n]) continue;
						atomicAdd(&counter,1);
						// The original assigned all three component
						// differences to dx, leaving dy/dz uninitialised.
						dx = x[indices[h][m]]-x[indices[i][n]];
						dy = y[indices[h][m]]-y[indices[i][n]];
						dz = z[indices[h][m]]-z[indices[i][n]];
						result+=rsqrt(dx*dx+dy*dy+dz*dz);
					}
				}
			}
		}
	}
	atomicAdd(&gpuTotal, result);
}
// Far-field term: one thread per cell. Approximates the 1/r sum against
// distant cells by treating each cell as lens[h] points at its grid
// position (spacing xStep/yStep/zStep), weighted by lens[i]*lens[h].
__global__ void calcOverall(){
int _x=10*blockIdx.x+threadIdx.x;
int _y=10*blockIdx.y+threadIdx.y;
int _z=10*blockIdx.z+threadIdx.z;
int i = 10000*_x+100*_y+_z;
float result = 0;
float dx, dy, dz;
for(int j=0;j<100;j++){
for(int k=0;k<100;k++){
for(int l=0;l<100;l++){
int h=10000*j+100*k+l;
// NOTE(review): a cell is skipped when ANY axis offset is < 2, but
// calcNearby only covers cells within +/-1 on ALL axes — pairs with
// e.g. offsets (0, 50, 0) appear in neither kernel. Confirm intended.
if(abs(_x-j)<2) continue;
if(abs(_y-k)<2) continue;
if(abs(_z-l)<2) continue;
int weight=lens[i]*lens[h];
dx = (_x-j)*xStep;
dy = (_y-k)*yStep;
dz = (_z-l)*zStep;
result+=rsqrt(dx*dx+dy*dy+dz*dz)*weight;
}
}
}
atomicAdd(&gpuTotal, result);
}
// Reads a whitespace-separated x/y/z point file (via mmap), bins the
// points into a 100^3 grid of managed-memory index lists, runs the
// near-field and far-field 1/r kernels, and prints the totals.
int main(int argc, char* argv[]){
	counter=0;
	char* &filename = argv[1];
	vector<const char*> lineAddrs;
	struct stat st;
	int ndex=1;
	stat(filename, &st);
	size_t filesize = st.st_size;
	int fd = open(filename,O_RDONLY,0);
	void* file = mmap(NULL, filesize, PROT_READ, MAP_PRIVATE | MAP_POPULATE, fd, 0);
	const char* input = (const char*) file;
	int lines=0;
	// record the start of every line for later parsing
	lineAddrs.push_back(input);
	cout<<"Reading file"<<endl;
	for(int i=0;i<filesize;i++){
		if(input[i]=='\n'){
			lines++;
			lineAddrs.push_back(input+i+1);
		}
	}
	cudaMallocManaged(&x, (size_t) lines*sizeof(float));
	cudaMallocManaged(&y, (size_t) lines*sizeof(float));
	cudaMallocManaged(&z, (size_t) lines*sizeof(float));
	// Parse each line's three numeric columns (skip past intervening
	// whitespace to the next number).
	for(int i=0;i<lines;i++){
		const char *a,*b,*c;
		a=lineAddrs[i];
		b=strpbrk(strpbrk(a," \t"),"-0123456789");
		c=strpbrk(strpbrk(b," \t"),"-0123456789");
		x[i]=atof(a);
		y[i]=atof(b);
		z[i]=atof(c);
	}
	munmap(file, filesize);
	float maxX=x[0], minX=x[0], maxY=y[0], minY=y[0], maxZ=z[0], minZ=z[0];
	int chunkSize=(lines+ndex-1)/ndex;
	cout<<"Calculating grid size"<<endl;
	// Per-chunk min/max scan, merged into the global bounds.
	for(int i=0;i<ndex;i++){
		float _maxX=x[i*chunkSize], _minX=x[i*chunkSize], _maxY=y[i*chunkSize],
			_minY=y[i*chunkSize], _maxZ=z[i*chunkSize], _minZ=z[i*chunkSize];
		for(int j=i*chunkSize+1;j<(i+1)*chunkSize;j++){
			if(j>=lines) break;
			if(x[j]<_minX) _minX=x[j];
			if(x[j]>_maxX) _maxX=x[j];
			if(y[j]<_minY) _minY=y[j];
			if(y[j]>_maxY) _maxY=y[j];
			if(z[j]<_minZ) _minZ=z[j];
			if(z[j]>_maxZ) _maxZ=z[j];
		}
		{
			if(_minX<minX) minX=_minX;
			if(_maxX>maxX) maxX=_maxX;
			if(_minY<minY) minY=_minY;
			if(_maxY>maxY) maxY=_maxY;
			if(_minZ<minZ) minZ=_minZ;
			if(_maxZ>maxZ) maxZ=_maxZ;
		}
	}
	// cell sizes so the data spans cells 0..99 on each axis
	xStep=(maxX-minX)/99;
	yStep=(maxY-minY)/99;
	zStep=(maxZ-minZ)/99;
	typedef vector<int> bit;
	typedef vector<bit> skinny;
	typedef vector<skinny> flat;
	typedef vector<flat> pack;
	pack pointLists(100, flat(100, skinny(100)));
	cout<<"Assigning points"<<endl;
	for(int i=0;i<lines;i++){
		int _x=(int)((x[i]-minX)/xStep);
		int _y=(int)((y[i]-minY)/yStep);
		int _z=(int)((z[i]-minZ)/zStep);
		pointLists[_x][_y][_z].push_back(i);
	}
	cudaMallocManaged(&indices, (size_t) 1000000*sizeof(int*));
	cudaMallocManaged(&lens, (size_t) 1000000*sizeof(int));
	//cudaMallocManaged(&res, (size_t) 1000000*sizeof(float*));
	for(int i=0;i<100;i++){
		for(int j=0;j<100;j++){
			for(int k=0;k<100;k++){
				int count=pointLists[i][j][k].size();
				int index=10000*i+100*j+k;
				lens[index]=count;
				cudaMallocManaged(&(indices[index]), (size_t) count*sizeof(int));
				// Copy the cell's point indices into the managed buffer.
				// The original passed &(indices[index]) as the destination,
				// overwriting the pointer table itself instead of the
				// allocated buffer; .data() also avoids &vec[0] on an
				// empty vector (undefined behaviour).
				if (count > 0) {
					cudaMemcpy(indices[index], pointLists[i][j][k].data(),
						count*sizeof(int), cudaMemcpyHostToDevice);
				}
			}
		}
	}
	cout<<"Done"<<endl;
	float total=0.0f;
	dim3 dimBlock(10,10,10);
	dim3 dimGrid(10,10,10);
	cout<<"Sending to GPU"<<endl;
	// launch the kernels; both accumulate into the managed gpuTotal
	gpuTotal=0;
	calcOverall<<<dimGrid, dimBlock>>>();
	calcNearby<<<dimGrid, dimBlock>>>();
	cudaDeviceSynchronize();
	// each pair was counted twice; the overall sign convention is negative
	gpuTotal*=-1/2.0;
	cout<<gpuTotal<<endl;
	cout<<counter<<endl;
	return 0;
}
|
1,348
|
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//The number of character in the encrypted text
#define N 1024
#define A 15
#define B 27
#define M 128
#define INV_MOD 111
void checkCUDAError(const char*);
void read_encrypted_file(int*);
/* Exercise 1.1 */
// Mathematical modulo: for positive b the result is in [0, b), unlike
// the C '%' operator, which can yield negative values for negative a.
__device__ int modulo(int a, int b){
	const int rem = a % b;
	return (rem < 0) ? rem + b : rem;
}
__global__ void affine_decrypt(int *d_input, int *d_output)
{
	/* Exercise 1.2 */
	// Affine-cipher decryption, one thread per character:
	// plain = INV_MOD * (cipher - B) mod M.
	// Assumes the launch provides exactly one thread per element.
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	const int scaled = INV_MOD * (d_input[idx] - B);
	d_output[idx] = modulo(scaled, M);
}
// Exercise placeholder: multi-block variant of affine_decrypt, left
// intentionally unimplemented for the lab exercise.
__global__ void affine_decrypt_multiblock(int *d_input, int *d_output)
{
/* Exercise 1.8 */
}
// Decrypts N affine-encrypted characters read from encrypted01.bin on the
// GPU and prints the plaintext to stdout.
int main(int argc, char *argv[])
{
	int *h_input, *h_output;
	int *d_input, *d_output;
	unsigned int size;
	int i;
	size = N * sizeof(int);
	/* allocate the host memory */
	h_input = (int *)malloc(size);
	h_output = (int *)malloc(size);
	/* Exercise 1.3: allocate device memory */
	cudaMalloc((void **)&d_input, size);
	cudaMalloc((void **)&d_output, size);
	checkCUDAError("Memory allocation");
	/* read the encryted text */
	read_encrypted_file(h_input);
	/* Exercise 1.4: copy host input to device input */
	cudaMemcpy(d_input, h_input, size, cudaMemcpyHostToDevice);
	checkCUDAError("Input transfer to device");
	/* Exercise 1.5: Configure the grid of thread blocks and run the GPU kernel */
	// Launch with the declared configuration: one block of N threads.
	// (The original declared these dim3s but launched with literals.)
	dim3 blocksPerGrid(1, 1, 1);
	dim3 threadsPerBlock(N, 1, 1);
	affine_decrypt <<<blocksPerGrid, threadsPerBlock>>> (d_input, d_output);
	/* wait for all threads to complete */
	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
	// the supported equivalent.
	cudaDeviceSynchronize();
	checkCUDAError("Kernel execution");
	/* Exercise 1.6: copy the gpu output back to the host */
	cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost);
	checkCUDAError("Result transfer to host");
	/* print out the result to screen */
	for (i = 0; i < N; i++) {
		printf("%c", (char)h_output[i]);
	}
	printf("\n");
	/* Exercise 1.7: free device memory */
	cudaFree(d_input);
	cudaFree(d_output);
	checkCUDAError("Free memory");
	/* free host buffers */
	free(h_input);
	free(h_output);
	return 0;
}
/* Abort with a readable message if the most recent CUDA call failed.
 * 'msg' labels the failing stage in the error report. Note this reads
 * and clears the sticky error via cudaGetLastError(). */
void checkCUDAError(const char *msg)
{
    cudaError_t status = cudaGetLastError();
    if (cudaSuccess == status)
        return;
    fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
/* Read N ints of ciphertext from encrypted01.bin into 'input'.
 * Exits with an error message if the file is missing or short.
 * Fixes vs. the original: the fread element size now matches the int
 * buffer, and the element count returned by fread is checked. */
void read_encrypted_file(int* input)
{
    FILE *f = NULL;
    f = fopen("encrypted01.bin", "rb"); //read and binary flags
    if (f == NULL){
        fprintf(stderr, "Error: Could not find encrypted01.bin file \n");
        exit(1);
    }
    //read encrypted data, verifying all N values arrived
    size_t read_count = fread(input, sizeof(int), N, f);
    if (read_count != (size_t)N) {
        fprintf(stderr, "Error: expected %d values, read %zu\n", N, read_count);
        fclose(f);
        exit(1);
    }
    fclose(f);
}
|
1,349
|
#include "cuda.h"
#include "stdio.h"
/*
 * Element-wise addition of two n x n matrices stored row-major:
 * result = A + B. 2D launch; each thread owns one (row, col) element,
 * and threads past the matrix edge (grid overshoot) do nothing.
 */
__global__ void addSquareMatrix (int *A, int *B, int *result, int n) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= n || row >= n)
        return;
    int idx = row * n + col;
    result[idx] = A[idx] + B[idx];
}
/*
 * Times an N x N matrix-add kernel launch with CUDA events.
 * Fixes vs. the original:
 *  - dimBlock(256,256) requested 65536 threads/block, far above the
 *    1024 hardware limit, so the kernel could never launch; use 16x16.
 *  - the grid is now rounded up (ceil-div) so all N=15000 rows/columns
 *    are covered (N/256 left a remainder uncovered).
 *  - result_h must hold the full N*N matrix, not just N ints.
 *  - events are destroyed after use.
 * NOTE(review): the host input matrices are never initialised and the
 * copies are commented out, as in the original — this measures launch
 * mechanics only, not a meaningful result.
 */
int main() {
    cudaEvent_t start, stop;
    float t;
    cudaEventCreate(&start);
    cudaEventRecord(start, 0);

    const int N = 15000;
    int *mat1_h = (int *)malloc(sizeof(int) * N * N);
    int *mat2_h = (int *)malloc(sizeof(int) * N * N);
    int *mat1_d, *mat2_d, *result_d;
    cudaMalloc(&mat1_d, sizeof(int) * N * N);
    cudaMalloc(&mat2_d, sizeof(int) * N * N);
    cudaMalloc(&result_d, sizeof(int) * N * N);
    //cudaMemcpy(mat1_d, mat1_h, sizeof(int) * N * N, cudaMemcpyHostToDevice);
    //cudaMemcpy(mat2_d, mat2_h, sizeof(int) * N * N, cudaMemcpyHostToDevice);

    /* 16x16 = 256 threads per block (legal); round the grid up so the
     * whole matrix is covered even though N is not a multiple of 16 */
    dim3 dimBlock(16, 16);
    dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x,
                 (N + dimBlock.y - 1) / dimBlock.y);
    addSquareMatrix<<<dimGrid, dimBlock>>>(mat1_d, mat2_d, result_d, N);

    /* full N*N result buffer */
    int *result_h = (int *)malloc(sizeof(int) * N * N);
    //cudaMemcpy(result_h, result_d, sizeof(int) * N * N, cudaMemcpyDeviceToHost);
    //print results
    cudaFree(result_d);
    cudaFree(mat1_d);
    cudaFree(mat2_d);
    free(mat1_h);
    free(mat2_h);
    free(result_h);
    cudaEventCreate(&stop);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&t, start, stop);
    printf("Time for the kernel: %f ms\n", t);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
|
1,350
|
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>
static const int WORK_SIZE = 256;
/* Reverse the bit order within every byte of 'number' (each mask is
 * byte-local): swap nibbles, then 2-bit pairs, then adjacent bits. */
__host__ __device__ unsigned int bitreverse(unsigned int number)
{
    unsigned int v = number;
    v = ((v & 0xf0f0f0f0) >> 4) | ((v & 0x0f0f0f0f) << 4);
    v = ((v & 0xcccccccc) >> 2) | ((v & 0x33333333) << 2);
    v = ((v & 0xaaaaaaaa) >> 1) | ((v & 0x55555555) << 1);
    return v;
}
/* Unary functor wrapping bitreverse() so thrust::transform can apply it
 * to host or device iterators. */
struct bitreverse_functor
{
    /* const-qualified: a functor's call operator should not need to
     * mutate state, and thrust may invoke it through a const copy. */
    __host__ __device__ unsigned int operator()(const unsigned int &x) const
    {
        return bitreverse(x);
    }
};
/* Fill a vector with 0..WORK_SIZE-1, bit-reverse every element on the
 * device via thrust::transform, and print the input/output pairs. */
int main()
{
    thrust::host_vector<unsigned int> idata(WORK_SIZE);
    for (int i = 0; i < WORK_SIZE; i++)
        idata[i] = i;

    // Host -> device copy, then in-place transform on the device.
    thrust::device_vector<unsigned int> dv = idata;
    thrust::transform(dv.begin(), dv.end(), dv.begin(), bitreverse_functor());

    // Device -> host copy of the results.
    thrust::host_vector<unsigned int> odata = dv;
    for (int i = 0; i < WORK_SIZE; i++) {
        std::cout << "Input value: " << idata[i] << ", output value: "
                << odata[i] << std::endl;
    }
    return 0;
}
|
1,351
|
/*
 * Element-wise product of two float spans, one element per thread:
 * result[result_offset + t] = lhs[lhs_offset + t] * rhs[rhs_offset + t].
 * Threads whose x-index is >= size are idle. Only threadIdx.x is used,
 * so this presumably expects a single-workgroup launch — confirm with
 * the caller if launched with multiple blocks.
 */
extern "C" __device__ void simple_mul_workgroup(float *lhs, size_t lhs_offset,
                                                float *rhs, size_t rhs_offset,
                                                float *result,
                                                size_t result_offset,
                                                size_t size) {
  int tid = threadIdx.x;
  if (tid >= size)
    return;
  result[result_offset + tid] = lhs[lhs_offset + tid] * rhs[rhs_offset + tid];
}
|
1,352
|
/*
* Copyright 2014-2015 NVIDIA Corporation. All rights reserved.
*
* Sample CUPTI app to demonstrate the usage of unified memory counter profiling
*
*/
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#define CUPTI_CALL(call) \
do { \
CUptiResult _status = call; \
if (_status != CUPTI_SUCCESS) { \
const char *errstr; \
cuptiGetResultString(_status, &errstr); \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #call, errstr); \
exit(-1); \
} \
} while (0)
#define DRIVER_API_CALL(apiFuncCall) \
do { \
CUresult _status = apiFuncCall; \
if (_status != CUDA_SUCCESS) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \
__FILE__, __LINE__, #apiFuncCall, _status); \
exit(-1); \
} \
} while (0)
#define RUNTIME_API_CALL(apiFuncCall) \
do { \
cudaError_t _status = apiFuncCall; \
if (_status != cudaSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status));\
exit(-1); \
} \
} while (0)
/* Verify that every element of 'data' equals expectedVal, treating the
 * buffer as size/sizeof(T) elements of type T. Reports and stops at the
 * FIRST mismatch only; 'loc' labels the report (e.g. "CPU"/"GPU").
 * Runs on host or device.
 * NOTE(review): the 0x%x format assumes T is int-compatible — confirm
 * if instantiated with other types. */
template<class T>
__host__ __device__ void checkData(const char *loc, T *data, int size, int expectedVal) {
int i;
for (i = 0; i < size / (int)sizeof(T); i++) {
if (data[i] != expectedVal) {
printf("Mismatch found on %s\n", loc);
printf("Address 0x%p, Observed = 0x%x Expected = 0x%x\n", data+i, data[i], expectedVal);
/* only the first mismatch is reported */
break;
}
}
}
/* Fill 'data' with 'writeVal', treating the buffer as size/sizeof(T)
 * elements of type T. Runs on host or device. */
template<class T>
__host__ __device__ void writeData(T *data, int size, int writeVal) {
    int count = size / (int)sizeof(T);
    for (int i = 0; i < count; i++)
        data[i] = writeVal;
}
/* Single-thread kernel: verify the managed buffer holds expectedVal
 * (written by the CPU), then overwrite it with -expectedVal so the host
 * can verify the GPU's write after synchronizing. */
__global__ void testKernel(int *data, int size, int expectedVal)
{
checkData("GPU", data, size, expectedVal);
writeData(data, size, -expectedVal);
}
/*
 * Unified-memory round trip: CPU writes a pattern to a managed buffer,
 * a kernel checks it and writes its negation, and the CPU verifies the
 * kernel's write. Fixes vs. the original: the kernel launch is now
 * error-checked via cudaGetLastError(), and cudaDeviceReset() is
 * checked like every other runtime call.
 */
int main(int argc, char **argv)
{
    int deviceCount;
    int *data = NULL;
    int size = 64*1024; // 64 KB
    int expectedVal = 123;

    DRIVER_API_CALL(cuInit(0));
    DRIVER_API_CALL(cuDeviceGetCount(&deviceCount));
    if (deviceCount == 0) {
        printf("There is no device supporting CUDA.\n");
        exit(-1);
    }

    // allocate unified memory
    printf("Allocation size in bytes %d\n", size);
    RUNTIME_API_CALL(cudaMallocManaged(&data, size));

    // CPU access: write the expected pattern
    writeData(data, size, expectedVal);

    // kernel launch: GPU verifies the pattern, then negates it
    testKernel<<<1,1>>>(data, size, expectedVal);
    RUNTIME_API_CALL(cudaGetLastError());   // catch launch-config errors
    RUNTIME_API_CALL(cudaDeviceSynchronize());

    // CPU access: verify the GPU's write
    checkData("CPU", data, size, -expectedVal);

    // free unified memory
    RUNTIME_API_CALL(cudaFree(data));
    RUNTIME_API_CALL(cudaDeviceReset());
    return 0;
}
|
1,353
|
#include <cuda.h>
#include <cuda_runtime.h>
/*
 * Compute Pd = Md * Nd for square Width x Width matrices (row-major).
 * Each thread produces one output element: the dot product of row ty
 * of Md with column tx of Nd.
 * Fix vs. the original: indices now include blockIdx, with a bounds
 * guard, so the kernel is correct for any grid — the original used
 * only threadIdx and silently produced partial results on multi-block
 * launches. Under the existing 1x1-grid launch behaviour is unchanged.
 */
__global__ void MatrixMulKernel(float * Md, float * Nd, float * Pd, int Width)
{
    int tx = blockIdx.x * blockDim.x + threadIdx.x;  // output column
    int ty = blockIdx.y * blockDim.y + threadIdx.y;  // output row
    if (tx >= Width || ty >= Width)
        return;
    // Pvaleur accumulates the value computed by this thread
    float Pvaleur = 0;
    for (int i = 0; i < Width; ++i)
    {
        float MdElement = Md[ty * Width + i];
        float NdElement = Nd[i * Width + tx];
        Pvaleur += MdElement * NdElement;
    }
    // each thread writes exactly one element of the result matrix
    Pd[ty * Width + tx] = Pvaleur;
}
/* Host wrapper: copy M and N to the device, run MatrixMulKernel on a
 * single block of Width x Width threads, and copy the product back
 * into P.
 * NOTE(review): a Width x Width block exceeds the 1024-threads-per-
 * block hardware limit whenever Width > 32, so this presumably only
 * works for small matrices — confirm intended Width range. No CUDA
 * call here is error-checked. */
void MatrixMulOnDevice(float * M, float * N, float * P, int Width)
{
//size in bytes of each Width x Width matrix
int size = Width * Width * sizeof(float);
float *Md;
float *Nd;
float *Pd;
//allocate the input matrices on the device and fill them
cudaMalloc((void**) &Md, size);
cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice) ;
cudaMalloc((void**) &Nd, size);
cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
//allocate the result matrix
cudaMalloc((void**) &Pd, size);
//a single block computes the whole product
dim3 dimGrid(1, 1);
//square block: one thread per output element
dim3 dimBlock(Width, Width);
//the matrix product itself
MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);
//fetch the computed result (blocking copy, so no explicit sync needed)
cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
//release the device matrices, no longer needed
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd);
}
|
1,354
|
/************************************************************
* ECE408 Parallel Programming - Final Project *
* *
* Topic: Terrain Generation *
* Members: Lai,Haoming; Ma,Yunhan; Wang,Bangqi *
* *
************************************************************/
/*
* Terrain Generation:
* Algorithmn: Diamond Square Algorithmn.
* Version:
* 0. Serial version: 1 * square loop + 4 * diamond loop;
* 1. Parallel version: 1 * sdsfsdfsdf + 4 * diamond kernel;
* 2. Less Kernel Version: 1 * square kernal + 1 * simple diamond kernel (1 thread => 4 vertex);
* 3. Smarter Kernel Version: 1 * sqaure kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
* 4. One Kernel Version: 1 * square_diamond kernel combined; (based on version 2)
* 5. Kernel Device Version: 1 * kernel + 1 * square device + 1 * diamond device;
* 6. Less Threads Version: 1 * kernel + 1 * square device + 1 * diamond device (only active threads we need);
* 7. Shared Memory Version: 1 * kernel + 1 * square device + 1 * diamond device (use share memory);
*
* 8. 2D Smarter Kernel Versio: 1 * sqaure kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <stdio.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <time.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <math.h>
/* Set the parameter */
/* Choose the version to use */
//#define VERSION 0
/* Set the length of each edge. please put power of 2 */
#define SIZE 512
/* Set number of array */
#define N (SIZE+1)*(SIZE+1)
/* Set the roughness for terrain */
#define ROUGHNESS 10
/* Set the height for each corner */
#define CORNER 0
/* main function for different version */
int version_0 (void);
int version_1 (void);
int version_2 (void);
int version_3 (bool print, int block_size);
int version_4 (bool print);
int version_5 (void);
int version_6 (void);
int version_7 (void);
int version_8 (bool print, int block_size);
int version_9 (bool print, int block_size);
/* main function */
/*
 * Interactive driver: prompts for a version number (0-9), whether to
 * print the generated height map, and a block edge size (max 32), then
 * dispatches to the matching version_N(). Only versions 3, 8 and 9
 * receive (print, block_size) and version 4 receives print; the other
 * versions ignore both answers. Unknown version numbers fall through
 * to version 0.
 */
int main (void){
int VERSION;
int p;
int block_size;
bool print= false;
printf("what version do you want: ");
scanf("%d", &VERSION);
printf("print? (0/1): ");
scanf("%d", &p);
printf("please define block_size(max = 32): ");
scanf("%d", &block_size);
if(p)
print = true;
switch(VERSION){
/* test version 0 */
case 0:
version_0();
break;
case 1:
/* test version 1 */
version_1();
break;
case 2:
/* test version 2 */
version_2();
break;
case 3:
/* test version 3 */
version_3(print, block_size);
break;
case 4:
/* test version 4 */
version_4(print);
break;
case 5:
/* test version 5 */
version_5();
break;
case 6:
/* test version 6 */
version_6();
break;
case 7:
/* test version 7 */
version_7();
break;
case 8:
/* test version 8 */
version_8(print, block_size);
break;
case 9:
/* test version 9 */
version_9(print, block_size);
break;
default:
/* unknown input: fall back to the serial version 0 */
version_0();
return 0;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 0.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 0.0:
* 0.0 Serial version: 1 * square loop + 4 * loop;
*
*/
/* host code for serial version */
/*
 * Version 0: serial diamond-square on a (SIZE+1) x (SIZE+1) grid.
 * Fixes vs. the original: the zeroing loops now cover the last row and
 * column (they previously stopped at < SIZE, leaving edge cells
 * uninitialised), and the clock is stopped before the result print-out
 * so the reported time covers the algorithm only.
 */
int version_0 (void) {
    clock_t start, end;
    double runTime;
    float heightMap[SIZE+1][SIZE+1];

    /* zero the full grid, including row SIZE and column SIZE */
    for(int i=0; i<=SIZE; i++){
        for(int j=0; j<=SIZE; j++){
            heightMap[i][j] = 0.0;
        }
    }

    /* seed the four corner heights */
    heightMap[0][0] = 0;
    heightMap[SIZE][0] = 0;
    heightMap[0][SIZE] = 0;
    heightMap[SIZE][SIZE] = 0;

    start = clock();
    int stride = SIZE;
    while(stride>=2){
        for(int i = 0; i<(SIZE/stride); i++){
            for(int j = 0; j<(SIZE/stride); j++){
                int leftbottom_x = i* stride;
                int leftbottom_y = j* stride;
                /* square step: centre = average of the 4 corners + noise */
                float average = heightMap[leftbottom_x][leftbottom_y] + heightMap[leftbottom_x + stride][leftbottom_y] + heightMap[leftbottom_x][leftbottom_y+stride] + heightMap[leftbottom_x + stride][leftbottom_y +stride];
                average = average /4 ;
                heightMap[leftbottom_x + stride/2][leftbottom_y + stride/2]= average + rand() %10 - 5;
                /* diamond step: 4 edge midpoints from the centre plus the
                 * two adjacent corners, each with its own noise */
                heightMap[leftbottom_x + stride/2 ][leftbottom_y] = (average + heightMap[leftbottom_x][leftbottom_y] + heightMap[leftbottom_x + stride][leftbottom_y] ) /3 + rand() %10 -5;
                heightMap[leftbottom_x][leftbottom_y + stride/2] = (average + heightMap[leftbottom_x][leftbottom_y] + heightMap[leftbottom_x][leftbottom_y + stride] ) /3 + rand() %10 -5 ;
                heightMap[leftbottom_x + stride][leftbottom_y+ stride/2] = (average + heightMap[leftbottom_x + stride ][leftbottom_y] + heightMap[leftbottom_x + stride][leftbottom_y + stride] ) /3 +rand() %10-5;
                heightMap[leftbottom_x+ stride/2][leftbottom_y+ stride] = (average + heightMap[leftbottom_x][leftbottom_y + stride] + heightMap[leftbottom_x + stride][leftbottom_y + stride] ) /3 +rand() %10-5;
            }
        }
        printf("%d \n", stride);
        stride = stride/2;
    }
    end = clock();  /* stop timing before the (slow) print-out */

    for (int i=0; i<=SIZE; i++){
        for(int j=0; j<=SIZE; j++){
            printf("%d: x = %d, y = %d; hm = %f\n", i*j, i, j, heightMap[i][j]);
        }
    }

    runTime = (double)(end - start)/CLOCKS_PER_SEC;
    printf("Run time for Version_0: %f\n", runTime);
    printf("Version 0\n");
    return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 1.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 1.0:
* 1.0 Parallel version: 1 * square kernel + 4 * diamond kernel;
* This parallel function parallelize the serial code directly. it change the one square loop to
* one square kernel and change four diamond loop to four different diamond kernel. 1
*/
/* square kernel to calculate the middle point */
/*
 * Square step: each square of side 'rect' gets its centre set to the
 * average of its 4 corners plus uniform noise in [-ROUGHNESS/2,
 * ROUGHNESS/2). The thread-to-square mapping wraps mod SIZE, so
 * threads beyond squareInRow^2 recompute the same squares (with fresh
 * noise) — redundant but how the original maps work.
 * NOTE(review): __syncthreads() sits inside the divergent `idx < N`
 * branch; as the kernel's last statement it serves no purpose.
 */
__global__ void Square_1(curandState* rng, float* hm, int rect, float* check1, float* check2){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
/* initialize vairable */
int half = rect/2;
int i, j, ni, nj, mi, mj;
int squareInRow = SIZE/rect;
/* calculate vertex: (i,j) bottom-left, (ni,nj) top-right, (mi,mj) centre */
i = (idx%squareInRow*rect)%SIZE;
j = (idx/squareInRow*rect)%SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set check value (debug output: centre coordinates) */
check1[idx] = mi;
check2[idx] = mj;
/* set random generator: uniform noise in [-ROUGHNESS/2, ROUGHNESS/2) */
float v1 = (0.0f - (float)ROUGHNESS)/2;
float v2 = ((float)ROUGHNESS)/2;
curandState localState = rng[idx];
float rand = v1 + (v2 - v1) * curand_uniform(&localState);
rng[idx] = localState;
/* set height map: centre = corner average + noise */
hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
__syncthreads();
}
}
/* diamond kernel 1_1 to calculate the middle-bottom point */
/*
 * Diamond step, bottom edge midpoint (mi, j) of each square of side
 * 'rect': average of the 3 (edge of map) or 4 neighbouring points at
 * distance rect/2, plus uniform noise in [-ROUGHNESS/2, ROUGHNESS/2).
 * NOTE(review): the trailing __syncthreads() is inside the divergent
 * `idx < N` branch and, as the last statement, serves no purpose.
 */
__global__ void Diamond_1_1(curandState* rng, float* hm, int rect, float* check1, float* check2){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
/* initialize vairable */
int half = rect/2;
int i, mi, j;
int pmi_b, pmj_b;
float hm_b;
int num_b;
int squareInRow = SIZE/rect;
/* calculate vertex */
i = (idx%squareInRow*rect)%SIZE;
j = (idx/squareInRow*rect)%SIZE;
mi = i + half;
/* find 4 diamond vertex */
/* bottom vertex */
pmi_b = mi;
pmj_b = j;
/* set the value */
/* bottom height: the below-neighbour exists only away from the map edge */
hm_b = 0;
num_b = 3;
if (pmj_b - half >= 0){
hm_b += hm[pmi_b + (pmj_b-half)*(SIZE+1)];
num_b = 4;
}
hm_b += hm[pmi_b + (pmj_b+half)*(SIZE+1)];
hm_b += hm[(pmi_b-half) + pmj_b*(SIZE+1)];
hm_b += hm[(pmi_b+half) + pmj_b*(SIZE+1)];
/* set check value */
// check1[idx] = hm_l;
// check2[idx] = hm_l;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS)/2;
float v2 = ((float)ROUGHNESS)/2;
curandState localState = rng[idx];
float rand1 = v1 + (v2 - v1) * curand_uniform(&localState);
/* set height map */
hm[pmi_b + pmj_b*(SIZE+1)] = hm_b/num_b + rand1;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* diamond kernel 1_2 to calculate the left point */
/*
 * Diamond step, left edge midpoint (i, mj) of each square of side
 * 'rect': average of the 3 (edge of map) or 4 neighbouring points at
 * distance rect/2, plus uniform noise in [-ROUGHNESS/2, ROUGHNESS/2).
 * NOTE(review): the trailing __syncthreads() is inside the divergent
 * `idx < N` branch and, as the last statement, serves no purpose.
 */
__global__ void Diamond_1_2(curandState* rng, float* hm, int rect, float* check1, float* check2){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
/* initialize vairable */
int half = rect/2;
int i, j, mj;
int pmi_l, pmj_l;
float hm_l;
int num_l;
int squareInRow = SIZE/rect;
/* calculate vertex */
i = (idx%squareInRow*rect)%SIZE;
j = (idx/squareInRow*rect)%SIZE;
mj = j + half;
/* find 4 diamond vertex */
/* left vertex */
pmi_l = i;
pmj_l = mj;
/* set the value */
/* left height: the left-neighbour exists only away from the map edge */
hm_l = 0;
num_l = 3;
if (pmi_l - half >= 0){
hm_l += hm[(pmi_l-half) + pmj_l*(SIZE+1)];
num_l = 4;
}
hm_l += hm[(pmi_l+half) + pmj_l*(SIZE+1)];
hm_l += hm[pmi_l + (pmj_l-half)*(SIZE+1)];
hm_l += hm[pmi_l + (pmj_l+half)*(SIZE+1)];
/* set check value */
// check1[idx] = hm_l;
// check2[idx] = hm_l;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS)/2;
float v2 = ((float)ROUGHNESS)/2;
curandState localState = rng[idx];
float rand2 = v1 + (v2 - v1) * curand_uniform(&localState);
/* set height map */
hm[pmi_l + pmj_l*(SIZE+1)] = hm_l/num_l + rand2;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* diamond kernel 1_3 to calculate the right point */
/*
 * Diamond step, right edge midpoint (ni, mj) of each square of side
 * 'rect': average of the 3 (edge of map) or 4 neighbouring points at
 * distance rect/2, plus uniform noise in [-ROUGHNESS/2, ROUGHNESS/2).
 * NOTE(review): the trailing __syncthreads() is inside the divergent
 * `idx < N` branch and, as the last statement, serves no purpose.
 */
__global__ void Diamond_1_3(curandState* rng, float* hm, int rect, float* check1, float* check2){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
/* initialize vairable */
int half = rect/2;
int i, j, ni, mj;
int pmi_r, pmj_r;
float hm_r;
int num_r;
int squareInRow = SIZE/rect;
/* calculate vertex */
i = (idx%squareInRow*rect)%SIZE;
j = (idx/squareInRow*rect)%SIZE;
ni = i + rect;
mj = j + half;
/* find 4 diamond vertex */
/* right vertex */
pmi_r = ni;
pmj_r = mj;
/* set the value */
/* right height: the right-neighbour exists only away from the map edge */
hm_r = 0;
num_r = 3;
if (pmi_r + half <= SIZE){
hm_r += hm[(pmi_r+half) + pmj_r*(SIZE+1)];
num_r = 4;
}
hm_r += hm[(pmi_r-half) + pmj_r*(SIZE+1)];
hm_r += hm[pmi_r + (pmj_r-half)*(SIZE+1)];
hm_r += hm[pmi_r + (pmj_r+half)*(SIZE+1)];
/* set check value */
// check1[idx] = hm_l;
// check2[idx] = hm_l;
/* get height for */
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS)/2;
float v2 = ((float)ROUGHNESS)/2;
curandState localState = rng[idx];
float rand3 = v1 + (v2 - v1) * curand_uniform(&localState);
/* set height map */
hm[pmi_r + pmj_r*(SIZE+1)] = hm_r/num_r + rand3;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* diamond kernel 1_4 to calculate the middle-top point */
/*
 * Diamond step, top edge midpoint (mi, nj) of each square of side
 * 'rect': average of the 3 (edge of map) or 4 neighbouring points at
 * distance rect/2, plus uniform noise in [-ROUGHNESS/2, ROUGHNESS/2).
 * NOTE(review): the trailing __syncthreads() is inside the divergent
 * `idx < N` branch and, as the last statement, serves no purpose.
 */
__global__ void Diamond_1_4(curandState* rng, float* hm, int rect, float* check1, float* check2){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
/* initialize vairable */
int half = rect/2;
int i, j, mi, nj;
int pmi_t, pmj_t;
float hm_t;
int num_t;
int squareInRow = SIZE/rect;
/* calculate vertex */
i = (idx%squareInRow*rect)%SIZE;
j = (idx/squareInRow*rect)%SIZE;
nj = j + rect;
mi = i + half;
/* find 4 diamond vertex */
/* top vertex */
pmi_t = mi;
pmj_t = nj;
/* set the value */
/* top height: the above-neighbour exists only away from the map edge */
hm_t = 0;
num_t = 3;
if (pmj_t + half <= SIZE){
hm_t += hm[pmi_t + (pmj_t+half)*(SIZE+1)];
num_t = 4;
}
hm_t += hm[pmi_t + (pmj_t-half)*(SIZE+1)];
hm_t += hm[(pmi_t-half) + pmj_t*(SIZE+1)];
hm_t += hm[(pmi_t+half) + pmj_t*(SIZE+1)];
/* set check value */
// check1[idx] = hm_l;
// check2[idx] = hm_l;
/* get height for */
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS)/2;
float v2 = ((float)ROUGHNESS)/2;
curandState localState = rng[idx];
float rand4 = v1 + (v2 - v1) * curand_uniform(&localState);
/* set height map */
hm[pmi_t + pmj_t*(SIZE+1)] = hm_t/num_t + rand4;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* host code for version 1.0 */
/*
 * Version 1 driver: per stride level, run 1 square kernel followed by
 * the 4 diamond kernels, synchronizing after each so every level sees
 * the previous one's writes.
 * Fixes vs. the original: the three N-float host buffers (~3 MB total
 * for SIZE=512) are heap-allocated instead of stack locals, and the
 * curand state array is now freed (it was leaked).
 */
int version_1 (void) {
    printf("Version 1: square kernel + 4 diamond kernel\n");
    /* host-side buffers: N = (SIZE+1)^2 floats each — too big for the stack */
    float *check1 = (float *)malloc(N * sizeof(float));
    float *check2 = (float *)malloc(N * sizeof(float));
    float *heightMap = (float *)malloc(N * sizeof(float));
    /* device buffers */
    float *dev_heightMap;
    float *dev_check1;
    float *dev_check2;
    /* timing */
    clock_t start, end;
    double runTime;
    /* flat (SIZE+1)x(SIZE+1) height map, all zero */
    for (int i=0; i<N; i++){
        heightMap[i] = 0;
    }
    /* set height for corner */
    heightMap[0 + 0 * (SIZE+1)] = CORNER;
    printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE+1)]);
    heightMap[SIZE + 0 * (SIZE+1)] = CORNER;
    printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE+1)]);
    heightMap[0 + SIZE * (SIZE+1)] = CORNER;
    printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE+1)]);
    heightMap[SIZE + SIZE * (SIZE+1)] = CORNER;
    printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE+1)]);
    curandState* rng;
    /* allocate memory for device */
    cudaMalloc(&rng, N * sizeof(curandState));
    cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
    cudaMalloc((void**)&dev_check1, N * sizeof(float));
    cudaMalloc((void**)&dev_check2, N * sizeof(float));
    /* memory copy from host to device */
    cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_check1, check1, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_check2, check2, N * sizeof(float), cudaMemcpyHostToDevice);
    start = clock();
    /* one square pass + four diamond passes per stride level */
    for (int i=SIZE; i>1; i=i/2){
        Square_1<<<ceil((float)N/256),256>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
        Diamond_1_1<<<ceil((float)N/256),256>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
        Diamond_1_2<<<ceil((float)N/256),256>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
        Diamond_1_3<<<ceil((float)N/256),256>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
        Diamond_1_4<<<ceil((float)N/256),256>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
    }
    end = clock();
    /* memory copy from device to host*/
    cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check1, dev_check1, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check2, dev_check2, N * sizeof(float), cudaMemcpyDeviceToHost);
    /* print the output */
    for (int i=0; i<N; i++){
        printf("%d: x = %d, y = %d; hm = %f\n", i, i%(SIZE+1), i/(SIZE+1), heightMap[i]);
    }
    /* release device memory (rng was previously leaked) and host buffers */
    cudaFree(rng);
    cudaFree(dev_heightMap);
    cudaFree(dev_check1);
    cudaFree(dev_check2);
    free(check1);
    free(check2);
    free(heightMap);
    runTime = (double)(end - start)/CLOCKS_PER_SEC;
    printf("Run time for Version_1: %f\n", runTime);
    return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 2.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 2.0:
* 2.0 Less Kernel Version: 1 * square kernal + 1 * simple diamond kernel (1 thread => 4 vertex);
* This kernel combine the four diamond kernel to one single kernel. However, each thread in diamond
* kernel needs to calculate four vertex.
*/
/* combined diamond kernel to calculate the four point in each thread */
/*
 * Diamond step, all four edge midpoints (bottom/left/right/top) of one
 * square of side 'rect' handled by a single thread. Each midpoint is
 * the average of its 3 (map edge) or 4 neighbours at distance rect/2,
 * plus uniform noise in [-ROUGHNESS/2, ROUGHNESS/2).
 * NOTE(review): neighbouring squares' threads write shared edge
 * midpoints concurrently; the last writer wins, which the original
 * design appears to accept. The trailing __syncthreads() is inside the
 * divergent `idx < N` branch and, as the last statement, does nothing.
 */
__global__ void Diamond_2(curandState* rng, float* hm, int rect, float* check1, float* check2){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
/* initialize vairable */
int half = rect/2;
int i, j, ni, nj, mi, mj;
int pmi_b, pmj_b, pmi_l, pmj_l, pmi_r, pmj_r, pmi_t, pmj_t;
float hm_b, hm_l, hm_r, hm_t;
int num_b, num_l, num_r, num_t;
int squareInRow = SIZE/rect;
/* calculate vertex */
i = (idx%squareInRow*rect)%SIZE;
j = (idx/squareInRow*rect)%SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* find 4 diamond vertex */
/* bottom vertex */
pmi_b = mi;
pmj_b = j;
/* left vertex */
pmi_l = i;
pmj_l = mj;
/* right vertex */
pmi_r = ni;
pmj_r = mj;
/* top vertex */
pmi_t = mi;
pmj_t = nj;
/* set the value */
/* bottom height */
hm_b = 0;
num_b = 3;
if (pmj_b - half >= 0){
hm_b += hm[pmi_b + (pmj_b-half)*(SIZE+1)];
num_b = 4;
}
hm_b += hm[pmi_b + (pmj_b+half)*(SIZE+1)];
hm_b += hm[(pmi_b-half) + pmj_b*(SIZE+1)];
hm_b += hm[(pmi_b+half) + pmj_b*(SIZE+1)];
/* left height */
hm_l = 0;
num_l = 3;
if (pmi_l - half >= 0){
hm_l += hm[(pmi_l-half) + pmj_l*(SIZE+1)];
num_l = 4;
}
hm_l += hm[(pmi_l+half) + pmj_l*(SIZE+1)];
hm_l += hm[pmi_l + (pmj_l-half)*(SIZE+1)];
hm_l += hm[pmi_l + (pmj_l+half)*(SIZE+1)];
/* right height */
hm_r = 0;
num_r = 3;
if (pmi_r + half <= SIZE){
hm_r += hm[(pmi_r+half) + pmj_r*(SIZE+1)];
num_r = 4;
}
hm_r += hm[(pmi_r-half) + pmj_r*(SIZE+1)];
hm_r += hm[pmi_r + (pmj_r-half)*(SIZE+1)];
hm_r += hm[pmi_r + (pmj_r+half)*(SIZE+1)];
/* top height */
hm_t = 0;
num_t = 3;
if (pmj_t + half <= SIZE){
hm_t += hm[pmi_t + (pmj_t+half)*(SIZE+1)];
num_t = 4;
}
hm_t += hm[pmi_t + (pmj_t-half)*(SIZE+1)];
hm_t += hm[(pmi_t-half) + pmj_t*(SIZE+1)];
hm_t += hm[(pmi_t+half) + pmj_t*(SIZE+1)];
/* set check value (debug output) */
check1[idx] = hm_l;
check2[idx] = hm_l;
/* get height for */
/* set random generator: one independent noise sample per midpoint */
float v1 = (0.0f - (float)ROUGHNESS)/2;
float v2 = ((float)ROUGHNESS)/2;
curandState localState = rng[idx];
float rand1 = v1 + (v2 - v1) * curand_uniform(&localState);
float rand2 = v1 + (v2 - v1) * curand_uniform(&localState);
float rand3 = v1 + (v2 - v1) * curand_uniform(&localState);
float rand4 = v1 + (v2 - v1) * curand_uniform(&localState);
/* set height map */
hm[pmi_b + pmj_b*(SIZE+1)] = hm_b/num_b + rand1;
hm[pmi_l + pmj_l*(SIZE+1)] = hm_l/num_l + rand2;
hm[pmi_r + pmj_r*(SIZE+1)] = hm_r/num_r + rand3;
hm[pmi_t + pmj_t*(SIZE+1)] = hm_t/num_t + rand4;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* the host code for version 2: 1 square kernel + 1 stupid diamond kernel */
/*
 * Version 2 driver: per stride level, one square kernel then one
 * combined diamond kernel (each thread computes all four midpoints of
 * its square).
 * Fixes vs. the original: the three N-float host buffers (~3 MB total
 * for SIZE=512) are heap-allocated instead of stack locals, and the
 * curand state array is now freed (it was leaked).
 */
int version_2 (void) {
    printf("Version 2: square kernel + stupid diamond kernel\n");
    /* host-side buffers: N = (SIZE+1)^2 floats each — too big for the stack */
    float *check1 = (float *)malloc(N * sizeof(float));
    float *check2 = (float *)malloc(N * sizeof(float));
    float *heightMap = (float *)malloc(N * sizeof(float));
    /* device buffers */
    float *dev_heightMap;
    float *dev_check1;
    float *dev_check2;
    /* timing */
    clock_t start, end;
    double runTime;
    /* flat (SIZE+1)x(SIZE+1) height map, all zero */
    for (int i=0; i<N; i++){
        heightMap[i] = 0;
    }
    /* set height for corner */
    heightMap[0 + 0 * (SIZE+1)] = CORNER;
    printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE+1)]);
    heightMap[SIZE + 0 * (SIZE+1)] = CORNER;
    printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE+1)]);
    heightMap[0 + SIZE * (SIZE+1)] = CORNER;
    printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE+1)]);
    heightMap[SIZE + SIZE * (SIZE+1)] = CORNER;
    printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE+1)]);
    curandState* rng;
    /* allocate memory for device */
    cudaMalloc(&rng, N * sizeof(curandState));
    cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
    cudaMalloc((void**)&dev_check1, N * sizeof(float));
    cudaMalloc((void**)&dev_check2, N * sizeof(float));
    /* memory copy from host to device */
    cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_check1, check1, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_check2, check2, N * sizeof(float), cudaMemcpyHostToDevice);
    start = clock();
    /* one square pass + one combined diamond pass per stride level */
    for (int i=SIZE; i>1; i=i/2){
        Square_1<<<ceil((float)N/256),256>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
        Diamond_2<<<ceil((float)N/256),256>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
    }
    end = clock();
    /* memory copy from device to host*/
    cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check1, dev_check1, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check2, dev_check2, N * sizeof(float), cudaMemcpyDeviceToHost);
    /* print the output */
    for (int i=0; i<N; i++){
        printf("%d: x = %d, y = %d; hm = %f\n", i, i%(SIZE+1), i/(SIZE+1), heightMap[i]);
    }
    /* release device memory (rng was previously leaked) and host buffers */
    cudaFree(rng);
    cudaFree(dev_heightMap);
    cudaFree(dev_check1);
    cudaFree(dev_check2);
    free(check1);
    free(check2);
    free(heightMap);
    runTime = (double)(end - start)/CLOCKS_PER_SEC;
    printf("Run time for Version_2: %0.20f\n", runTime);
    return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 3.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 3.0:
* 3.0 Smarter Kernel Version: 1 * sqaure kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
* This version reconstruct the diamond kernel to use different threads for different vertx. Each
* thread in diamond kernel only need to calculate one vertex.
*/
/* smart diamond kernel calculate the diamond vertex and each thread only calculate one vertex */
/*
 * Diamond step, one midpoint per thread: idx selects both the square
 * (low bits) and which of its four midpoints to compute (tid in 0..3,
 * taken from idx / squareInRow^2 mod 4). The midpoint is the average
 * of however many of its 4 neighbours lie inside the map, plus uniform
 * noise in [-ROUGHNESS/2, ROUGHNESS/2).
 * NOTE(review): the trailing __syncthreads() is inside the divergent
 * `idx < N` branch and, as the last statement, serves no purpose.
 */
__global__ void Diamond_3(curandState* rng, float* hm, int rect, float* check1, float* check2){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
/* initialize vairable */
int half = rect/2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
int squareInRow = SIZE/rect;
/* calculate vertex */
i = (idx%squareInRow*rect)%SIZE;
j = (idx/squareInRow*rect)%SIZE;
/* Calculate the diamond vertex use idx: tid 0..3 picks one of the
 * four edge midpoints of the square whose corner is (i, j) */
int tid = idx/(squareInRow*squareInRow)%4;
pmi = i + (1 - tid%2)*half + tid/2*half;
pmj = j + tid%2*half + tid/2*half;
/* Set the value: sum only the neighbours that are inside the map */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi-half) + pmj*(SIZE+1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi+half) + pmj*(SIZE+1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj-half)*(SIZE+1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj+half)*(SIZE+1)];
num_p++;
}
/* set check value (debug output: midpoint coordinates) */
check1[idx] = pmi;
check2[idx] = pmj;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS)/2;
float v2 = ((float)ROUGHNESS)/2;
curandState localState = rng[idx];
float rand = v1 + (v2 - v1) * curand_uniform(&localState);
/* get height for */
hm[pmi + pmj*(SIZE+1)] = hm_p/num_p + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* host driver for version 3: one square kernel + one smart diamond kernel per level.
 * print      - dump the final height map to stdout
 * block_size - block edge; threads per block = block_size^2
 * returns EXIT_SUCCESS */
int version_3 (bool print, int block_size) {
    printf("Version 3: square kernel + smart diamond kernel\n");
    /* host-side buffers (NOTE(review): float[N] on the stack may overflow for large N) */
    float check1[N];
    float check2[N];
    float heightMap[N];
    /* device-side buffers */
    float *dev_heightMap;
    float *dev_check1;
    float *dev_check2;
    /* wall-clock timing of the kernel loop */
    clock_t start, end;
    double runTime;
    /* threads per block (NOTE(review): must not exceed the device limit of 1024) */
    int size = block_size * block_size;
    /* start from a flat height map */
    for (int i=0; i<N; i++){
        heightMap[i] = 0;
    }
    /* seed the four corners of the (SIZE+1)x(SIZE+1) grid */
    heightMap[0 + 0 * (SIZE+1)] = CORNER;
    printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE+1)]);
    heightMap[SIZE + 0 * (SIZE+1)] = CORNER;
    printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE+1)]);
    heightMap[0 + SIZE * (SIZE+1)] = CORNER;
    printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE+1)]);
    heightMap[SIZE + SIZE * (SIZE+1)] = CORNER;
    printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE+1)]);
    curandState* rng;
    /* device allocations (NOTE(review): return codes unchecked; the rng states
     * are never seeded with curand_init in this file — confirm that happens
     * elsewhere) */
    cudaMalloc(&rng, N * sizeof(curandState));
    cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
    cudaMalloc((void**)&dev_check1, N * sizeof(float));
    cudaMalloc((void**)&dev_check2, N * sizeof(float));
    /* upload the seeded height map; the check buffers are outputs only, so the
     * original copies of their uninitialized host contents were dropped */
    cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
    start = clock();
    /* diamond-square: the square size halves each iteration */
    for (int i=SIZE; i>1; i=i/2){
        Square_1<<<ceil((float)N/size),size>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
        Diamond_3<<<ceil((float)N/size),size>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
    }
    end = clock();
    /* download results */
    cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check1, dev_check1, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check2, dev_check2, N * sizeof(float), cudaMemcpyDeviceToHost);
    if(print){
        for (int i=0; i<N; i++){
            printf("%d: x = %d, y = %d; hm = %f\n", i, i%(SIZE+1), i/(SIZE+1), heightMap[i]);
        }
    }
    /* release device memory (fix: rng was previously leaked) */
    cudaFree(rng);
    cudaFree(dev_heightMap);
    cudaFree(dev_check1);
    cudaFree(dev_check2);
    runTime = (double)(end - start)/CLOCKS_PER_SEC;
    printf("Run time for Version_3: %0.20f\n", runTime);
    return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 4.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 4.0:
 * 4.0 Fewer-Kernels Version: 1 * square kernel + 1 * simple diamond kernel (1 thread => 4 vertices);
 * This version combines the four diamond kernels into a single kernel. However, each thread in the
 * diamond kernel needs to calculate four vertices.
*/
/* combined kernel for version 4: each thread performs the square step for its
 * own square and then updates that square's four diamond vertices.
 * Launch: 1D grid; threads with idx >= N are inactive. */
__global__ void Square_Diamond_4(curandState* rng, float* hm, int rect, float* check1, float* check2){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N){
        int half = rect/2;
        int i, j, ni, nj, mi, mj;
        int pmi_b, pmj_b, pmi_l, pmj_l, pmi_r, pmj_r, pmi_t, pmj_t;
        float hm_b, hm_l, hm_r, hm_t;
        int num_b, num_l, num_r, num_t;
        int squareInRow = SIZE/rect;
        /* (i, j) lower corner, (ni, nj) opposite corner, (mi, mj) centre */
        i = (idx%squareInRow*rect)%SIZE;
        j = (idx/squareInRow*rect)%SIZE;
        ni = i + rect;
        nj = j + rect;
        mi = i + half;
        mj = j + half;
        /* debug output: centre coordinates */
        check1[idx] = mi;
        check2[idx] = mj;
        /* uniform random perturbation in [-ROUGHNESS/2, ROUGHNESS/2) */
        float v1 = (0.0f - (float)ROUGHNESS)/2;
        float v2 = ((float)ROUGHNESS)/2;
        curandState localState = rng[idx];
        float rand = v1 + (v2 - v1) * curand_uniform(&localState);
        rng[idx] = localState;
        /* square step: centre = average of the four corners + noise */
        hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
        /* NOTE(review): this barrier sits inside a divergent branch (threads
         * with idx >= N never reach it), which is undefined behaviour, and it
         * only orders threads within one block even though the diamond step
         * below also reads centres written by other blocks — confirm the
         * intended synchronization. Kept as-is to preserve behaviour. */
        __syncthreads();
        /* the four diamond vertices of this square */
        pmi_b = mi;  pmj_b = j;   /* bottom */
        pmi_l = i;   pmj_l = mj;  /* left */
        pmi_r = ni;  pmj_r = mj;  /* right */
        pmi_t = mi;  pmj_t = nj;  /* top */
        /* bottom vertex: 3 neighbours, or 4 when the lower one is on the map */
        hm_b = 0;
        num_b = 3;
        if (pmj_b - half >= 0){
            hm_b += hm[pmi_b + (pmj_b-half)*(SIZE+1)];
            num_b = 4;
        }
        hm_b += hm[pmi_b + (pmj_b+half)*(SIZE+1)];
        hm_b += hm[(pmi_b-half) + pmj_b*(SIZE+1)];
        hm_b += hm[(pmi_b+half) + pmj_b*(SIZE+1)];
        /* left vertex */
        hm_l = 0;
        num_l = 3;
        if (pmi_l - half >= 0){
            hm_l += hm[(pmi_l-half) + pmj_l*(SIZE+1)];
            num_l = 4;
        }
        hm_l += hm[(pmi_l+half) + pmj_l*(SIZE+1)];
        hm_l += hm[pmi_l + (pmj_l-half)*(SIZE+1)];
        hm_l += hm[pmi_l + (pmj_l+half)*(SIZE+1)];
        /* right vertex */
        hm_r = 0;
        num_r = 3;
        if (pmi_r + half <= SIZE){
            hm_r += hm[(pmi_r+half) + pmj_r*(SIZE+1)];
            num_r = 4;
        }
        hm_r += hm[(pmi_r-half) + pmj_r*(SIZE+1)];
        hm_r += hm[pmi_r + (pmj_r-half)*(SIZE+1)];
        hm_r += hm[pmi_r + (pmj_r+half)*(SIZE+1)];
        /* top vertex */
        hm_t = 0;
        num_t = 3;
        if (pmj_t + half <= SIZE){
            hm_t += hm[pmi_t + (pmj_t+half)*(SIZE+1)];
            num_t = 4;
        }
        hm_t += hm[pmi_t + (pmj_t-half)*(SIZE+1)];
        hm_t += hm[(pmi_t-half) + pmj_t*(SIZE+1)];
        hm_t += hm[(pmi_t+half) + pmj_t*(SIZE+1)];
        /* debug output (NOTE(review): both channels record hm_l — looks like a
         * copy/paste slip; kept to preserve behaviour) */
        check1[idx] = hm_l;
        check2[idx] = hm_l;
        /* independent noise per diamond vertex */
        float rand1 = v1 + (v2 - v1) * curand_uniform(&localState);
        float rand2 = v1 + (v2 - v1) * curand_uniform(&localState);
        float rand3 = v1 + (v2 - v1) * curand_uniform(&localState);
        float rand4 = v1 + (v2 - v1) * curand_uniform(&localState);
        /* diamond step: neighbour average + noise */
        hm[pmi_b + pmj_b*(SIZE+1)] = hm_b/num_b + rand1;
        hm[pmi_l + pmj_l*(SIZE+1)] = hm_l/num_l + rand2;
        hm[pmi_r + pmj_r*(SIZE+1)] = hm_r/num_r + rand3;
        hm[pmi_t + pmj_t*(SIZE+1)] = hm_t/num_t + rand4;
        rng[idx] = localState;
        /* fix: removed a trailing __syncthreads() inside this divergent branch —
         * undefined behaviour, and a no-op as the kernel's last statement */
    }
}
/* host driver for version 4: one combined square+diamond kernel per level.
 * print - dump the final height map to stdout
 * returns EXIT_SUCCESS */
int version_4 (bool print) {
    /* fix: the banner previously said "Version 2: square kernel + stupid diamond kernel" */
    printf("Version 4: combined square + diamond kernel\n");
    /* host-side buffers (NOTE(review): float[N] on the stack may overflow for large N) */
    float check1[N];
    float check2[N];
    float heightMap[N];
    /* device-side buffers */
    float *dev_heightMap;
    float *dev_check1;
    float *dev_check2;
    /* wall-clock timing of the kernel loop */
    clock_t start, end;
    double runTime;
    /* start from a flat height map */
    for (int i=0; i<N; i++){
        heightMap[i] = 0;
    }
    /* seed the four corners of the (SIZE+1)x(SIZE+1) grid */
    heightMap[0 + 0 * (SIZE+1)] = CORNER;
    printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE+1)]);
    heightMap[SIZE + 0 * (SIZE+1)] = CORNER;
    printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE+1)]);
    heightMap[0 + SIZE * (SIZE+1)] = CORNER;
    printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE+1)]);
    heightMap[SIZE + SIZE * (SIZE+1)] = CORNER;
    printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE+1)]);
    curandState* rng;
    /* device allocations (return codes unchecked, as elsewhere in this file) */
    cudaMalloc(&rng, N * sizeof(curandState));
    cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
    cudaMalloc((void**)&dev_check1, N * sizeof(float));
    cudaMalloc((void**)&dev_check2, N * sizeof(float));
    /* upload the seeded height map; the check buffers are outputs only, so the
     * original copies of their uninitialized host contents were dropped */
    cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
    start = clock();
    /* diamond-square: the square size halves each iteration */
    for (int i=SIZE; i>1; i=i/2){
        Square_Diamond_4<<<ceil((float)N/256),256>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
    }
    end = clock();
    /* download results */
    cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check1, dev_check1, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check2, dev_check2, N * sizeof(float), cudaMemcpyDeviceToHost);
    if(print){
        for (int i=0; i<N; i++){
            printf("%d: x = %d, y = %d; hm = %f\n", i, i%(SIZE+1), i/(SIZE+1), heightMap[i]);
        }
    }
    /* release device memory (fix: rng was previously leaked) */
    cudaFree(rng);
    cudaFree(dev_heightMap);
    cudaFree(dev_check1);
    cudaFree(dev_check2);
    runTime = (double)(end - start)/CLOCKS_PER_SEC;
    printf("Run time for Version_4: %0.20f\n", runTime);
    return EXIT_SUCCESS;
}
/* placeholder driver for version 5 (not implemented yet) */
int version_5 (void) {
    printf("5\n");
    return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 6.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 6.0:
 * 6.0 Fewer-Threads Version: 1 * kernel + 1 * square device + 1 * diamond device (only the threads we need are active);
 * This version launches only as many active threads as there are vertices to update at each
 * level, instead of one active thread per height-map cell.
*/
/* square kernel for version 6: one active thread per square computes its centre.
 * Launch: 1D grid; only the first squareInRow^2 threads are active. */
__global__ void Square_6(curandState* rng, float* hm, int rect, float* check1, float* check2){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int squareInRow = SIZE/rect;
    /* only one thread per square is active */
    if (idx < squareInRow * squareInRow){
        int half = rect/2;
        int i, j, ni, nj, mi, mj;
        /* (i, j) lower corner, (ni, nj) opposite corner, (mi, mj) centre */
        i = (idx%squareInRow*rect)%SIZE;
        j = (idx/squareInRow*rect)%SIZE;
        ni = i + rect;
        nj = j + rect;
        mi = i + half;
        mj = j + half;
        /* debug output */
        check1[idx] = mi;
        check2[idx] = mj;
        /* uniform random perturbation in [-ROUGHNESS/2, ROUGHNESS/2) */
        float v1 = (0.0f - (float)ROUGHNESS)/2;
        float v2 = ((float)ROUGHNESS)/2;
        curandState localState = rng[idx];
        float rand = v1 + (v2 - v1) * curand_uniform(&localState);
        rng[idx] = localState;
        /* square step: centre = average of the four corners + noise */
        hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
        /* fix: removed a trailing __syncthreads() inside this divergent branch
         * (undefined behaviour; a no-op at kernel end anyway) */
    }
}
/* smart diamond kernel for version 6: one active thread per diamond vertex.
 * Launch: 1D grid; only the first 4*squareInRow^2 threads are active. */
__global__ void Diamond_6(curandState* rng, float* hm, int rect, float* check1, float* check2){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int squareInRow = SIZE/rect;
    /* four diamond vertices per square */
    if (idx < 4 * squareInRow * squareInRow){
        int half = rect/2;
        int i, j;
        int pmi, pmj;
        float hm_p;
        int num_p;
        /* origin (i, j) of the square this thread works on */
        i = (idx%squareInRow*rect)%SIZE;
        j = (idx/squareInRow*rect)%SIZE;
        /* tid in [0,3] selects which of the square's four diamond vertices to update */
        int tid = idx/(squareInRow*squareInRow)%4;
        pmi = i + (1 - tid%2)*half + tid/2*half;
        pmj = j + tid%2*half + tid/2*half;
        /* average the up-to-four neighbours that lie inside the (SIZE+1)^2 map */
        hm_p = 0;
        num_p = 0;
        if (pmi - half >= 0){
            hm_p += hm[(pmi-half) + pmj*(SIZE+1)];
            num_p++;
        }
        if (pmi + half <= SIZE){
            hm_p += hm[(pmi+half) + pmj*(SIZE+1)];
            num_p++;
        }
        if (pmj - half >= 0){
            hm_p += hm[pmi + (pmj-half)*(SIZE+1)];
            num_p++;
        }
        if (pmj + half <= SIZE){
            hm_p += hm[pmi + (pmj+half)*(SIZE+1)];
            num_p++;
        }
        /* debug output */
        check1[idx] = pmi;
        check2[idx] = pmj;
        /* uniform random perturbation in [-ROUGHNESS/2, ROUGHNESS/2) */
        float v1 = (0.0f - (float)ROUGHNESS)/2;
        float v2 = ((float)ROUGHNESS)/2;
        curandState localState = rng[idx];
        float rand = v1 + (v2 - v1) * curand_uniform(&localState);
        /* diamond step: neighbour average + noise */
        hm[pmi + pmj*(SIZE+1)] = hm_p/num_p + rand;
        rng[idx] = localState;
        /* fix: removed a trailing __syncthreads() inside this divergent branch
         * (undefined behaviour; a no-op at kernel end anyway) */
    }
}
/* host driver for version 6: square + diamond kernels that activate only the
 * threads actually needed at each level. Always prints the final map (this
 * version takes no `print` flag). Returns EXIT_SUCCESS. */
int version_6 (void) {
    printf("Version 6: square kernel + smart diamond kernel (active less threads) \n");
    /* host-side buffers (NOTE(review): float[N] on the stack may overflow for large N) */
    float check1[N];
    float check2[N];
    float heightMap[N];
    /* device-side buffers */
    float *dev_heightMap;
    float *dev_check1;
    float *dev_check2;
    /* wall-clock timing of the kernel loop */
    clock_t start, end;
    double runTime;
    /* start from a flat height map */
    for (int i=0; i<N; i++){
        heightMap[i] = 0;
    }
    /* seed the four corners of the (SIZE+1)x(SIZE+1) grid */
    heightMap[0 + 0 * (SIZE+1)] = CORNER;
    printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE+1)]);
    heightMap[SIZE + 0 * (SIZE+1)] = CORNER;
    printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE+1)]);
    heightMap[0 + SIZE * (SIZE+1)] = CORNER;
    printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE+1)]);
    heightMap[SIZE + SIZE * (SIZE+1)] = CORNER;
    printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE+1)]);
    curandState* rng;
    /* device allocations (return codes unchecked, as elsewhere in this file) */
    cudaMalloc(&rng, N * sizeof(curandState));
    cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
    cudaMalloc((void**)&dev_check1, N * sizeof(float));
    cudaMalloc((void**)&dev_check2, N * sizeof(float));
    /* upload the seeded height map; the check buffers are outputs only, so the
     * original copies of their uninitialized host contents were dropped */
    cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
    start = clock();
    /* diamond-square: the square size halves each iteration */
    for (int i=SIZE; i>1; i=i/2){
        Square_6<<<ceil((float)N/256),256>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
        Diamond_6<<<ceil((float)N/256),256>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
    }
    end = clock();
    /* download results */
    cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check1, dev_check1, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check2, dev_check2, N * sizeof(float), cudaMemcpyDeviceToHost);
    /* print the output (unconditional in this version) */
    for (int i=0; i<N; i++){
        printf("%d: x = %d, y = %d; hm = %f\n", i, i%(SIZE+1), i/(SIZE+1), heightMap[i]);
    }
    /* release device memory (fix: rng was previously leaked) */
    cudaFree(rng);
    cudaFree(dev_heightMap);
    cudaFree(dev_check1);
    cudaFree(dev_check2);
    runTime = (double)(end - start)/CLOCKS_PER_SEC;
    printf("Run time for Version_6: %0.20f\n", runTime);
    return EXIT_SUCCESS;
}
/* placeholder driver for version 7 (not implemented yet) */
int version_7 (void) {
    printf("7\n");
    return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 8.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 8.0:
 * 8.0 Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
 * This version restructures the diamond kernel to use different threads for different vertices. Each
 * thread in the diamond kernel only needs to calculate one vertex. (A simple revised 2D version of version 3)
*/
/* 2D square kernel for version 8: computes square-step centres.
 * Launch: 2D grid covering the (SIZE+1)x(SIZE+1) map. */
__global__ void Square_8(curandState* rng, float* hm, int rect, float* check1, float* check2){
    /* 2D launch: flatten the (x, y) thread coordinates into one index */
    int idx_temp = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (idx_temp < SIZE+1 && idy < SIZE+1){
        int idx = idy*(SIZE+1) + idx_temp;
        int half = rect/2;
        int i, j, ni, nj, mi, mj;
        int squareInRow = SIZE/rect;
        /* (i, j) lower corner, (ni, nj) opposite corner, (mi, mj) centre */
        i = (idx%squareInRow*rect)%SIZE;
        j = (idx/squareInRow*rect)%SIZE;
        ni = i + rect;
        nj = j + rect;
        mi = i + half;
        mj = j + half;
        /* debug output */
        check1[idx] = mi;
        check2[idx] = mj;
        /* uniform random perturbation in [-ROUGHNESS/2, ROUGHNESS/2) */
        float v1 = (0.0f - (float)ROUGHNESS)/2;
        float v2 = ((float)ROUGHNESS)/2;
        curandState localState = rng[idx];
        float rand = v1 + (v2 - v1) * curand_uniform(&localState);
        rng[idx] = localState;
        /* square step: centre = average of the four corners + noise */
        hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 +rand;
        /* fix: removed a trailing __syncthreads() inside this divergent branch
         * (undefined behaviour; a no-op at kernel end anyway) */
    }
}
/* 2D smart diamond kernel for version 8: one thread per diamond vertex.
 * Launch: 2D grid covering the (SIZE+1)x(SIZE+1) map. */
__global__ void Diamond_8(curandState* rng, float* hm, int rect, float* check1, float* check2){
    /* 2D launch: flatten the (x, y) thread coordinates into one index */
    int idx_temp = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (idx_temp < SIZE+1 && idy < SIZE+1){
        int idx = idy*(SIZE+1) + idx_temp;
        int half = rect/2;
        int i, j;
        int pmi, pmj;
        float hm_p;
        int num_p;
        int squareInRow = SIZE/rect;
        /* origin (i, j) of the square this thread works on */
        i = (idx%squareInRow*rect)%SIZE;
        j = (idx/squareInRow*rect)%SIZE;
        /* tid in [0,3] selects which of the square's four diamond vertices to update */
        int tid = idx/(squareInRow*squareInRow)%4;
        pmi = i + (1 - tid%2)*half + tid/2*half;
        pmj = j + tid%2*half + tid/2*half;
        /* average the up-to-four neighbours that lie inside the (SIZE+1)^2 map */
        hm_p = 0;
        num_p = 0;
        if (pmi - half >= 0){
            hm_p += hm[(pmi-half) + pmj*(SIZE+1)];
            num_p++;
        }
        if (pmi + half <= SIZE){
            hm_p += hm[(pmi+half) + pmj*(SIZE+1)];
            num_p++;
        }
        if (pmj - half >= 0){
            hm_p += hm[pmi + (pmj-half)*(SIZE+1)];
            num_p++;
        }
        if (pmj + half <= SIZE){
            hm_p += hm[pmi + (pmj+half)*(SIZE+1)];
            num_p++;
        }
        /* debug output */
        check1[idx] = pmi;
        check2[idx] = pmj;
        /* uniform random perturbation in [-ROUGHNESS/2, ROUGHNESS/2) */
        float v1 = (0.0f - (float)ROUGHNESS)/2;
        float v2 = ((float)ROUGHNESS)/2;
        curandState localState = rng[idx];
        float rand = v1 + (v2 - v1) * curand_uniform(&localState);
        /* diamond step: neighbour average + noise */
        hm[pmi + pmj*(SIZE+1)] = hm_p/num_p +rand;
        rng[idx] = localState;
        /* fix: removed a trailing __syncthreads() inside this divergent branch
         * (undefined behaviour; a no-op at kernel end anyway) */
    }
}
/* host driver for version 8: 2D launch, one square kernel + one smart diamond
 * kernel per level.
 * print      - dump the final height map to stdout
 * block_size - block edge of the 2D launch configuration
 * returns EXIT_SUCCESS */
int version_8 (bool print, int block_size) {
    printf("Version 8: square kernel + smart diamond kernel\n");
    /* host-side buffers (NOTE(review): float[N] on the stack may overflow for large N) */
    float check1[N];
    float check2[N];
    float heightMap[N];
    /* device-side buffers */
    float *dev_heightMap;
    float *dev_check1;
    float *dev_check2;
    /* wall-clock timing of the kernel loop */
    clock_t start, end;
    double runTime;
    /* start from a flat height map */
    for (int i=0; i<N; i++){
        heightMap[i] = 0;
    }
    /* seed the four corners with distinct test values (1..4, unlike the
     * CORNER constant used by the other versions) */
    heightMap[0 + 0 * (SIZE+1)] = 1;
    printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE+1)]);
    heightMap[SIZE + 0 * (SIZE+1)] = 2;
    printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE+1)]);
    heightMap[0 + SIZE * (SIZE+1)] = 3;
    printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE+1)]);
    heightMap[SIZE + SIZE * (SIZE+1)] = 4;
    printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE+1)]);
    curandState* rng;
    /* device allocations (return codes unchecked, as elsewhere in this file) */
    cudaMalloc(&rng, N * sizeof(curandState));
    cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
    cudaMalloc((void**)&dev_check1, N * sizeof(float));
    cudaMalloc((void**)&dev_check2, N * sizeof(float));
    /* upload the seeded height map; the check buffers are outputs only, so the
     * original copies of their uninitialized host contents were dropped */
    cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
    start = clock();
    /* 2D launch configuration covering the (SIZE+1)^2 map */
    dim3 DimGrid(ceil(((float)SIZE)/block_size),ceil(((float)SIZE)/block_size), 1);
    dim3 DimBlock(block_size,block_size,1);
    /* diamond-square: the square size halves each iteration */
    for (int i=SIZE; i>1; i=i/2){
        Square_8<<<DimGrid,DimBlock>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
        Diamond_8<<<DimGrid,DimBlock>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
    }
    end = clock();
    /* download results */
    cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check1, dev_check1, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check2, dev_check2, N * sizeof(float), cudaMemcpyDeviceToHost);
    if(print){
        for (int i=0; i<N; i++){
            printf("%d: x = %d, y = %d; hm = %f\n", i, i%(SIZE+1), i/(SIZE+1), heightMap[i]);
        }
    }
    /* release device memory (fix: rng was previously leaked) */
    cudaFree(rng);
    cudaFree(dev_heightMap);
    cudaFree(dev_check1);
    cudaFree(dev_check2);
    runTime = (double)(end - start)/CLOCKS_PER_SEC;
    printf("Run time for Version_8: %0.20f\n", runTime);
    return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 9.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 9.0:
 * 9.0 Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
 * This version restructures the diamond kernel to use different threads for different vertices. Each
 * thread in the diamond kernel only needs to calculate one vertex. (A simple revised 2D version of version 3)
*/
/* 2D square kernel for version 9: computes square-step centres.
 * Launch: 2D grid covering the (SIZE+1)x(SIZE+1) map. */
__global__ void Square_9(curandState* rng, float* hm, int rect, float* check1, float* check2){
    /* 2D launch: flatten the (x, y) thread coordinates into one index */
    int idx_temp = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (idx_temp < SIZE+1 && idy < SIZE+1){
        int idx = idy*(SIZE+1) + idx_temp;
        int half = rect/2;
        int i, j, ni, nj, mi, mj;
        int squareInRow = SIZE/rect;
        /* (i, j) lower corner, (ni, nj) opposite corner, (mi, mj) centre */
        i = (idx%squareInRow*rect)%SIZE;
        j = (idx/squareInRow*rect)%SIZE;
        ni = i + rect;
        nj = j + rect;
        mi = i + half;
        mj = j + half;
        /* debug output */
        check1[idx] = mi;
        check2[idx] = mj;
        /* uniform random perturbation in [-ROUGHNESS/2, ROUGHNESS/2) */
        float v1 = (0.0f - (float)ROUGHNESS)/2;
        float v2 = ((float)ROUGHNESS)/2;
        curandState localState = rng[idx];
        float rand = v1 + (v2 - v1) * curand_uniform(&localState);
        rng[idx] = localState;
        /* square step: centre = average of the four corners + noise */
        hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 +rand;
        /* fix: removed a trailing __syncthreads() inside this divergent branch
         * (undefined behaviour; a no-op at kernel end anyway) */
    }
}
/* 2D smart diamond kernel for version 9: one thread per diamond vertex.
 * Launch: 2D grid covering the (SIZE+1)x(SIZE+1) map. */
__global__ void Diamond_9(curandState* rng, float* hm, int rect, float* check1, float* check2){
    /* 2D launch: flatten the (x, y) thread coordinates into one index */
    int idx_temp = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (idx_temp < SIZE+1 && idy < SIZE+1){
        int idx = idy*(SIZE+1) + idx_temp;
        int half = rect/2;
        int i, j;
        int pmi, pmj;
        float hm_p;
        int num_p;
        int squareInRow = SIZE/rect;
        /* origin (i, j) of the square this thread works on */
        i = (idx%squareInRow*rect)%SIZE;
        j = (idx/squareInRow*rect)%SIZE;
        /* tid in [0,3] selects which of the square's four diamond vertices to update */
        int tid = idx/(squareInRow*squareInRow)%4;
        pmi = i + (1 - tid%2)*half + tid/2*half;
        pmj = j + tid%2*half + tid/2*half;
        /* average the up-to-four neighbours that lie inside the (SIZE+1)^2 map */
        hm_p = 0;
        num_p = 0;
        if (pmi - half >= 0){
            hm_p += hm[(pmi-half) + pmj*(SIZE+1)];
            num_p++;
        }
        if (pmi + half <= SIZE){
            hm_p += hm[(pmi+half) + pmj*(SIZE+1)];
            num_p++;
        }
        if (pmj - half >= 0){
            hm_p += hm[pmi + (pmj-half)*(SIZE+1)];
            num_p++;
        }
        if (pmj + half <= SIZE){
            hm_p += hm[pmi + (pmj+half)*(SIZE+1)];
            num_p++;
        }
        /* debug output */
        check1[idx] = pmi;
        check2[idx] = pmj;
        /* uniform random perturbation in [-ROUGHNESS/2, ROUGHNESS/2) */
        float v1 = (0.0f - (float)ROUGHNESS)/2;
        float v2 = ((float)ROUGHNESS)/2;
        curandState localState = rng[idx];
        float rand = v1 + (v2 - v1) * curand_uniform(&localState);
        /* diamond step: neighbour average + noise */
        hm[pmi + pmj*(SIZE+1)] = hm_p/num_p +rand;
        rng[idx] = localState;
        /* fix: removed a trailing __syncthreads() inside this divergent branch
         * (undefined behaviour; a no-op at kernel end anyway) */
    }
}
/* host driver for version 9: 2D launch, one square kernel + one smart diamond
 * kernel per level (same structure as version 8 but with the _9 kernels).
 * print      - dump the final height map to stdout
 * block_size - block edge of the 2D launch configuration
 * returns EXIT_SUCCESS */
int version_9 (bool print, int block_size) {
    /* fix: the banner and runtime label previously said "Version 8" */
    printf("Version 9: square kernel + smart diamond kernel\n");
    /* host-side buffers (NOTE(review): float[N] on the stack may overflow for large N) */
    float check1[N];
    float check2[N];
    float heightMap[N];
    /* device-side buffers */
    float *dev_heightMap;
    float *dev_check1;
    float *dev_check2;
    /* wall-clock timing of the kernel loop */
    clock_t start, end;
    double runTime;
    /* start from a flat height map */
    for (int i=0; i<N; i++){
        heightMap[i] = 0;
    }
    /* seed the four corners with distinct test values (1..4, unlike the
     * CORNER constant used by the other versions) */
    heightMap[0 + 0 * (SIZE+1)] = 1;
    printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE+1)]);
    heightMap[SIZE + 0 * (SIZE+1)] = 2;
    printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE+1)]);
    heightMap[0 + SIZE * (SIZE+1)] = 3;
    printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE+1)]);
    heightMap[SIZE + SIZE * (SIZE+1)] = 4;
    printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE+1)]);
    curandState* rng;
    /* device allocations (return codes unchecked, as elsewhere in this file) */
    cudaMalloc(&rng, N * sizeof(curandState));
    cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
    cudaMalloc((void**)&dev_check1, N * sizeof(float));
    cudaMalloc((void**)&dev_check2, N * sizeof(float));
    /* upload the seeded height map; the check buffers are outputs only, so the
     * original copies of their uninitialized host contents were dropped */
    cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
    start = clock();
    /* 2D launch configuration covering the (SIZE+1)^2 map */
    dim3 DimGrid(ceil(((float)SIZE)/block_size),ceil(((float)SIZE)/block_size), 1);
    dim3 DimBlock(block_size,block_size,1);
    /* diamond-square: the square size halves each iteration */
    for (int i=SIZE; i>1; i=i/2){
        Square_9<<<DimGrid,DimBlock>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
        Diamond_9<<<DimGrid,DimBlock>>>(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
        cudaDeviceSynchronize();
    }
    end = clock();
    /* download results */
    cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check1, dev_check1, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(check2, dev_check2, N * sizeof(float), cudaMemcpyDeviceToHost);
    if(print){
        for (int i=0; i<N; i++){
            printf("%d: x = %d, y = %d; hm = %f\n", i, i%(SIZE+1), i/(SIZE+1), heightMap[i]);
        }
    }
    /* release device memory (fix: rng was previously leaked) */
    cudaFree(rng);
    cudaFree(dev_heightMap);
    cudaFree(dev_check1);
    cudaFree(dev_check2);
    runTime = (double)(end - start)/CLOCKS_PER_SEC;
    printf("Run time for Version_9: %0.20f\n", runTime);
    return EXIT_SUCCESS;
}
|
1,355
|
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <algorithm>
#include <vector>
#include <iostream>
/* Check a CUDA runtime call: on failure, report file/line and propagate the
 * error code (usable only inside functions returning cudaError_t).
 * Fix: dropped the stray ';' after `while (0)` — it defeated the do/while(0)
 * idiom and broke uses such as `if (cond) CUDA_CALL(x); else ...`. */
#define CUDA_CALL(x) do { cudaError_t err = x; if (( err ) != cudaSuccess ){ \
printf ("Error \"%s\" at %s :%d \n" , cudaGetErrorString(err), \
__FILE__ , __LINE__ ) ; return err;\
}} while (0)
void deBoorMakeTridiag(std::vector<float> x, std::vector<float> y, float d0, float dn, std::vector<float> &a, std::vector<float> &b, std::vector<float> &c, std::vector<float> &r)
{
std::vector<float> dX(x.size() - 1);
std::vector<float> dY(y.size() - 1);
for (int i = 0; i < dX.size(); i++)
{
dX[i] = x[i + 1] - x[i];
dY[i] = y[i + 1] - y[i];
}
for (int i = 0; i < a.size(); i++)
{
a[i] = dX[i + 1];
b[i] = 2 * (dX[i] + dX[i + 1]);
c[i] = dX[i];
r[i] = 3 * ((dX[i] / dX[i + 1]) * dY[i + 1] + (dX[i + 1] / dX[i]) * dY[i]);
}
r[0] -= a[0] * d0;
r[r.size() - 1] -= c[c.size() - 1] * dn;
}
/* Serial LU (Thomas) solve of a tridiagonal system over rows [from, to).
 * Intended for a single-thread launch; the solution overwrites r, and the
 * a and b diagonals are modified in place by the elimination. */
__global__ void LU_tridiag(float* a, float* b, float* c, float* r, int from, int to)
{
    /* forward elimination */
    for (int k = from + 1; k < to; k++)
    {
        float m = a[k] / b[k - 1];
        a[k] = m;
        b[k] = b[k] - m * c[k - 1];
        r[k] = r[k] - m * r[k - 1];
    }
    /* back substitution */
    r[to - 1] = r[to - 1] / b[to - 1];
    for (int k = to - 2; k >= from; k--)
    {
        r[k] = (r[k] - c[k] * r[k + 1]) / b[k];
    }
}
/* Device-callable serial LU (Thomas) solve over rows [from, to); the solution
 * overwrites r, and the a and b diagonals are modified in place. */
__device__ void LU_tridiag_device(float* a, float* b, float* c, float* r, int from, int to)
{
    /* forward elimination */
    for (int k = from + 1; k < to; k++)
    {
        float m = a[k] / b[k - 1];
        a[k] = m;
        b[k] = b[k] - m * c[k - 1];
        r[k] = r[k] - m * r[k - 1];
    }
    /* back substitution */
    r[to - 1] = r[to - 1] / b[to - 1];
    for (int k = to - 2; k >= from; k--)
    {
        r[k] = (r[k] - c[k] * r[k + 1]) / b[k];
    }
}
/* Host reference implementation of the serial tridiagonal (Thomas) solve over
 * rows [from, to). a, b and c are taken by value so the caller's coefficient
 * vectors are untouched; the solution replaces the contents of r. */
void LU_CPU(std::vector<float> a, std::vector<float> b, std::vector<float> c, std::vector<float> &r, int from, int to)
{
    /* forward elimination */
    for (int k = from + 1; k < to; k++)
    {
        float m = a[k] / b[k - 1];
        a[k] = m;
        b[k] = b[k] - m * c[k - 1];
        r[k] = r[k] - m * r[k - 1];
    }
    /* back substitution */
    r[to - 1] = r[to - 1] / b[to - 1];
    for (int k = to - 2; k >= from; k--)
    {
        r[k] = (r[k] - c[k] * r[k + 1]) / b[k];
    }
}
/* Partition stage of the parallel tridiagonal solver: each thread owns a chunk
 * of pLength consecutive rows (the last chunk also absorbs `remainder` rows),
 * eliminates its interior rows, and emits a two-row reduced system into rows
 * i-1 and i of Va/Vb/Vc/Vr. The chunk's global boundary indices are saved in
 * Vindex for the final stage. Launched 1D with one thread per partition.
 * NOTE(review): the `size` parameter is unused in the active code — confirm
 * whether the commented-out `&& j < size` guard is still needed. */
__global__ void partitioning(float* a, float* b, float* c, float* r, float* Va, float* Vb, float* Vc, float* Vr, int* Vindex, int pLength, int size, int Vsize, int remainder) {
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	/* this thread fills reduced rows i-1 and i */
	int i = idx * 2 + 1;
	int myLength = pLength;
	/* j = global index of the first row of this thread's chunk */
	int j = idx * myLength;
	if (i == Vsize - 1) // if this is the last processor
	{
		/* the last chunk also takes the rows left over by integer division */
		myLength += remainder;
	}
	if (i < Vsize)
	{
		/*float *dev_a = new float[myLength];
		float *dev_b = new float[myLength];
		float *dev_c = new float[myLength];
		float *dev_r = new float[myLength];
		for (int k = 0; k < myLength; k++)
		{
			dev_a[k] = a[j + k];
			dev_b[k] = b[j + k];
			dev_c[k] = c[j + k];
			dev_r[k] = r[j + k];
		}*/
		/*float Vai = dev_a[1];
		float Vbi = dev_b[1];
		float Vci = dev_c[1];
		float Vri = dev_r[1];*/
		/* start the downward sweep from the chunk's second row */
		float Vai = a[j + 1];
		float Vbi = b[j + 1];
		float Vci = c[j + 1];
		float Vri = r[j + 1];
		/* record the chunk's first and last global rows for final_computations */
		Vindex[i - 1] = j;
		Vindex[i] = j + myLength - 1;
		int jInit = j; /* NOTE(review): assigned but never read */
		/* downward elimination: fold rows j+2 .. j+myLength-1 into (Vbi, Vci, Vri) */
		for (int k = 2; k < myLength; k++) /* && j < size*/
		{
			float alpha = Vbi / a[j + k];
			Vri -= alpha * r[j + k];
			Vbi = Vci - alpha * b[j + k];
			Vci = -alpha * c[j + k];
		}
		/* reduced row i: the chunk's lower boundary equation */
		Va[i] = Vai;
		Vb[i] = Vbi;
		Vc[i] = Vci;
		Vr[i] = Vri;
		i--;
		/* upward sweep: start from the chunk's second-to-last row */
		Vai = a[j + myLength - 2];
		Vbi = b[j + myLength - 2];
		Vci = c[j + myLength - 2];
		Vri = r[j + myLength - 2];
		/* upward elimination: fold rows j+myLength-3 .. j into (Vai, Vbi, Vri) */
		for (int k = myLength - 3; k >= 0; k--)
		{
			float beta = Vbi / c[j + k];
			Vri = Vri - beta * r[j + k];
			Vbi = Vai - beta * b[j + k];
			Vai = -beta * a[j + k];
		}
		/* reduced row i-1: the chunk's upper boundary equation */
		Va[i] = Vai;
		Vb[i] = Vbi;
		Vc[i] = Vci;
		Vr[i] = Vri;
		/*delete[] dev_a;
		delete[] dev_b;
		delete[] dev_c;
		delete[] dev_r;*/
	}
}
/* Final stage of the partitioned tridiagonal solve: write the reduced-system
 * solution Vr into the boundary rows of r, subtract its influence from the
 * adjacent interior rows, then re-solve each chunk's interior with a serial
 * LU (Thomas) sweep. Launched 1D with one thread per partition; thread with
 * base index i handles the boundary pair (i, i+1). */
__global__ void final_computations(float* a, float* b, float* c, float* r, float* Vr, int* Vindex, int Vsize)
{
	int i = (blockDim.x * blockIdx.x + threadIdx.x) * 2;
	if (i < Vsize)
	{
		/*int Vind = Vindex[i];
		int Vind1 = Vindex[i + 1];
		float Vri = Vr[i];
		float Vri1 = Vr[i + 1];*/
		/* the boundary rows take their solved values directly */
		r[Vindex[i]] = Vr[i];
		r[Vindex[i + 1]] = Vr[i+1];
		/* fold the now-known boundary values into the first/last interior rows */
		int idx1 = Vindex[i] + 1;
		r[idx1] -= a[idx1] * Vr[i];
		int idx2 = Vindex[i + 1] - 1;
		r[idx2] -= c[idx2] * Vr[i+1];
		/* serial solve of the interior rows [idx1, idx2] */
		LU_tridiag_device(a, b, c, r, idx1, idx2 + 1);
	}
}
cudaError_t austin_berndt_moulton(std::vector<float> a, std::vector<float> b, std::vector<float> c, std::vector<float> &r, int nOfParts)
{
int Vsize = nOfParts * 2;
std::vector<float> Va(Vsize);
std::vector<float> Vb(Vsize);
std::vector<float> Vc(Vsize);
std::vector<float> Vr(Vsize);
std::vector<int> Vindex(Vsize);
cudaEvent_t start, stop_malloc, stop_memcpy1, stop_partitioning, stop_seq, stop_final, stop_memcpy_final;
float time1 = 0.0;
float time2 = 0.0;
float time3 = 0.0;
float time4 = 0.0;
float time5 = 0.0;
float time6 = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop_malloc);
cudaEventCreate(&stop_memcpy1);
cudaEventCreate(&stop_partitioning);
cudaEventCreate(&stop_seq);
cudaEventCreate(&stop_final);
cudaEventCreate(&stop_memcpy_final);
cudaEventRecord(start);
cudaEventSynchronize(start);
float *dev_a = 0; CUDA_CALL(cudaMalloc((void**)&dev_a, a.size() * sizeof(float)));
float *dev_b = 0; CUDA_CALL(cudaMalloc((void**)&dev_b, b.size() * sizeof(float)));
float *dev_c = 0; CUDA_CALL(cudaMalloc((void**)&dev_c, c.size() * sizeof(float)));
float *dev_r = 0; CUDA_CALL(cudaMalloc((void**)&dev_r, r.size() * sizeof(float)));
float *dev_Va = 0; CUDA_CALL(cudaMalloc((void**)&dev_Va, Vsize * sizeof(float)));
float *dev_Vb = 0; CUDA_CALL(cudaMalloc((void**)&dev_Vb, Vsize * sizeof(float)));
float *dev_Vc = 0; CUDA_CALL(cudaMalloc((void**)&dev_Vc, Vsize * sizeof(float)));
float *dev_Vr = 0; CUDA_CALL(cudaMalloc((void**)&dev_Vr, Vsize * sizeof(float)));
int *dev_Vidx = 0; CUDA_CALL(cudaMalloc((void**)&dev_Vidx, Vsize * sizeof(int)));
cudaEventRecord(stop_malloc);
cudaEventSynchronize(stop_malloc);
cudaEventElapsedTime(&time1, start, stop_malloc);
CUDA_CALL(cudaMemcpy(dev_a, &a[0], a.size() * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_b, &b[0], b.size() * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_c, &c[0], c.size() * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(dev_r, &r[0], r.size() * sizeof(float), cudaMemcpyHostToDevice));
cudaEventRecord(stop_memcpy1);
cudaEventSynchronize(stop_memcpy1);
cudaEventElapsedTime(&time2, stop_malloc, stop_memcpy1);
int pLength = r.size() / nOfParts;
int remainder = r.size() - (pLength * nOfParts);
int threadsPerBlock = 128;
int numBlocks = (nOfParts + threadsPerBlock - 1) / threadsPerBlock;
CUDA_CALL(cudaSetDevice(0));
partitioning<<<numBlocks, threadsPerBlock>>>(dev_a, dev_b, dev_c, dev_r, dev_Va, dev_Vb, dev_Vc, dev_Vr, dev_Vidx, pLength, r.size(), Vr.size(), remainder);
CUDA_CALL(cudaGetLastError());
CUDA_CALL(cudaDeviceSynchronize());
cudaEventRecord(stop_partitioning);
cudaEventSynchronize(stop_partitioning);
cudaEventElapsedTime(&time3, stop_memcpy1, stop_partitioning);
CUDA_CALL(cudaMemcpy(&Va[0], dev_Va, Vsize * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(&Vb[0], dev_Vb, Vsize * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(&Vc[0], dev_Vc, Vsize * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(&Vr[0], dev_Vr, Vsize * sizeof(float), cudaMemcpyDeviceToHost));
LU_CPU(Va, Vb, Vc, Vr, 0, Vsize);
CUDA_CALL(cudaMemcpy(dev_Vr, &Vr[0], Vr.size() * sizeof(float), cudaMemcpyHostToDevice));
/*LU_tridiag<<<1, 1>>>(dev_Va, dev_Vb, dev_Vc, dev_Vr, 0, Vsize);
CUDA_CALL(cudaGetLastError());
CUDA_CALL(cudaDeviceSynchronize());*/
cudaEventRecord(stop_seq);
cudaEventSynchronize(stop_seq);
cudaEventElapsedTime(&time4, stop_partitioning, stop_seq);
final_computations<<<numBlocks, threadsPerBlock>>>(dev_a, dev_b, dev_c, dev_r, dev_Vr, dev_Vidx, Vr.size());
CUDA_CALL(cudaGetLastError());
cudaError_t err = cudaDeviceSynchronize();
if ((err) != cudaSuccess) {
printf("Error \"%s\" at %s :%d \n", cudaGetErrorString(err), __FILE__, __LINE__);
}
cudaEventRecord(stop_final);
cudaEventSynchronize(stop_final);
cudaEventElapsedTime(&time5, stop_seq, stop_final);
CUDA_CALL(cudaMemcpy(&r[0], dev_r, r.size() * sizeof(float), cudaMemcpyDeviceToHost));
cudaEventRecord(stop_memcpy_final);
cudaEventSynchronize(stop_memcpy_final);
cudaEventElapsedTime(&time6, stop_final, stop_memcpy_final);
std::cout << "malloc time: " << time1 << " ms" << std::endl;
std::cout << "memcpy time: " << time2 << " ms" << std::endl;
std::cout << "partit time: " << time3 << " ms" << std::endl;
std::cout << "sequen time: " << time4 << " ms" << std::endl;
std::cout << "fiinal time: " << time5 << " ms" << std::endl;
std::cout << "rescpy time: " << time6 << " ms" << std::endl;
std::cout << "sum time: " << time1+time2+time3+time4+time5+time6 << " ms" << std::endl;
std::cout << "============================" << std::endl;
return err;
}
// Host-side partitioned tridiagonal solver (CPU counterpart of the GPU path).
// The system (a = sub-diagonal, b = main diagonal, c = super-diagonal, r = RHS)
// is split into nOfParts partitions. Each partition is condensed by forward and
// backward elimination into two boundary ("interface") equations, gathered into
// the reduced system (Va, Vb, Vc, Vr); the reduced 2*nOfParts tridiagonal
// system is solved with LU_CPU, and the interior unknowns of every partition
// are then recovered with a per-partition LU_CPU sweep.
// Only r is modified for the caller; a, b, c are taken by value — presumably so
// the final per-partition LU_CPU sweeps cannot clobber the caller's
// coefficients (TODO: confirm against LU_CPU's signature).
void ABM_on_CPU(std::vector<float> a, std::vector<float> b, std::vector<float> c, std::vector<float> &r, int nOfParts) {
    int Vsize = nOfParts * 2;        // two interface equations per partition
    std::vector<float> Va(Vsize);
    std::vector<float> Vb(Vsize);
    std::vector<float> Vc(Vsize);
    std::vector<float> Vr(Vsize);
    std::vector<int> Vindex(Vsize);  // reduced row -> original row index
    int j = 1;                       // cursor into the full system
    int pLength = b.size() / nOfParts;                // rows per partition
    int remainder = b.size() - (pLength * nOfParts);  // leftover rows, absorbed by the last partition
    // One pass per partition; reduced rows i and i+1 belong to the same partition.
    for (int i = 0; i < Vb.size(); i += 2)
    {
        i++;  // build the second (forward-eliminated) reduced row first
        if (i == Vb.size() - 1)
        {
            pLength += remainder;  // last partition takes the remainder
        }
        // Seed the forward-eliminated interface equation with row j.
        Va[i] = a[j];
        Vb[i] = b[j];
        Vc[i] = c[j];
        Vr[i] = r[j];
        Vindex[i - 1] = j - 1;  // original index of this partition's first row
        int jInit = j - 1;
        j++;
        // Forward elimination across the partition interior.
        for (int k = 0; k < pLength - 2 && j < b.size(); k++, j++)
        {
            float alpha = Vb[i] / a[j];
            Vr[i] -= alpha * r[j];
            Vb[i] = Vc[i] - alpha * b[j];
            Vc[i] = -alpha * c[j];
        }
        i--;  // now build the first reduced row by backward elimination
        Va[i] = a[j - 2];
        Vb[i] = b[j - 2];
        Vc[i] = c[j - 2];
        Vr[i] = r[j - 2];
        Vindex[i + 1] = j - 1;  // original index of this partition's last row
        // Backward elimination towards the start of the partition.
        for (int k = j - 3; k >= jInit; k--)
        {
            float beta = Vb[i] / c[k];
            Vr[i] = Vr[i] - beta * r[k];
            Vb[i] = Va[i] - beta * b[k];
            Va[i] = -beta * a[k];
        }
        j++;
    }
    // Solve the reduced interface system.
    LU_CPU(Va, Vb, Vc, Vr, 0, Vsize);
    // Scatter the interface solutions back into the full solution vector.
    for (int i = 0; i < Vr.size(); i++)
    {
        r[Vindex[i]] = Vr[i];
    }
    // Recover interior unknowns: fold the now-known boundary values into the
    // neighbouring RHS entries, then solve each partition interior.
    for (int i = 0; i < Vr.size(); i += 2)
    {
        int idx1 = Vindex[i] + 1;
        r[idx1] -= a[idx1] * Vr[i];
        int idx2 = Vindex[i + 1] - 1;
        r[idx2] -= c[idx2] * Vr[i + 1];
        LU_CPU(a, b, c, r, idx1, idx2 + 1);
    }
}
// Driver: builds a random, diagonally dominant tridiagonal system, solves it
// once on the CPU (LU_CPU) and once on the GPU (austin_berndt_moulton), times
// both with CUDA events, and reports element-wise discrepancies between the
// two solutions.
int main()
{
    const int matrixSize = 500 * 1024;
    std::vector<float> a(matrixSize);
    std::vector<float> b(matrixSize);
    std::vector<float> c(matrixSize);
    std::vector<float> r(matrixSize);
    // (disabled de Boor spline test-data generator, kept for reference)
    //float d1 = 1, dr = -1;
    //float x1 = -4, xr = 4;
    //std::vector<float> X(matrixSize + 2);
    //std::vector<float> F(matrixSize + 2);
    //float h = (xr - x1) / (X.size() - 1);
    //// Data X, F:
    //X[0] = x1;
    //F[0] = 1 / (1 + 4 * X[0] * X[0]);
    //for (int i = 1; i < X.size(); i++)
    //{
    //	X[i] = X[i - 1] + h; F[i] = 1 / (1 + 4 * X[i] * X[i]);
    //}
    //deBoorMakeTridiag(X, F, d1, dr, a, b, c, r);
    srand(time(NULL));
    for (size_t i = 0; i < matrixSize; i++)
    {
        a[i] = rand() % 10 + 1;
        c[i] = rand() % 10 + 1;
        b[i] = a[i] + c[i] + 1 + rand() % 10; // must be diagonally dominant
        r[i] = rand() % 100;
    }
    // Second copy of the system for the CPU solve (both solvers modify r).
    std::vector<float> a2(matrixSize);
    std::vector<float> b2(matrixSize);
    std::vector<float> c2(matrixSize);
    std::vector<float> r2(matrixSize);
    a2 = a;
    b2 = b;
    c2 = c;
    r2 = r;
    cudaEvent_t start, stop_CPU, stop_GPU;
    float time1 = 0.0;
    float time2 = 0.0;
    cudaEventCreate(&start);
    cudaEventCreate(&stop_CPU);
    cudaEventCreate(&stop_GPU);
    cudaEventRecord(start);
    cudaEventSynchronize(start);
    // computing on CPU
    LU_CPU(a2, b2, c2, r2, 0, r.size());
    //ABM_on_CPU(a2, b2, c2, r2, 1024);
    cudaEventRecord(stop_CPU);
    cudaEventSynchronize(stop_CPU);
    cudaEventElapsedTime(&time1, start, stop_CPU);
    // computing on GPU
    cudaError_t cudaStatus = austin_berndt_moulton(a, b, c, r, 1024);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "GPU computing failed!\n");
        return 1;
    }
    cudaEventRecord(stop_GPU);
    cudaEventSynchronize(stop_GPU);
    cudaEventElapsedTime(&time2, stop_CPU, stop_GPU);
    std::cout << "CPU time:    " << time1 <<" ms" << std::endl;
    std::cout << "my GPU time: " << time2 << " ms" << std::endl << std::endl;
    // std::cout.precision(15);
    for (int i = 0; i < r.size(); i++)
    {
        // FIX: compare the magnitude of the difference. The original tested
        // the signed value, so any element where the GPU result exceeded the
        // CPU result (negative diff) was silently accepted.
        float diff = r[i] - r2[i];
        if (diff < 0) diff = -diff;
        // Tolerance is 1e-14 (the old comment claimed 10^-15). NOTE(review):
        // this is far below float precision for values of this magnitude, so
        // near-equal results will still be reported — confirm intent.
        if (diff > 0.00000000000001) {
            std::cout << "BACHA! rozdiel v " << i << " je presne " << diff << std::endl;
        }
    }
    /*std::cout << "R1: ";
    for (int i = 0; i < r.size(); i++)
    {
        std::cout << r[i] << ", ";
    }
    std::cout << std::endl << "R2: ";
    for (int i = 0; i < r.size(); i++)
    {
        std::cout << r2[i] << ", ";
    }
    std::cout << std::endl;*/
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
|
1,356
|
//Darrien Park
#include "cuda_runtime.h"
#include <string.h>
#include <stdio.h>
//no field in cudaDeviceProperties for number of cores. Therefore need to determine based on compute capability
// Map compute capability (major.minor) and SM count to the total number of
// CUDA cores. cudaDeviceProp has no cores-per-SM field, so this table must be
// kept in sync with NVIDIA's per-architecture documentation.
// Returns 0 (after printing a warning) for unknown architectures.
int getCores(cudaDeviceProp dev_prop)
{
    int cores = 0;
    int sm = dev_prop.multiProcessorCount;
    // Dispatch on the major compute capability.
    switch (dev_prop.major){
        //Fermi: 2.1 has 48 cores/SM, 2.0 has 32
        case 2:
            if (dev_prop.minor == 1)
                cores = sm * 48;
            else cores = sm * 32;
            break;
        //Kepler
        case 3:
            cores = sm * 192;
            break;
        //Maxwell
        case 5:
            cores = sm * 128;
            break;
        //Pascal: 6.0 -> 64 cores/SM, 6.1 and 6.2 -> 128 cores/SM
        // FIX: 6.2 (Jetson TX2) was previously reported as unknown.
        case 6:
            if (dev_prop.minor == 1 || dev_prop.minor == 2)
                cores = sm * 128;
            else if (dev_prop.minor == 0)
                cores = sm * 64;
            else printf("Unknown device type \n");
            break;
        //Volta (7.0, 7.2) and Turing (7.5): 64 FP32 cores/SM
        // FIX: 7.2 and 7.5 were previously reported as unknown.
        case 7:
            if (dev_prop.minor == 0 || dev_prop.minor == 2 || dev_prop.minor == 5)
                cores = sm * 64;
            else printf("Unknown device type \n");
            break;
        //Ampere: 8.0 -> 64 cores/SM; 8.6/8.7 (Ampere) and 8.9 (Ada) -> 128
        case 8:
            if (dev_prop.minor == 0)
                cores = sm * 64;
            else
                cores = sm * 128;
            break;
        //base case: can't be detected
        default:
            printf("Unknown device type \n");
            break;
    }
    return cores;
}
// Enumerate all CUDA devices and print a summary of each one's capabilities.
int main(int argc, char * argv[])
{
    int dev_count = 0;
    cudaGetDeviceCount(& dev_count);
    printf("Number of CUDA devices is [%d]\n\n",dev_count);
    for(int i = 0; i < dev_count; i++){
        int k = i+1;   // 1-based index for display only
        printf("Device [%d]\n", k);
        cudaDeviceProp dev_props;
        // FIX: query device i, not device 0 — the original printed the same
        // (first) GPU's properties for every device in the loop.
        cudaGetDeviceProperties(&dev_props, i);
        printf("	Device Name: %s\n",dev_props.name);
        printf("	Memory Clock Rate (KHz): %d\n",dev_props.memoryClockRate);
        printf("	Number of Streaming Multiprocessors: %d\n",dev_props.multiProcessorCount);
        printf("	Number of cores: %d\n",getCores(dev_props));
        printf("	Warp Size: %d\n",dev_props.warpSize);
        // FIX: totalGlobalMem, totalConstMem and sharedMemPerBlock are size_t;
        // printing them with %d truncates (and misaligns varargs on LP64).
        printf("	Total Global Memory: %zu\n",dev_props.totalGlobalMem);
        printf("	Total Constant Memory: %zu\n",dev_props.totalConstMem);
        printf("	Shared Memory/Block: %zu\n",dev_props.sharedMemPerBlock);
        printf("	Number of Registers/Block: %d\n",dev_props.regsPerBlock);
        printf("	Number of Threads/Block: %d\n",dev_props.maxThreadsPerBlock);
        // FIX: maxThreadsDim and maxGridSize are int[3]; the original passed
        // the array (a pointer) to %d. Print each component.
        printf("	Max Block Dimension: %d x %d x %d\n",
               dev_props.maxThreadsDim[0], dev_props.maxThreadsDim[1], dev_props.maxThreadsDim[2]);
        printf("	Max Grid Dimension: %d x %d x %d\n",
               dev_props.maxGridSize[0], dev_props.maxGridSize[1], dev_props.maxGridSize[2]);
    }
    return 0;
}
|
1,357
|
#include <iostream>
#include <curand.h>
using namespace std;
#include <curand.h>
// Owning wrapper for a device array of n floats filled with cuRAND uniform
// random numbers in (0, 1]. Frees the device memory on destruction.
struct random_d_array
{
    float *data;   // device pointer (do not dereference on the host)
    int n;         // element count
    random_d_array(int n) :n{n}
    {
        cudaMalloc((void**)&data, n*sizeof(float));
        curandGenerator_t gen;
        curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
        // Seed explicitly so runs are reproducible.
        curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
        curandGenerateUniform(gen, data, n);
        // FIX: destroy the generator — the original leaked one per array.
        curandDestroyGenerator(gen);
    }
    ~random_d_array()
    {
        // FIX: free the device allocation itself. The original called
        // cudaFree(&data), passing the address of the member (a host
        // pointer), which fails with cudaErrorInvalidValue and leaks the
        // device memory.
        cudaFree(data);
    }
};
// Elementwise product: d[i] = a[i] * b[i] for i in [0, n).
// One thread per element; the guard handles the grid tail when n is not a
// multiple of the block size.
__global__ void MyKernel(float *d, float *a, float *b, int n)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
        return;
    d[tid] = a[tid] * b[tid];
}
// Interactive occupancy demo: repeatedly reads a block size from stdin,
// reports the theoretical occupancy of MyKernel at that block size, and times
// five launches of the elementwise-multiply kernel, averaging the last four
// (the first launch is treated as warm-up).
int main()
{
    int numBlocks;        // max resident blocks per SM reported by the occupancy API
    int blockSize = 32;   // overwritten from user input each iteration
    int device;
    cudaDeviceProp prop;
    int activeWarps;
    int maxWarps;
    int N = 1024*1024;    // elements per vector
    // Device vectors pre-filled with cuRAND uniforms (see random_d_array).
    random_d_array a(N);
    random_d_array b(N);
    random_d_array d(N);  // destination of d = a * b
    string buffer;
    while(true)
    {
        cout << "Enter the block size or q to exit" << endl;
        cin >> buffer;
        if(buffer == "q") break;
        // NOTE(review): stoi throws std::invalid_argument for non-numeric
        // input other than "q" — confirm whether uncaught termination is
        // acceptable here.
        blockSize = stoi(buffer, nullptr);
        int gridSize = (N + blockSize - 1)/blockSize;  // ceil-div so all N elements are covered
        cout << "blockSize = " << blockSize << ", gridSize = " << gridSize << endl;
        cudaGetDevice(&device);
        cudaGetDeviceProperties(&prop, device);
        // Theoretical occupancy = resident warps / max warps per SM.
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, MyKernel, blockSize, 0);
        activeWarps = numBlocks * blockSize/prop.warpSize;
        maxWarps = prop.maxThreadsPerMultiProcessor/prop.warpSize;
        cout << "Occupancy: " << (double)activeWarps/maxWarps * 100 << "%" << endl;
        double average = 0.0;
        int iterations = 5;
        for(int i = 0; i < iterations; ++i)
        {
            // Time one launch; cudaEventSynchronize(stop) makes the
            // measurement include kernel completion.
            cudaEvent_t start, stop;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
            cudaEventRecord(start);
            MyKernel<<<gridSize,blockSize>>>(d.data, a.data, b.data, N);
            cudaEventRecord(stop);
            cudaEventSynchronize(stop);
            float milliseconds = 0;
            cudaEventElapsedTime(&milliseconds, start, stop);
            cudaEventDestroy(start);
            cudaEventDestroy(stop);
            cout << milliseconds << " ms" << endl;
            // Skip iteration 0: first launch includes warm-up overhead.
            if(i > 0)
                average += milliseconds;
        }
        average /= (iterations - 1);  // mean over the 4 timed iterations
        cout << "Average = " << average << endl;
    }
}
|
1,358
|
#include "includes.h"
// Initializes an N x N row-major matrix x: for rows idx < n, column 0 is set
// to f(idx) = 2*idx - idx^2; the remaining columns of every handled row are
// zeroed. Assumes the launch's logical row width equals NUM_OF_X_THREADS
// (the flat index is iy * NUM_OF_X_THREADS + ix) — TODO confirm the launch
// configuration matches, otherwise idx is mis-computed.
__global__ void inputKernel2(float *x, int n, int N)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x,i;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = iy * NUM_OF_X_THREADS + ix;  // flattened 2D thread index
    if (idx < N)
    {
        if (idx < n)
        {
            // Row idx, column 0: 2*idx - idx^2.
            x[idx*N] = ((float)idx * 2) - ((float)idx * (float)idx);
        }
        else
        {
            // NOTE(review): this writes x[idx], not x[idx*N] (row idx,
            // column 0) as the branch above does — looks like a bug;
            // confirm the intended addressing.
            x[idx] = 0;
        }
        // Zero the rest of row idx.
        for(i=1;i<N;i++)
        {
            x[idx*N + i] = 0;
        }
    }
}
|
1,359
|
#include "cuda.h"
#include "math.h"
#include "stdio.h"
#include "stdlib.h"
#include "thrust/reduce.h"
#define BLOCK_SIZE 512
#define ELEMS_PER_THREAD 32
// Warp-level tail of the block reduction: folds s_data[t .. t+63] into
// s_data[t]; the caller guards entry with (t < 32), so the first warp does
// this final 64 -> 1 step.
// FIX: on Volta (CC 7.0) and newer, threads of a warp no longer execute in
// implicit lockstep, so the original volatile-only version races between the
// read of s_data[t + off] and the concurrent write of s_data[t + off] by the
// neighbouring lane. __syncwarp() after each step restores correctness;
// volatile is kept so every store is committed to shared memory. The barriers
// are outside the (compile-time uniform) blockSize tests, so all 32 lanes
// reach them.
template <unsigned int blockSize>
__device__ void warpReduce(volatile double* s_data, unsigned int t) {
    if (blockSize >= 64) s_data[t] += s_data[t + 32];
    __syncwarp();
    if (blockSize >= 32) s_data[t] += s_data[t + 16];
    __syncwarp();
    if (blockSize >= 16) s_data[t] += s_data[t + 8];
    __syncwarp();
    if (blockSize >= 8) s_data[t] += s_data[t + 4];
    __syncwarp();
    if (blockSize >= 4) s_data[t] += s_data[t + 2];
    __syncwarp();
    if (blockSize >= 2) s_data[t] += s_data[t + 1];
}
// One pass of a tree reduction: each block sums a strided slice of d_in into
// shared memory, reduces it with a fully unrolled tree, and writes one
// partial sum per block to d_out[blockIdx.x].
// Requires blockSize * sizeof(double) bytes of dynamic shared memory and
// must be launched with blockDim.x == blockSize.
template <unsigned int blockSize>
__global__ void reductionKernel(double* d_in, double* d_out, unsigned int N) {
    extern __shared__ double s_data[];
    // Indexing
    unsigned int t = threadIdx.x;
    // Each block covers windows of 2*blockSize elements; the grid-sized
    // stride lets one thread accumulate many elements before the tree phase.
    unsigned int i = blockIdx.x * (blockSize * 2) + threadIdx.x;
    unsigned int gridSize = blockSize * 2 * gridDim.x;
    // Load some elements into shared memory (two per step while a full pair
    // is in range, then at most one tail element).
    s_data[t] = 0.f;
    while (i + blockSize < N) {
        s_data[t] += d_in[i] + d_in[i + blockSize];
        i += gridSize;
    }
    if (i < N) s_data[t] += d_in[i];
    __syncthreads();
    // Unrolled reduction tree; blockSize is a template constant, so the dead
    // branches are compiled out.
    if (blockSize >= 512) {
        if (t < 256) s_data[t] += s_data[t + 256];
        __syncthreads();
    }
    if (blockSize >= 256) {
        if (t < 128) s_data[t] += s_data[t + 128];
        __syncthreads();
    }
    if (blockSize >= 128) {
        if (t < 64) s_data[t] += s_data[t + 64];
        __syncthreads();
    }
    // Final 64 -> 1 step within the first warp.
    if (t < 32) warpReduce<blockSize>(s_data, t);
    // Write the result for each block into d_out
    if (t == 0) d_out[blockIdx.x] = s_data[0];
}
// Run the full multi-pass device reduction of h_in[0..N) and write the scalar
// result to *h_out. d_arr holds the pre-allocated per-level device buffers
// (level i has lengths[i] elements); dimBlock/dimGrid give the launch
// configuration per level and shared_size the dynamic shared memory per block.
// dur_ex receives the kernel-only time, dur_in the time including both
// host<->device copies (milliseconds).
void reductionDevice(double* h_in, double* h_out, double** d_arr,
                     unsigned int N, int tree_depth, unsigned int* lengths,
                     dim3* dimBlock, dim3* dimGrid, unsigned int& shared_size,
                     float& dur_ex, float& dur_in) {
    // Setup timing
    cudaEvent_t start_ex, end_ex, start_in, end_in;
    cudaEventCreate(&start_ex);
    cudaEventCreate(&end_ex);
    cudaEventCreate(&start_in);
    cudaEventCreate(&end_in);
    // Copy host array to device
    cudaEventRecord(start_in, 0);
    cudaMemcpy(d_arr[0], h_in, N * sizeof(double), cudaMemcpyHostToDevice);
    // Perform reduction on device, one kernel pass per tree level.
    cudaEventRecord(start_ex, 0);
    for (int i = 0; i < tree_depth; i++)
        reductionKernel<BLOCK_SIZE> <<<dimGrid[i], dimBlock[i], shared_size>>>
            (d_arr[i], d_arr[i + 1], lengths[i]);
    cudaEventRecord(end_ex, 0);
    cudaEventSynchronize(end_ex);
    // Copy the final scalar back to the host.
    cudaMemcpy(h_out, d_arr[tree_depth], sizeof(double), cudaMemcpyDeviceToHost);
    cudaEventRecord(end_in, 0);
    cudaEventSynchronize(end_in);
    // Calculate durations
    cudaEventElapsedTime(&dur_ex, start_ex, end_ex);
    cudaEventElapsedTime(&dur_in, start_in, end_in);
    // FIX: destroy the events. This function is called repeatedly from a
    // timing loop and the original leaked four events per invocation.
    cudaEventDestroy(start_ex);
    cudaEventDestroy(end_ex);
    cudaEventDestroy(start_in);
    cudaEventDestroy(end_in);
}
// Reduce h_in[0..N) with thrust::reduce, storing the sum in *h_out and the
// elapsed time (ms) in dur_thrust.
// NOTE(review): h_in is a raw host pointer, so thrust dispatches this to the
// host backend rather than the GPU — confirm that is intended for this
// comparison.
void reductionThrust(double* h_in, double* h_out, unsigned int N,
                     float& dur_thrust) {
    // Setup timing
    cudaEvent_t start_thrust, end_thrust;
    cudaEventCreate(&start_thrust);
    cudaEventCreate(&end_thrust);
    // Perform reduction using thrust
    cudaEventRecord(start_thrust, 0);
    *h_out = thrust::reduce(h_in, h_in + N);
    cudaEventRecord(end_thrust, 0);
    cudaEventSynchronize(end_thrust);
    // Calculate duration
    cudaEventElapsedTime(&dur_thrust, start_thrust, end_thrust);
    // FIX: destroy the events — the original leaked two per call and this
    // function runs inside a timing loop.
    cudaEventDestroy(start_thrust);
    cudaEventDestroy(end_thrust);
}
// Serial CPU reference reduction of h_in[0..N); the sum goes to *h_out and
// the elapsed time (ms, measured with CUDA events) to dur_cpu.
void reductionHost(double* h_in, double* h_out, unsigned int N,
                   float& dur_cpu) {
    // Setup timing
    cudaEvent_t start_cpu, end_cpu;
    cudaEventCreate(&start_cpu);
    cudaEventCreate(&end_cpu);
    // Perform reduction on host
    cudaEventRecord(start_cpu, 0);
    double result = 0.f;
    for (unsigned int i = 0; i < N; i++) result += h_in[i];
    *h_out = result;
    cudaEventRecord(end_cpu, 0);
    cudaEventSynchronize(end_cpu);
    // Calculate duration
    cudaEventElapsedTime(&dur_cpu, start_cpu, end_cpu);
    // FIX: destroy the events — the original leaked two per call and this
    // function runs inside a timing loop.
    cudaEventDestroy(start_cpu);
    cudaEventDestroy(end_cpu);
}
// Compare two scalar results within an absolute tolerance eps.
// Returns true when |*h_out - *h_ref| does not exceed eps.
bool checkResults(double* h_out, double* h_ref, double eps) {
    const double delta = fabs(*h_out - *h_ref);
    if (delta > eps) {
        return false;
    }
    return true;
}
// Allocate `size` bytes of pinned (page-locked) host memory.
// Exits the process with a message on failure, so the returned pointer is
// always valid. Release with cudaFreeHost.
double* allocateHostArray(unsigned int size) {
    double* host_ptr;
    if (cudaMallocHost(&host_ptr, size) != cudaSuccess) {
        printf("Memory allocation on the host was unsuccessful.\n");
        exit(EXIT_FAILURE);
    }
    return host_ptr;
}
// Allocate `size` bytes of device (global) memory.
// Exits the process with a message on failure, so the returned pointer is
// always valid. Release with cudaFree.
double* allocateDeviceArray(unsigned int size) {
    double* device_ptr;
    if (cudaMalloc(&device_ptr, size) != cudaSuccess) {
        printf("Memory allocation on the device was unsuccessful.\n");
        exit(EXIT_FAILURE);
    }
    return device_ptr;
}
// Print the command-line usage string and terminate with success status.
void exitUsage() {
    fputs("Usage: ./p2 [[M N] [dur_max]]\n", stdout);
    exit(EXIT_SUCCESS);
}
// Parse command-line arguments into N (element count), M (input values are
// drawn from [-M, M] in main) and dur_max (timing budget, milliseconds).
// Accepted forms: no arguments (defaults), two arguments, or three arguments
// where the third is the budget in seconds (converted to ms). Anything else
// prints usage and exits.
// NOTE(review): the usage string says "[[M N] [dur_max]]" but argv[1] is
// parsed into N and argv[2] into M — one of the two is wrong; confirm the
// intended order.
void parseInput(int argc, char** argv, unsigned int& N, unsigned int& M,
                float& dur_max) {
    if (argc == 1) {
        // Defaults: 50M elements, M = 5, 1 second budget.
        N = 50000000;
        M = 5;
        dur_max = 1000.f;
        return;
    }
    if (argc != 3 && argc != 4) exitUsage();
    if (sscanf(argv[1], "%u", &N) != 1) exitUsage();
    if (sscanf(argv[2], "%u", &M) != 1) exitUsage();
    if (argc == 3) {
        dur_max = 1000.f;
        return;
    }
    if (sscanf(argv[3], "%f", &dur_max) != 1) exitUsage();
    dur_max *= 1000;  // seconds -> milliseconds
}
// Benchmark driver: reduces a random N-element double array three ways
// (custom multi-pass CUDA kernel, thrust::reduce, serial CPU loop), repeats
// each method until the dur_max time budget is spent, validates the device
// and thrust results against the CPU reference, and prints configuration and
// timing statistics.
int main(int argc, char** argv) {
    // Parse command line arguments
    unsigned int N, M;
    float dur_max;
    parseInput(argc, argv, N, M, dur_max);
    // Allocate pinned host arrays (released with cudaFreeHost below).
    double* h_in = allocateHostArray(N * sizeof(double));
    double* h_device = allocateHostArray(sizeof(double));
    double* h_thrust = allocateHostArray(sizeof(double));
    double* h_cpu = allocateHostArray(sizeof(double));
    // Fill the input with reproducible uniform values in [-M, M].
    srand(73);
    for (unsigned int i = 0; i < N; i++)
        h_in[i] = ((double)rand() / RAND_MAX - 0.5f) * 2 * M;
    // Depth of the reduction tree: each pass shrinks the array by a factor of
    // BLOCK_SIZE * ELEMS_PER_THREAD.
    int tree_depth = 0;
    {
        unsigned int length = N;
        while (length > 1) {
            length = (length + (BLOCK_SIZE * ELEMS_PER_THREAD) - 1) /
                     (BLOCK_SIZE * ELEMS_PER_THREAD);
            tree_depth++;
        }
    }
    // Lengths of the intermediate device arrays at every tree level.
    unsigned int lengths[tree_depth + 1];
    lengths[0] = N;
    for (int i = 1; i < tree_depth + 1; i++)
        lengths[i] = (lengths[i - 1] + (BLOCK_SIZE * ELEMS_PER_THREAD) - 1) /
                     (BLOCK_SIZE * ELEMS_PER_THREAD);
    // Launch configuration per level: a level's grid size is the next level's
    // array length (one partial sum per block).
    dim3 dimBlock[tree_depth];
    dim3 dimGrid[tree_depth];
    for (int i = 0; i < tree_depth; i++) {
        dimBlock[i].x = BLOCK_SIZE;
        dimGrid[i].x = lengths[i + 1];
    }
    // Dynamic shared memory: one double per thread.
    unsigned int shared_size = BLOCK_SIZE * sizeof(double);
    // Allocate device arrays
    double* d_arr[tree_depth + 1];
    for (int i = 0; i < tree_depth + 1; i++)
        d_arr[i] = allocateDeviceArray(lengths[i] * sizeof(double));
    // Timing accumulators. FIX: the minima were initialised with 1e99, which
    // is not representable as a float (the conversion overflows to +inf and
    // draws a compiler warning); 1e30f is representable and still far above
    // any plausible duration.
    int nruns_device = 0;
    int nruns_thrust = 0;
    int nruns_cpu = 0;
    float dur_ex, dur_in, dur_thrust, dur_cpu;
    float dur_ex_total = 0.f;
    float dur_in_total = 0.f;
    float dur_thrust_total = 0.f;
    float dur_cpu_total = 0.f;
    float dur_ex_min = 1e30f;
    float dur_in_min = 1e30f;
    float dur_thrust_min = 1e30f;
    float dur_cpu_min = 1e30f;
    // Vector reduction on the device: repeat until the budget is spent.
    while (dur_in_total < dur_max) {
        nruns_device++;
        reductionDevice(h_in, h_device, d_arr, N, tree_depth, lengths, dimBlock,
                        dimGrid, shared_size, dur_ex, dur_in);
        dur_ex_total += dur_ex;
        dur_in_total += dur_in;
        if (dur_ex < dur_ex_min) dur_ex_min = dur_ex;
        if (dur_in < dur_in_min) dur_in_min = dur_in;
        if (dur_in_total <= 0.f) break;  // guard against a stuck timer
    }
    // Vector reduction on the device with thrust
    while (dur_thrust_total < dur_max) {
        nruns_thrust++;
        reductionThrust(h_in, h_thrust, N, dur_thrust);
        dur_thrust_total += dur_thrust;
        if (dur_thrust < dur_thrust_min) dur_thrust_min = dur_thrust;
        if (dur_thrust_total <= 0.f) break;
    }
    // Vector reduction on CPU
    while (dur_cpu_total < dur_max) {
        nruns_cpu++;
        reductionHost(h_in, h_cpu, N, dur_cpu);
        dur_cpu_total += dur_cpu;
        if (dur_cpu < dur_cpu_min) dur_cpu_min = dur_cpu;
        if (dur_cpu_total <= 0.f) break;
    }
    // Compare device and thrust results against the CPU reference.
    double eps = (double)M * 0.001f;
    bool passed_device = checkResults(h_device, h_cpu, eps);
    bool passed_thrust = checkResults(h_thrust, h_cpu, eps);
    if (passed_device)
        printf("Test PASSED (device)\n");
    else
        printf("Test FAILED (device)\n");
    if (passed_thrust)
        printf("Test PASSED (thrust)\n");
    else
        printf("Test FAILED (thrust)\n");
    // Print configuration. FIX: lengths[] holds unsigned ints -> %u.
    printf("N: %u\n", N);
    printf("M: %u\n", M);
    printf("Elements per thread: %d\n", ELEMS_PER_THREAD);
    printf("Tree depth: %d\n", tree_depth);
    printf("Block sizes: %dx%d", dimBlock[0].y, dimBlock[0].x);
    for (int i = 1; i < tree_depth; i++)
        printf(", %dx%d", dimBlock[i].y, dimBlock[i].x);
    printf("\n");
    printf("Grid sizes: %dx%d", dimGrid[0].y, dimGrid[0].x);
    for (int i = 1; i < tree_depth; i++)
        printf(", %dx%d", dimGrid[i].y, dimGrid[i].x);
    printf("\n");
    printf("GPU array lengths: %u", lengths[0]);
    for (int i = 1; i < tree_depth + 1; i++) printf(", %u", lengths[i]);
    printf("\n");
    printf("Result (Device): %24.14f\n", *h_device);
    printf("Result (Thrust): %24.14f\n", *h_thrust);
    printf("Result (CPU):    %24.14f\n", *h_cpu);
    // FIX: the "Average" column previously printed the duration of the *last*
    // run of each method; report the true mean (total / number of runs).
    printf("Timing results %12s %12s %8s\n", "Average", "Minimum", "Num_runs");
    printf("Device ex:     %12.6f %12.6f %8d\n", dur_ex_total / nruns_device,
           dur_ex_min, nruns_device);
    printf("Device in:     %12.6f %12.6f %8d\n", dur_in_total / nruns_device,
           dur_in_min, nruns_device);
    printf("Thrust:        %12.6f %12.6f %8d\n", dur_thrust_total / nruns_thrust,
           dur_thrust_min, nruns_thrust);
    printf("CPU:           %12.6f %12.6f %8d\n", dur_cpu_total / nruns_cpu,
           dur_cpu_min, nruns_cpu);
    printf("\n");
    // FIX: memory obtained with cudaMallocHost must be released with
    // cudaFreeHost; the original passed these pinned host pointers to
    // cudaFree, which is an error and leaks the pinned allocations.
    cudaFreeHost(h_in);
    cudaFreeHost(h_device);
    cudaFreeHost(h_thrust);
    cudaFreeHost(h_cpu);
    for (int i = 0; i < tree_depth + 1; i++) cudaFree(d_arr[i]);
    return 0;
}
|
1,360
|
//
// TP: Exploration de la machine
// Complter les TODOs
//
#include<iostream>
// Enumerate the CUDA-capable GPUs on this machine and print a few of each
// device's properties (name, compute capability, SM count, warp size).
int main (int argc, char ** argv) {
    // Number of CUDA-capable GPUs present.
    int devices_count = 0;
    cudaGetDeviceCount(&devices_count);
    std::cout << "Cette machine est équiée de " << devices_count << " GPU(s) supportant CUDA" << std::endl;
    // Read and print each GPU's properties.
    for (int device_index = 0; device_index < devices_count; ++device_index) {
        std::cout << "============" << std::endl << "GPU index: " << device_index << std::endl;
        cudaDeviceProp device_properties;
        cudaGetDeviceProperties(&device_properties, device_index);
        std::cout << "Nom du GPU: " << device_properties.name << std::endl;
        std::cout << "Compute Capability: " << device_properties.major << "." << device_properties.minor << std::endl;
        std::cout << "Nombre de SMs: " << device_properties.multiProcessorCount << std::endl;
        std::cout << "Taille du warp: " << device_properties.warpSize << std::endl;
        // TODO: Print additional properties.
        // TIP: See https://stackoverflow.com/a/32531982/3503855
    }
    // See the following link for more information on the "cudaDeviceProp" structure:
    // http://developer.download.nvidia.com/compute/cuda/3_2_prod/toolkit/docs/online/structcudaDeviceProp.html
    return 0;
}
|
1,361
|
// Considering the centroid of an FV triangle and the vertices of its
// auxiliary triangle, compute qmin = min(q) - qc and qmax = max(q) - qc,
// where min(q)/max(q) range over the four values (FV-triangle centroid plus
// the three auxiliary-triangle vertices) and qc is the centroid value.
// Inputs are the jumps:
//   dq0 = q(vertex0) - q(centroid), dq1 = q(vertex1) - q(vertex0),
//   dq2 = q(vertex2) - q(vertex0).
// Always returns 0.
__device__ int find_qmin_and_qmax(
    double dq0,
    double dq1,
    double dq2,
    double *qmin,
    double *qmax)
{
    // Jumps from the centroid to vertices 1 and 2.
    const double dq01 = dq0 + dq1;
    const double dq02 = dq0 + dq2;
    // Include 0.0 so the centroid itself participates in the extrema.
    *qmax = max(0.0, max(dq0, max(dq01, dq02)));
    *qmin = min(0.0, min(dq0, min(dq01, dq02)));
    return 0;
}
// Given provisional jumps dqv[0..2] from the FV-triangle centroid to its
// vertices, and the jumps qmin (qmax) between the centroid and the minimum
// (maximum) of the values at the centroid and the auxiliary-triangle
// vertices, compute a multiplicative limiter phi and scale dqv in place.
// Always returns 0.
__device__ int limit_gradient(
    double *dqv,
    double qmin,
    double qmax,
    double beta_w)
{
    int i;
    double r=1000.0, r0=1.0, phi=1.0;
    double TINY = 1.0e-100; // to avoid machine accuracy problems.
    // FIXME: Perhaps use the epsilon used elsewhere.
    // Any provisional jump with magnitude < TINY does not contribute to
    // the limiting process.
    // NOTE(review): when |dqv[i]| <= TINY, r0 retains its value from the
    // previous iteration (initially 1.0) instead of being skipped — confirm
    // this matches the reference implementation's intent.
    for (i=0;i<3;i++){
        if (dqv[i]<-TINY)
            r0=qmin/dqv[i];   // ratio limiting a negative jump
        if (dqv[i]>TINY)
            r0=qmax/dqv[i];   // ratio limiting a positive jump
        r=min(r0,r);
    }
    // Clamp to at most 1: the limiter never amplifies the provisional jumps.
    phi=min(r*beta_w,1.0);
    //phi=1.;
    dqv[0]=dqv[0]*phi;
    dqv[1]=dqv[1]*phi;
    dqv[2]=dqv[2]*phi;
    return 0;
}
// Computational routine
__global__ void _extrapolate_second_order_edge_sw(
//int number_of_elements,
int N,
int optimise_dry_cells,
int extrapolate_velocity_second_order,
double epsilon,
double minimum_allowed_height,
double beta_w,
double beta_w_dry,
double beta_uh,
double beta_uh_dry,
double beta_vh,
double beta_vh_dry,
long* surrogate_neighbours,
long* number_of_boundaries,
double* centroid_coordinates,
double* stage_centroid_values,
double* elevation_centroid_values,
double* xmom_centroid_values,
double* ymom_centroid_values,
double* edge_coordinates,
double* stage_edge_values,
double* elevation_edge_values,
double* xmom_edge_values,
double* ymom_edge_values,
double* stage_vertex_values,
double* elevation_vertex_values,
double* xmom_vertex_values,
double* ymom_vertex_values,
double * stage_centroid_store,
double * xmom_centroid_store,
double * ymom_centroid_store,
double * min_elevation_edgevalue,
double * max_elevation_edgevalue,
int * count_wet_neighbours
)
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
if ( k >= N)
return;
// Local variables
// Gradient vector used to calculate edge values from centroids
double a, b;
int k0, k1, k2, k3, k6, coord_index, i, ii, ktmp;
// Vertices of the auxiliary triangle
double x, y, x0, y0, x1, y1, x2, y2, xv0, yv0, xv1, yv1, xv2, yv2;
double dx1, dx2, dy1, dy2;
double dxv0, dxv1, dxv2, dyv0, dyv1, dyv2, dq0, dq1, dq2;
double area2, inv_area2;
double dqv[3], qmin, qmax, hmin, hmax, bedmax, stagemin;
double hc, h0, h1, h2, beta_tmp, hfactor, xtmp, ytmp;
double dk, dv0, dv1, dv2, de[3];
double demin, dcmax, r0scale, vect_norm, l1, l2;
//double *xmom_centroid_store, *ymom_centroid_store;
//double *stage_centroid_store, *min_elevation_edgevalue;
//double *max_elevation_edgevalue;
//int *count_wet_neighbours;
// Use malloc to avoid putting these variables on the stack,
// which can cause
// segfaults in large model runs
//xmom_centroid_store = malloc(number_of_elements*sizeof(double));
//ymom_centroid_store = malloc(number_of_elements*sizeof(double));
//stage_centroid_store = malloc(number_of_elements*sizeof(double));
//min_elevation_edgevalue = malloc(number_of_elements*sizeof(double));
//max_elevation_edgevalue = malloc(number_of_elements*sizeof(double));
//count_wet_neighbours = malloc(number_of_elements*sizeof(int));
if(extrapolate_velocity_second_order==1){
// Replace momentum centroid with velocity centroid 2 allow velocity
// extrapolation This will be changed back at the end of the routine
//for (k=0; k<number_of_elements; k++){
dk = max(stage_centroid_values[k]-elevation_centroid_values[k],
minimum_allowed_height);
xmom_centroid_store[k] = xmom_centroid_values[k];
xmom_centroid_values[k] = xmom_centroid_values[k]/dk;
ymom_centroid_store[k] = ymom_centroid_values[k];
ymom_centroid_values[k] = ymom_centroid_values[k]/dk;
min_elevation_edgevalue[k] = min(elevation_edge_values[3*k],
min(elevation_edge_values[3*k+1],
elevation_edge_values[3*k+2]));
max_elevation_edgevalue[k] = max(elevation_edge_values[3*k],
max(elevation_edge_values[3*k+1],
elevation_edge_values[3*k+2]));
//} // for k=0 to number_of_elements-1
}
// Count how many 'fully submerged' neighbours the cell has
//for(k=0; k<number_of_elements;k++){
count_wet_neighbours[k]=0;
for (i=0; i<3; i++){
ktmp = surrogate_neighbours[3*k+i];
if(stage_centroid_values[ktmp] > max_elevation_edgevalue[ktmp]){
count_wet_neighbours[k]+=1;
}
}
//} // for k=0 to number_of_elements-1
// Begin extrapolation routine
//for (k = 0; k < number_of_elements; k++) {
do{
k3=k*3;
k6=k*6;
if (number_of_boundaries[k]==3)
//if (0==0)
{
// No neighbours, set gradient on the triangle to zero
stage_edge_values[k3] = stage_centroid_values[k];
stage_edge_values[k3+1] = stage_centroid_values[k];
stage_edge_values[k3+2] = stage_centroid_values[k];
xmom_edge_values[k3] = xmom_centroid_values[k];
xmom_edge_values[k3+1] = xmom_centroid_values[k];
xmom_edge_values[k3+2] = xmom_centroid_values[k];
ymom_edge_values[k3] = ymom_centroid_values[k];
ymom_edge_values[k3+1] = ymom_centroid_values[k];
ymom_edge_values[k3+2] = ymom_centroid_values[k];
continue;
}
else
{
// Triangle k has one or more neighbours.
// Get centroid and edge coordinates of the triangle
// Get the edge coordinates
xv0 = edge_coordinates[k6];
yv0 = edge_coordinates[k6+1];
xv1 = edge_coordinates[k6+2];
yv1 = edge_coordinates[k6+3];
xv2 = edge_coordinates[k6+4];
yv2 = edge_coordinates[k6+5];
// Get the centroid coordinates
coord_index = 2*k;
x = centroid_coordinates[coord_index];
y = centroid_coordinates[coord_index+1];
// Store x- and y- differentials for the edges of
// triangle k relative to the centroid
dxv0 = xv0 - x;
dxv1 = xv1 - x;
dxv2 = xv2 - x;
dyv0 = yv0 - y;
dyv1 = yv1 - y;
dyv2 = yv2 - y;
// Compute the minimum distance from the centroid to an edge
//demin=min(dxv0*dxv0 +dyv0*dyv0,
// min(dxv1*dxv1+dyv1*dyv1, dxv2*dxv2+dyv2*dyv2));
//demin=sqrt(demin);
}
if (number_of_boundaries[k]<=1)
{
//==============================================
// Number of boundaries <= 1
//==============================================
// If no boundaries, auxiliary triangle is formed
// from the centroids of the three neighbours
// If one boundary, auxiliary triangle is formed
// from this centroid and its two neighbours
k0 = surrogate_neighbours[k3];
k1 = surrogate_neighbours[k3 + 1];
k2 = surrogate_neighbours[k3 + 2];
// Take note if the max neighbour bed elevation
// is greater than the min
// neighbour stage -- suggests a 'steep' bed relative to the flow
bedmax = max(elevation_centroid_values[k],
max(elevation_centroid_values[k0],
max(elevation_centroid_values[k1],
elevation_centroid_values[k2])));
//bedmax = elevation_centroid_values[k];
stagemin = min(max(stage_centroid_values[k],
elevation_centroid_values[k]),
min(max(stage_centroid_values[k0],
elevation_centroid_values[k0]),
min(max(stage_centroid_values[k1],
elevation_centroid_values[k1]),
max(stage_centroid_values[k2],
elevation_centroid_values[k2]))));
if(stagemin < bedmax){
// This will cause first order extrapolation
k2 = k;
k0 = k;
k1 = k;
}
// Get the auxiliary triangle's vertex coordinates
// (really the centroids of neighbouring triangles)
coord_index = 2*k0;
x0 = centroid_coordinates[coord_index];
y0 = centroid_coordinates[coord_index+1];
coord_index = 2*k1;
x1 = centroid_coordinates[coord_index];
y1 = centroid_coordinates[coord_index+1];
coord_index = 2*k2;
x2 = centroid_coordinates[coord_index];
y2 = centroid_coordinates[coord_index+1];
// Store x- and y- differentials for the vertices
// of the auxiliary triangle
dx1 = x1 - x0;
dx2 = x2 - x0;
dy1 = y1 - y0;
dy2 = y2 - y0;
// Calculate 2*area of the auxiliary triangle
// The triangle is guaranteed to be counter-clockwise
area2 = dy2*dx1 - dy1*dx2;
// Treat triangles with zero or 1 wet neighbours.
if ((area2 <= 0)) //|(count_wet_neighbours[k]==0))
{
//printf("Error negative triangle area \n");
//report_python_error(AT, "Negative triangle area");
//return -1;
stage_edge_values[k3] = stage_centroid_values[k];
stage_edge_values[k3+1] = stage_centroid_values[k];
stage_edge_values[k3+2] = stage_centroid_values[k];
// First order momentum / velocity extrapolation
xmom_edge_values[k3] = xmom_centroid_values[k];
xmom_edge_values[k3+1] = xmom_centroid_values[k];
xmom_edge_values[k3+2] = xmom_centroid_values[k];
ymom_edge_values[k3] = ymom_centroid_values[k];
ymom_edge_values[k3+1] = ymom_centroid_values[k];
ymom_edge_values[k3+2] = ymom_centroid_values[k];
continue;
}
// Calculate heights of neighbouring cells
hc = stage_centroid_values[k] - elevation_centroid_values[k];
h0 = stage_centroid_values[k0] - elevation_centroid_values[k0];
h1 = stage_centroid_values[k1] - elevation_centroid_values[k1];
h2 = stage_centroid_values[k2] - elevation_centroid_values[k2];
hmin = min(min(h0, min(h1, h2)), hc);
hfactor = 0.0;
//if (hmin > 0.001)
if (hmin > 0.)
//if (hc>0.0)
{
hfactor = 1.0 ;//hmin/(hmin + 0.004);
//hfactor=hmin/(hmin + 0.004);
}
//-----------------------------------
// stage
//-----------------------------------
// Calculate the difference between vertex 0 of the auxiliary
// triangle and the centroid of triangle k
dq0 = stage_centroid_values[k0] - stage_centroid_values[k];
// Calculate differentials between the vertices
// of the auxiliary triangle (centroids of neighbouring triangles)
dq1 = stage_centroid_values[k1] - stage_centroid_values[k0];
dq2 = stage_centroid_values[k2] - stage_centroid_values[k0];
inv_area2 = 1.0/area2;
// Calculate the gradient of stage on the auxiliary triangle
a = dy2*dq1 - dy1*dq2;
a *= inv_area2;
b = dx1*dq2 - dx2*dq1;
b *= inv_area2;
// Calculate provisional jumps in stage from the centroid
// of triangle k to its vertices, to be limited
dqv[0] = a*dxv0 + b*dyv0;
dqv[1] = a*dxv1 + b*dyv1;
dqv[2] = a*dxv2 + b*dyv2;
// Now we want to find min and max of the centroid and the
// vertices of the auxiliary triangle and compute jumps
// from the centroid to the min and max
find_qmin_and_qmax(dq0, dq1, dq2, &qmin, &qmax);
beta_tmp = beta_w_dry + (beta_w - beta_w_dry) * hfactor;
// Limit the gradient
limit_gradient(dqv, qmin, qmax, beta_tmp);
stage_edge_values[k3+0] = stage_centroid_values[k] + dqv[0];
stage_edge_values[k3+1] = stage_centroid_values[k] + dqv[1];
stage_edge_values[k3+2] = stage_centroid_values[k] + dqv[2];
//-----------------------------------
// xmomentum
//-----------------------------------
// Calculate the difference between vertex 0 of the auxiliary
// triangle and the centroid of triangle k
dq0 = xmom_centroid_values[k0] - xmom_centroid_values[k];
// Calculate differentials between the vertices
// of the auxiliary triangle
dq1 = xmom_centroid_values[k1] - xmom_centroid_values[k0];
dq2 = xmom_centroid_values[k2] - xmom_centroid_values[k0];
// Calculate the gradient of xmom on the auxiliary triangle
a = dy2*dq1 - dy1*dq2;
a *= inv_area2;
b = dx1*dq2 - dx2*dq1;
b *= inv_area2;
// Calculate provisional jumps in stage from the centroid
// of triangle k to its vertices, to be limited
dqv[0] = a*dxv0+b*dyv0;
dqv[1] = a*dxv1+b*dyv1;
dqv[2] = a*dxv2+b*dyv2;
// Now we want to find min and max of the centroid and the
// vertices of the auxiliary triangle and compute jumps
// from the centroid to the min and max
//
find_qmin_and_qmax(dq0, dq1, dq2, &qmin, &qmax);
beta_tmp = beta_uh_dry + (beta_uh - beta_uh_dry) * hfactor;
limit_gradient(dqv, qmin, qmax, beta_tmp);
for (i=0; i < 3; i++)
{
xmom_edge_values[k3+i] = xmom_centroid_values[k] + dqv[i];
}
//-----------------------------------
// ymomentum
//-----------------------------------
// Calculate the difference between vertex 0 of the auxiliary
// triangle and the centroid of triangle k
dq0 = ymom_centroid_values[k0] - ymom_centroid_values[k];
// Calculate differentials between the vertices
// of the auxiliary triangle
dq1 = ymom_centroid_values[k1] - ymom_centroid_values[k0];
dq2 = ymom_centroid_values[k2] - ymom_centroid_values[k0];
// Calculate the gradient of xmom on the auxiliary triangle
a = dy2*dq1 - dy1*dq2;
a *= inv_area2;
b = dx1*dq2 - dx2*dq1;
b *= inv_area2;
// Calculate provisional jumps in stage from the centroid
// of triangle k to its vertices, to be limited
dqv[0] = a*dxv0 + b*dyv0;
dqv[1] = a*dxv1 + b*dyv1;
dqv[2] = a*dxv2 + b*dyv2;
// Now we want to find min and max of the centroid and the
// vertices of the auxiliary triangle and compute jumps
// from the centroid to the min and max
//
find_qmin_and_qmax(dq0, dq1, dq2, &qmin, &qmax);
beta_tmp = beta_vh_dry + (beta_vh - beta_vh_dry) * hfactor;
limit_gradient(dqv, qmin, qmax, beta_tmp);
for (i=0;i<3;i++)
{
ymom_edge_values[k3 + i] = ymom_centroid_values[k] + dqv[i];
}
} // End number_of_boundaries <=1
else
{
//==============================================
// Number of boundaries == 2
//==============================================
// One internal neighbour and gradient
// is in direction of the neighbour's centroid
// Find the only internal neighbour (k1?)
for (k2 = k3; k2 < k3 + 3; k2++)
{
// Find internal neighbour of triangle k
// k2 indexes the edges of triangle k
if (surrogate_neighbours[k2] != k)
{
break;
}
}
if ((k2 == k3 + 3))
{
// If we didn't find an internal neighbour
//report_python_error(AT, "Internal neighbour not found");
//return -1;
return;
}
k1 = surrogate_neighbours[k2];
// The coordinates of the triangle are already (x,y).
// Get centroid of the neighbour (x1,y1)
coord_index = 2*k1;
x1 = centroid_coordinates[coord_index];
y1 = centroid_coordinates[coord_index + 1];
// Compute x- and y- distances between the centroid of
// triangle k and that of its neighbour
dx1 = x1 - x;
dy1 = y1 - y;
// Set area2 as the square of the distance
area2 = dx1*dx1 + dy1*dy1;
// Set dx2=(x1-x0)/((x1-x0)^2+(y1-y0)^2)
// and dy2=(y1-y0)/((x1-x0)^2+(y1-y0)^2) which
// respectively correspond to the x- and y- gradients
// of the conserved quantities
dx2 = 1.0/area2;
dy2 = dx2*dy1;
dx2 *= dx1;
//-----------------------------------
// stage
//-----------------------------------
// Compute differentials
dq1 = stage_centroid_values[k1] - stage_centroid_values[k];
// Calculate the gradient between the centroid of triangle k
// and that of its neighbour
a = dq1*dx2;
b = dq1*dy2;
// Calculate provisional edge jumps, to be limited
dqv[0] = a*dxv0 + b*dyv0;
dqv[1] = a*dxv1 + b*dyv1;
dqv[2] = a*dxv2 + b*dyv2;
// Now limit the jumps
if (dq1>=0.0)
{
qmin=0.0;
qmax=dq1;
}
else
{
qmin = dq1;
qmax = 0.0;
}
// Limit the gradient
limit_gradient(dqv, qmin, qmax, beta_w);
//for (i=0; i < 3; i++)
//{
stage_edge_values[k3] = stage_centroid_values[k] + dqv[0];
stage_edge_values[k3 + 1] = stage_centroid_values[k] + dqv[1];
stage_edge_values[k3 + 2] = stage_centroid_values[k] + dqv[2];
//}
//-----------------------------------
// xmomentum
//-----------------------------------
// Compute differentials
dq1 = xmom_centroid_values[k1] - xmom_centroid_values[k];
// Calculate the gradient between the centroid of triangle k
// and that of its neighbour
a = dq1*dx2;
b = dq1*dy2;
// Calculate provisional edge jumps, to be limited
dqv[0] = a*dxv0+b*dyv0;
dqv[1] = a*dxv1+b*dyv1;
dqv[2] = a*dxv2+b*dyv2;
// Now limit the jumps
if (dq1 >= 0.0)
{
qmin = 0.0;
qmax = dq1;
}
else
{
qmin = dq1;
qmax = 0.0;
}
// Limit the gradient
limit_gradient(dqv, qmin, qmax, beta_w);
//for (i=0;i<3;i++)
//xmom_edge_values[k3] = xmom_centroid_values[k] + dqv[0];
//xmom_edge_values[k3 + 1] = xmom_centroid_values[k] + dqv[1];
//xmom_edge_values[k3 + 2] = xmom_centroid_values[k] + dqv[2];
for (i = 0; i < 3;i++)
{
xmom_edge_values[k3 + i] = xmom_centroid_values[k] + dqv[i];
}
//-----------------------------------
// ymomentum
//-----------------------------------
// Compute differentials
dq1 = ymom_centroid_values[k1] - ymom_centroid_values[k];
// Calculate the gradient between the centroid of triangle k
// and that of its neighbours a = dq1*dx2;
b = dq1*dy2;
// Calculate provisional edge jumps, to be limited
dqv[0] = a*dxv0 + b*dyv0;
dqv[1] = a*dxv1 + b*dyv1;
dqv[2] = a*dxv2 + b*dyv2;
// Now limit the jumps
if (dq1>=0.0)
{
qmin = 0.0;
qmax = dq1;
}
else
{
qmin = dq1;
qmax = 0.0;
}
// Limit the gradient
limit_gradient(dqv, qmin, qmax, beta_w);
for (i=0;i<3;i++)
{
ymom_edge_values[k3 + i] = ymom_centroid_values[k] + dqv[i];
}
} // else [number_of_boundaries==2]
} while(k < 0);
//} // for k=0 to number_of_elements-1
// Compute vertex values of quantities
//for (k=0; k<number_of_elements; k++){
k3=3*k;
// Compute stage vertex values
stage_vertex_values[k3] = stage_edge_values[k3+1] + stage_edge_values[k3+2] -stage_edge_values[k3] ;
stage_vertex_values[k3+1] = stage_edge_values[k3] + stage_edge_values[k3+2]-stage_edge_values[k3+1];
stage_vertex_values[k3+2] = stage_edge_values[k3] + stage_edge_values[k3+1]-stage_edge_values[k3+2];
// Compute xmom vertex values
xmom_vertex_values[k3] = xmom_edge_values[k3+1] + xmom_edge_values[k3+2] -xmom_edge_values[k3] ;
xmom_vertex_values[k3+1] = xmom_edge_values[k3] + xmom_edge_values[k3+2]-xmom_edge_values[k3+1];
xmom_vertex_values[k3+2] = xmom_edge_values[k3] + xmom_edge_values[k3+1]-xmom_edge_values[k3+2];
// Compute ymom vertex values
ymom_vertex_values[k3] = ymom_edge_values[k3+1] + ymom_edge_values[k3+2] -ymom_edge_values[k3] ;
ymom_vertex_values[k3+1] = ymom_edge_values[k3] + ymom_edge_values[k3+2]-ymom_edge_values[k3+1];
ymom_vertex_values[k3+2] = ymom_edge_values[k3] + ymom_edge_values[k3+1]-ymom_edge_values[k3+2];
// If needed, convert from velocity to momenta
if(extrapolate_velocity_second_order==1){
//Convert velocity back to momenta at centroids
xmom_centroid_values[k] = xmom_centroid_store[k];
ymom_centroid_values[k] = ymom_centroid_store[k];
// Re-compute momenta at edges
for (i=0; i<3; i++){
de[i] = max(stage_edge_values[k3+i]-elevation_edge_values[k3+i],0.0);
xmom_edge_values[k3+i]=xmom_edge_values[k3+i]*de[i];
ymom_edge_values[k3+i]=ymom_edge_values[k3+i]*de[i];
}
// Re-compute momenta at vertices
for (i=0; i<3; i++){
de[i] = max(stage_vertex_values[k3+i]-elevation_vertex_values[k3+i],0.0);
xmom_vertex_values[k3+i]=xmom_vertex_values[k3+i]*de[i];
ymom_vertex_values[k3+i]=ymom_vertex_values[k3+i]*de[i];
}
}
//} // for k=0 to number_of_elements-1
}
|
1,362
|
#include "includes.h"
// One-sided nearest-neighbour search (a Chamfer-distance building block):
// for each of the n query points in xyz (b batches, packed [b][n][3]) find
// the squared Euclidean distance to the closest of the m reference points
// in xyz2 ([b][m][3]) from the same batch.
//   result[i*n+j]   <- minimum squared distance for query j of batch i
//   result_i[i*n+j] <- index (into batch i of xyz2) of that nearest point
// Launch layout: blockIdx.x strides over batches; blockIdx.y and
// threadIdx.x together stride over query points.
__global__ void SidedDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
    // Reference points are staged through shared memory in chunks of 512.
    const int batch=512;
    __shared__ float buf[batch*3];
    for (int i=blockIdx.x;i<b;i+=gridDim.x){
        for (int k2=0;k2<m;k2+=batch){
            // Number of reference points actually present in this chunk.
            int end_k=min(m,k2+batch)-k2;
            // Cooperative load of the chunk (x,y,z interleaved) into shared memory.
            for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
                buf[j]=xyz2[(i*m+k2)*3+j];
            }
            __syncthreads(); // chunk fully staged before any thread scans it
            for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
                // Coordinates of this thread's query point.
                float x1=xyz[(i*n+j)*3+0];
                float y1=xyz[(i*n+j)*3+1];
                float z1=xyz[(i*n+j)*3+2];
                // best/best_i track the minimum over THIS chunk only; the
                // cross-chunk minimum is merged into result[] at the bottom.
                int best_i=0;
                float best=0;
                // Scan the chunk 4 points at a time (manual unroll); the
                // remainder (end_k mod 4) is handled by the tail loop below.
                int end_ka=end_k-(end_k&3);
                if (end_ka==batch){
                    // Full chunk: the trip count is the compile-time constant
                    // 512, which keeps the unrolled loop free of tail checks.
                    for (int k=0;k<batch;k+=4){
                        {
                            float x2=buf[k*3+0]-x1;
                            float y2=buf[k*3+1]-y1;
                            float z2=buf[k*3+2]-z1;
                            float d=x2*x2+y2*y2+z2*z2;
                            // k==0 seeds `best` with the chunk's first candidate.
                            if (k==0 || d<best){
                                best=d;
                                best_i=k+k2;
                            }
                        }
                        {
                            float x2=buf[k*3+3]-x1;
                            float y2=buf[k*3+4]-y1;
                            float z2=buf[k*3+5]-z1;
                            float d=x2*x2+y2*y2+z2*z2;
                            if (d<best){
                                best=d;
                                best_i=k+k2+1;
                            }
                        }
                        {
                            float x2=buf[k*3+6]-x1;
                            float y2=buf[k*3+7]-y1;
                            float z2=buf[k*3+8]-z1;
                            float d=x2*x2+y2*y2+z2*z2;
                            if (d<best){
                                best=d;
                                best_i=k+k2+2;
                            }
                        }
                        {
                            float x2=buf[k*3+9]-x1;
                            float y2=buf[k*3+10]-y1;
                            float z2=buf[k*3+11]-z1;
                            float d=x2*x2+y2*y2+z2*z2;
                            if (d<best){
                                best=d;
                                best_i=k+k2+3;
                            }
                        }
                    }
                }else{
                    // Partial chunk: same 4x-unrolled body, runtime trip count.
                    for (int k=0;k<end_ka;k+=4){
                        {
                            float x2=buf[k*3+0]-x1;
                            float y2=buf[k*3+1]-y1;
                            float z2=buf[k*3+2]-z1;
                            float d=x2*x2+y2*y2+z2*z2;
                            if (k==0 || d<best){
                                best=d;
                                best_i=k+k2;
                            }
                        }
                        {
                            float x2=buf[k*3+3]-x1;
                            float y2=buf[k*3+4]-y1;
                            float z2=buf[k*3+5]-z1;
                            float d=x2*x2+y2*y2+z2*z2;
                            if (d<best){
                                best=d;
                                best_i=k+k2+1;
                            }
                        }
                        {
                            float x2=buf[k*3+6]-x1;
                            float y2=buf[k*3+7]-y1;
                            float z2=buf[k*3+8]-z1;
                            float d=x2*x2+y2*y2+z2*z2;
                            if (d<best){
                                best=d;
                                best_i=k+k2+2;
                            }
                        }
                        {
                            float x2=buf[k*3+9]-x1;
                            float y2=buf[k*3+10]-y1;
                            float z2=buf[k*3+11]-z1;
                            float d=x2*x2+y2*y2+z2*z2;
                            if (d<best){
                                best=d;
                                best_i=k+k2+3;
                            }
                        }
                    }
                }
                // Tail loop: remaining 0-3 points of the chunk.
                for (int k=end_ka;k<end_k;k++){
                    float x2=buf[k*3+0]-x1;
                    float y2=buf[k*3+1]-y1;
                    float z2=buf[k*3+2]-z1;
                    float d=x2*x2+y2*y2+z2*z2;
                    if (k==0 || d<best){
                        best=d;
                        best_i=k+k2;
                    }
                }
                // Merge this chunk's minimum into the global per-point result;
                // k2==0 unconditionally seeds it on the first chunk.
                if (k2==0 || result[(i*n+j)]>best){
                    result[(i*n+j)]=best;
                    result_i[(i*n+j)]=best_i;
                }
            }
            __syncthreads(); // all readers done before the next chunk overwrites buf
        }
    }
}
|
1,363
|
#include "includes.h"
// Zero out `size` floats, one element per thread, with a guard for the
// trailing threads of the last block.
__global__ void set_zero_kernel(float *src, int size)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    src[idx] = 0;
}
|
1,364
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
//#include "bmp.h"
extern "C" void write_bmp(unsigned char* data, int width, int height);
extern "C" unsigned char* read_bmp(char* filename);
//#include "host_blur.h"
extern "C" void host_blur(unsigned char* inputImage, unsigned char* outputImage, int size);
// Report the number of CUDA devices, then select device 0 and print its
// compute capability and name.
void print_properties(){
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    printf("Device count: %d\n", deviceCount);
    cudaSetDevice(0);
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);
    printf("Compute capability: %d.%d\n", props.major, props.minor);
    printf("Name: %s\n", props.name);
    printf("\n\n");
}
// 3x3 box blur on a 512x512 single-channel image; one thread per pixel.
// The one-pixel border is left untouched (caller should pre-clear output).
// Fixes vs. original:
//  - the guard tested `i != 512`, which never fires (i <= 511 for the
//    64x8 launch), so i==511 / j==511 read one row/column past the image;
//  - the sum was accumulated with `+=` directly into the unsigned char
//    output, truncating every partial sum.
__global__ void device_blur(unsigned char* input, unsigned char* output) {
    uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
    uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (i == 0 || j == 0 || i >= 511 || j >= 511)
        return;
    // Accumulate the 3x3 neighbourhood in float, round once at the end.
    float sum = 0.0f;
    for (int k = -1; k <= 1; k++) {
        for (int l = -1; l <= 1; l++) {
            sum += input[(i + k) * 512 + (j + l)];
        }
    }
    output[i * 512 + j] = (unsigned char)(sum / 9.0f);
}
// Load peppers.bmp, blur it on the GPU, and write the result back out.
int main(int argc,char **argv) {
    // Prints some device properties, also to make sure the GPU works etc.
    print_properties();

    unsigned char* A = read_bmp("peppers.bmp");
    unsigned char* B = (unsigned char*)malloc(sizeof(unsigned char) * 512 * 512);

    const size_t imageBytes = sizeof(unsigned char) * 512 * 512;

    // Allocate buffers for the input image and the output image on the device
    unsigned char* A_device;
    cudaMalloc((void**)&A_device, imageBytes);
    unsigned char* B_device;
    cudaMalloc((void**)&B_device, imageBytes);

    // Transfer the input image from the host to the device.
    cudaMemcpy(A_device, A, imageBytes, cudaMemcpyHostToDevice);
    // Fix: the original uploaded the UNINITIALIZED host buffer B instead;
    // zero the device output so border pixels the kernel skips are defined.
    cudaMemset(B_device, 0, imageBytes);

    // Launch the blur kernel: 64x64 blocks of 8x8 threads = 1 thread/pixel.
    dim3 grid(64, 64);
    dim3 block(8, 8);
    device_blur<<<grid, block>>>(A_device, B_device);
    // Surface launch errors instead of silently writing garbage.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "device_blur launch failed: %s\n", cudaGetErrorString(err));

    // Blocking copy back to the host (also synchronizes with the kernel).
    cudaMemcpy(B, B_device, imageBytes, cudaMemcpyDeviceToHost);
    write_bmp(B, 512, 512);

    // Fix: the original leaked both device buffers.
    cudaFree(A_device);
    cudaFree(B_device);
    free(A);
    free(B);
    return 0;
}
|
1,365
|
#include "includes.h"
// Element-wise vector add: c[i] = a[i] + b[i].
// One thread per element; assumes a single block sized to the array length.
__global__ void addKernel(int *c, int *a,int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
|
1,366
|
// SPFA-style single-source shortest paths, one thread block relaxing edges
// until no distance improves. V is the CSR row-offset array, E/W the edge
// targets/weights, *n the vertex count; dist is relaxed in place via
// atomicMin and visit marks vertices whose distance improved.
// NOTE(review): assumes a single-block launch — QuickExit is block-shared.
__global__ void spfaKernelForSSSP(int *V, int *E, int *W, int *n, bool *visit,int *dist){
    int old=0, u, v;
    __shared__ int QuickExit;
    const int threadId = threadIdx.z*(blockDim.x * blockDim.y)+ threadIdx.y* blockDim.x+ threadIdx.x;
    const int blockSize =blockDim.x * blockDim.y * blockDim.z;
    while(1)/* this while can solve a sssp */
    {
        u = threadId;
        if (threadId == 0)
            QuickExit = 0;
        // Fix: barrier after resetting the flag. Without it a fast thread
        // could set QuickExit = 1 inside the relaxation loop before a slow
        // thread executed the reset, losing the "work remains" signal and
        // terminating the whole block one round early.
        __syncthreads();
        while(u < (*n))
        {
            for(int adj = V[u]; adj < V[u+1]; adj++)
            {
                v = E[adj];
                old = atomicMin( &dist[v] , dist[u] + W[adj]);
                // Fix: compare against the value we offered to atomicMin.
                // The original re-read dist[v], which another thread may
                // already have lowered further, masking our own update.
                if(old > dist[u] + W[adj])
                {
                    QuickExit=1;
                    visit[v]=1;
                }
            }
            u+=blockSize;
        }
        __syncthreads();
        if(QuickExit==0){
            break;
        }
    }
}
|
1,367
|
#include <stdio.h>
#include <stdlib.h>
#define SIZE 1024
/* must use .cu otherwise .c and .cpp will send to host compiler and global would have issues */
// c = a + b element-wise; one thread per element, threads past n do nothing.
// Launched with a single block, so only threadIdx.x is used for indexing.
__global__ void VectorAdd(int *a, int *b, int *c, int n) {
    const int idx = threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
// Host driver: fill two SIZE-element vectors, add them on the GPU, and
// print the first ten results.
int main(int argc, char *argv[])
{
    // Echo the optional first CLI argument (not otherwise used).
    int noOfRun;
    if (argc > 1)
    {
        noOfRun = atoi(argv[1]);
        printf("\nargv[1] in intger=%d\n\n", noOfRun);
    }

    const size_t bytes = SIZE * sizeof(int);

    // Host buffers.
    int *a = (int *)malloc(bytes);
    int *b = (int *)malloc(bytes);
    int *c = (int *)malloc(bytes);

    // Device mirrors.
    int *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);

    // a[i] = i, b[i] = i + 1, c cleared.
    for (int i = 0; i < SIZE; ++i)
    {
        a[i] = i;
        b[i] = i + 1;
        c[i] = 0;
    }

    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, c, bytes, cudaMemcpyHostToDevice);

    // One block of SIZE threads, one element each.
    VectorAdd<<<1, SIZE>>>(d_a, d_b, d_c, SIZE);

    cudaMemcpy(a, d_a, bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(b, d_b, bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);

    for (int i = 0; i < 10; ++i)
        printf("host = %d: a[%d] + b[%d] = %d + %d = c[%d] = %d\n", i, i, i, a[i], b[i], i, c[i]);

    /* you cannot directly address the gpu memory !!!
    for (int i = 0; i < 10; ++i)
        printf("device = %d: d_a[%d] + d_b[%d] = %d + %d = d_c[%d] = %d\n", i, i, i, d_a[i], d_b[i], i, d_c[i]); */

    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
/*
#include <stdio.h>
#include <stdlib.h>
#define SIZE 1024
void VectorAdd(int *a, int *b, int *c, int n) {
int i;
for (i = 0; i < n; ++i)
c[i] = a[i] + b[i];
}
int main(int argc, char *argv[])
{
int noOfRun;
if (argc > 1)
{
noOfRun = atoi(argv[1]);
printf("\nargv[1] in intger=%d\n\n", noOfRun);
}
// use SIZE here instead of noofRun
int *a, *b, *c;
a = (int *)malloc(SIZE * sizeof(int));
b = (int *)malloc(SIZE * sizeof(int));
c = (int *)malloc(SIZE * sizeof(int));
for (int i = 0; i < SIZE; ++i)
{
a[i] = i;
b[i] = i + 1;
c[i] = 0;
}
VectorAdd(a, b, c, SIZE);
for (int i = 0; i < 10; ++i)
printf("%d: a[%d] + b[%d] = %d + %d = c[%d] = %d\n", i, i, i, a[i], b[i], i, c[i]);
free(a);
free(b);
free(c);
return 0;
}
*/
/*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
*/
|
1,368
|
#include "includes.h"
// out_vector[row] = prod_i (1 - matrix[row][i] * in_vector[i]).
// One thread per matrix row; matrix is row-major [outerdim x innerdim].
__global__ void negative_prob_multiply_dense_matrix_vector_kernel(float* matrix, float* in_vector, float* out_vector, unsigned int outerdim, unsigned int innerdim) {
    // We parallelize at the level of matrix rows,
    unsigned int row = blockIdx.x*blockDim.x+threadIdx.x;
    if (row < outerdim) {
        // Fix: the original used the double literal 1.0 both for the init and
        // inside the loop, promoting every multiply to double in a float
        // kernel (slow on consumer GPUs, and a silent precision change).
        float prob = 1.0f;
        // each thread computes one element of the output vector
        for (unsigned int i = 0; i < innerdim; i++) {
            prob *= 1.0f - (matrix[row * innerdim + i] * in_vector[i]);
        }
        out_vector[row] = prob;
    }
}
|
1,369
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstring>
#include <iostream>
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Abort the process with a file/line-tagged diagnostic when `err` is not
// cudaSuccess; no-op otherwise. Used via the checkCudaErrors(err) macro.
inline void __checkCudaErrors(cudaError err, const char* file, const int line)
{
    if (cudaSuccess == err)
        return;
    std::cerr << file << "(" << line << ") : CUDA Runtime API error " << err << ": " << cudaGetErrorString(err) << ".\n";
    exit(EXIT_FAILURE);
}
// Copy one RGB pixel per thread out of a pitch-linear texture (whose width
// is measured in single-byte channels, i.e. width*3 texels per row) into a
// densely packed width*3-stride output buffer.
__global__ void kernel(cudaTextureObject_t tex, int width, int height, unsigned char* outputData)
{
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height)
    {
        return;
    }
    const unsigned int dst = (y * width + x) * 3;
    for (int c = 0; c < 3; ++c)
    {
        outputData[dst + c] = tex2D<unsigned char>(tex, 3 * x + c, y);
    }
}
// Copy an interleaved 8-bit image through a pitch2D texture on the GPU and
// return a newly allocated host buffer (caller frees with delete[]) plus its
// row stride in bytes.
// Fixes vs. original:
//  - grid size: `width + blockSize.x / blockSize.x` evaluated to width+1
//    (precedence bug) instead of ceil(width / blockSize.x);
//  - the device-to-host cudaMemcpy2D mixed up source/destination pitches;
//  - cudaFree(&inPitch)/cudaFree(&outPitch) passed HOST stack addresses,
//    leaking devImageOut.
extern "C" void process(unsigned char* inBuffer, int width, int height, int channels, unsigned char** outBuffer, int &stride)
{
    size_t inputStride = sizeof(unsigned char) * width * channels;
    // Pitched input image on the device.
    unsigned char* devImageIn = nullptr;
    size_t inPitch;
    cudaError_t err = cudaMallocPitch(&devImageIn, &inPitch, inputStride, height);
    checkCudaErrors(err);
    err = cudaMemcpy2D(devImageIn, inPitch, inBuffer, inputStride, inputStride, height, cudaMemcpyHostToDevice);
    checkCudaErrors(err);
    // Output image on the device. NOTE: the kernel writes with a dense
    // width*channels row stride, ignoring outPitch.
    unsigned char* devImageOut = nullptr;
    size_t outPitch;
    err = cudaMallocPitch(&devImageOut, &outPitch, inputStride, height);
    checkCudaErrors(err);
    // Bind the pitched input as a byte-channel pitch2D texture.
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
    cudaResourceDesc texRes;
    memset(&texRes, 0, sizeof(cudaResourceDesc));
    texRes.resType = cudaResourceTypePitch2D;
    texRes.res.pitch2D.devPtr = devImageIn;
    texRes.res.pitch2D.desc = desc;
    texRes.res.pitch2D.width = static_cast<size_t>(width) * channels;
    texRes.res.pitch2D.height = height;
    texRes.res.pitch2D.pitchInBytes = inPitch;
    cudaTextureDesc texDescr;
    memset(&texDescr, 0, sizeof(cudaTextureDesc));
    texDescr.normalizedCoords = false;
    texDescr.filterMode = cudaFilterModePoint;
    texDescr.addressMode[0] = cudaAddressModeWrap;
    texDescr.addressMode[1] = cudaAddressModeWrap;
    texDescr.readMode = cudaReadModeElementType;
    cudaTextureObject_t texture;
    err = cudaCreateTextureObject(&texture, &texRes, &texDescr, NULL);
    checkCudaErrors(err);
    // Ceil-divide so a grid tail still covers the last partial block.
    dim3 blockSize(16, 16);
    dim3 gridSize((width + blockSize.x - 1) / blockSize.x,
                  (height + blockSize.y - 1) / blockSize.y);
    kernel<<<gridSize, blockSize>>>(texture, width, height, devImageOut);
    checkCudaErrors(cudaGetLastError());
    // Host result buffer, pre-filled with red for easy visual debugging of
    // rows the copy-back does not touch.
    stride = static_cast<int>(outPitch);
    *outBuffer = new unsigned char[stride * height];
    for (int i = 0; i < stride * height; i += 3)
    {
        (*outBuffer)[i] = 255;
        (*outBuffer)[i+1] = 0;
        (*outBuffer)[i+2] = 0;
    }
    // Destination rows are `stride` (= outPitch) apart; the kernel wrote the
    // device buffer densely, so the source pitch is inputStride.
    err = cudaMemcpy2D(*outBuffer, outPitch, devImageOut, inputStride, inputStride, height, cudaMemcpyDeviceToHost);
    checkCudaErrors(err);
    cudaDestroyTextureObject(texture);
    cudaFree(devImageIn);
    cudaFree(devImageOut);
}
|
1,370
|
#include <iostream>
#include <cstdio>
#include <iomanip>
#define CSC(call) do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while (0)
using namespace std;
// da[i] -= db[i] for all i < n, using a grid-stride loop so any launch
// configuration covers the whole array.
__global__ void kernel(double *da, double *db, int n) {
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        da[i] -= db[i];
    }
}
// Read `size` doubles from stdin into a freshly allocated array.
// Caller owns the result (delete[]).
__host__ double* scanDoubleArray(int size) {
    double *values = new double[size];
    for (int idx = 0; idx < size; ++idx) {
        cin >> values[idx];
    }
    return values;
}
// Allocate a device buffer of `size` doubles and fill it from the host
// array `arr`; returns the device pointer (caller cudaFree's it).
__host__ double* getDeviceDoubleArray(double *arr, int size) {
    const size_t bytes = sizeof(double) * size;
    double *dev;
    CSC(cudaMalloc(&dev, bytes));
    CSC(cudaMemcpy(dev, arr, bytes, cudaMemcpyHostToDevice));
    return dev;
}
// Read n and two n-element double arrays from stdin, compute a - b on the
// GPU, and print the result in scientific notation.
int main() {
    ios_base::sync_with_stdio(false);

    int n;
    cin >> n;
    double *a = scanDoubleArray(n);
    double *b = scanDoubleArray(n);

    // Mirror both arrays on the device; the host copy of b is then unneeded.
    double *da = getDeviceDoubleArray(a, n);
    double *db = getDeviceDoubleArray(b, n);
    delete[] b;

    // a <- a - b, element-wise, on the GPU.
    kernel<<<256, 256>>>(da, db, n);
    CSC(cudaGetLastError());
    CSC(cudaMemcpy(a, da, sizeof(double) * n, cudaMemcpyDeviceToHost));
    CSC(cudaFree(da));
    CSC(cudaFree(db));

    cout.precision(10);
    cout.setf(ios::scientific);
    for (int i = 0; i < n; ++i) {
        cout << a[i] << ' ';
    }
    cout << endl;

    delete[] a;
    return 0;
}
|
1,371
|
//====================================================
// GPIO Control
// main.cu : Main Routine
//----------------------------------------------------
// Rev.01 2019.06.08 M.Munetomo
//----------------------------------------------------
// Copyright (C) 2019 Munetomo Maruyama
//====================================================
#include <cinttypes>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
//----------------------------------
// Main Routine
//----------------------------------
// Open the sysfs file at `path` write-only, write `len` bytes of `str`,
// and close it; exits the process if the file cannot be opened.
// `len` deliberately includes the terminating NUL, matching the byte
// counts the original code passed to write().
static void sysfs_write(const char *path, const char *str, size_t len)
{
    int fd = open(path, O_WRONLY);
    if (fd < 0) exit(EXIT_FAILURE);
    ssize_t n = write(fd, str, len);
    (void)n; // best-effort, as in the original
    close(fd);
}

//----------------------------------
// Main Routine
//----------------------------------
int main(void)
{
    // Teach how to quit
    printf("Please Hit Enter to Quit.\n");

    // Export GPIO50 (11pin) and configure it: input, both-edge interrupt.
    sysfs_write("/sys/class/gpio/export", "50", 3);
    sysfs_write("/sys/class/gpio/gpio50/direction", "in", 3);
    sysfs_write("/sys/class/gpio/gpio50/edge", "both", 5);

    // Keep the value file open so poll() can report POLLPRI on edges.
    int fd_poll = open("/sys/class/gpio/gpio50/value", O_RDONLY | O_NONBLOCK);
    if (fd_poll < 0) exit(EXIT_FAILURE);

    // Export GPIO79 (12pin) as an output.
    sysfs_write("/sys/class/gpio/export", "79", 3);
    sysfs_write("/sys/class/gpio/gpio79/direction", "out", 4);

    // Poll both stdin (quit) and the GPIO value file (edge interrupt).
    struct pollfd fdset[2];
    memset((void*)fdset, 0, sizeof(fdset));
    fdset[0].fd = STDIN_FILENO;
    fdset[0].events = POLLIN;
    fdset[1].fd = fd_poll;
    fdset[1].events = POLLPRI;

    for (;;)
    {
        char ch;
        // Wait for an event, with a 3 second timeout.
        int rc = poll(fdset, 2, 3000);
        if (rc < 0) exit(EXIT_FAILURE);
        if (rc == 0)
        {
            printf("Timeout\n");
            continue;
        }
        // Keypress on stdin ends the program.
        if (fdset[0].revents & POLLIN)
        {
            ssize_t n = read(fdset[0].fd, &ch, 1);
            (void)n;
            printf("poll() stdin read %c\n", ch);
            break;
        }
        // Edge detected on GPIO50: read the level, then pulse GPIO79.
        if (fdset[1].revents & POLLPRI)
        {
            lseek(fdset[1].fd, 0, SEEK_SET);
            ssize_t n = read(fdset[1].fd, &ch, 1);
            printf("poll() GPIO50 Interrupt Detected, level=%c\n", ch);
            // Toggle GPIO79 (12pin) ten times, 1 ms high / 1 ms low.
            for (int i = 0; i < 10; i++)
            {
                int fd = open("/sys/class/gpio/gpio79/value", O_WRONLY);
                if (fd < 0) exit(EXIT_FAILURE);
                n = write(fd, "1", 2);
                usleep(1000);
                n = write(fd, "0", 2);
                usleep(1000);
                (void)n;
                close(fd);
            }
        }
    }

    // Release resources: close the value file, unexport both GPIOs.
    close(fd_poll);
    sysfs_write("/sys/class/gpio/unexport", "50", 3);
    sysfs_write("/sys/class/gpio/unexport", "79", 3);
    return(EXIT_SUCCESS);
}
//====================================================
// End of Program
//====================================================
|
1,372
|
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
// Demonstrate host_vector vs device_vector: mutations to the host copy made
// AFTER the device copy was constructed are not visible on the device.
int main() {
    // 10 zeros, except the last element.
    thrust::host_vector<double> host(10, 0);
    host[9] = 35;

    printf("Host vector: ");
    for (size_t i = 0; i < host.size(); ++i) {
        std::cout << host[i] << " "; // fast: plain CPU memory access
    }
    printf("\n");

    // Deep copy to the device; the host change below is NOT reflected.
    thrust::device_vector<double> dev(host);
    host[2] = 12;

    printf("Device vector: ");
    for (size_t i = 0; i < dev.size(); ++i) {
        std::cout << dev[i] << " "; // slow: each read crosses to the GPU
    }
    printf("\n");
}
|
1,373
|
#include <iostream>
#include <math.h>
#include <cstdio>
using namespace std;
// Thread block size
const int blockSize = 16;
// Matrices are stored in row-major order:
// M(row, clo) = *(M.elements + row*M.width + col);
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// CPU matrix multiplication for evaluating results
// Reference (host) matrix multiply for validating GPU results:
// matC[m x n] = matA[m x k] * matB[k x n], all row-major.
void cpu_matrix_multi(float *matA, float *matB, float *matC, int m, int k, int n) {
    for (int row = 0; row < m; row++) {
        for (int col = 0; col < n; col++) {
            float acc = 0.0f;
            for (int p = 0; p < k; p++)
                acc += matA[row*k + p] * matB[p*n + col];
            matC[row*n + col] = acc;
        }
    }
}
// Matrix multiplication kernel called by MatMul()
// Naive GEMM kernel: each thread computes one element of C = A * B by
// accumulating the full dot product of one row of A with one column of B.
__global__
void MatMulKernel_naive(const Matrix A, const Matrix B, Matrix C) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: guard the grid tail. The original had no bounds check, so any
    // dimensions that are not exact multiples of the block size read and
    // wrote out of bounds.
    if (row >= C.height || col >= C.width)
        return;
    float Cvalue = 0.0f;
    for (int e = 0; e < A.width; e++)
        Cvalue += A.elements[row*A.width+e] * B.elements[e*B.width+col];
    C.elements[row*C.width+col] = Cvalue;
}
// Matrix multiplication - host code
// Matrix dimensions are assumed to be multiples of blockSize
// Host driver: copy A and B to the device, launch the naive kernel, and
// copy C back. Matrix dimensions are assumed to be multiples of blockSize.
void MatMul(const Matrix A, const Matrix B, Matrix C) {
    // Load A to device memory.
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    // Load B to device memory.
    Matrix d_B;
    // Fix: the original assigned d_B.height = B.width (typo, only harmless
    // for square matrices).
    d_B.width = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // Allocate C in device memory.
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);
    // Invoke kernel.
    dim3 dimBlock(blockSize, blockSize, 1);
    dim3 dimGrid(B.width/dimBlock.x, A.height/dimBlock.y, 1);
    MatMulKernel_naive<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Wait for the kernel BEFORE reading back (cudaThreadSynchronize is
    // deprecated, and the original called it after the copy anyway; the
    // blocking memcpy below also synchronizes).
    cudaDeviceSynchronize();
    // Read C from device.
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    // Free device memory.
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Build two random 1024x1024 matrices, multiply on the GPU, and validate
// the result against a CPU reference implementation.
int main() {
    // Initiate A and B elements on host memory.
    Matrix h_A;
    h_A.height = 1024; h_A.width = 1024;
    float* h_matA = new float[h_A.height * h_A.width];
    std::srand(1103);
    for (int i = 0; i < h_A.height; i++)
        for (int j = 0; j < h_A.width; j++)
            h_matA[i*h_A.width+j] = float(std::rand())/float(RAND_MAX);
    h_A.elements = h_matA;
    Matrix h_B;
    h_B.height = 1024; h_B.width = 1024;
    float* h_matB = new float[h_B.height * h_B.width];
    for (int i = 0; i < h_B.height; i++)
        for (int j = 0; j < h_B.width; j++)
            h_matB[i*h_B.width+j] = float(std::rand())/float(RAND_MAX);
    h_B.elements = h_matB;
    // Matrix C holds the GPU result.
    Matrix h_C;
    h_C.height = h_A.height; h_C.width = h_B.width;
    float* h_matC = new float[h_A.height * h_B.width];
    h_C.elements = h_matC;
    // Multiply on the GPU.
    MatMul(h_A, h_B, h_C);
    // CPU reference for validation.
    float* h_matC_cpu = new float[h_A.height * h_B.width];
    cpu_matrix_multi(h_A.elements, h_B.elements, h_matC_cpu, h_A.height, h_A.width, h_B.width);
    // Fix: the original compared h_C.elements against h_matC — the SAME
    // buffer — so the check could never fail. Compare against the CPU
    // result, with a mixed absolute/relative tolerance suitable for
    // 1024-term float dot products (the original 1e-6 absolute bound would
    // flag ordinary float rounding as an error).
    bool mismatch = false;
    for (int i = 0; i < h_C.height; i++) {
        for (int j = 0; j < h_C.width; j++) {
            float gpu = h_C.elements[i*h_C.width+j];
            float cpu = h_matC_cpu[i*h_C.width+j];
            if (fabs(gpu - cpu) > 1e-3f + 1e-4f * fabs(cpu))
                mismatch = true;
        }
    }
    if (!mismatch)
        cout << "Matrix multiplication by GPU is right! " << endl;
    else
        cout << "Results are not right! " << endl;
    // Free memory on host.
    delete [] h_matA;
    delete [] h_matB;
    delete [] h_matC;
    delete [] h_matC_cpu;
    return 0;
}
|
1,374
|
#include "includes.h"
// Grid-stride over all i < n: choose a branch from the initial comparison
// x[i] > y[i], then repeatedly overwrite y[i] with x[j] + y[j] (or x[j] /
// y[j]) for j in [0, n/CONST); only the final j iteration's value survives.
__global__ void kernelA(int n, float *x, float *y) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        // Branch decision is taken once, before y[i] is modified.
        const bool greater = x[i] > y[i];
        const int limit = n / CONST;
        for (int j = 0; j < limit; j++) {
            y[i] = greater ? (x[j] + y[j]) : (x[j] / y[j]);
        }
    }
}
|
1,375
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <sys/time.h>
#define THREADS 512
#ifdef __cplusplus
extern "C"
{
#endif
// One compare-exchange stage of a bitonic sorting network. Within each
// `blockSize`-element segment, the first element of every `strideLength`
// pair swaps with its partner; even-numbered segments sort ascending,
// odd-numbered segments descending.
__global__ void bitonicSort(float *d_inputArray, int blockSize, int strideLength, int number_of_elements)
{
	int index = blockIdx.x*blockDim.x + threadIdx.x;
	if(index >= number_of_elements)
		return;
	// Fix: guard the partner access. For element counts that are not a
	// power of two the original read/wrote d_inputArray[index+strideLength]
	// past the end of the array. (Never triggers for power-of-two sizes.)
	if(index + strideLength >= number_of_elements)
		return;
	int blockNumber = index/blockSize;
	int blockStartAddress = blockNumber*blockSize;
	//computation is performed on only selected indices
	//as according to bitonic sort algorithm
	if( ((index-blockStartAddress)/strideLength) % 2 != 0 )
		return;
	//even numbered block will be sorted in increasing order
	//while odd numbered block is sorted in decreasing order
	if(blockNumber % 2 == 0)
	{
		if(d_inputArray[index] > d_inputArray[index + strideLength])
		{
			float temp = d_inputArray[index];
			d_inputArray[index] = d_inputArray[index + strideLength];
			d_inputArray[index + strideLength] = temp;
		}
	}
	else
	{
		if(d_inputArray[index] < d_inputArray[index + strideLength])
		{
			float temp = d_inputArray[index];
			d_inputArray[index] = d_inputArray[index + strideLength];
			d_inputArray[index + strideLength] = temp;
		}
	}
}
// Sort the float array `a` (length must be a power of two) in place by
// running the bitonic network on the GPU. Returns 0.
int cuda_sort(int number_of_elements, float *a)
{
	//allocating memory on GPU device and copying data from host to GPU device
	float *d_inputArray;
	// Fix: the original wrote `!cudaMalloc(...) == cudaSuccess`, which only
	// printed on failure by accident of precedence; say what is meant.
	if(cudaMalloc(&d_inputArray, sizeof(float) * number_of_elements) != cudaSuccess)
		printf("error in allocating d_inputArray\n");
	if(cudaMemcpy(d_inputArray, a, sizeof(float) * number_of_elements, cudaMemcpyHostToDevice) != cudaSuccess)
		printf("error in copying d_inputArray\n");
	//iterating through input array block by block
	for (int blockSize = 2; blockSize <= number_of_elements; blockSize = blockSize*2)
	{
		//iterating through each block with differnt strideLength
		for(int strideLength = blockSize/2; strideLength >= 1; strideLength = strideLength/2)
		{
			bitonicSort<<<(number_of_elements/1024 + 1),1024>>>(d_inputArray, blockSize, strideLength, number_of_elements);
		}
	}
	//copying data back from GPU device to host memory
	if(cudaMemcpy(a, d_inputArray, sizeof(float) * number_of_elements, cudaMemcpyDeviceToHost) != cudaSuccess)
		printf("error in copying d_inputArray from device to host\n");
	// Fix: the original leaked the device buffer on every call.
	cudaFree(d_inputArray);
	return 0;
}
#ifdef __cplusplus
}
#endif
|
1,376
|
#include "includes.h"
__global__ void initMemory(size_t position, size_t* array)
{
    // Each thread stores its own global linear index into the slot
    // `position + index` of `array`.
    const size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
    array[position + tid] = tid;
}
|
1,377
|
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <cuda_runtime.h>
#include "kernel.cuh"
#define N 100000
#define BLOCK_SIZE 256
// Print every index where a and b disagree, plus an overall failure count.
void compareBuffers(const float *a, const float *b, const uint32_t arr_size)
{
    uint32_t mismatches = 0;
    for(uint32_t idx = 0; idx < arr_size; ++idx)
    {
        if(a[idx] == b[idx])
            continue;
        printf("Failed index %d\n", idx);
        ++mismatches;
    }
    printf("failed (%d/%d)\n", mismatches, arr_size);
}
// CPU reference implementation of the GPU add: c[i] = a[i] + b[i].
void referenceAdd(const float *a, const float *b, float *c, const uint32_t arr_size)
{
    for(uint32_t idx = 0; idx != arr_size; ++idx)
        c[idx] = a[idx] + b[idx];
}
// Deterministic pseudo-random fill: the same seed yields the same sequence.
void randomizeFloatArray(float *arr, const uint32_t arr_size, const uint32_t seed)
{
    srand(seed);
    for(uint32_t idx = 0; idx < arr_size; ++idx)
        arr[idx] = rand();
}
// Fill arr with the ramp offset, offset+1, ..., offset+arr_size-1.
void initSimpleFloatArray(float *arr, const uint32_t arr_size, const uint32_t offset)
{
    for(uint32_t idx = 0; idx < arr_size; ++idx)
        arr[idx] = idx + offset;
}
// Debug helper: print the array as a comma-separated line.
void printFloatArray(float *arr, const uint32_t arr_size)
{
    for(uint32_t idx = 0; idx < arr_size; ++idx)
        printf("%f, ", arr[idx]);
    printf("\n");
}
// Driver: adds two N-element ramps with the external `kadd` kernel
// (declared in kernel.cuh) and checks the result against a CPU reference.
int main()
{
float *a, *b, *c, *ref_c;
float *c_in_a, *c_in_b, *c_out_c;
// Host buffers: two inputs, GPU result, CPU reference result.
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
c = (float*)malloc(sizeof(float) * N);
ref_c = (float*)malloc(sizeof(float) * N);
initSimpleFloatArray(a, N, 0);
initSimpleFloatArray(b, N, 43);
//printFloatArray(a, N);
//printFloatArray(b, N);
cudaMalloc((void **) &c_in_a, N * sizeof(float));
cudaMalloc((void **) &c_in_b, N * sizeof(float));
cudaMalloc((void **) &c_out_c, N * sizeof(float));
cudaMemcpy(c_in_a, a, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(c_in_b, b, N * sizeof(float), cudaMemcpyHostToDevice);
//const uint32_t el_per_thread = (uint32_t)ceil((double)N / (double)BLOCK_SIZE);
// One element per thread; block count is the ceiling of N / BLOCK_SIZE.
const uint32_t el_per_thread = 1;
const uint32_t block_count = (uint32_t)ceil(((double)N / el_per_thread) / (double)BLOCK_SIZE);
printf("el per thread : %d, block_count : %d\n", el_per_thread, block_count);
kadd<<<block_count, BLOCK_SIZE>>>(c_in_a, c_in_b, c_out_c, el_per_thread);
// The blocking device-to-host copy also synchronizes with the kernel.
cudaMemcpy(c, c_out_c, N * sizeof(float), cudaMemcpyDeviceToHost);
//printFloatArray(c, N);
cudaFree(c_in_a);
cudaFree(c_in_b);
cudaFree(c_out_c);
// Validate the GPU result against the CPU reference.
referenceAdd(a, b, ref_c, N);
compareBuffers(ref_c, c, N);
free(a);
free(b);
free(c);
free(ref_c);
}
|
1,378
|
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the doubleing point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in doubleing-point.
extern "C"
// Round to nearest integer value in doubleing-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two doubleing point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the doubleing-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision doubleing-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision doubleing-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
// Multiplies a (normalized) interleaved complex input by the conjugate of a
// complex kernel: output = conj(input/sqrt(sizeInput)) * inputKernel, with
// the input repeated cyclically every sizeInput complex values.
// Layout: real parts at even float indices, imaginary at odd (interleaved).
// Launch: 2D grid/block flattened to one complex element per thread.
__global__ void vec_complexeConjugateKernel (int n, int sizeInput, float *output, float *input, float *inputKernel)
{
//n size
//int id = 2*(threadIdx.x + blockIdx.x * blockDim.x);
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
// Flatten the 2D thread coordinate; factor 2 because data is interleaved.
int id = 2*(idy * gridDim.x * blockDim.x + idx);
// Wrap into the input buffer, which holds sizeInput complex values.
int id2=id%(sizeInput*2);
float real;
float imag;
float tmp;
if (id < n*2)
{
// Normalize by sqrt(sizeInput) -- presumably an FFT scaling factor;
// TODO(review): confirm against the surrounding pipeline.
real=input[id2]/sqrt((float)sizeInput);
imag=input[id2+1]/sqrt((float)sizeInput);
//id : real
//id+1 : imaginary
// tmp holds the new real part so the write to output[id+1] below does
// not clobber an operand (output may alias neither input here).
tmp=imag*inputKernel[id+1]+real*inputKernel[id];
output[id+1]=imag*inputKernel[id]-real*inputKernel[id+1];
output[id]=tmp;
}
}
|
1,379
|
#include "includes.h"
// Vector add demo routed through static shared memory: C[i] = A[i] + B[i] + i.
// Precondition: blockDim.x <= 512 (size of the shared buffer).
__global__ void sumArraysOnGPUshared(float *A, float *B, float *C, const int N)
{
__shared__ float smem[512];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
// Fix: the original used `smem[threadIdx.x] += i`, which reads
// uninitialized shared memory. Assign instead so the result is
// well-defined.
smem[threadIdx.x] = i;
C[i] = A[i] + B[i] + smem[threadIdx.x];
}
}
|
1,380
|
#include <stdio.h>
// Minimal kernel: every launched thread prints one line from the device.
__global__ void hello_from_gpu()
{
printf("This is hello from GPU\n");
}
// Launch 10 device threads that each print a greeting, after the host's own.
int main()
{
printf("This is hello from CPU\n");
hello_from_gpu <<<1,10>>> ();
// Fix: kernel launches are asynchronous -- block until the kernel has
// finished so its printf output is flushed before tearing down the device.
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
|
1,381
|
#include <iostream>
#include <chrono>
using namespace std;
using namespace std::chrono;
|
1,382
|
#include <iostream>
#include <math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
using namespace std;
// Kernel function to add the elements of two arrays
// Kernel: elementwise y += x over n floats, using a grid-stride loop so the
// result is correct for any launch configuration.
__global__
void add(int n, float *x, float *y)
{
  const int step = blockDim.x * gridDim.x;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  while (i < n) {
    y[i] += x[i];
    i += step;
  }
}
// Small thrust::host_vector demo: store two heap-allocated float pairs,
// print them, then release them.
void testTrush(){
// H has storage for 2 float pointers
thrust::host_vector<float*> H(2);
// initialize and print each element
for(int i = 0; i < H.size(); i++){
H[i] = (float*)malloc(2*sizeof(float));
H[i][0] = 0;H[i][1] = 1;
std::cout << "H[" << i << "] = " << H[i][0] <<"--"<<H[i][1]<< std::endl;
}
// Fix: free the per-element buffers -- the original leaked them (the
// host_vector only owns the pointers, not what they point to).
for(int i = 0; i < H.size(); i++)
free(H[i]);
}
// Unified-memory vector add: fills two 1M-element arrays, adds them on the
// GPU, and reports the maximum deviation from the expected value 3.0f.
int main()
{
testTrush();
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Cover all N elements with ceiling division on the block count.
int blockSize = 256;
int numOfBlocks = (N+blockSize-1)/blockSize;
// Run kernel on 1M elements on the GPU
add<<<numOfBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
1,383
|
#include "includes.h"
// Copies the central (unpadded) region of a rows1 x cols1 source into a
// cols2-wide destination; n is the number of destination elements.
// The symmetric pad width is (cols1 - cols2) / 2 on each side.
// rows1 is unused but kept for interface compatibility.
__global__ void cu_depadding(const float* src, float* dst, const int rows1, const int cols1, const int cols2, const int n){
// Hoisted out of the loop: pad is loop-invariant.
const int pad = (cols1 - cols2) / 2;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int stride = blockDim.x * gridDim.x;
while(tid < n){
// Destination coordinate (r2, c2) maps to source (r2+pad, c2+pad).
const int c2 = tid % cols2;
const int r2 = tid / cols2;
dst[tid] = src[(r2 + pad) * cols1 + (c2 + pad)];
tid += stride;
}
}
|
1,384
|
#include "includes.h"
// Adds two width x width pitched matrices, one thread per row; each thread
// sums an entire row. `pitch` is the row stride in bytes (cudaMallocPitch).
__global__ void matrixAdd_B_Kernel(float* A, float* B, float* C, size_t pitch, int width){
//compute indexes
int row = blockIdx.x * blockDim.x + threadIdx.x;
// Convert the byte pitch into a float stride.
int rowWidthWithPad = pitch/sizeof(float);
if(row < width){
int base = row * rowWidthWithPad;
// Fix: removed the redundant inner `if(col < width)` check, which was
// always true under the loop condition.
for (int col = 0; col < width; ++col)
C[base + col] = A[base + col] + B[base + col];
}
}
|
1,385
|
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Uniform pseudo-random float in [a, b], driven by the C library rand().
float rand(float a,float b)
{
    const float unit = (float)rand() / RAND_MAX;
    return (b - a) * unit + a;
}
// Read A(row, col) from a row-major matrix with row stride A.stride.
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
// Write `value` to A(row, col) in a row-major matrix with row stride A.stride.
__device__ void SetElement(Matrix A, int row, int col, float value)
{
A.elements[row * A.stride + col] = value;
}
#define BLOCK_SIZE 1
// Return a non-owning BLOCK_SIZE x BLOCK_SIZE view into A at tile (row, col);
// the view shares A's storage and stride, so writes go through to A.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
+ BLOCK_SIZE * col];
return Asub;
}
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Host wrapper: C = A * B on the GPU via the tiled MatMulKernel.
// NOTE(review): the grid is B.width/BLOCK_SIZE x A.height/BLOCK_SIZE with no
// remainder handling, so all dimensions are assumed to be exact multiples of
// BLOCK_SIZE -- confirm with callers before changing BLOCK_SIZE.
// No CUDA error checking is performed.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Copy A to device memory.
Matrix d_A;
d_A.width = d_A.stride = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc((void**)&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size,
cudaMemcpyHostToDevice);
// Copy B to device memory.
Matrix d_B;
d_B.width = d_B.stride = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc((void**)&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size,
cudaMemcpyHostToDevice);
// Allocate C on the device (contents are produced by the kernel).
Matrix d_C;
d_C.width = d_C.stride = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc((void**)&d_C.elements, size);
// One BLOCK_SIZE x BLOCK_SIZE thread block per output tile.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Blocking copy back also synchronizes with the kernel.
cudaMemcpy(C.elements, d_C.elements, size,
cudaMemcpyDeviceToHost);
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
// Tiled matrix multiply: each thread block computes one BLOCK_SIZE x
// BLOCK_SIZE tile of C; each thread computes one element of that tile.
// Assumes A.width is an exact multiple of BLOCK_SIZE -- see MatMul's grid.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
float Cvalue = 0;
int row = threadIdx.y;
int col = threadIdx.x;
// March tile-by-tile along A's row band and B's column band.
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
Matrix Asub = GetSubMatrix(A, blockRow, m);
Matrix Bsub = GetSubMatrix(B, m, blockCol);
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// All threads must finish loading the tiles before any thread reads.
__syncthreads();
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Keep the next iteration from overwriting tiles still being read.
__syncthreads();
}
SetElement(Csub, row, col, Cvalue);
// Fix: removed two redundant trailing __syncthreads() -- no shared memory
// is accessed after the final per-thread write.
}
// Build a height_ x width_ row-major matrix whose elements are random
// values in [0, 50]; caller owns (and must free) the element buffer.
static Matrix
cons_Matrix (int height_, int width_)
{
  Matrix M;
  M.height = height_;
  M.width = width_;
  M.stride = width_;
  M.elements = (float*) malloc(sizeof(*M.elements) * width_ * height_);
  for (int r = 0; r < height_; ++r) {
    for (int c = 0; c < width_; ++c) {
      M.elements[r * width_ + c] = rand(0.0,50.0);
    }
  }
  return M;
}
// Dump the matrix row by row with fixed-width formatting; `name` is accepted
// for symmetry with the call sites but is not printed.
static void
print_Matrix (Matrix A, char *name)
{
  for (int r = 0; r < A.height; ++r) {
    for (int c = 0; c < A.width; ++c)
      printf ("%5.1f ", A.elements[r * A.stride + c]);
    printf("\n");
  }
}
// Usage: prog m n p -- multiplies a random m x n matrix by a random n x p
// matrix on the GPU and prints A, B, and C.
int main(int argc, char **argv)
{
time_t czas;
srand( (unsigned int)time(&czas));
// Fix: validate the command line instead of dereferencing missing argv
// entries (the original crashed when fewer than three args were given).
if (argc < 4) {
printf("usage: %s m n p\n", argv[0]);
return 1;
}
const int m = atoi(argv[1]);
const int n = atoi(argv[2]);
const int p = atoi(argv[3]);
Matrix A = cons_Matrix(m, n);
Matrix B = cons_Matrix(n, p);
Matrix C = cons_Matrix(m, p);
MatMul(A, B, C);
printf("\n");
print_Matrix(A, "A");
printf("\n");
print_Matrix(B, "B");
printf("\n");
print_Matrix(C, "C");
printf("\n");
// Fix: release the host element buffers (the original leaked all three).
free(A.elements);
free(B.elements);
free(C.elements);
return 0;
}
|
1,386
|
/* Block size X: 32 -- one block per node, 32 threads striding its levels. */
__global__ void fct_ale_a1(const int maxLevels, const double * __restrict__ fct_low_order, const double * __restrict__ ttf, const int * __restrict__ nLevels, double * __restrict__ fct_ttf_max, double * __restrict__ fct_ttf_min)
{
    const int base = blockIdx.x * maxLevels;
    const int levels = nLevels[blockIdx.x];
    // Per-level elementwise max/min of the two input fields.
    for ( int level = threadIdx.x; level < levels; level += 32 )
    {
        const double lo = fct_low_order[base + level];
        const double tt = ttf[base + level];
        fct_ttf_max[base + level] = fmax(lo, tt);
        fct_ttf_min[base + level] = fmin(lo, tt);
    }
}
|
1,387
|
#include "includes.h"
// Vector-add demo routed through a per-thread local array.
// NOTE(review): the guard `i + 4 < N` skips the last few elements of C, and
// each thread adds 2*A[i + (threadIdx.x % 4)] rather than a fixed offset --
// this looks like a register/local-memory teaching example, not a correct
// add; confirm intent before reusing.
__global__ void sumArraysOnGPUlocal(float *A, float *B, float *C, const int N)
{
float local[4];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i +4 < N) {
for (int j=0; j < 4; j++) local[j] = 2*A[i+j];
C[i] = A[i] + B[i] + local[threadIdx.x%4];
}
}
|
1,388
|
#include <iostream>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
using namespace std;
#define ARRAY_SIZE_X 32
#define ARRAY_SIZE_Y 16
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE_X * ARRAY_SIZE_Y))
/* Kernel parameters are declared as const pointers (a pointer's own value
   cannot change, so each must be initialized at the call). */
// Teaching kernel: each thread records its 2D block/thread coordinates and
// the grid/block dimensions into ten parallel output arrays, indexed by the
// thread's global linear id. Launch: 2D grid of 2D blocks.
__global__ void what_is_my_id_2d_A(
unsigned int * const block_x,
unsigned int * const block_y,
unsigned int * const thread,
unsigned int * const calc_thread,
unsigned int * const x_thread,
unsigned int * const y_thread,
unsigned int * const grid_dimx,
unsigned int * const block_dimx,
unsigned int * const grid_dimy,
unsigned int * const block_dimy)
{
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x; // 0~W-1
const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y; // 0~H-1
// Row-major flattening of (idx, idy) into a global linear index.
const unsigned int thread_idx = ((gridDim.x * blockDim.x) * idy) + idx; // global
block_x[thread_idx] = blockIdx.x;
block_y[thread_idx] = blockIdx.y;
thread[thread_idx] = threadIdx.x;
calc_thread[thread_idx] = thread_idx;
x_thread[thread_idx] = idx;
y_thread[thread_idx] = idy;
grid_dimx[thread_idx] = gridDim.x;
block_dimx[thread_idx] = blockDim.x;
grid_dimy[thread_idx] = gridDim.y;
block_dimy[thread_idx] = blockDim.y;
}
/* Host-side destination arrays for the kernel's ten outputs, laid out as a
   32 x 16 image (ARRAY_SIZE_X x ARRAY_SIZE_Y). */
unsigned int cpu_block_x[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_y[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_calc_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_x_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_y_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_grid_dimx[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_dimx[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_grid_dimy[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_dimy[ARRAY_SIZE_Y][ARRAY_SIZE_X];
// Demo driver: launches the id-recording kernel with two different 512-thread
// launch shapes, copies the results back, and prints them.
// NOTE(review): both launches write the same buffers, so the printed values
// reflect only the second (square) configuration -- presumably intentional
// for the exercise; confirm if both results were meant to be inspected.
int main(void)
{
/* Total thread count of one block = 2 * 64 = 128 */
// Total thread of all grid 128*4=512
const dim3 threads_rect(32, 4); // W=32, H=4
const dim3 blocks_rect(1, 4); // it means W=1, H=4
// another shape
const dim3 threads_square(16, 8);
const dim3 blocks_square(2, 2);
// Declare pointers for GPU based params
unsigned int * gpu_block_x;
unsigned int * gpu_block_y;
unsigned int * gpu_thread;
unsigned int * gpu_calc_thread;
unsigned int * gpu_x_thread;
unsigned int * gpu_y_thread;
unsigned int * gpu_grid_dimx;
unsigned int * gpu_block_dimx;
unsigned int * gpu_grid_dimy;
unsigned int * gpu_block_dimy;
// Allocate the ten output arrays on the GPU
cudaMalloc((void **)&gpu_block_x, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_block_y, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_thread, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_x_thread, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_y_thread, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_grid_dimx, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_block_dimx, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_grid_dimy, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_block_dimy, ARRAY_SIZE_IN_BYTES);
// Execute the kernel once per launch shape
for(int kernel=0; kernel < 2; kernel++)
{
switch(kernel)
{
case 0:
{
what_is_my_id_2d_A<<<blocks_rect, threads_rect>>>(
gpu_block_x, gpu_block_y, gpu_thread, gpu_calc_thread,
gpu_x_thread, gpu_y_thread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy
);
} break;
case 1:
{
what_is_my_id_2d_A<<<blocks_square, threads_square>>>(
gpu_block_x, gpu_block_y, gpu_thread, gpu_calc_thread,
gpu_x_thread, gpu_y_thread, gpu_grid_dimx, gpu_block_dimx,
gpu_grid_dimy, gpu_block_dimy
);
} break;
default: exit(1); break;
}
}
// copy back the gpu results to the cpu (blocking copies also synchronize
// with the kernels above)
cudaMemcpy(cpu_block_x, gpu_block_x, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_block_y, gpu_block_y, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_x_thread, gpu_x_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_y_thread, gpu_y_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_grid_dimx, gpu_grid_dimx, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_block_dimx, gpu_block_dimx, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_grid_dimy, gpu_grid_dimy, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_block_dimy, gpu_block_dimy, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
// free the arrays on the GPU
cudaFree(gpu_block_x);
cudaFree(gpu_block_y);
cudaFree(gpu_thread);
cudaFree(gpu_calc_thread);
cudaFree(gpu_x_thread);
cudaFree(gpu_y_thread);
cudaFree(gpu_grid_dimx);
cudaFree(gpu_block_dimx);
cudaFree(gpu_grid_dimy);
cudaFree(gpu_block_dimy);
// print one line per pixel of the 32 x 16 result image
for (int y=0; y <ARRAY_SIZE_Y; y++)
{
for (int x=0; x < ARRAY_SIZE_X; x++)
{
printf("CT: %d - Block_X-%d, Block_Y-%d, Thread_X-%d, Thread_Y-%d, Thread-%d,Grid_dimx-%d, Block_dimx-%d, Grid_dimy-%d, Block_dimy-%d\n",
cpu_calc_thread[y][x], cpu_block_x[y][x], cpu_block_y[y][x], cpu_x_thread[y][x], cpu_y_thread[y][x],
cpu_thread[y][x], cpu_grid_dimx[y][x], cpu_block_dimx[y][x], cpu_grid_dimy[y][x],cpu_block_dimy[y][x]);
}
}
}
|
1,389
|
#include <cuda_runtime.h>
#include <stdio.h>
// Query and print basic capability information for CUDA device 0:
// driver/runtime versions and thread/block/grid dimension limits.
int main(){
int device = 0;
cudaDeviceProp device_property;
cudaGetDeviceProperties(&device_property, device);
printf("\nDevice %d: %s", device, device_property.name);
int driver_version;
int runtime_version;
// Versions are encoded as 1000*major + 10*minor; dividing by 1000.0
// renders e.g. 11020 as 11.02.
cudaDriverGetVersion(&driver_version);
cudaRuntimeGetVersion(&runtime_version);
printf("\nCUDA driver Version / Runtime Version %.3g / %.3g",
driver_version/1000.0, runtime_version/1000.0);
// thread property
printf("\nMax number of threads per multiprocessor: %d\n",
device_property.maxThreadsPerMultiProcessor);
printf("\nMax number of threads per block: %d\n",
device_property.maxThreadsPerBlock);
printf("\nMax dimension size of a thread block (x,y,z): (%d,%d,%d)\n",
device_property.maxThreadsDim[0],
device_property.maxThreadsDim[1],
device_property.maxThreadsDim[2]);
printf("\nMax dimension size of a grid size (x,y,z): (%d,%d,%d)\n",
device_property.maxGridSize[0],
device_property.maxGridSize[1],
device_property.maxGridSize[2]);
return 0;
}
|
1,390
|
#include <iostream>
#include <chrono>
using namespace std;
using namespace std::chrono;
// Elementwise c = a + b for the first n elements, one element per thread.
// NOTE(review): `n` is declared double and used only in the bound check;
// callers pass an integral count -- keep the type for interface stability.
__global__ void vecAddGPU(double *a, double *b, double *c, double n){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n){
c[id] = a[id] + b[id];
}
}
// CPU reference: elementwise c = a + b over the first n elements.
void vecAddCPU(double *a, double *b, double *c, double n){
    for (int idx = 0; idx < n; ++idx)
        c[idx] = a[idx] + b[idx];
}
// Benchmarks a 1e8-element vector add on CPU vs GPU and prints both times.
int main(){
double n = 100000000;
double *a, *b, *c, *dev_a, *dev_b, *dev_c;
//Allocate memories
a = (double*)malloc(n * sizeof(double));
b = (double*)malloc(n * sizeof(double));
c = (double*)malloc(n * sizeof(double));
cudaMalloc(&dev_a, n * sizeof(double));
cudaMalloc(&dev_b, n * sizeof(double));
cudaMalloc(&dev_c, n * sizeof(double));
// Get random double numbers
for(int i = 0;i < n;i ++){
a[i] = double(rand());
b[i] = double(rand());
}
// Time CPU execution
auto startCPU = high_resolution_clock::now();
vecAddCPU(a, b, c, n);
auto stopCPU = high_resolution_clock::now();
//Move a and b to device
cudaMemcpy(dev_a, a, n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, n * sizeof(double), cudaMemcpyHostToDevice);
// Time GPU execution.
// Fix 1: launch enough blocks to cover all n elements -- the original
// <<<1, 256>>> launch computed only the first 256 sums.
// Fix 2: synchronize before stopping the clock -- kernel launches are
// asynchronous, so the original timed only the launch overhead.
int threads = 256;
long long count = (long long)n;
int blocks = (int)((count + threads - 1) / threads);
auto startGPU = high_resolution_clock::now();
vecAddGPU<<<blocks, threads>>>(dev_a, dev_b, dev_c, n);
cudaDeviceSynchronize();
auto stopGPU = high_resolution_clock::now();
//Compare execution times
cout << endl;
cout << "====Execution times===" << endl;
cout << " CPU (in microseconds): " << duration_cast<microseconds>(stopCPU - startCPU).count() << endl;
cout << " GPU (in microseconds): " << duration_cast<microseconds>(stopGPU - startGPU).count() << endl;
cout << endl;
// Fix 3: release host and device buffers (the original leaked all six).
free(a); free(b); free(c);
cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
}
|
1,391
|
#include "slicer.cuh"
#include "triangle.cuh"
#include <thrust/sort.h>
#include <thrust/count.h>
#include <thrust/functional.h>
#include <thrust/copy.h>
// Declare local helper functions
__device__ __forceinline__ void toNextLayer(layer_t* intersections_large_local,
size_t trunk_length_local, layer_t & curr_layer, bool & isInside, char* out_local);
__device__ __forceinline__ double min3(double a, double b, double c);
__device__ __forceinline__ double max3(double a, double b, double c);
// Counts, for each (x, y) pixel ray, how many of the `num_small` triangles
// intersect it on each of the NUM_LAYERS layers, accumulating the counts in
// out[y][x][z] (stored as bytes). Triangles are staged through shared memory
// in tiles of THREADS_PER_BLOCK; a per-triangle y-bounds test in shared
// memory skips rays outside the triangle's y extent.
// Launch assumption: X_DIM is a power of two (the `idx & (X_DIM-1)` mask).
__global__
void overlapSlicer(triangle* tri_small, double* zMins, size_t num_small, bool* out) {
// out[y][x][z]
size_t idx = (size_t)blockDim.x * (size_t)blockIdx.x + (size_t)threadIdx.x;
int x_idx = idx & (X_DIM-1);
int y_idx = idx / X_DIM;
// Shift pixel indices so (0, 0) is the center of the build area.
int x = x_idx - (X_DIM >> 1);
int y = y_idx - (Y_DIM >> 1);
__shared__ triangle tri_base[THREADS_PER_BLOCK];
// __shared__ double zMins_base[THREADS_PER_BLOCK];
// __shared__ double xMin[THREADS_PER_BLOCK];
// __shared__ double xMax[THREADS_PER_BLOCK];
__shared__ bool yNotInside[THREADS_PER_BLOCK];
// Use local array. Mapped to registers if NUM_LAYERS is small
char out_local[NUM_LAYERS];
char* out_ptr = (char*)(out + idx);
for (size_t i = 0; i < NUM_LAYERS; i++) {
out_local[i] = out_ptr[i*X_DIM*Y_DIM];
}
// layer_t curr_layer = curr_layers[idx];
// layer_t* intersections_large_local = intersections_large + idx * NUM_LAYERS;
// This flag only applies to pixels that are not intersections.
// bool isInside = false;
size_t num_iters = num_small / THREADS_PER_BLOCK;
double y_pos = y * RESOLUTION;
// double x_pos = x * RESOLUTION;
// Full tiles: every thread loads one triangle into shared memory, then
// all threads test the whole tile against their own pixel ray.
for (size_t i = 0; i < num_iters; i++) {
triangle t = tri_small[threadIdx.x + (i * THREADS_PER_BLOCK)];
tri_base[threadIdx.x] = t;
// zMins_base[threadIdx.x] = zMins[threadIdx.x + (i * THREADS_PER_BLOCK)];
double yMin = min3(t.p1.y, t.p2.y, t.p3.y);
double yMax = max3(t.p1.y, t.p2.y, t.p3.y);
yNotInside[threadIdx.x] = (y_pos < yMin) || (y_pos > yMax);
// Wait for other threads to complete;
__syncthreads();
if (y_idx < Y_DIM) {
for (size_t tri_idx = 0; tri_idx < THREADS_PER_BLOCK; tri_idx++) {
layer_t curr_intersection = yNotInside[tri_idx] ? NONE : pixelRayIntersection(tri_base[tri_idx], x, y);
if (curr_intersection >= 0 && curr_intersection < NUM_LAYERS) out_local[curr_intersection]++;
}
// Move to the next triangle-layer pair that intersects
// Add 1 to curr_layer when comparing to avoid rounding issues.
// while ((curr_layer+1)*RESOLUTION < zMins_base[THREADS_PER_BLOCK-1] && curr_layer < NUM_LAYERS) {
// toNextLayer(intersections_large_local, trunk_length_local, curr_layer, isInside, out_local);
// }
}
__syncthreads();
}
// Partial final tile: only the first `remaining` threads load triangles.
size_t remaining = num_small - (num_iters * THREADS_PER_BLOCK);
if (threadIdx.x < remaining) {
triangle t = tri_small[threadIdx.x + (num_iters * THREADS_PER_BLOCK)];
tri_base[threadIdx.x] = t;
// zMins_base[threadIdx.x] = zMins[threadIdx.x + (num_iters * THREADS_PER_BLOCK)];
double yMin = min3(t.p1.y, t.p2.y, t.p3.y);
double yMax = max3(t.p1.y, t.p2.y, t.p3.y);
yNotInside[threadIdx.x] = (y_pos < yMin) || (y_pos > yMax);
}
__syncthreads();
if (remaining) {
if (y_idx < Y_DIM) {
for (size_t tri_idx = 0; tri_idx < remaining; tri_idx++) {
layer_t curr_intersection = yNotInside[tri_idx] ? NONE : pixelRayIntersection(tri_base[tri_idx], x, y);
if (curr_intersection >= 0 && curr_intersection < NUM_LAYERS) out_local[curr_intersection]++;
}
}
}
// Process the remaining layers
// while (curr_layer < NUM_LAYERS) {
// toNextLayer(intersections_large_local, trunk_length_local, curr_layer, isInside, out_local);
// }
// thrust::copy(thrust::device, &out_local[0], &out_local[0] + NUM_LAYERS, out_ptr);
// Write the per-layer counts back out (z-major stride of X_DIM*Y_DIM).
for (size_t i = 0; i < NUM_LAYERS; i++) {
out_ptr[i*X_DIM*Y_DIM] = out_local[i];
}
}
// Converts per-layer intersection counts (as written by overlapSlicer) into
// inside/outside flags: a pixel-layer is filled if the ray is currently
// inside the solid or intersects a triangle there; parity of the count
// toggles the inside state (even-odd fill rule).
__global__
void layerExtraction(bool* out, layer_t start) {
size_t idx = (size_t)blockDim.x * (size_t)blockIdx.x + (size_t)threadIdx.x;
bool isInside = false;
char* out_ptr = (char*) (out + idx);
char intersection_count;
for (size_t i = start; i < NUM_LAYERS; i++) {
intersection_count = out_ptr[i*X_DIM*Y_DIM];
// Odd count -> ray crosses the surface boundary at this layer.
bool flip = (bool)(intersection_count & 1);
bool intersect = (intersection_count > 0);
out_ptr[i*X_DIM*Y_DIM] = (char) (isInside || intersect);
isInside = isInside ^ flip;
}
}
// One thread per triangle: record the smallest z among its three vertices.
__global__
void getZMin(triangle* tris, size_t size, double* zMins) {
    size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size) return;
    thrust::minimum<double> smaller;
    const triangle t = tris[tid];
    zMins[tid] = smaller(t.p1.z, smaller(t.p2.z, t.p3.z));
}
// Sorts the triangle array in place by ascending minimum-z: computes each
// triangle's zMin on the device, then sorts triangles keyed on zMins.
// `zMins` must be a device buffer of at least `size` doubles.
__host__
void GPUsort(triangle* tris_dev, size_t size, double* zMins) {
// Ceiling division so the last partial block is still launched.
int num_blocks = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
getZMin<<<num_blocks, THREADS_PER_BLOCK>>>(tris_dev, size, zMins);
cudaDeviceSynchronize();
thrust::sort_by_key(thrust::device, zMins, zMins + size, tris_dev);
}
/**
* pixelRayIntersection: helper function, computes the intersection of given triangle and pixel ray
* Inputs:
* t -- input triangle
* x, y -- coordinates of the input pixel ray
* Returns:
* The layer on which they intersect, or -1 if no intersection
*/
__device__ __forceinline__
layer_t pixelRayIntersection(triangle t, int x, int y) {
/*
Let A, B, C be the 3 vertices of the given triangle
Let S(x,y,z) be the intersection, where x,y are given
We want to find some a, b such that AS = a*AB + b*AC
If a >= 0, b >= 0, and a+b <= 1, S is a valid intersection.
*/
// Pixel-ray position relative to vertex p1, in world units.
double x_d = x * RESOLUTION - t.p1.x;
double y_d = y * RESOLUTION - t.p1.y;
// Edge vectors AB = (x1, y1, z1) and AC = (x2, y2, z2).
double x1 = t.p2.x - t.p1.x;
double y1 = t.p2.y - t.p1.y;
double z1 = t.p2.z - t.p1.z;
double x2 = t.p3.x - t.p1.x;
double y2 = t.p3.y - t.p1.y;
double z2 = t.p3.z - t.p1.z;
// Barycentric-style coordinates from Cramer's rule on the xy projection.
// NOTE(review): degenerate (zero-area in xy) triangles divide by zero,
// producing inf/NaN; `inside` then evaluates false, so no layer results.
double a = (x_d * y2 - x2 * y_d) / (x1 * y2 - x2 * y1);
double b = (x_d * y1 - x1 * y_d) / (x2 * y1 - x1 * y2);
bool inside = (a >= 0) && (b >= 0) && (a+b <= 1);
double intersection = (a * z1 + b * z2) + t.p1.z;
// // divide by layer width
layer_t layer = inside ? (intersection / RESOLUTION) : (layer_t)(-1);
return layer;
}
/**
* get the array of intersections of a given pixel ray
*/
// Collect every valid intersection layer of the pixel ray (x, y) against the
// triangle list into `layers`; returns how many were written.
__device__
int getIntersectionTrunk(int x, int y, triangle* triangles, size_t num_triangles, layer_t* layers) {
    int count = 0;
    for (int t = 0; t < num_triangles; t++) {
        layer_t hit = pixelRayIntersection(triangles[t], x, y);
        if (hit == -1)
            continue;
        layers[count++] = hit;
    }
    return count;
}
// Replace the intersection count at curr_layer with a filled/empty flag and
// toggle the running inside-state on odd counts (even-odd fill rule).
__device__ __forceinline__
void extractLayer(layer_t curr_layer, bool & isInside, char* out_local) {
char total_intersections = out_local[curr_layer];
// Odd intersection count means the ray crosses the surface here.
bool flip = (bool) (total_intersections & 1);
bool intersect = (total_intersections > 0);
out_local[curr_layer] = (char) (isInside || intersect);
isInside = isInside ^ flip;
}
// Smallest of three doubles.
__device__ __forceinline__
double min3(double a, double b, double c) {
    thrust::minimum<double> smaller;
    const double bc = smaller(b, c);
    return smaller(a, bc);
}
// Largest of three doubles.
__device__ __forceinline__
double max3(double a, double b, double c) {
    thrust::maximum<double> larger;
    const double bc = larger(b, c);
    return larger(a, bc);
}
|
1,392
|
/**
* Group Info:
* rwsnyde2 Richard W Snyder
* kshanka2 Koushik Shankar
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#include <cooperative_groups.h>
#define __DEBUG
#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)
extern int tpdt(double *t, double dt, double end_time);
#define TSCALE 1.0
#define VSQR 0.1
namespace cg = cooperative_groups;
/**************************************
* void __cudaSafeCall(cudaError err, const char *file, const int line)
* void __cudaCheckError(const char *file, const int line)
*
* These routines were taken from the GPU Computing SDK
* (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
**************************************/
// Abort the program with a file:line diagnostic when a CUDA API call
// returned an error; compiled to a no-op unless __DEBUG is defined.
// The #pragma warning push/pop only has effect under MSVC; other
// compilers silently ignore it.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
do
{
if ( cudaSuccess != err )
{
// Print the human-readable error string and bail out hard.
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
} while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
return;
}
// Check the CUDA runtime's sticky error state (e.g. after a kernel
// launch, whose errors are only reported asynchronously) and abort with
// a file:line diagnostic on failure. No-op unless __DEBUG is defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
do
{
// cudaGetLastError() both reads and clears the sticky error.
cudaError_t err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment if not needed.
/*err = cudaThreadSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}*/
} while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
return;
}
/*
 * One 13-point-stencil time step of the lake wave equation, one thread
 * per grid cell (a 1D launch mapped onto an n-by-n grid). The two
 * outermost rings of cells are clamped to zero as the boundary
 * condition. After each step the old/current/new buffers rotate and the
 * function recurses until t reaches end_time.
 *
 * NOTE(review): block.sync() only synchronizes threads within one
 * block. Cells on block borders read neighbour values written by other
 * blocks, so successive recursion levels race across blocks unless the
 * kernel is launched with a single block or a grid-wide barrier
 * (cooperative launch) -- confirm the intended launch configuration.
 * NOTE(review): device-side recursion grows one stack frame per time
 * step and can exhaust the device call stack for long simulations.
 */
__device__ void evolve13(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t, double end_time)
{
// Recover 2D cell coordinates from the flat thread index.
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int i = idx / n;
int j = idx % n;
cg::thread_block block = cg::this_thread_block();
if( i <= 1 || i >= n-2 || j <= 1 || j >= n - 2 )
{
// Boundary: the stencil below reads up to two cells away, so the two
// outer rings are pinned to zero.
un[idx] = 0.;
}
else
{
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *
((uc[idx-1] + uc[idx+1] + uc[idx + n] + uc[idx-n] + // west east north south
0.25 * (uc[idx - n - 1] + uc[idx - n + 1] + uc[idx + n - 1] + uc[idx + n + 1]) + // northwest northeast southwest southeast
0.125 * (uc[idx - 2] + uc[idx + 2] + uc[idx - (2*n)] + uc[idx + (2*n)]) - // westwest easteast northnorth southsouth
5.5 * uc[idx])/(h * h) + (-expf(-TSCALE * t) * pebbles[idx]));
}
// Block-scope barrier before the buffers are rotated for the next step.
block.sync();
if (t + dt < end_time)
{
t = t + dt;
// Rotate buffers: new becomes current, current becomes old.
evolve13(uo, un, uc, pebbles, n, h, dt, t,end_time);
}
}
// Kernel entry point: delegates the entire time integration (including
// the per-step recursion) to the device routine evolve13.
__global__ void evolve(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t, double end_time)
{
    evolve13(un, uc, uo, pebbles, n, h, dt, t, end_time);
}
/*
 * Run the lake simulation on the GPU: upload the initial state, launch
 * the (self-recursing) evolve kernel once, time it with CUDA events,
 * and copy the final surface back into `u`.
 * All CUDA runtime calls are now routed through CUDA_CALL / CUDA_CHK_ERR
 * (the original left the mallocs, memcpys and the launch unchecked).
 */
void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads)
{
    cudaEvent_t kstart, kstop;
    float ktime;
    double t, dt;
    /* One thread per lake cell: (n/nthreads)^2 blocks of nthreads^2 threads.
       NOTE(review): assumes n is a multiple of nthreads; otherwise the tail
       of the grid is never computed -- confirm callers guarantee this. */
    int num_blocks = (n/nthreads)*(n/nthreads);
    int threads_per_block = nthreads * nthreads;
    double *u_d, *u0_d, *u1_d, *pebbles_d;
    /* Set up device timers */
    CUDA_CALL(cudaSetDevice(0));
    CUDA_CALL(cudaEventCreate(&kstart));
    CUDA_CALL(cudaEventCreate(&kstop));
    /* Allocate device buffers and upload the initial state. */
    CUDA_CALL(cudaMalloc((void **) &u_d, sizeof(double) * n * n));
    CUDA_CALL(cudaMalloc((void **) &u0_d, sizeof(double) * n * n));
    CUDA_CALL(cudaMalloc((void **) &u1_d, sizeof(double) * n * n));
    CUDA_CALL(cudaMalloc((void **) &pebbles_d, sizeof(double) * n * n));
    CUDA_CALL(cudaMemcpy(u0_d, u0, sizeof(double) * n * n, cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(u1_d, u1, sizeof(double) * n * n, cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(pebbles_d, pebbles, sizeof(double) * n * n, cudaMemcpyHostToDevice));
    /* Start GPU computation timer */
    CUDA_CALL(cudaEventRecord(kstart, 0));
    /* Single launch: the kernel recurses over time steps on the device. */
    t = 0.;
    dt = h / 2.;
    evolve<<< num_blocks, threads_per_block >>>(u_d, u1_d, u0_d, pebbles_d, n, h, dt, t, end_time);
    CUDA_CHK_ERR();  /* surface launch-configuration errors immediately */
    /* Stop GPU computation timer (event sync also waits for the kernel). */
    CUDA_CALL(cudaEventRecord(kstop, 0));
    CUDA_CALL(cudaEventSynchronize(kstop));
    CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop));
    printf("GPU computation: %f msec\n", ktime);
    /* Copy the result back and release device memory.
       NOTE(review): the kernel rotates its three buffers each recursion
       level; confirm u_d is the buffer holding the final state for the
       chosen end_time. */
    CUDA_CALL(cudaMemcpy(u, u_d, sizeof(double) * n * n, cudaMemcpyDeviceToHost));
    CUDA_CALL(cudaFree(u_d));
    CUDA_CALL(cudaFree(u0_d));
    CUDA_CALL(cudaFree(u1_d));
    CUDA_CALL(cudaFree(pebbles_d));
    /* timer cleanup */
    CUDA_CALL(cudaEventDestroy(kstart));
    CUDA_CALL(cudaEventDestroy(kstop));
}
|
1,393
|
/*
A basic CUDA demonstration. Two random vectors are added together
in serial and using a GPU accelerator.
To compile, use:
make
NOTE: CUDA must be installed/loaded before running make. Also, the
Makefile will probably have to be customized for your system.
To run, use for example:
./cuda_vecadd 100000000
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
// Serial reference implementation: element-wise sum h_C = h_A + h_B.
void vecAdd(double *h_A, double *h_B, double *h_C, int n)
{
    int i = 0;
    while (i < n) {
        h_C[i] = h_A[i] + h_B[i];
        ++i;
    }
}
// The CUDA vector addition kernel: one thread per element, with a
// bounds guard for the final partial block.
__global__
void cudaVecAddKernel( double* A, double* B, double* D, int n)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;
    D[i] = A[i] + B[i];
}
// Check a CUDA runtime result and abort on failure. Replaces the six
// hand-rolled copies of the same if-print-exit sequence in the original;
// the printed message format is unchanged.
#define VECADD_CHECK(call)                                                   \
    do {                                                                     \
        cudaError_t e_ = (call);                                             \
        if (e_ != cudaSuccess) {                                             \
            printf("\n%s in %s at line %d\n", cudaGetErrorString(e_),        \
                   __FILE__, __LINE__);                                      \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)
// Add two vectors in CUDA: allocate device buffers, upload A and B,
// launch the kernel, and copy the sum back into h_D.
void cuda_vecAdd(double *h_A, double *h_B, double *h_D, int n)
{
    int size = n * sizeof(double);
    double *d_A, *d_B, *d_D;
    // Allocate memory on the GPU
    VECADD_CHECK(cudaMalloc((void **) &d_A, size));
    VECADD_CHECK(cudaMalloc((void **) &d_B, size));
    VECADD_CHECK(cudaMalloc((void **) &d_D, size));
    // Copy the data to the GPU
    VECADD_CHECK(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
    VECADD_CHECK(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice));
    // Launch one thread per element, 1024 threads per block.
    int threads = 1024;                       // Threads per block
    int blocks = (n + threads - 1) / threads; // Blocks per grid (ceil-div)
    printf("\n CUDA kernel was launched with %d blocks of %d threads...", blocks, threads);
    cudaVecAddKernel<<<blocks, threads>>>(d_A, d_B, d_D, n);
    // Launch errors only surface via the sticky error state.
    VECADD_CHECK(cudaGetLastError());
    // Copy the results back to the host (this memcpy also synchronizes
    // with the kernel).
    VECADD_CHECK(cudaMemcpy(h_D, d_D, size, cudaMemcpyDeviceToHost));
    // Deallocate memory on the GPU
    VECADD_CHECK(cudaFree(d_A));
    VECADD_CHECK(cudaFree(d_B));
    VECADD_CHECK(cudaFree(d_D));
}
#undef VECADD_CHECK
// Timer: wall-clock time in seconds (microsecond resolution) since the
// Unix epoch, via gettimeofday.
double getTime()
{
    struct timeval tv;
    struct timezone tz;
    (void) gettimeofday(&tv, &tz);
    return (double)tv.tv_sec + (double)tv.tv_usec * 1.0e-6;
}
// Main program: add two random vectors of length argv[1] both serially
// and on the GPU, report timings, and verify the results agree.
int main (int argc, char** argv)
{
    unsigned int n, i;
    double t0, t1, t2, t3;
    // Validate the command line -- the original dereferenced argv[1]
    // unconditionally and crashed when no length was supplied.
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <vector length>\n", argv[0]);
        return EXIT_FAILURE;
    }
    n = atoi(argv[1]);
    printf("\nn = %d", n);
    // Host buffers: A, B inputs; C serial result; D GPU result.
    double * A = (double*) malloc( n * sizeof(double) );
    double * B = (double*) malloc( n * sizeof(double) );
    double * C = (double*) malloc( n * sizeof(double) );
    double * D = (double*) malloc( n * sizeof(double) );
    if (A == NULL || B == NULL || C == NULL || D == NULL) {
        fprintf(stderr, "\nHost allocation of %u doubles failed\n", n);
        return EXIT_FAILURE;
    }
    for (i = 0; i < n; ++i) {
        A[i] = ((double) rand()/RAND_MAX);
        B[i] = ((double) rand()/RAND_MAX);
    }
    // Add the two vectors in serial
    t0 = getTime();
    vecAdd(A, B, C, n);
    t1 = getTime();
    printf("\n Serial addition: %f sec.", t1 - t0);
    // Add the two vectors using CUDA
    t2 = getTime();
    cuda_vecAdd(A, B, D, n);
    t3 = getTime();
    printf("\n CUDA addition: %f sec.\n\n", t3 - t2);
    // Verify the results match. Exact equality is valid because both
    // sides perform the same single IEEE-754 double addition per element.
    for (i = 0; i < n; ++i) {
        if( C[i] != D[i])
        {
            printf("\nERROR! Outputs do not match at index %d", i);
            break;
        }
    }
    // Free host memory
    free(A);
    free(B);
    free(C);
    free(D);
    return 0;
}
|
1,394
|
//
// 3D DCA Driver
//This file invokes all of the necessary function calls to prepare
//and simulate a compound pendulum system through the use of the
//recursive DCA algorithm. The majority of this algorithm is run
//on the gpu. Output is created in a format that is
//readable in python for answer checking and graphing purposes.
//Included Files
#include <malloc.h>
#include <iostream>
#include <math.h>
#include <math.h>
#include <fstream>
#include <limits>
#define z11 0
#define z12 6
#define z13 12
#define z21 18
#define z22 24
#define z23 30
#define nz11 0
#define nz12 6
#define nz13 12
#define nz21 13
#define nz22 19
#define nz23 25
//Function Prototypes
// Function found in RK45.cu
void Initialize(double Mass[],double Inertia[], double Zetas[], int n, bool Active_DOF[], double Body_Placement[],double Body_Vectors[],int dof_index[]);
void init_2d_pend(bool Active_DOF[],double Body_Vectors[], double Mass[], double Inertia[],int n, double Body_Placement[], double speeds[]);
void printZeta(double[],int,int,int,int);
void update(double Mass[], double Inertia[], double Init_Zetas[], double Zetas[], int n, double Body_Vectors[], double Speeds[], double DCMs[]);
void kinematics(bool Active_DOF[], double Coords[], double DCMs[], double speeds[], double omegas[], int dof_index[],int n);
void RK_45(double step, int n,bool Active_DOF[],double Coords[], double Speeds[], double initZetas[],double Mass[], double Inertia[],int DOF, double Y[], double Ydot[], int dof_index[], int cut_off, double Body_Vectors[]);
//Main function
// Drive the recursive-DCA pendulum simulation: build initial conditions,
// count active DOFs, precompute zeta matrices, then integrate with RK45.
int main()
{
// Recursion cut-off level handed to the RK45/DCA solver.
int cut_off=0;
// Number of bodies in the pendulum chain.
int n=2;
// Total number of active degrees of freedom (counted below).
int DOF=0;
// NOTE(review): `output` is opened but never written to or closed here.
std::ofstream output;
output.open("output.mtx");
//Variable Declarations
double *Body_Placement = new double[n*6]; //Initial conditions
double *Body_Speeds = new double[n*6];
double *Mass = new double[n];
double *Inertia = new double[3*n];
bool *Active_DOF = new bool[n*6]; //Solution to each timestep
double *Body_Vectors = new double[n*6];
int *dof_index = new int[n];
// Populate the arrays with the 2D pendulum initial configuration.
// NOTE(review): confirm init_2d_pend clears every Active_DOF entry;
// otherwise the DOF count below reads uninitialized memory.
init_2d_pend(Active_DOF,Body_Vectors,Mass,Inertia,n,Body_Placement,Body_Speeds);
// Count the active degrees of freedom.
for(int i =0; i<6*n; i++)
{
if(Active_DOF[i])
{
DOF++;
}
}
double *Coords = new double[DOF];
double *Speeds = new double[DOF];
double *initZetas = new double[n*36*6];
// Gather generalized coordinates/speeds for the active DOFs only.
for(int i =0, j=0; i<n*6; i++)
{
if(Active_DOF[i])
{
Coords[j]=Body_Placement[i];
Speeds[j]=Body_Speeds[i];
j++;
}
}
// Precompute the initial zeta matrices for every body (RK45.cu).
Initialize(Mass, Inertia, initZetas, n, Active_DOF, Body_Placement,Body_Vectors,dof_index);
//printZeta(initZetas,n,0,36,z11);
//double *DCMs = new double[n*3*3];
//double *omegas= new double[n*3];
//kinematics(Active_DOF, Coords, DCMs, Speeds, omegas, dof_index, n);
//update(Mass, Inertia, initZetas, Zetas, n, Body_Vectors, omegas, DCMs);
//Time Setup
double tstep= 0.001; //Length of a timestep [s]
double tfinal = 0.005; //Final time [s]
int tlen = (int) floor(tfinal/tstep)+1; //Number of timesteps
// Y/Ydot receive the integrated state from each RK_45 call.
double *Y = new double[DOF];
double *Ydot = new double[DOF];
for(int t=1; t<tlen; t++) //Loop through every timestep
{
RK_45(tstep,n,Active_DOF,Coords,Speeds,initZetas,Mass, Inertia,DOF,Y,Ydot,dof_index,cut_off,Body_Vectors); //Find the solution at that timestep
for(int i = 0; i<DOF;i++) //Loop through the solution
{
Coords[i]=Y[i];
Speeds[i]=Ydot[i];
}
}
// Release all heap-allocated state.
delete[] Body_Placement;
delete[] Body_Speeds;
delete[] Mass;
delete[] Inertia;
delete[] Active_DOF;
delete[] Body_Vectors;
delete[] dof_index;
delete[] Y;
delete[] Ydot;
delete[] Coords;
delete[] Speeds;
delete[] initZetas;
return EXIT_SUCCESS; //Program completed successfully
}
// Build the initial state for a planar n-link compound pendulum: each
// body is a unit-mass, unit-length rod free to rotate about its z-axis.
// All output arrays are size 6*n except Mass (n) and Inertia (3*n).
void init_2d_pend(bool Active_DOF[],double Body_Vectors[], double Mass[], double Inertia[],int n, double Body_Placement[], double speeds[])
{
    double m =1;
    //double r = 0.05;
    double l =1;
    /*
    double I1 = .5*m*r*r;
    double I2 = (m/12)*((3*r*r)+(l*l));
    double I3 = I2;
    */
    // Uniform-rod inertia (m*l*l/12 with m = l = 1) about each axis.
    double I1 = (m/12);
    double I2 = (m/12);
    double I3 = (m/12);
    // Zero all per-body state. BUG FIX: Active_DOF is now cleared here
    // too -- the original never initialized it, so the caller's DOF
    // count read indeterminate memory for every non-rotational slot.
    for(int i =0; i<6*n; i++)
    {
        Active_DOF[i]=false;
        Body_Placement[i]=0;
        speeds[i]=0;
        Body_Vectors[i]=0;
    }
    for(int i =0; i<n; i++)
    {
        Active_DOF[i*6+2]=1;       // only rotation about z is free
        Mass[i]=1.0;
        Body_Vectors[i*6]=-l/2;    // joint-to-centre offset
        Body_Vectors[i*6+3]=l/2;   // centre-to-joint offset
        Inertia[i*3]=I1;
        Inertia[i*3+1]=I2;
        Inertia[i*3+2]=I3;
    }
    //Body_Placement[2]=0.3;
}
// Dump one 6x6 zeta sub-matrix of the given body to stdout,
// tab-separated, preceded by three blank lines.
void printZeta(double Zetas[],int n, int body,int len,int zeta)
{
    std::cout << "\n\n\n";
    for (int row = 0; row < 6; row++)
    {
        for (int col = 0; col < 6; col++)
        {
            std::cout << Zetas[row*n*len + body*len + zeta + col] << '\t';
        }
        std::cout << std::endl;
    }
}
|
1,395
|
#define W 500
#define H 500
#define TX 32 // number of threads per block along x-axis
#define TY 32 // number of threads per block along y-axis
__device__
unsigned char clip(int n) {
    // Saturate to the displayable byte range [0, 255].
    if (n < 0) return 0;
    if (n > 255) return 255;
    return (unsigned char) n;
}
// Shade each pixel by its distance from `pos`: red/green fade with
// distance, blue is zero, alpha is fully opaque. 2D launch, one thread
// per pixel.
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int i = r * w + c;
    if ((c >= w) || (r >= h)) return;
    // compute the distance (in pixel spacings)
    const int d = sqrtf((c - pos.x) * (c - pos.x) + (r - pos.y) * (r - pos.y));
    // convert distance to intensity value on interval [0, 255]
    const unsigned char intensity = clip(255 - d);
    d_out[i].x = intensity; // red channel
    d_out[i].y = intensity; // green channel
    d_out[i].z = 0;         // blue channel
    // BUG FIX: the original wrote .z twice and never set the alpha
    // channel, leaving .w uninitialized (transparent/garbage pixels).
    d_out[i].w = 255;       // fully opaque (alpha channel)
}
// Render a W x H distance-shaded image on the GPU and copy it back.
int main() {
    uchar4 *out = (uchar4*)calloc(W*H, sizeof(uchar4));
    uchar4 *d_out; // pointer for device array
    cudaMalloc(&d_out, W * H * sizeof(uchar4));
    const int2 pos = {0, 0}; // set reference position
    const dim3 blockSize(TX, TY);
    const int bx = (W + TX - 1)/TX;
    // BUG FIX: the row-block count must come from the image height, not
    // the width (W == H here, which masked the bug).
    const int by = (H + TY - 1)/TY;
    const dim3 gridSize = dim3(bx, by);
    distanceKernel<<<gridSize, blockSize>>>(d_out, W, H, pos);
    // Blocking copy of the results to the host (synchronizes with the kernel).
    cudaMemcpy(out, d_out, W*H*sizeof(uchar4), cudaMemcpyDeviceToHost);
    cudaFree(d_out);
    free(out);
    return 0;
}
|
1,396
|
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#define RED 0
#define GREEN 1
const char * LABELS[] = {
"Red",
"Green"
};
/**
* Computes the Euclidean distance between the input vector 'Y' and all
* vectors in the array 'X'.
* An array of size 'n' containing each distance will be returned on completion.
* \param n Number of entries in the data set 'X'.
* \param d Dimension of each vector.
* \param Y Input vector to compare against 'X' (array of size d).
* \param X Input list of vectors (array of size n * d).
* \param DIST Output array of distances (array of size n).
*/
// One thread per data point: write the Euclidean distance between the
// query vector Y (length d) and point idx of X (n x d, row-major) into
// DIST[idx].
__global__ void calcDistE(int n, int d, float * Y, float * X, float * DIST) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) { return; } // Index is not in bounds of the data set.
    const float * x = &X[idx * d];
    float sum = 0.0f;
    // Sum of squared per-dimension differences. A plain multiply
    // replaces the original pow(diff, 2.0f) call, which is far slower
    // than a single FMA-able product.
    for (int i=0; i<d; ++i) {
        const float diff = x[i] - Y[i];
        sum += diff * diff;
    }
    // sqrtf keeps the computation in single precision (the original
    // sqrt promoted to double).
    DIST[idx] = sqrtf(sum);
}
/**
* Sorts the input label array based on the input distances.
* This is done with a parallel implementation of a selection sort,
* which is fairly slow for a parallel sort, but should be faster
* than most serial sorts on large data sets.
* \param n The number of points in our data set.
* \param DIST Array of distances associated with our labels (array of size n).
* \param L List of class labels associated with distances (array of size n).
* \param OUT Output copy of list L, sorted by corresponding distances (array of size n).
*/
// Rank-based parallel selection sort: each thread counts how many
// distances precede its own and writes its label at that rank in OUT.
__global__ void findNearest(int n, float * DIST, int * L, int * OUT) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) { return; } // Index is not in bounds of the data set.
    const float myDist = DIST[idx];
    const int myLabel = L[idx];
    int rank = 0;
    for (int i=0; i<n; ++i) {
        // A strictly smaller distance precedes us; equal distances are
        // tie-broken by index so every rank is assigned exactly once.
        const bool precedes = (DIST[i] < myDist) ||
                              (DIST[i] == myDist && i < idx);
        if (precedes) {
            ++rank;
        }
    }
    OUT[rank] = myLabel;
}
/**
* Performs KNN on the given data.
* \param n Number of entries in the data set 'X'.
* \param d Dimension of each vector.
* \param Y Input vector to compare against 'X' (array of size d).
* \param X Input list of vectors (array of size n * d).
* \param L Labels corresponding to each item of X (array of size n).
* \param C The number of classes that are valid in L.
* \param Cstr String representations for each class label (array of C strings).
* \param k How many neighbors to consider when making our assignment?
*/
// Abort with a diagnostic if a CUDA runtime call failed. The original
// knn left every runtime call unchecked, so failures surfaced only as
// garbage classification results.
static void knnCheck(cudaError_t err) {
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
void knn(const int n, const int d, const float * Y,
         const float * X, const int * L,
         const int C, const char ** Cstr,
         const int k) {
    // Allocate the GPU arrays.
    float * cu_X, * cu_Y, * cu_DIST;
    int * cu_L, * cu_OUT;
    knnCheck(cudaMalloc(&cu_X, n * d * sizeof(float)));
    knnCheck(cudaMalloc(&cu_Y, d * sizeof(float)));
    knnCheck(cudaMalloc(&cu_DIST, n * sizeof(float)));
    knnCheck(cudaMalloc(&cu_L, n * sizeof(int)));
    knnCheck(cudaMalloc(&cu_OUT, n * sizeof(int)));
    // Copy the provided data to the GPU.
    knnCheck(cudaMemcpy(cu_X, X, n * d * sizeof(float), cudaMemcpyHostToDevice));
    knnCheck(cudaMemcpy(cu_Y, Y, d * sizeof(float), cudaMemcpyHostToDevice));
    knnCheck(cudaMemcpy(cu_L, L, n * sizeof(int), cudaMemcpyHostToDevice));
    // Compute distances and sort labels by distance using blocks of 256
    // threads; launch errors only surface via the sticky error state.
    calcDistE<<<(n + 255)/256, 256>>>(n, d, cu_Y, cu_X, cu_DIST);
    knnCheck(cudaGetLastError());
    findNearest<<<(n + 255)/256, 256>>>(n, cu_DIST, cu_L, cu_OUT);
    knnCheck(cudaGetLastError());
    // Copy the sorted labels back (the memcpy synchronizes with the kernels).
    int * OUT = (int *)malloc(n * sizeof(int));
    if (OUT == NULL) { fprintf(stderr, "host allocation failed\n"); exit(EXIT_FAILURE); }
    knnCheck(cudaMemcpy(OUT, cu_OUT, n * sizeof(int), cudaMemcpyDeviceToHost));
    // Vote: count each class among the k nearest neighbours.
    int * count = (int *)malloc(C * sizeof(int));
    if (count == NULL) { fprintf(stderr, "host allocation failed\n"); exit(EXIT_FAILURE); }
    for (int i=0; i<C; ++i) { count[i] = 0; }
    for (int i=0; i<k; ++i) { ++count[OUT[i]]; }
    // Print the results.
    printf("knn: k=%d\n", k);
    for (int i=0; i<C; ++i) {
        printf("class %s:\t%d/%d\n", Cstr[i], count[i], k);
    }
    printf("\n");
    // Cleanup GPU...
    knnCheck(cudaFree(cu_X));
    knnCheck(cudaFree(cu_Y));
    knnCheck(cudaFree(cu_DIST));
    knnCheck(cudaFree(cu_L));
    knnCheck(cudaFree(cu_OUT));
    // Cleanup CPU...
    free(OUT);
    free(count);
}
// Classify the origin against six labelled 3-D points, first with a
// single nearest neighbour, then with three.
int main(int argc, const char ** argv) {
    const int d = 3; // dimensionality of each point
    const int n = 6; // number of points in the data set
    const float Y[] = {0, 0, 0}; // query point at the origin
    const float X[] = {
        2, 3, 0, // 1
        2, 0, 1, // 2
        0, 1, 3, // 3
        0, 1, 2, // 4
        -1, 0, 1, // 5
        1, -1, 1 // 6
    };
    const int L[] = {
        RED, // 1
        RED, // 2
        RED, // 3
        GREEN, // 4
        GREEN, // 5
        RED // 6
    };
    knn(n, d, Y, X, L, 2, LABELS, 1);
    knn(n, d, Y, X, L, 2, LABELS, 3);
    return 0;
}
|
1,397
|
#include "includes.h"
// includes, project
#define PI 3.1415926536f
int MaxThreadsPerBlock;
int MaxThreadsX;
int MaxThreadsY;
// Conversion d'un vecteur réel en vecteur complexe
// Conversion d'un vecteur complexe en vecteur réel
// Multiplie point par point un vecteur complex par un vecteur réel
// Applique y = at*x +bt à chaque point d'un vecteur réel
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de bytes
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha autorise l'affichage au dessus d'un certain seuil
// Processus auto-régressif X2 = a*X1 + b*X0 + N0;
// Expansion
// On applique une interpolation bi-linéaire à la source
// Transformation Cartesian To Polar
// On applique une interpolation bi-linéaire à la source
// Build a log-Gabor-style frequency-domain filter over a width x height
// grid (2D launch, one thread per sample). `filter` receives the full
// response; `Vr` receives the response with the radial weighting divided
// out, with its DC sample forced to zero.
__global__ void KGaborFilter1(double* filter, double* Vr, int width, int height, double ss , double r0, double sr0, double stheta0 )
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    if (i >= width || j >= height) return;
    // Mirror the upper halves so the filter is centred on DC.
    double x = i;
    double y = j;
    if (i > width/2)  x = width - i;
    if (j > height/2) y = height - j;
    // Small offset keeps r strictly positive so log(r/r0) and the
    // divisions below are defined at the origin.
    // BUG FIX: was "#define Eps 1E-6;" -- a macro with a trailing
    // semicolon that only compiled because it was expanded at the end of
    // a statement; a scoped constant removes the trap.
    const double Eps = 1E-6;
    double r = sqrt(x*x + y*y) + Eps;
    double theta;
    if (x > 0) theta = atan2(y, x); else theta = PI/2;
    //double ff = exp( cos(2*theta)/stheta0 )
    //            *
    //            exp(-0.5*pow(log(r/r0),2)/log(1+pow(sr0,2))) * pow(r0/r,3)*ss*r;
    // Correction Jonathan 7-12-16
    double ff = exp( cos(2*theta)/(4*pow(stheta0,2) ) )
                *
                exp(-0.5*pow(log(r/r0),2)/log(1+pow(sr0,2))) * pow(r0/r,3)*4*pow(ss*r,3);
    filter[i+j*width] = ff;
    if (i>0 || j>0) Vr[i+j*width] = ff/(4*pow(ss*r,3)); else Vr[i+j*width] = 0;
}
|
1,398
|
#include "includes.h"
// Mark each key as unique (1) or a duplicate of its predecessor (0).
// Only detects runs of equal adjacent keys, i.e. full uniqueness for
// sorted input -- presumably the caller sorts `keys` first (confirm).
// One thread per key over a 2D grid of 1D blocks.
__global__ void check_if_unique(const unsigned *keys, unsigned *is_unique, size_t kSize) {
    unsigned id = threadIdx.x +
                  blockIdx.x * blockDim.x +
                  blockIdx.y * blockDim.x * gridDim.x;
    // BUG FIX: bounds-check before any store. The original let thread 0
    // write is_unique[0] unconditionally, an out-of-bounds store when
    // kSize == 0.
    if (id >= kSize) return;
    if (id == 0) {
        is_unique[0] = 1; // first element has no predecessor
    } else {
        is_unique[id] = (keys[id] != keys[id - 1] ? 1 : 0);
    }
}
|
1,399
|
#include "includes.h"
// Prune low-utility network nodes: a node whose error/utility ratio
// exceeds utilityConstant is deactivated, its per-node statistics are
// reset, and all of its connections (both directions) are severed.
// One thread per node over a 2D grid of 1D blocks.
__global__ void RemoveNodeByUtilityKernel( int *connectionMatrix, int *connectionAge, int *activityFlag, float *utility, float utilityConstant, float *localError, int *neuronAge, float *winningFraction, int *winningCount, float maxError, int maxCells )
{
    // Flatten the (block row, block, thread) coordinates into one id.
    int cell = blockDim.x*blockIdx.y*gridDim.x
             + blockDim.x*blockIdx.x
             + threadIdx.x;
    if (cell >= maxCells) return;
    // Only active nodes with strictly positive utility are candidates.
    if (activityFlag[cell] != 1 || utility[cell] <= 0.00f) return;
    if (maxError / utility[cell] > utilityConstant)
    {
        // Deactivate the node and clear its statistics.
        activityFlag[cell] = 0;
        localError[cell] = 0.00f;
        neuronAge[cell] = 0;
        winningFraction[cell] = 0.00f;
        winningCount[cell] = 0;
        utility[cell] = 0.00f;
        // Remove every incoming and outgoing connection.
        for (int n = 0; n < maxCells; n++)
        {
            connectionMatrix[cell * maxCells + n] = 0;
            connectionAge[cell * maxCells + n] = 0;
            connectionMatrix[n * maxCells + cell] = 0;
            connectionAge[n * maxCells + cell] = 0;
        }
    }
}
|
1,400
|
/*
* demo_log_speed.cu
*
* Created on: 07-Apr-2009
* Author: alee
*/
//#include <cutil.h>
#include <stdio.h>
// GPU stress kernel: for each of `size` samples, perform M successive
// logf/expf round trips and store the result. Uses a grid-stride loop,
// so any launch configuration covers the full range.
__global__ void logtest(int size, float* d_array, int M) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    for (int i = tid; i < size; i += stride) {
        float x = ((float) i + 1) / size;
        for (int j = 0; j < M; j++) {
            x = logf(x);
            x = expf(x);
        }
        d_array[i] = x;
    }
}
// CPU reference for the logtest kernel: identical per-element math,
// computed serially.
void logtestref(int size, float* array, int M) {
    for (int i = 0; i < size; i++) {
        float x = ((float) i + 1) / size;
        for (int j = 0; j < M; j++) {
            x = expf(logf(x));
        }
        array[i] = x;
    }
}
// Run the GPU stress kernel (nb blocks of nt threads) and the serial
// CPU reference over N elements of M log/exp round trips each. The
// cutil-based timing harness from the original has been retired; this
// now only exercises both paths.
void testLogSpeed(int N, int M, int nb, int nt) {
    float* array = (float*) malloc(N * sizeof(float));
    float* d_array;
    cudaMalloc((void**) &d_array, N * sizeof(float));
    logtest<<<nb,nt>>>(N, d_array, M);
    // cudaThreadSynchronize() is deprecated (and removed in CUDA 12);
    // cudaDeviceSynchronize() is the supported replacement.
    cudaDeviceSynchronize();
    logtestref(N, array, M);
    free(array);
    cudaFree(d_array);
}
// Benchmark driver: fixed launch configuration and problem size.
int main(int argc, char **argv) {
    const int num_blocks = 256;
    const int threads_per_block = 128;
    const int N = 262144; // samples
    const int M = 1024;   // log/exp round trips per sample
    testLogSpeed(N, M, num_blocks, threads_per_block);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.