serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
19,801 | #include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
int const N = 32;
int const THREADS = 12;
int const BSZ = 3;
float const EPS2 = 0.0001;
// Return wall-clock time in seconds, first draining all outstanding GPU work
// so that asynchronous kernel launches are included in timed intervals.
double get_time()
{ struct timeval tim;
// BUGFIX: cudaThreadSynchronize() has been deprecated for years;
// cudaDeviceSynchronize() is the drop-in modern replacement.
cudaDeviceSynchronize();
gettimeofday(&tim, NULL);
return (double) tim.tv_sec+(tim.tv_usec/1000000.0);
}
// Tiled direct-summation kernel: target I accumulates
//   p[I] = -w_I/sqrt(EPS2) + sum_j w_j / sqrt(|r_I - r_j|^2 + EPS2)
// staging THREADS sources at a time through shared memory.
// Expects a 1D launch with blockDim.x == THREADS.
__global__ void direct_sh(float4 *sourceGlob, float *targetGlob)
{
__shared__ float4 p_sh[THREADS];
int tx = threadIdx.x;
int bx = blockIdx.x;
int I = bx * THREADS + tx;
float dx,dy,dz,r;
// BUGFIX: guard the global read — the grid may be padded past N targets.
float4 p1 = (I < N) ? sourceGlob[I] : sourceGlob[0];
// Pre-subtract the softened self-interaction (the j==I term added below).
float p = - p1.w/ sqrtf(EPS2);
float4 p2;
// Full tiles: the first BSZ-1 tiles each hold THREADS sources.
for (unsigned int m = 0; m < BSZ-1; m++ ){
p_sh[tx] = sourceGlob[m * THREADS + tx];
__syncthreads();
#pragma unroll 10
for (unsigned int i = 0; i < THREADS; i++){
p2 = p_sh[i];
dx = p1.x - p2.x;
dy = p1.y - p2.y;
dz = p1.z - p2.z;
r = sqrtf(dx * dx + dy * dy + dz * dz + EPS2);
// BUGFIX: accumulate the SOURCE weight p2.w; the CPU reference uses
// sourceHost[j].w, but this kernel used the target's own weight p1.w.
p += p2.w / r;
}
__syncthreads();
}
// Partial last tile: only N - (BSZ-1)*THREADS sources remain.
int m = BSZ-1;
int lastDim = N - (BSZ-1)*THREADS;  // robust also when N%THREADS == 0
if (tx < lastDim)                   // BUGFIX: avoid out-of-bounds source load
p_sh[tx] = sourceGlob[m*THREADS + tx];
__syncthreads();
for ( unsigned int i = 0; i < lastDim; i++){
p2 = p_sh[i];
dx = p1.x - p2.x;
dy = p1.y - p2.y;
dz = p1.z - p2.z;
r = sqrtf(dx * dx + dy * dy + dz * dz + EPS2);
p += p2.w / r;  // BUGFIX: source weight, matching the CPU reference loop
}
__syncthreads();
if (I < N)  // BUGFIX: padded threads must not write past the target array
targetGlob[I] = p;
}
// Reference O(N^2) kernel: one thread per target, reading every source
// straight from global memory. Only threadIdx.x is used, so it expects a
// single block of N threads.
__global__ void direct(float4 *sourceGlob, float *targetGlob)
{
int tx = threadIdx.x;
float dx,dy,dz,r;
float4 p1,p2;
p1 = sourceGlob[tx];
// Pre-subtract the softened self-interaction added by the j==tx iteration.
float p = - p1.w / sqrtf(EPS2);
for ( int j = 0; j < N; j++ ){
p2 = sourceGlob[j];
dx = p1.x - p2.x;
dy = p1.y - p2.y;
dz = p1.z - p2.z;
r = sqrtf(dx * dx + dy * dy + dz * dz + EPS2);
// BUGFIX: accumulate the source weight p2.w (the CPU loop uses
// sourceHost[j].w); the old code used the target's own weight p1.w.
p += p2.w / r;
}
targetGlob[tx] = p;
}
// Driver: random sources, direct summation on GPU (tiled kernel) and CPU,
// then print both results side by side for comparison.
int main() {
float4 *sourceHost,*sourceDevc;
float *targetHost,*targetDevc;
// Allocate memory on host and device
sourceHost = (float4*) malloc( N*sizeof(float4) );
targetHost = (float *) malloc( N*sizeof(float ) );
cudaMalloc( (void**) &sourceDevc, N*sizeof(float4) );
cudaMalloc( (void**) &targetDevc, N*sizeof(float ) );
// Initialize: positions uniform in [0,1), equal weights summing to 1.
for( int i=0; i<N; i++ ) {
sourceHost[i].x = rand()/(1.+RAND_MAX);
sourceHost[i].y = rand()/(1.+RAND_MAX);
sourceHost[i].z = rand()/(1.+RAND_MAX);
sourceHost[i].w = 1.0/N;
}
// Direct summation on device
cudaMemcpy(sourceDevc,sourceHost,N*sizeof(float4),cudaMemcpyHostToDevice);
double start = get_time();
// BUGFIX: the old grid expression int(N-0.5/THREADS)+1 parsed as
// N - (0.5/THREADS), launching N+1 blocks. Use ceiling division.
direct_sh<<< (N + THREADS - 1) / THREADS, THREADS >>>(sourceDevc,targetDevc);
double stop = get_time();
cudaMemcpy(targetHost,targetDevc,N*sizeof(float ),cudaMemcpyDeviceToHost);
double time = stop - start;
std::cout<<"Kernel execution time: "<<time<<std::endl;
// Direct summation on host (reference)
float dx,dy,dz,r;
for( int i=0; i<N; i++ ) {
float p = - sourceHost[i].w / sqrtf(EPS2);
for( int j=0; j<N; j++ ) {
dx = sourceHost[i].x - sourceHost[j].x;
dy = sourceHost[i].y - sourceHost[j].y;
dz = sourceHost[i].z - sourceHost[j].z;
r = sqrtf(dx * dx + dy * dy + dz * dz + EPS2);
p += sourceHost[j].w / r;
}
printf("%d %f %f\n",i,p,targetHost[i]);
}
// BUGFIX: release host and device buffers (previously leaked).
free(sourceHost);
free(targetHost);
cudaFree(sourceDevc);
cudaFree(targetDevc);
return 0;
}
|
19,802 | #include "includes.h"
// Map uniform samples already stored in result[0..n) onto [lower, upper]
// in place: u -> u*upper + (1-u)*lower. Grid-stride loop, so any launch
// configuration covers the whole array.
__global__ void uniform_double(int n,double lower,double upper,double *result) {
    const int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        const double sample = result[idx];
        result[idx] = sample * upper + (1 - sample) * lower;
    }
} |
19,803 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <ostream>
// Print a summary of the capabilities of CUDA device DeviceID.
void ShowGPUProps(int DeviceID)
{
cudaDeviceProp DeviceProp;
cudaGetDeviceProperties(&DeviceProp, DeviceID);
printf("--- Card: %s --- Device Number: %d --- Integrated: %s ---\n", DeviceProp.name, DeviceID, DeviceProp.integrated ? "True" : "False");
printf("--- Major Revision is %d and Minor Revision is %d ---\n", DeviceProp.major, DeviceProp.minor);
printf("---------------------------------------------------------\n");
// BUGFIX: totalGlobalMem/totalConstMem are size_t; %d truncates and is
// undefined behavior — use %zu.
printf("Total Global Memory: %zu\n", DeviceProp.totalGlobalMem);
printf("Total Constant Memory: %zu\n", DeviceProp.totalConstMem);
printf("Maximum Threads Per Block: %d\n", DeviceProp.maxThreadsPerBlock);
// BUGFIX: maxGridSize is an int[3]; passing the array to %d printed a pointer.
printf("Maximum Grid Size: %d x %d x %d\n", DeviceProp.maxGridSize[0], DeviceProp.maxGridSize[1], DeviceProp.maxGridSize[2]);
printf("Multi Processors: %d\n", DeviceProp.multiProcessorCount);
printf("Maximum Texture Dimensions;\n");
// BUGFIX: maxTexture2D is int[2] and maxTexture3D is int[3]; expand them.
printf("1D: %d\n2D: %d x %d\n3D: %d x %d x %d\n", DeviceProp.maxTexture1D,
DeviceProp.maxTexture2D[0], DeviceProp.maxTexture2D[1],
DeviceProp.maxTexture3D[0], DeviceProp.maxTexture3D[1], DeviceProp.maxTexture3D[2]);
printf("Warp Size: %d\n", DeviceProp.warpSize);
printf("Registers Per Block: %d\n", DeviceProp.regsPerBlock);
printf("---------------------------------------------------------\n");
}
// Interactive driver: enumerate CUDA devices and print properties for one
// device chosen by the user, or for all of them.
int main()
{
int DeviceCount, UserInput;
cudaGetDeviceCount(&DeviceCount);
if (DeviceCount > 1)
{
// BUGFIX: the format string contained %d but no argument was supplied.
printf("This machine has %d video cards.\n1 - Choose one card by number\n2 - Choose all cards\nYour choice: ", DeviceCount);
scanf("%d", &UserInput);
if (UserInput == 1)
{
printf("Choose the a number between 0 and %d: ", (DeviceCount - 1));
scanf("%d", &UserInput);
if (UserInput >= 0 && UserInput <= DeviceCount - 1)
ShowGPUProps(UserInput);
else
{
// BUGFIX: the retry via recursive main() previously ran even after a
// successful query (it sat outside the else); restart only on bad input.
printf("A wrong value was typed. Please try again.\n");
return main();
}
}
else
{
// BUGFIX: "i < DeviceCount - 1" skipped the last device.
for (int i = 0; i < DeviceCount; i++)
ShowGPUProps(i);
}
}
else
{
ShowGPUProps(0);
}
return 0;
}
|
19,804 | #include "includes.h"
// One thread per weight of the LSTM cell-input gate: computes the BPTT
// gradient cellInputWeightGradient[w] = delta[to(w)] * activation(from(w)).
// Weights for each cell are laid out [inputs | previous outputs | bias],
// i.e. weightsPerCell = inputCount + previousOutputCount + 1.
// The "isFrom*" flags implement a branchless select: exactly one flag is 1,
// and multiplying the index by the flag clamps the other arrays' subscripts
// to 0 so no out-of-bounds read occurs on the unselected branches.
// NOTE(review): cellsPerBlock is accepted but unused here — confirm whether
// that parameter is still needed by callers.
__global__ void LSTMCellInputGradientKernelBPTT( float *input, float *previousOutput, float *cellInputDeltas, float *cellInputWeightGradient, int inputCount, int previousOutputCount, int cellsPerBlock )
{
// Flatten the 2D grid (y-major) plus thread index into a global weight id.
int weightId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
+ blockDim.x * blockIdx.x //blocks preceeding current block
+ threadIdx.x;
int weightsPerCell = inputCount + previousOutputCount + 1;
if (weightId < weightsPerCell * previousOutputCount)
{
int fromId = weightId % weightsPerCell;
int toId = weightId / weightsPerCell;
int isFromInputUnit = fromId >= 0 && fromId < inputCount;
int isFromPreviousOutputUnit = (fromId >= inputCount) && (fromId < inputCount + previousOutputCount);
int isFromBiasUnit = fromId == (inputCount + previousOutputCount);
float inputFromWeight = isFromInputUnit * input[isFromInputUnit * fromId]
+ isFromPreviousOutputUnit * previousOutput[isFromPreviousOutputUnit * (fromId - inputCount)]
+ isFromBiasUnit * 1;
cellInputWeightGradient[weightId] = cellInputDeltas[toId] * inputFromWeight;
}
} |
19,805 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <float.h>
// Report a CUDA API failure on stdout with a human-readable message;
// successful status codes pass through silently.
void handleCudaError(cudaError_t cudaERR)
{
    if (cudaERR == cudaSuccess)
        return;
    printf("CUDA ERROR : %s\n", cudaGetErrorString(cudaERR));
}
// Parameter bundle threaded through Brent_fmin's void* callback argument:
// the target probability p plus the generalized-Pareto shape (xi),
// scale (psi) and location/threshold (u).
struct paretoParam
{
float p;
float xi;
float psi;
float u;
paretoParam() {}  // default ctor leaves the fields uninitialized
__device__ paretoParam(float p, float xi, float psi, float u) : p(p), xi(xi), psi(psi), u(u) { }
};
// Brent's derivative-free 1D minimizer: returns an x in [ax, bx] that
// (approximately) minimizes f(x, info), combining golden-section steps with
// successive parabolic interpolation. Ported from the classic R/Netlib
// fmin routine, with the infinite loop capped at 300 iterations.
// NOTE(review): eps/tol are seeded with DBL_EPSILON although all arithmetic
// here is float — FLT_EPSILON would match the working precision; confirm
// whether the tighter tolerance is intentional before changing it.
__device__ static float Brent_fmin(float ax, float bx, float (*f)(float, void *), void *info)
{
const float c = (3. - sqrt(5.)) * .5;  // golden ratio complement ~0.381966
float a, b, d, e, p, q, r, u, v, w, x;
float t2, fu, fv, fw, fx, xm, eps, tol, tol1, tol3;
eps = DBL_EPSILON;
tol = DBL_EPSILON;
tol1 = eps + 1.;
eps = sqrt(eps);
a = ax;
b = bx;
// Initial probe at the golden-section point of [a, b].
v = a + c * (b - a);
w = v;
x = v;
d = 0.;/* -Wall */
e = 0.;
fx = (*f)(x, info);
fv = fx;
fw = fx;
tol3 = tol / 3.;
for(int i(0); i < 300; i++) {
// for(;;) {
xm = (a + b) * .5;
tol1 = eps * fabs(x) + tol3;
t2 = tol1 * 2.;
// Converged: the bracket is within tolerance of the current best x.
if (fabs(x - xm) <= t2 - (b - a) * .5) break;
p = 0.;
q = 0.;
r = 0.;
if (fabs(e) > tol1) {
// Fit a parabola through (v,fv), (w,fw), (x,fx).
r = (x - w) * (fx - fv);
q = (x - v) * (fx - fw);
p = (x - v) * q - (x - w) * r;
q = (q - r) * 2.;
if (q > 0.) p = -p; else q = -q;
r = e;
e = d;
}
if (fabs(p) >= fabs(q * .5 * r) ||
p <= q * (a - x) || p >= q * (b - x)) {
// Parabolic step rejected: fall back to a golden-section step.
if (x < xm) e = b - x; else e = a - x;
d = c * e;
}
else {
// Accept the parabolic interpolation step.
d = p / q;
u = x + d;
if (u - a < t2 || b - u < t2) {
d = tol1;
if (x >= xm) d = -d;
}
}
// Never evaluate closer than tol1 to the current best point.
if (fabs(d) >= tol1)
u = x + d;
else if (d > 0.)
u = x + tol1;
else
u = x - tol1;
fu = (*f)(u, info);
// Update the bracket [a, b] and the three best points x, w, v.
if (fu <= fx) {
if (u < x) b = x; else a = x;
v = w; w = x; x = u;
fv = fw; fw = fx; fx = fu;
} else {
if (u < x) a = u; else b = u;
if (fu <= fw || w == x) {
v = w; fv = fw;
w = u; fw = fu;
} else if (fu <= fv || v == x || v == w) {
v = u; fv = fu;
}
}
}
return x;
}
// Generalized-Pareto distribution function evaluated at x:
//   F(x) = 1 - (1 + xi/psi * (x - u))^(-1/xi).
// NOTE(review): despite the "d" prefix this returns the CDF, not the
// density — confirm the naming intent with callers (fitness/qPareto treat
// it as a CDF).
__device__ float dPareto(float x, float xi, float psi, float u)
{
return 1 - std::pow((1 + xi / psi * (x - u)), (-1 / xi));
}
// Objective minimized by qPareto: the absolute gap between the Pareto CDF
// at x and the target probability carried inside *info (a paretoParam).
__device__ float fitness(float x, void *info)
{
    paretoParam *params = (paretoParam*)info;
    float gap = dPareto(x, params->xi, params->psi, params->u) - params->p;
    return std::abs(gap);
}
// Pareto quantile (inverse CDF): the x in [0, 1e5] whose CDF is closest to
// p, located by Brent minimization of |F(x) - p|.
__device__ float qPareto(float p, float xi, float psi, float u)
{
    paretoParam info(p, xi, psi, u);
    return Brent_fmin(0, 1e5, fitness, &info);
}
// Draw one compound loss: a Poisson(lambda) number of claims, each claim
// being the Pareto quantile of a fresh uniform draw, summed together.
__device__ float convolve(curandState *state, float lambda, float xi, float psi, float u)
{
int freq(curand_poisson(state, lambda));
float loss(0);
for (int i = 0; i < freq; i++)
{
loss += qPareto(curand_uniform(state), xi, psi, u);
}
return loss;
}
// Fill a[0..n) with simulated compound losses. Grid-stride loop; each
// element re-seeds its curand state with 1234 + i, so the output is
// reproducible and independent of the launch configuration.
__global__ void loss(float *a, float lambda, float xi, float psi, float u, int n)
{
curandState state;
int i(threadIdx.x + blockDim.x * blockIdx.x);
while (i < n)
{
curand_init(1234 + i, 0, 0, &state);
a[i] = convolve(&state, lambda, xi, psi, u);
i += blockDim.x * gridDim.x;
}
}
// Simulate n compound Poisson(lambda)/Pareto(xi, psi, u) losses on the GPU
// and return a heap-allocated array of n floats; the caller owns (and must
// delete[]) the result.
float * poipar(float lambda, float xi, float psi, float u, int n)
{
float *a, *d_a;
size_t size(sizeof(float) * n);
// BUGFIX: new[] takes an element count, not a byte count — the old
// "new float[size]" over-allocated by sizeof(float).
a = new float[n];
handleCudaError(cudaMalloc(&d_a, size));
cudaDeviceProp props;
cudaGetDeviceProperties(&props, 0);
int threads = props.maxThreadsPerBlock;
int blocks(std::min(0xFFFF, (n + threads - 1) / threads));
// BUGFIX: the launch below had grid and block swapped (it passed the
// thread count as the grid dimension and vice versa). The grid-stride
// kernel still covered every element, but the configuration was wrong.
dim3 grid(blocks, 1, 1);
dim3 block(threads, 1, 1);
loss<<<grid, block>>>(d_a, lambda, xi, psi, u, n);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("CUDA ERROR while executing the kernel: %s\n",cudaGetErrorString(err));
}
handleCudaError(cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost));
handleCudaError(cudaFree(d_a));  // BUGFIX: the device buffer was leaked
return a;
}
|
19,806 | #include "threshold_computer.cuh"
// Replace the working buffer: convert the samples to float via a host
// vector, copy them to the device, and invalidate the cached sort/MAD state.
template<class T>
void ThresholdComputer<T>::UpdateBuffer(std::vector<T> buf) {
thrust::host_vector<float> host_data_;
host_data_.assign(buf.begin(), buf.end());
device_data_ = host_data_; // copy data to device
is_sorted = false;
is_cached = false;
}
// Return multiplier * MAD / 0.6745, where MAD is the median absolute
// deviation of the buffered data; 0.6745 rescales the MAD to a
// standard-deviation-consistent estimate. The MAD is computed once and
// memoized in `mad`; subsequent calls hit the cache.
// NOTE: computing the MAD overwrites device_data_ with absolute deviations.
template<class T>
float ThresholdComputer<T>::ComputeThreshold(float multiplier) {
if (mad < std::numeric_limits<float>::infinity()) {
// Cached MAD available — skip the device work entirely.
return multiplier * mad / 0.6745;
}
ComputeMedian(); // sorts device_data_
// absolute deviation from the ComputeMedian
thrust::transform(device_data_.begin(), device_data_.end(),
device_data_.begin(), abs_dev(med));
// median absolute deviation from the median (i.e., the MAD)
mad = utilities::median(device_data_, false);
is_cached = true;
return multiplier * mad / 0.6745;
}
/**
 * @brief Compute, cache, and return the median of the buffered data.
 *
 * The result is memoized in `med` (infinity means "not yet computed").
 * utilities::median sorts device_data_ when is_sorted is false, so the
 * flag is set afterwards to skip redundant sorts.
 * @return The median of the data.
 */
template<class T>
float ThresholdComputer<T>::ComputeMedian() {
if (med < std::numeric_limits<float>::infinity()) {
return med;
}
med = utilities::median(device_data_, is_sorted);
is_sorted = true;
return med;
}
// Copy the device-resident buffer back to a host vector (for inspection).
// NOTE(review): this reflects whatever device_data_ currently holds — after
// ComputeThreshold it contains absolute deviations, not the raw samples.
template<class T>
thrust::host_vector<float> ThresholdComputer<T>::data() {
thrust::host_vector<float> d(device_data_);
return d;
}
// Explicit instantiation for 16-bit (short) sample buffers.
template
class ThresholdComputer<short>; |
19,807 | #include "includes.h"
// Re-layout copy: element `index` of the destination (leading dimension
// ld_dest) is fetched from the same logical (row, col) position in the
// source, whose rows are spaced ld_src apart. One element per thread.
__global__ void reshape(size_t num_values, float_t* src, float_t* dest, size_t ld_src, size_t ld_dest)
{
    const size_t dst_idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (dst_idx >= num_values)
        return;
    const size_t row = dst_idx / ld_dest;
    const size_t col = dst_idx % ld_dest;
    dest[dst_idx] = src[row * ld_src + col];
} |
19,808 | #include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <time.h>
using namespace std;
// Fill A with the identity mapping A[tid] = tid, one element per thread.
// NOTE(review): there is no bounds guard, so the launch must cover the
// array exactly (gridDim.x * blockDim.x == length) — confirm at call sites.
__global__ void initArray( int *A) {
int tid;
tid = blockIdx.x * blockDim.x + threadIdx.x;
A[tid] = tid;
}
// Reverse A in place: each of the num_t total threads swaps a contiguous
// chunk of the first half with the mirrored elements of the second half.
// NOTE(review): assumes size/2 is divisible by num_t and the launch
// provides exactly num_t threads — confirm at the call site.
__global__ void swapArray( int *A, int size, int num_t) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=size/2/num_t*tid; i<size/2/num_t*(tid+1); i++){
int temp = A[i];
A[i] = A[size-1-i];
A[size-1-i] = temp;
}
}
// Shared-memory tiled matrix multiply C = A*B for N x N row-major matrices.
// Each block computes a 64x64 tile of C with a 32x32 thread block: every
// thread owns a 2x2 sub-tile (cthread/rthread), and the K dimension is
// consumed in slabs of T=8 staged into smem_a/smem_b.
// NOTE(review): active debug printfs remain on block (0,0) (lines guarded by
// threadIdx/blockIdx == 0) — they will flood stdout on real runs; confirm
// whether they should be removed.
// NOTE(review): the smem_c zero-init is done by a single thread and is only
// ordered against readers by the first __syncthreads() inside the kk loop.
__global__ void mmmul(int N, float *A, float *B, float *C){
//printf("working");
//printf("A[1]: %d", A[1]);
int T = 8;
__shared__ float smem_c[64][64];
// Single thread zeroes the accumulator tile before any partial products.
if(threadIdx.x==0 && threadIdx.y==0){
for (int i=0; i<64; i++){
for(int j=0; j<64; j++){
smem_c[i][j]=0.0;
}
}
}
__shared__ float smem_a[64][8];
__shared__ float smem_b[8][64];
//printf("%f", smem_c[0][0]);
int c = blockIdx.x * 64;
int r = blockIdx.y * 64;
//printf("%d \n",blockDim.x);
//printf("%d \n", blockDim.y);
if(threadIdx.y==31){
//printf("x: %d \n",threadIdx.x);
}
// Top-left corner of this thread's 2x2 output sub-tile.
int cthread = c + threadIdx.x * 2;
int rthread = r + threadIdx.y * 2;
if(threadIdx.x==1 && threadIdx.y==1){
//printf("==========r, c; %d, %d\n", rthread, cthread);
}
int count= 0;
// Walk the K dimension in slabs of T columns/rows.
for (int kk=0; kk<N; kk+=T) {
// Cooperative load of the 64xT slab of A and Tx64 slab of B.
for (int i=threadIdx.x+blockDim.x*threadIdx.y; i<64*8; i+=blockDim.x*blockDim.y) {
int k = kk +i / 64;
int rt = r +i % 64;
int ct = c +i % 64;
smem_a[i%64][i/64] = A[rt*N+k];
smem_b[i/64][i%64] = B[k*N+ct];
//smem_c[i%64][i%64] = 0;
//printf("%d %d \n", smem_a[i%64][i/64], smem_b[i/64][i%64]);
//printf("%d %d \n", A[rt*N+k], B[k*N+ct]);
}
// if(threadIdx.x==0 && blockIdx.x==0 && threadIdx.y==0 && blockIdx.y==0){
// printf("K: %d",k);
// for (int i=0; i<64; i++){
// for(int j=0; j<8; j++){
// if(smem_a[i][j]!=1 || smem_b[j][i] !=1){
// printf("incorect, a: %d, b: %d\n",smem_a[i][j], smem_b[i][j]);
// }
// }
// }
// }
__syncthreads();
//printf("finish loading %f \n", smem_c[0][0]);
// Accumulate this slab's partial products into the thread's 2x2 sub-tile.
for (int x=0; x<2; x++){
//printf("x loop \n");
for (int y=0; y<2; y++){
//printf("y loop");
int cc = cthread % 64 + x;
int rc = rthread % 64 + y;
if(threadIdx.x==0 && blockIdx.x==0 && threadIdx.y==0 && blockIdx.y==0)
printf("rc,cc = %d,%d\n",rc,cc );
for (int k=kk; k<kk+T; k++){
int k8 = k % T;
//int rc64 = rc % 64;
//int cc64 = cc % 64;
float temp = smem_a[rc][k8] * smem_b[k8][cc];
atomicAdd(&smem_c[rc][cc],temp);
//smem_c[rc][cc]+=temp;
if(threadIdx.x==0 && blockIdx.x==0 && threadIdx.y==0 && blockIdx.y==0){
//printf("temp=%f\n",temp );
//printf("rc,cc=%f\n", smem_c[rc][cc]);
}
count+=1;
//printf("count %d \n", count);
//printf("load into c \n");
}
}
}
__syncthreads();
if(threadIdx.x==0 && blockIdx.x==0 && threadIdx.y==0 && blockIdx.y==0){
printf("K: %d\n",kk);
for (int i=0; i<2; i++){
for(int j=0; j<2; j++){
printf("%f ", smem_c[i][j]);
}
printf("\n");
}
}
__syncthreads();
}
__syncthreads();
//printf("working \n");
// Write the finished 64x64 tile back to global memory, 2x2 per thread.
for (int p=0; p<2; p++){
for (int q=0; q<2; q++) {
int ccc = cthread + p;
int rcc = rthread + q;
/*if(smem_c[rcc%64][ccc%64]!=1024.0){
//if(rcc%64 != 31 && rcc%64 !=30 && rcc%64 !=29)
printf("row, col: %d %d \n", rcc, ccc);
}*/
C[rcc*N+ccc] = smem_c[rcc%64][ccc%64];
}
}
__syncthreads();
//printf("C %d\n", C[0]);
}
//double func(int N){
// // int N = 1024;
// srand(time(0));
// int size=N*N;
// //int size=16;
// float* A = new float[size];
// float* B = new float[size];
// float* C = new float[size];
// float* Cseq = new float[size];
//
// int num;
// for(int i=0; i<size; i++){
// num = rand()%100;
// A[i] = num;
// B[i] = num;
// }
//
// /* for(int i=0; i<N; i++){
// for(int j=0; j<N; j++){
// for(int k=0; k<N; k++){
// Cseq[i*N+j] += A[i*N+k]*B[k*N+j];
// }
// }
// }*/
//
//
//
// float *d_a;
// float *d_b;
// float *d_c;
//
// // define thread hierarchy
// int num_blocks_x = N/64; int num_blocks_y = N/64; int num_th_per_blk = 32;
// // int num_t = num_blocks_x*num_blocks_y*num_th_per_blk;
//
// // allocate host and device memory
// size_t memSize;
// // memSize = num_blocks * num_th_per_blk * sizeof(int);
// memSize = size*sizeof(int);
// // h_a = (int*) malloc(memSize);
// clock_t start = clock();
//
// cudaMalloc( (void**) &d_a, memSize);
// cudaMemcpy( d_a, A, memSize, cudaMemcpyHostToDevice);
// cudaMalloc( (void**) &d_b, memSize);
// cudaMemcpy( d_b, B, memSize, cudaMemcpyHostToDevice);
// cudaMalloc( (void**) &d_c, memSize);
// cudaMemcpy( d_c, C, memSize, cudaMemcpyHostToDevice);
//
// // launch kernel
// dim3 dimGrid2D(num_blocks_x, num_blocks_y);
// dim3 dimBlock(num_th_per_blk, num_th_per_blk);
// mmmul<<< dimGrid2D, dimBlock >>>(N, d_a, d_b, d_c);
// cudaMemcpy( C, d_c, memSize, cudaMemcpyDeviceToHost);
// clock_t end = clock();
//
// double time_elapsed_in_seconds = (double)(end - start)/CLOCKS_PER_SEC;
//
//
//
// /*for(int i=0; i<N*N; i+=1){
// if(C[i]!=Cseq[i]){
// printf("%f %f %d\n",Cseq[i],C[i], i);
// }
// }*/
// return time_elapsed_in_seconds;
//}
// Driver: build two N x N random matrices, compute the product on the CPU
// for reference and on the GPU with mmmul, compare, and report timings.
int main(int argc, char *argv[]){
int N = 1024*4;
srand(time(0));
int size=N*N;
float* A = new float[size];
float* B = new float[size];
float* C = new float[size];
// BUGFIX: Cseq is accumulated with += below, so it must be zero-initialized;
// plain new float[size] leaves it indeterminate.
float* Cseq = new float[size]();
float num;
for(int i=0; i<size; i++){
num = rand()%10;
A[i] = num;
B[i] = num;
}
// CPU reference product.
for(int i=0; i<N; i++){
for(int j=0; j<N; j++){
for(int k=0; k<N; k++){
Cseq[i*N+j] += A[i*N+k]*B[k*N+j];
}
}
}
float *d_a;
float *d_b;
float *d_c;
// define thread hierarchy: one 32x32 block per 64x64 output tile
int num_blocks_x = N/64; int num_blocks_y = N/64; int num_th_per_blk = 32;
// BUGFIX: the buffers hold floats, so size with sizeof(float); the old
// sizeof(int) only worked because the two types happen to be the same size.
size_t memSize = size*sizeof(float);
clock_t start = clock();
cudaMalloc( (void**) &d_a, memSize);
cudaMemcpy( d_a, A, memSize, cudaMemcpyHostToDevice);
cudaMalloc( (void**) &d_b, memSize);
cudaMemcpy( d_b, B, memSize, cudaMemcpyHostToDevice);
cudaMalloc( (void**) &d_c, memSize);
cudaMemcpy( d_c, C, memSize, cudaMemcpyHostToDevice);
// launch kernel
dim3 dimGrid2D(num_blocks_x, num_blocks_y);
dim3 dimBlock(num_th_per_blk, num_th_per_blk);
mmmul<<< dimGrid2D, dimBlock >>>(N, d_a, d_b, d_c);
cudaMemcpy( C, d_c, memSize, cudaMemcpyDeviceToHost);
clock_t end = clock();
double time_elapsed_in_seconds = (double)(end - start)/CLOCKS_PER_SEC;
// Report every mismatch between GPU and CPU results.
for(int i=0; i<N*N; i+=1){
if(C[i]!=Cseq[i]){
printf("%f %f %d\n",Cseq[i],C[i], i);
}
}
printf("execution time for N = %d was %f\n", N, time_elapsed_in_seconds);
// BUGFIX: release device and host memory (previously leaked).
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
delete[] A;
delete[] B;
delete[] C;
delete[] Cseq;
return 0;
}
|
19,809 | #include "includes.h"
// Backward pass of leaky ReLU (negative slope 0.01):
//   dstDiff = srcDiff          where srcData > 0
//   dstDiff = srcDiff * 0.01   elsewhere
// Strided loop over data_size elements with a tail guard.
__global__ void LreluBackward(float* srcDiff, float* dstDiff, float* srcData, int data_size)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int stride = blockDim.x * gridDim.x;
    for (int base = 0; base < data_size; base += stride)
    {
        const int idx = base + tid;
        if (idx >= data_size)
            continue;
        // Branchless slope select: 1 for positive inputs, 0.01 otherwise.
        const double slope = (srcData[idx] > 0) + (srcData[idx] <= 0) * 0.01;
        dstDiff[idx] = srcDiff[idx] * slope;
    }
} |
19,810 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 8
// Sum-reduce the N-element vector d_a into d_a[0] in three halving steps,
// written for the launch in main: a single block of N/2 threads with N == 8.
// NOTE(review): the first addition has no thread guard and __syncthreads()
// only synchronizes within one block, so this kernel is only correct for
// that single-block configuration — confirm before reusing with other N.
__global__ void reduceVector (float *d_a) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
d_a[i] = d_a[i] + d_a[i+(N/2)];
__syncthreads();
if (threadIdx.x<(blockDim.x/2))
d_a[i] = d_a[i] +d_a[i+(N/4)];
__syncthreads();
if (threadIdx.x<(blockDim.x/4))
d_a[i] = d_a[i] +d_a[i+(N/8)];
}
// Driver: reduce a vector of N ones on the GPU and print the result (N).
int main () {
int memsize = N*sizeof(float);
float *a = (float *) malloc (memsize);
float resultado =0.0f;
float *d_a;
cudaMalloc (&d_a, memsize);
for (int i=0;i<N; ++i){
a[i]=1.0f;
}
cudaMemcpy (d_a, a, memsize, cudaMemcpyHostToDevice);
// Single block of N/2 threads — the layout reduceVector is written for.
dim3 block (1);
dim3 thread (N/2);
reduceVector<<< block, thread>>> (d_a);
cudaMemcpy (&resultado,d_a, sizeof(float), cudaMemcpyDeviceToHost);
printf ("%f",resultado);
printf ("\n");
// BUGFIX: release the host and device buffers (previously leaked).
free (a);
cudaFree (d_a);
return 0;
}
|
19,811 | #include <iostream>
#include <string>
#include <iomanip>
#include <ctime>
#include <curand.h>
#include <curand_kernel.h>
using namespace std;
const string ALPHABET_SET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
// device utility functions
// Device-side strlen: length of a NUL-terminated string.
__device__
int c_strlen(char *string)
{
    int len = 0;
    while (string[len] != '\0')
        ++len;
    return len;
}
//kernel
// __global__
// void random_password(char* pass, char* alphabet_set, unsigned int seed)
// {
// extern __shared__ char alphabet[];
// char test[10];
// int passLen = c_strlen(pass);
// int a_l = c_strlen(alphabet_set);
// for (int i = 0; i<a_l; i++)
// alphabet[i] = alphabet_set[i];
// // int digit[8];
// // digit[0] = blockIdx.x;
// printf("Block ID: %d\n", blockIdx.x);
// printf("Thread ID: %d\n", threadIdx.x);
// // for radom index from alphabet set range
// curandState_t state;
// int rand;
// curand_init(seed,0,0,&state);
// for(int i = 0; i < passLen; i++){
// rand = curand(&state) % a_l;
// test[i] = alphabet[rand];
// }
// test[passLen] ='\0';
// for(int i = 0; i < passLen; i++){
// printf("%c", test[i]);
// }
// }
// Enumerate and print every alphabet combination of length passLen (1..8).
// Character positions are distributed across the launch: position 0 comes
// from the per-thread loop, position 1 from threadIdx.x, and positions 2+
// are successive base-a_l digits of blockIdx.x. The required launch shape
// for each length is noted on the branches below; main() chooses it.
// NOTE(review): only the password LENGTH is used — candidates are printed,
// never compared against `pass`; confirm that matching is intentionally
// left to an external consumer of the output.
__global__
void bruteforce(char* pass, char* alphabet_set)
{
int passLen = c_strlen(pass);
int a_l = c_strlen(alphabet_set); // Alphabet length
if (passLen == 1){ //call kernel by <<<1,1>>
for (int i = 0; i < a_l; i++){
printf("%c\n",alphabet_set[i]);
}
}
else if (passLen == 2){ //call kernel by <<<1,len>>>
for (int i = 0; i < a_l; i++){
printf("%c%c\n",alphabet_set[i],
alphabet_set[threadIdx.x]);
}
}
else if (passLen == 3){ //call kernel by <<<len,len>>>
for (int i = 0; i < a_l; i++){
printf("%c%c%c\n", alphabet_set[i],
alphabet_set[threadIdx.x],
alphabet_set[(int)(blockIdx.x % a_l)]);
}
}
else if (passLen == 4){ //call kernel by <<<len^2,len>>>
for (int i = 0; i < a_l; i++){
printf("%c%c%c%c\n", alphabet_set[i],
alphabet_set[threadIdx.x],
alphabet_set[(int)(blockIdx.x % a_l)],
alphabet_set[(int)((blockIdx.x / a_l ) % a_l)]);
}
}
else if (passLen == 5){ //call kernel by <<<len^3,len>>>
for (int i = 0; i < a_l; i++){
printf("%c%c%c%c%c\n", alphabet_set[i],
alphabet_set[threadIdx.x],
alphabet_set[(int)(blockIdx.x % a_l)],
alphabet_set[(int)((blockIdx.x / a_l ) % a_l)],
alphabet_set[(int)((blockIdx.x / (a_l*a_l)) % a_l)]);
}
}
else if (passLen == 6){ //call kernel by <<<len^4,len>>>
for (int i = 0; i < a_l; i++){
printf("%c%c%c%c%c%c\n", alphabet_set[i],
alphabet_set[threadIdx.x],
alphabet_set[(int)(blockIdx.x % a_l)],
alphabet_set[(int)((blockIdx.x / a_l ) % a_l)],
alphabet_set[(int)((blockIdx.x / (a_l*a_l)) % a_l)],
alphabet_set[(int)((blockIdx.x / (a_l*a_l*a_l)) % a_l)]);
}
}
else if (passLen == 7){ //call kernel by <<<len^5,len>>>
for (int i = 0; i < a_l; i++){
printf("%c%c%c%c%c%c%c\n", alphabet_set[i],
alphabet_set[threadIdx.x],
alphabet_set[(int)(blockIdx.x % a_l)],
alphabet_set[(int)((blockIdx.x / a_l ) % a_l)],
alphabet_set[(int)((blockIdx.x / (a_l*a_l)) % a_l)],
alphabet_set[(int)((blockIdx.x / (a_l*a_l*a_l)) % a_l)],
alphabet_set[(int)((blockIdx.x / (a_l*a_l*a_l*a_l)) % a_l)]);
}
}
else if (passLen == 8){ //call kernel by <<<len^6,len>>>
for (int i = 0; i < a_l; i++){
printf("%c%c%c%c%c%c%c%c\n", alphabet_set[i],
alphabet_set[threadIdx.x],
alphabet_set[(int)(blockIdx.x % a_l)],
alphabet_set[(int)((blockIdx.x / a_l ) % a_l)],
alphabet_set[(int)((blockIdx.x / (a_l*a_l)) % a_l)],
alphabet_set[(int)((blockIdx.x / (a_l*a_l*a_l)) % a_l)],
alphabet_set[(int)((blockIdx.x / (a_l*a_l*a_l*a_l)) % a_l)],
alphabet_set[(int)((blockIdx.x / (a_l*a_l*a_l*a_l*a_l)) % a_l)]);
}
}
}
//driver code
// Driver: read a password, copy it and the alphabet to the device, pick a
// launch shape that enumerates all |alphabet|^len combinations, and time
// the brute-force kernel with CUDA events.
int main()
{
cudaEvent_t start, stop; //timer
float ms;
cudaEventCreate(&start);
cudaEventCreate(&stop);
string password;
cout << "Please enter password: ";
cin >> password;
char* d_pass;
char* d_alphabet_set;
cudaMalloc((void**)&d_pass, sizeof(char)*password.length() + 1);
cudaMalloc((void**)&d_alphabet_set, sizeof(char)*ALPHABET_SET.length() + 1);
cudaMemcpy(d_pass, password.c_str(), sizeof(char)*password.length() + 1, cudaMemcpyHostToDevice);
cudaMemcpy(d_alphabet_set, ALPHABET_SET.c_str(), sizeof(char)*ALPHABET_SET.length() + 1, cudaMemcpyHostToDevice);
int blocksPerGrid;
int threadsPerBlock;
// Launch shapes matching bruteforce(): 1 thread for len 1, len threads for
// len 2, and len^(len-2) blocks of len threads beyond that.
if (password.length() == 1){
blocksPerGrid = 1;
threadsPerBlock = 1;
}
else if (password.length() == 2){
threadsPerBlock = ALPHABET_SET.length();
blocksPerGrid = 1;
}
else if (password.length() == 3){
threadsPerBlock = ALPHABET_SET.length();
blocksPerGrid = ALPHABET_SET.length();
}
else {
threadsPerBlock = ALPHABET_SET.length();
blocksPerGrid = (int)std::pow((float)ALPHABET_SET.length(), password.length() - 2);
}
cout << blocksPerGrid << endl;
cudaEventRecord(start);
bruteforce<<<blocksPerGrid,threadsPerBlock>>>(d_pass, d_alphabet_set);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
cout << "All combination with password length: " << password.length() << " in: " << ms << " milliseconds." << endl;
// BUGFIX: release device buffers and timer events, and report success —
// the old code leaked both allocations and returned -1 unconditionally.
cudaFree(d_pass);
cudaFree(d_alphabet_set);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
} |
19,812 | #include "includes.h"
// Include files
// Parameters
#define N_ATOMS 343
#define MASS_ATOM 1.0f
#define time_step 0.01f
#define L 10.5f
#define T 0.728f
#define NUM_STEPS 10000
const int BLOCK_SIZE = 1024;
//const int L = ;
const int scheme = 1; // 0 for explicit, 1 for implicit
/*************************************************************************************************************/
/************* INITIALIZATION CODE **********/
/*************************************************************************************************************/
// Place N_ATOMS atoms at the cell centres of an N_cube^3 lattice spanning
// the box [-L/2, L/2]. Coordinates are stored SoA-style: x at r[index],
// y at r[index + N_ATOMS], z at r[index + 2*N_ATOMS].
// Expects a 3D launch whose threads cover N_cube in each dimension.
__global__ void init_r(float* r, int N_cube){
int ix = threadIdx.x + blockDim.x* blockIdx.x;
int iy = threadIdx.y + blockDim.y* blockIdx.y;
int iz = threadIdx.z + blockDim.z* blockIdx.z;
int index = ix + iy*N_cube + iz * N_cube * N_cube;
if (ix < N_cube && iy < N_cube && iz<N_cube && index < N_ATOMS){
// L/2 * (1 - (2m+1)/N_cube) maps lattice index m to the m-th cell centre.
r[index] = L / 2.0 * (1.0 - float(2 * ix + 1) / N_cube);
r[index + N_ATOMS] = L / 2.0 * (1.0 - float(2 * iy + 1) / N_cube);
r[index + 2 * N_ATOMS] = L / 2.0 * (1.0 - float(2 * iz + 1) / N_cube);
}
} |
19,813 | #include <stdio.h>
#include <iostream>
#include <iomanip>
#include <cuda_runtime.h>
using namespace std;
// Naive triple-loop CPU matrix product: c = a * b, with a (a_rows x a_cols),
// b (b_rows x b_cols) and c (a_rows x b_cols) all row-major.
// Assumes a_cols == b_rows.
void MatrixMul_host(float *a, int a_rows, int a_cols, float *b, int b_rows, int b_cols, float *c) {
    for (int row = 0; row < a_rows; row++) {
        for (int col = 0; col < b_cols; col++) {
            float acc = 0;
            for (int inner = 0; inner < b_rows; inner++) {
                acc += a[row*a_cols+inner] * b[inner*b_cols+col];
            }
            c[row*b_cols+col] = acc;
        }
    }
}
// Fill a rows x cols row-major matrix with random +/-1 entries: 1.0f when
// rand()/RAND_MAX exceeds 0.5, otherwise -1.0f (so roughly a fair coin).
void MatrixRandBin(float *mat, int rows, int cols) {
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++) {
            float coin = (float)rand()/RAND_MAX;
            mat[r*cols+c] = (coin > 0.5) ? 1.0f : -1.0f;
        }
    }
}
// Total absolute element-wise difference between two rows x cols matrices.
float MatrixCompare(float *a,float *b,int rows,int cols){
    float total = 0;
    for (int r = 0; r < rows; r++){
        for (int c = 0; c < cols; c++){
            int idx = r*cols + c;
            total += abs(a[idx] - b[idx]);
        }
    }
    return total;
}
// One block per output row: block bix computes row bix of c = a * b, with
// its threads striding across the b_cols output columns. Row-major layout;
// assumes a_cols == b_rows and gridDim.x >= a_rows.
__global__ void MatrixMul_device(float *a, int a_rows, int a_cols, float *b, int b_rows, int b_cols, float *c) {
int tix = threadIdx.x;
int bix = blockIdx.x;
int bdx = blockDim.x;
if (bix >= a_rows) return;  // guard surplus blocks
for (int i = tix; i < b_cols; i += bdx) {
float sum = 0;
for (int k = 0; k < a_cols; k++) {
// BUGFIX: row bix of `a` starts at bix*a_cols; the old bix*a_rows only
// worked because the driver multiplies square matrices.
sum += a[bix*a_cols+k]*b[k*b_cols+i];
}
// BUGFIX: the row stride of `c` is b_cols, not a_cols (same square-only issue).
c[bix*b_cols+i] = sum;
}
}
// Driver: multiply two random +/-1 matrices on the GPU and the CPU, time
// both, and report the accumulated absolute error between the two results.
int main()
{
int Matrixsize=1000;
float *a_host;
float *a_device;
float *b_host;
float *b_device;
float *result_host;
float *result_device;
float *result_cpu;
a_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
b_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
result_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
result_cpu = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
srand(0);
MatrixRandBin(a_host,Matrixsize,Matrixsize);
MatrixRandBin(b_host,Matrixsize,Matrixsize);
cudaMalloc((void**)&a_device,sizeof(float) *Matrixsize * Matrixsize);
cudaMalloc((void**)&b_device,sizeof(float) *Matrixsize * Matrixsize);
cudaMalloc((void**)&result_device,sizeof(float) *Matrixsize * Matrixsize);
cudaMemcpy(a_device,a_host,sizeof(float) *Matrixsize * Matrixsize,cudaMemcpyHostToDevice);
cudaMemcpy(b_device,b_host,sizeof(float) *Matrixsize * Matrixsize,cudaMemcpyHostToDevice);
cudaEvent_t start_device, stop_device;
float time_device;
cudaEventCreate(&start_device);
cudaEventCreate(&stop_device);
cudaEventRecord( start_device, 0 );
// One block per output row (Matrixsize rows); threads stride the columns.
dim3 gridsize(1000,1,1);
dim3 blocksize(256,1,1);
MatrixMul_device<<<gridsize,blocksize>>>(a_device,Matrixsize,Matrixsize,b_device,Matrixsize,Matrixsize,result_device);
cudaEventRecord( stop_device, 0 );
cudaEventSynchronize( stop_device );
cudaEventElapsedTime( &time_device, start_device, stop_device );
cudaEventDestroy( start_device );
cudaEventDestroy( stop_device );
cout<<"gputime="<<time_device<<"ms"<<endl;
cudaMemcpy(result_host, result_device,sizeof(float) *Matrixsize * Matrixsize,cudaMemcpyDeviceToHost);
cudaFree(a_device);
cudaFree(b_device);
cudaFree(result_device);
clock_t start_host = clock();
MatrixMul_host(a_host,Matrixsize,Matrixsize,b_host,Matrixsize,Matrixsize,result_cpu);
// BUGFIX: convert clock ticks to milliseconds via CLOCKS_PER_SEC; the old
// "/1000" silently assumed CLOCKS_PER_SEC == 1e6 and is wrong elsewhere.
cout<<"cputime="<<(double)(clock() - start_host) * 1000.0 / CLOCKS_PER_SEC<<"ms"<<endl;
float err=MatrixCompare(result_cpu,result_host,Matrixsize,Matrixsize);
cout<<"err in gpu and cpu = "<<err<<endl;
// BUGFIX: release host buffers (previously leaked).
free(a_host);
free(b_host);
free(result_host);
free(result_cpu);
return 0;
}
|
19,814 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <cuda.h>
#define HOSTLEN 50
// Initial conditions
void initCoord(float *rA, float *vA, float *fA, \
float initDist, int nBod, int nI);
// Forces acting on each body
__global__ void forces(float *rA, float *fA, int nBod);
// Calculate velocities and update coordinates
__global__ void integration(float *rA, float *vA, float *fA, int nBod);
// Driver for a simple Lennard-Jones n-body simulation: place nI^3 bodies
// on a lattice, run maxIter force/integration steps on the GPU, and report
// the total elapsed time (transfers included) via CUDA events.
int main(int argc, const char * argv[]) {
int const nI = 32; // Number of bodies in X, Y and Z directions
int const nBod = nI*nI*nI; // Total Number of bodies
int const maxIter = 20; // Total number of iterations (time steps)
float const initDist = 1.0; // Initial distance between the bodies
float *rA, *rA_d; // Coordinates
float *vA, *vA_d; // Velocities
float *fA, *fA_d; // Forces
float time;
int iter;
cudaDeviceProp devProp;
cudaEvent_t start, stop;
// SoA layout: each array holds 3*nBod floats (X block, Y block, Z block).
rA = (float*)malloc(3*nBod*sizeof(float));
fA = (float*)malloc(3*nBod*sizeof(float));
vA = (float*)malloc(3*nBod*sizeof(float));
cudaMalloc((void**)&rA_d, 3*nBod*sizeof(float));
cudaMalloc((void**)&vA_d, 3*nBod*sizeof(float));
cudaMalloc((void**)&fA_d, 3*nBod*sizeof(float));
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaGetDeviceProperties(&devProp, 0);
printf("Name of CUDA GPU: %s\n",devProp.name);
// Setup initial conditions
initCoord(rA, vA, fA, initDist, nBod, nI);
cudaEventRecord(start, 0);
cudaMemcpy(rA_d, rA, 3*nBod*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(vA_d, vA, 3*nBod*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(fA_d, fA, 3*nBod*sizeof(float), cudaMemcpyHostToDevice);
// Main loop: forces runs one thread per body, integration one thread per
// component (3*nBod); nBod = 32^3 divides 512 exactly, so no tail blocks.
for ( iter = 0; iter < maxIter; iter++ ) {
forces<<<nBod/512, 512>>>(rA_d, fA_d, nBod);
integration<<<3*nBod/512, 512>>>(rA_d, vA_d, fA_d, nBod);
}
cudaMemcpy(rA, rA_d, 3*nBod*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(vA, vA_d, 3*nBod*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(fA, fA_d, 3*nBod*sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// cudaEventElapsedTime reports milliseconds; convert to seconds.
printf("\nTotal time = %10.4f [sec]\n", time*1.E-3);
free(rA);
free(vA);
free(fA);
cudaFree(rA_d);
cudaFree(vA_d);
cudaFree(fA_d);
return 0;
}
// Initial conditions
// Build the initial state: zero all forces and velocities, then place the
// nI x nI x nI bodies on a cubic lattice with spacing initDist. The
// coordinate array is SoA: X in rA[0..nBod), Y in rA[nBod..2*nBod),
// Z in rA[2*nBod..3*nBod).
void initCoord(float *rA, float *vA, float *fA, \
float initDist, int nBod, int nI)
{
    float *rAx = &rA[ 0];       // X components
    float *rAy = &rA[ nBod];    // Y components
    float *rAz = &rA[2*nBod];   // Z components
    memset(fA, 0, 3*nBod*sizeof(float));
    memset(vA, 0, 3*nBod*sizeof(float));
    int idx = 0;
    // Lattice fill order: i (X) outermost, k (Z) innermost.
    for (int i = 0; i < nI; i++) {
        float x = i*initDist;
        for (int j = 0; j < nI; j++) {
            float y = j*initDist;
            for (int k = 0; k < nI; k++) {
                rAx[idx] = x;
                rAy[idx] = y;
                rAz[idx] = k*initDist;
                idx++;
            }
        }
    }
}
// Forces acting on each body
// Lennard-Jones-style pairwise forces: thread i accumulates the force on
// body i from every body j (O(nBod) work per thread, O(nBod^2) total).
// The EPS softening makes the j==i term contribute zero instead of 0/0.
// NOTE(review): there is no i < nBod guard, so the launch must cover the
// bodies exactly (main uses nBod/512 blocks of 512 with nBod = 32^3).
__global__ void forces(float *rA, float *fA, int nBod)
{
int i, j;
float Xi, Yi, Zi;
float Xij, Yij, Zij; // X[j] - X[i] and so on
float Rij2; // Xij^2+Yij^2+Zij^2
float invRij2, invRij6; // 1/rij^2; 1/rij^6
float *rAx = &rA[ 0]; //----
float *rAy = &rA[ nBod]; // Pointers on X, Y, Z components of coordinates
float *rAz = &rA[2*nBod]; //----
float *fAx = &fA[ 0]; //----
float *fAy = &fA[ nBod]; // Pointers on X, Y, Z components of forces
float *fAz = &fA[2*nBod]; //----
float magForce; // Force magnitude
float const EPS = 1.E-10; // Small value to prevent 0/0 if i==j
i = blockDim.x*blockIdx.x + threadIdx.x;
Xi = rAx[i];
Yi = rAy[i];
Zi = rAz[i];
fAx[i] = 0.0;
fAy[i] = 0.0;
fAz[i] = 0.0;
for (j = 0; j < nBod; j++) {
Xij = rAx[j] - Xi;
Yij = rAy[j] - Yi;
Zij = rAz[j] - Zi;
Rij2 = Xij*Xij + Yij*Yij + Zij*Zij;
// Softened reciprocal: ~1/Rij2 for Rij2 >> EPS, exactly 0 at Rij2 == 0.
invRij2 = Rij2/((Rij2 + EPS)*(Rij2 + EPS));
invRij6 = invRij2*invRij2*invRij2;
magForce = 6.f*invRij2*(2.f*invRij6 - 1.f)*invRij6;
fAx[i]+= Xij*magForce;
fAy[i]+= Yij*magForce;
fAz[i]+= Zij*magForce;
}
}
// Integration of coordinates an velocities
// Per-component update, one array element per thread:
//   r += (v + f*dt/(2m)) * dt ;  v += f*dt   (unit mass, dt = 0.01).
// Launched over all 3*nBod components; no bounds guard, so the grid must
// cover the arrays exactly (as main's 3*nBod/512 x 512 launch does).
__global__ void integration(float *rA, float *vA, float *fA, int nBod)
{
    float const dt = 0.01;
    float const mass = 1.0;
    float const mdthalf = dt*0.5/mass;
    int idx = blockDim.x*blockIdx.x + threadIdx.x;
    float f = fA[idx];
    rA[idx] += (vA[idx] + f*mdthalf)*dt;
    vA[idx] += f*dt;
}
|
19,815 | #include <stdio.h>
#define LIMIT 4
// Mandelbrot escape-time renderer: one thread per pixel, writing RGBA bytes.
// buffer: width*height*4 bytes; (cx, cy) is the view centre, scale maps
// pixels to the complex plane.
__global__ void cudabrot_kernel(unsigned char* buffer, unsigned int width, unsigned int height,
                                float cx, float cy, float scale) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= width * height) return;   // tail-block guard
    int x = index % width;
    int y = index / width;
    // map the pixel to the complex plane, centred on (cx, cy)
    float ax = cx + (x - width / 2.0f) * scale;
    float ay = cy + (y - height / 2.0f) * scale;
    float a1 = ax, b1 = ay;
    int lp = 0;
    // iterate z <- z^2 + c until escape (|z|^2 > LIMIT) or 256 iterations
    while (lp <= 255 && (a1*a1 + b1*b1) <= LIMIT) {
        lp++;
        float a2 = a1 * a1 - b1 * b1 + ax;
        float b2 = 2 * a1 * b1 + ay;
        a1 = a2;
        b1 = b2;
    }
    if (lp > 255) {
        lp = 0;   // points that never escaped render as colour 0
    }
    // The original called __syncthreads() here: the kernel uses no shared
    // memory, and the barrier sat inside a divergent branch (undefined
    // behaviour for the partial last block), so it was removed.
    // NOTE: OpenGL likes col-major
    buffer[4 * (y * width + x) + 0] = (2*lp) % 256;
    buffer[4 * (y * width + x) + 1] = (3*lp) % 256;
    buffer[4 * (y * width + x) + 2] = (5*lp) % 256;
    buffer[4 * (y * width + x) + 3] = 0xff;
}
// Host wrapper: computes a 1-D launch covering every pixel and reports any
// launch error to stdout.
void launch_cudabrot_kernel(unsigned char* buffer, unsigned int width, unsigned int height,
                            float cx, float cy, float scale) {
    const int threads_per_block = 256;
    const int pixel_count = width * height;
    // ceil-division so a final partial block covers the tail pixels
    const int block_count = (pixel_count + threads_per_block - 1) / threads_per_block;
    cudabrot_kernel<<<block_count, threads_per_block>>>(buffer, width, height, cx, cy, scale);
    const cudaError e = cudaGetLastError();
    if (e != cudaSuccess) {
        printf("%s\n", cudaGetErrorString(e));
    }
}
|
19,816 | #include <stdio.h>
#include <assert.h>
#define N 1000000
// Round-trip copy test: H2D a_host -> a_device, H2D a_host -> b_device,
// D2H b_device -> b_host, then verify b_host matches a_host.
int main (int argc, char **argv){
    // Heap-allocate the host buffers: two local arrays of N=1,000,000 ints
    // (~8 MB total, as in the original) can overflow a default 8 MB stack.
    int *a_host = (int*)malloc(N*sizeof(int));
    int *b_host = (int*)malloc(N*sizeof(int));
    int *a_device, *b_device;
    int i;
    if (a_host == NULL || b_host == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    // initialize data
    for (i=0;i<N;i++) {
        a_host[i]=i;
    }
    // allocate device memory
    cudaMalloc((void**)&a_device,N*sizeof(int));
    cudaMalloc((void**)&b_device,N*sizeof(int));
    // transfer data onto the device, copy on device, transfer back
    cudaMemcpy(a_device,a_host,N*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(b_device,a_host,N*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(b_host,b_device,N*sizeof(int),cudaMemcpyDeviceToHost);
    // correctness check
    for (i=0;i<N;i++) {
        assert (a_host[i]==b_host[i]);
    }
    // free GPU memory exactly once per buffer (the original called cudaFree
    // N times in a loop, re-freeing the same two pointers)
    cudaFree(a_device);
    cudaFree(b_device);
    free(a_host);
    free(b_host);
    return 0;
}
|
// x[i] = a*x[i] + b*y[i] over L elements, using a grid-stride loop so any
// launch configuration covers the whole range.
__global__ void axpbyKernel(double* x, double* y, double a, double b, uint L){
    const uint step = gridDim.x * blockDim.x;
    uint idx = threadIdx.x + blockIdx.x * blockDim.x;
    while (idx < L) {
        x[idx] = a * x[idx] + b * y[idx];
        idx += step;
    }
}
// x[i] = a*x[i] + b*y[i]^2 over L elements (grid-stride loop).
__global__ void axpbyyKernel(double* x, double* y,
                             double a, double b, uint L){
    const uint step = gridDim.x * blockDim.x;
    uint idx = threadIdx.x + blockIdx.x * blockDim.x;
    while (idx < L) {
        const double yi = y[idx];
        x[idx] = a * x[idx] + b * yi * yi;
        idx += step;
    }
}
// Adam-style update direction: dev[i] = mhat[i] / (sqrt(vhat[i]) + eps),
// computed over L elements with a grid-stride loop.
__global__ void gradModKernel(double* mhat, double* vhat, double* dev,
                              double eps, uint L){
    const uint step = gridDim.x * blockDim.x;
    uint idx = threadIdx.x + blockIdx.x * blockDim.x;
    while (idx < L) {
        dev[idx] = mhat[idx] / (sqrt(vhat[idx]) + eps);
        idx += step;
    }
}
|
19,818 | /**
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
/*
* Modified to iterate the vector addition multiple times to use as
* benchmark/stress test for GPU locking using Cuda call-wrapping
* functions. The program's performance is dominated by memory copies
* between Host and Device using the copy engine (CE), while computation
* on the execution engine (EE) is signigicantly less time consuming than
* the copying.
*
* This version uses a user allocated stream and asynchronous memory
* copy operations (cudaMemcpyAsync()). Cuda kernel invocations on the
* stream are also asynchronous. cudaStreamSynchronize() is used to
* synchronize with both the copy and kernel executions. Host pinned
* memory was also added to better work with the extensive copy operations.
*
* A configurable number of user threads are created, each of which
* independently performs iterations of the vector addition and
* contend for GPU CE and EE resources. POSIX pthreads are used.
*
* The threading structure was originally written by Glenn Elliott
* for CUDA 6.0 (now written for CUDA 6.5)
*
* Modified by Don Smith, Department of Computer Science,
* University of North Carolina at Chapel Hill
* 2015
*/
// control number of iterations by count or elapsed time
#define MAX_LOOPS 10000 // iteration count
#define TIME_LENGTH 30 // elapsed time (seconds)
#define CUDA_CORES 192
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <pthread.h>
#include <cuda.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
// A few global variables that will be shared by all threads
// Used to synchronize start of thread loops after initialization
pthread_barrier_t worker_barrier;
// parameter defaults (see main() for more details)
bool verbose = false;
int sync_level = 2; //default -- process block
//use syscall to use kernel function to get thread ID (TID)
//glic has no wrapper for this!
// gettid(2) wrapper: glibc historically shipped no wrapper for this
// syscall, so invoke it directly via syscall(2).
inline pid_t gettid(void)
{
    const long tid = syscall(__NR_gettid);
    return (pid_t)tid;
}
//to pass parameters to threads (see main() for more details)
struct worker_args
{
time_t runtime;
int id;
int cpu;
int sched_policy;
int sched_priority;
};
//to return iteration counts from threads (malloc'd by the worker,
//freed by main after pthread_join)
struct results
{
int id;                // worker index this result belongs to
long ncompleted;       // number of vector-add iterations completed
};
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
/**
 * CUDA Kernel Device code
 *
 * Element-wise sum C[i] = A[i] + B[i] for i in [0, numElements);
 * one thread per element, with a guard for the partial tail block.
 */
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    C[idx] = A[idx] + B[idx];
}
/*
* Set up the scheduling policy and priority for calling process or thread
* Also includes currently disabled code to set CPU affinity
* for the thread (disabled becuase it causes an infinite loop
* on one of the cores when used on the TK1 with Linux SCHED_FIFO).
*
*/
/*
 * Set up the scheduling policy and priority for the calling process/thread.
 *   policy:   SCHED_OTHER (priority is a nice value in [-20,19]) or
 *             SCHED_FIFO  (priority in [1,99]); anything else aborts.
 *   cpu:      affinity hint, unused unless FOOBAR is defined (the affinity
 *             code is disabled because it misbehaved on the TK1 with
 *             SCHED_FIFO -- see comment above the function).
 * Exits the whole process on any failure.
 * Fixes vs. original: the mojibake "&para;ms" is restored to "&params"
 * (three sites), and the SCHED_OTHER sched_setscheduler() return value is
 * now checked like the SCHED_FIFO one.
 */
void setsched(int policy, int priority, int cpu = -1)
{
    int ret;
    char pbuf[80];
    struct sched_param params; //parameter for kernel call
    memset(&params, 0, sizeof(params));
#ifdef FOOBAR
    // This CPU affinity stuff DOES NOT work on TK1 with SCHED_FIFO
    int ncpus;
    cpu_set_t *cpu_set;
    size_t sz;
    if (cpu >= 0)
    {
        // pin caller to specified CPU
        ncpus = sysconf(_SC_NPROCESSORS_ONLN);
        if (ncpus <= cpu)
        {
            fprintf(stderr, "Bad CPU affinity value %d. (valid: [0,%d])\n", cpu, ncpus);
            exit(-1);
        }
        cpu_set = CPU_ALLOC(ncpus);
        sz = CPU_ALLOC_SIZE(ncpus);
        CPU_ZERO_S(sz, cpu_set);
        CPU_SET_S(cpu, sz, cpu_set);
        ret = sched_setaffinity(gettid(), sz, cpu_set);
        if (ret != 0)
        {
            perror("Failed to set CPU affinity");
            exit(-1);
        }
        CPU_FREE(cpu_set);
    }
#endif
    if (SCHED_OTHER == policy) // Default Linux "fair" scheduler
    {
        // set SCHED_OTHER policy and interpret priority as a nice value
        if (priority < -20 || priority > 19)
        {
            fprintf(stderr, "Bad SCHED_OTHER priority %d. (valid: [-20,19])\n", priority);
            exit(-1);
        }
        // the original discarded this return value
        ret = sched_setscheduler(0, policy, &params);
        if (ret != 0)
        {
            perror("Failed to set SCHED_OTHER");
            exit(-1);
        }
        ret = setpriority(PRIO_PROCESS, gettid(), priority);
        if (ret != 0)
        {
            sprintf(pbuf, "Failed to set NICE priority %d", priority);
            perror(pbuf);
            exit(-1);
        }
    }
    else if (SCHED_FIFO == policy) // Linux Real-Time scheduling class
    {
        // set SCHED_FIFO policy and priority
        if (priority > 99 || priority < 1)
        {
            fprintf(stderr, "Bad SCHED_FIFO priority %d. (valid: [1,99])\n", priority);
            exit(-1);
        }
        params.sched_priority = priority;
        ret = sched_setscheduler(0, policy, &params);
        if (ret != 0)
        {
            perror("Failed to set SCHED_FIFO");
            exit(-1);
        }
    }
    else
    {
        fprintf(stderr, "Unsupported sched policy: %d\n", policy);
        exit(-1);
    }
}
/*
* Entry point for worker pthreads
*
* Each thread is independently executing the embedded loop
* as fast as it can. Note that each thread wiil
* have its own copy of the input and output vectors and
* allocate its own device memory for the vectors.
*
*/
/*
 * Entry point for worker pthreads.
 *
 * Each thread independently: sets its scheduling parameters, allocates its
 * own pinned host buffers and device buffers, waits on worker_barrier so
 * all workers start together, then iterates H2D copies -> vectorAdd ->
 * D2H copy on a private stream until TIME_LENGTH seconds elapse or
 * MAX_LOOPS iterations complete. Returns a malloc'd struct results via
 * pthread_exit (freed by main).
 */
void* work(void* _args)
{
struct worker_args args = *(struct worker_args*)_args;
long count = 0, total_count = 0;
time_t start_time, now, elapsed;
int i;
// allocate struct to return results
struct results* r = (struct results*)malloc(sizeof(*r));
pid_t my_tid;
// set parameters for the vectorAdd GPU kernel
int numElements = CUDA_CORES / 2;
size_t size = numElements * sizeof(float); // 96 elements * 4 = 384 bytes per vector
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
int blocksPerGrid;
my_tid = gettid(); //each thread has a unique thread ID (TID)
free(_args);  // args were copied by value above; release the heap copy
// Output thread ID
switch (sync_level)
{
case 0:
printf("TID %d started > Synch Level is Spin\n", my_tid);
break;
case 1:
printf("TID %d started > Synch Level is Yield\n", my_tid);
break;
default:
printf("TID %d started > Synch Level is Block\n", my_tid);
}
// do this before any CUDA calls because the GPU driver
// signaling thread created to work with this thread
// will inherit this thread's priority
setsched(args.sched_policy, args.sched_priority, args.cpu);
// all threads use device 0
cudaSetDevice(0);
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// create a user defined stream
cudaStream_t my_stream;
cudaStreamCreate(&my_stream);
// Host allocations in pinned memory
// Allocate the host input vector A
err = cudaMallocHost((void **)&h_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate host vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the host input vector B
err = cudaMallocHost((void **)&h_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate host vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the host output vector C
err = cudaMallocHost((void **)&h_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate host vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Wait for all worker threads to be ready
// This ensures that all threads have completed the
// initialization steps before starting to iterate
// All should begin contending for CE and EE at
// approximately the same time
pthread_barrier_wait(&worker_barrier);
printf("TID %d Iterating Vector Add CUDA Kernel for %d seconds, %d max loops\n", my_tid, TIME_LENGTH, MAX_LOOPS);
start_time = now = time(NULL);
// NOTE(review): the loop duration is governed by the compile-time
// TIME_LENGTH constant; args.runtime is never read here -- confirm intent.
for (i = 0;
((now - TIME_LENGTH) < start_time) &&
i < MAX_LOOPS; i++) {
// copy the A and B vectors from Host to Device memory
// these calls are asynchronous so only the lock of CE can be handled in the wrapper
err = cudaMemcpyAsync(d_A, h_A, size, cudaMemcpyHostToDevice, my_stream);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
cudaStreamSynchronize(my_stream);
err = cudaMemcpyAsync(d_B, h_B, size, cudaMemcpyHostToDevice, my_stream);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
cudaStreamSynchronize(my_stream);
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
vectorAdd<<<blocksPerGrid, threadsPerBlock, 0, my_stream>>>(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// synchronize with the stream after kernel execution
// the wrapper for this function releases any lock held (EE here)
cudaStreamSynchronize(my_stream);
// copy the result vector from Device to Host memory
// this call is asynchronous so only the lock of CE can be handled in the wrapper
err = cudaMemcpyAsync(h_C, d_C, size, cudaMemcpyDeviceToHost, my_stream);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
cudaStreamSynchronize(my_stream);
count++;
total_count++;
now = time(NULL);
} // ends for loop
elapsed = now - start_time;
// Verify that the result vector is correct
// This verification is applied only to the
// last result computed
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("TID %d Test PASSED\n", my_tid);
printf("TID %d completed %ld, duration %ld seconds\n", my_tid, total_count, elapsed);
// Free device global memory for inputs A and B and result C
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory that was pinned
cudaFreeHost(h_A);
cudaFreeHost(h_B);
cudaFreeHost(h_C);
// clean up the user allocated stream
cudaStreamSynchronize(my_stream);
cudaStreamDestroy(my_stream);
// Wait for all threads to complete
// so all iteration counts are complete
pthread_barrier_wait(&worker_barrier);
r->id = args.id;
r->ncompleted = total_count; // return iteration count from this thread
pthread_exit(r);
}
#define OPTSTR "b:n:s:rvf"
// Parse options, configure scheduling and CUDA device flags, spawn the
// worker threads, and aggregate their iteration counts.
int main(int argc, char* argv[])
{
// set default parameter values
int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
bool realtime = false; // use Linux SCHED_OTHER scheduling class
bool flat = false; // use different priority per thread
time_t runtime = TIME_LENGTH; // time for iterating
int nthreads = 4; // 4 threads (== cores on TK1)
int baseprio = 19; // nice value for SCHED_OTHER
pthread_t* workers;
struct worker_args *wargs;
struct results *r;
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
long throughput = 0;
int opt;
while ((opt = getopt(argc, argv, OPTSTR)) != -1)
{
switch(opt)
{
case 'b': // priority for threads. by default, each thread is assigned baseprio+i priority
baseprio = atoi(optarg); //base is nice value for SCHED_OTHER, or the real-time priority
//(1-99) where 1 is lowest for SCHED_FIFO
break;
case 's': // set to control GPU synchronization with host process/thread
sync_level = atoi(optarg);
// level 0 - spin polling (busy waiting) for GPU to finish
// level 1 - yield each time through the polling loop to let another thread run
// level 2 - block process waiting for GPU to finish
break;
case 'r': // use SCHED_FIFO policy and interpret priority as a SCHED_FIFO priority.
// otherwise, use SCHED_OTHER and interpret priority as a nice value.
realtime = true;
break;
case 'f': // make all threads have same priority
flat = true;
break;
case 'n': // number of CPU worker threads
nthreads = atoi(optarg);
break;
case 'v':
verbose = true;
break;
}
}
// require a runtime as the last parameter (positional)
if (argc - optind < 1)
{
fprintf(stderr, "Missing runtime argument (seconds).\n");
exit(-1);
}
// NOTE(review): runtime is stored into each worker_args but work()
// iterates based on the compile-time TIME_LENGTH -- confirm intent.
runtime = atoi(argv[optind + 0]);
printf("Test: %d threads, %s, for %ld seconds.\n", nthreads, (realtime)? "SCHED_FIFO" : "SCHED_OTHER", runtime);
// Set main priority before the CUDA runtime has a chance to spawn any signaling threads.
// This way, those CUDA threads will adopt the policy and priority we set here.
// Note that this prioritization is set up only for SCHED_FIFO
if (realtime)
{
// set the main process scheduling class and priority
setsched(SCHED_FIFO, baseprio+nthreads+1); // +1 over highest worker thread
// so ALL signals will have highest priority
}
// set the device flags for CPU and GPU program synchronization
// before CUDA runtime is initialized
switch (sync_level)
{
case 0:
cudaSetDeviceFlags(cudaDeviceScheduleSpin);
break;
case 1:
cudaSetDeviceFlags(cudaDeviceScheduleYield);
break;
default:
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
}
// force initialization of cuda runtime
cuInit(0);
cudaFree(0); // used here to invoke initialization of GPU locking
pthread_barrier_init(&worker_barrier, 0, nthreads);
// set parameters for threads and create them
// (each worker frees its own wargs copy)
workers = (pthread_t*)malloc(sizeof(pthread_t)*nthreads);
for (int i = 0; i < nthreads; i++)
{
wargs = (struct worker_args*)malloc(sizeof(*wargs));
wargs->runtime = runtime; // time for iterations
wargs->id = i;
wargs->cpu = i % ncpus; // distribute threads among CPUs
if (realtime)
{
wargs->sched_policy = SCHED_FIFO;
// assign an increasing priority (unless 'flat' is true)
wargs->sched_priority = (!flat) ? baseprio + i + 1 : baseprio;
}
else
{
wargs->sched_policy = SCHED_OTHER;
// assign a decreasing priority, becoming less nice (unless 'flat' is true)
wargs->sched_priority = (!flat) ? baseprio - i*4 : baseprio;
}
pthread_create(&workers[i], 0, work, wargs);
}
// Wait for threads to complete and print out statistics. Worker
// threads collectively wait on a barrier before exiting, so all
// threads will be done with GPU work by the time the first call
// to pthread_join() returns.
for (int i = 0; i < nthreads; i++)
{
pthread_join(workers[i], (void**)&r);
throughput += r->ncompleted;
free(r);
}
// NOTE(review): throughput is a (signed) long printed with %lu -- the
// value is always non-negative here, but %ld would match the type.
fprintf(stdout, "total 'frames': %lu\n", throughput);
free(workers);
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
}
|
19,819 | #include "includes.h"
// Squared Euclidean distance from precomputed dot products:
//   results[i] = x_i - 2*y_i + z_i  (clamped at 0 against rounding error),
// where pDotProducts[i] = (<a,a>, <a,b>, <b,b>).
__global__ void euclideanDistanceCuda(float3* pDotProducts, size_t pSize, float* results) {
    int instance = blockIdx.x * blockDim.x + threadIdx.x;
    // Grid-stride loop. The original advanced by gridDim.x only, which made
    // threads of different blocks revisit the same elements (same values
    // written, but redundant passes over the data).
    const int stride = gridDim.x * blockDim.x;
    while (instance < pSize) {
        float d = pDotProducts[instance].x - 2*pDotProducts[instance].y + pDotProducts[instance].z;
        results[instance] = (d < 0.0f) ? 0.0f : d;
        instance += stride;
    }
}
19,820 | #include "includes.h"
// Add the per-channel bias to the pre-activations of a fully connected
// layer: each thread handles its contiguous slice of [0, n_channel),
// partitioned by the n*p/P rational-split idiom.
__global__ void fp_bias_fc(float *preact, float *bias, const int n_channel)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int nthreads = blockDim.x * gridDim.x;
    const int begin = n_channel * tid / nthreads;
    const int end = n_channel * (tid + 1) / nthreads;
    for (int c = begin; c < end; ++c)
        preact[c] += bias[c];
}
19,821 | //THRUST
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/random.h>
//STL
#include <iostream>
#include <iomanip>
// define a 4d float vector
typedef thrust::tuple< float, float, float, float > vec4;
// return a random vec4 in [0,1)^2
// Return a vec4 whose four components are independent uniform draws
// from [0,1). The engine and distribution persist across calls.
vec4 make_random_vec4( void )
{
    static thrust::default_random_engine rng;
    static thrust::uniform_real_distribution< float > u01( 0.0f, 1.0f );
    const float c1 = u01( rng );
    const float c2 = u01( rng );
    const float c3 = u01( rng );
    const float c4 = u01( rng );
    return vec4( c1, c2, c3, c4 );
}
// Generate N random 4-component tuples on the host, copy them to the
// device, and print the 4th component of element 0 three ways.
int main ( void )
{
const size_t N = 1000000;
// allocate some random points on the host
thrust::host_vector<vec4> h_5Dpoints( N );
thrust::generate( h_5Dpoints.begin(), h_5Dpoints.end(), make_random_vec4 );
std::cout << "The x4D[ 0 ] of h_5Dpoints is " << thrust::get< 3 >( h_5Dpoints[0] ) << std::endl;
// transfer to device
thrust::device_vector< vec4 > d_5Dpoints = h_5Dpoints;
// device_vector::operator[] yields a proxy reference; materialise a host
// vec4 (explicitly or via the vec4(...) cast) before thrust::get.
vec4 p = d_5Dpoints[ 0 ]; std::cout << "The x4D[ 0 ] of d_5Dpoints is " << thrust::get< 3 >( p ) << std::endl;
std::cout << "The x4D[ 0 ] of d_5Dpoints is " << thrust::get< 3 >( vec4( d_5Dpoints[ 0 ] ) ) << std::endl;
return 0;
}
|
19,822 | #include "includes.h"
// Flatten a 3-D grid of 3-D blocks into a single global thread id.
__device__ unsigned int getGid3d3d(){
    const int blocksBefore = blockIdx.x
                           + blockIdx.y * gridDim.x
                           + blockIdx.z * gridDim.x * gridDim.y;
    const int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
    const int localId = threadIdx.x
                      + threadIdx.y * blockDim.x
                      + threadIdx.z * blockDim.x * blockDim.y;
    return blocksBefore * threadsPerBlock + localId;
}
// Integer power of a complex number a = (x + iy) via De Moivre's formula:
// a^b = r^b * (cos(b*theta) + i*sin(b*theta)).
__device__ double2 pow(double2 a, int b){
    double r = sqrt(a.x*a.x + a.y*a.y);
    // atan2 preserves the quadrant; the original atan(a.y / a.x) collapsed
    // quadrants II/III onto I/IV and divided by zero when a.x == 0.
    double theta = atan2(a.y, a.x);
    return {pow(r,b)*cos(b*theta), pow(r,b)*sin(b*theta)};
}
// Raise each component of every element of `in` to the real power `param`.
// NOTE(review): this is component-wise real pow, not the complex
// pow(double2,int) defined above -- confirm that is intended.
__global__ void scalarPow(double2* in, double param, double2* out){
    const unsigned int gid = getGid3d3d();
    double2 v;
    v.x = pow(in[gid].x, param);
    v.y = pow(in[gid].y, param);
    out[gid] = v;
}
19,823 | #include "includes.h"
// Initialise the solver state: every variable slot gets 1 and, for the
// first nVariableCollection*nQueen entries, lastValuesMem gets 0.
__global__ void externSet(int* variablesMem, int* lastValuesMem, int nQueen, int nVariableCollection){
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= nVariableCollection * nQueen * nQueen)
        return;                    // tail-block guard
    variablesMem[gid] = 1;
    if (gid < nVariableCollection * nQueen)
        lastValuesMem[gid] = 0;
}
#include <iostream>
#include <new>
#include <cuda_runtime.h>
using namespace std;
// Derived class
// Rectangle whose dimensions live in CUDA managed memory, so both host
// and device code can dereference them.
class Rectangle
{
public:
// NOTE(review): the managed allocations are never freed (no destructor
// calls cudaFree) -- each Rectangle leaks 2 ints of managed memory.
Rectangle()
{
cudaMallocManaged(&width, sizeof(int));
cudaMallocManaged(&height, sizeof(int));
//width = (int *)malloc(sizeof(int));
//height = (int *)malloc(sizeof(int));
}
// Area = *width * *height; only meaningful after both values are set.
int getArea()
{
return (*width * *height);
}
int* width;
int* height;
};
// Base class
class Shape
{
public:
Shape()
{
cudaMallocManaged(&rect, sizeof(Rectangle));
*(rect->width) = 20;
*(rect->height) = 10;
}
Rectangle* rect;
};
// Device-side write into managed memory: set the rectangle's width to 10.
__global__ void change_width(Shape* sha)
{
    int* w = sha->rect->width;
    *w = 10;
}
int main(void)
{
    Shape* sha;
    cudaMallocManaged(&sha, sizeof(Shape));
    // cudaMallocManaged returns raw, unconstructed storage: without running
    // the constructor, sha->rect is an uninitialized pointer and the kernel
    // dereferences garbage. Placement-new runs Shape() in the managed block
    // (the commented-out "new Shape()" in the original hinted at this).
    new (sha) Shape();
    change_width<<<1,1,0>>>(sha);
    cudaDeviceSynchronize();   // wait for the device write before reading on host
    // Print the area of the object: 10 (device-updated width) * 10 = 100.
    cout << "Total area: " << sha->rect->getArea() << endl;
    return 0;
}
|
19,825 | #include <stdlib.h>
#include <stdio.h>
#define a(i,l) A[(i)*k + (l)]
#define b(l,j) B[(l)*n + (j)]
#define c(i,j) C[(i)*n + (j)]
#define BLOCK_SIZE 16
#define num_el 4
// Declarations
extern "C" {
void matmult_gpu1(int m, int n, int k,double *h_A,double *h_B,double *h_C);
void matmult_gpu2(int m, int n, int k,double *h_A,double *h_B,double *h_C);
void matmult_gpu3(int m, int n, int k,double *h_A,double *h_B,double *h_C);
void matmult_gpu4(int m, int n, int k,double *h_A,double *h_B,double *h_C);
void matmult_gpu5(int m, int n, int k,double *h_A,double *h_B,double *h_C);
}
__global__ void matmult1(int m, int n, int k,double *A,double *B,double *C);
__global__ void matmult2(int m, int n, int k,double *A,double *B,double *C);
__global__ void matmult3(int m, int n, int k,double *A,double *B,double *C);
__global__ void matmult4(int m, int n, int k,double *A,double *B,double *C);
__global__ void matmult5(int m, int n, int k,double *A,double *B,double *C);
// Baseline GPU matmul: copies A and B over, zeroes C, and runs the whole
// triple loop in a single device thread (matmult1 with a 1x1 launch).
void matmult_gpu1(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
    double *d_A, *d_B, *d_C;
    const int bytes_A = m * k * sizeof(double);
    const int bytes_B = k * n * sizeof(double);
    const int bytes_C = m * n * sizeof(double);
    cudaMalloc((void **)&d_A, bytes_A);
    cudaMalloc((void **)&d_B, bytes_B);
    cudaMalloc((void **)&d_C, bytes_C);
    cudaMemcpy(d_A, h_A, bytes_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes_B, cudaMemcpyHostToDevice);
    cudaMemset(d_C, 0, bytes_C);
    // one block, one thread
    matmult1<<<1, 1>>>(m, n, k, d_A, d_B, d_C);
    cudaDeviceSynchronize();
    cudaMemcpy(h_C, d_C, bytes_C, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Naive i-l-j triple loop, executed entirely by one thread; C must be
// zero-initialised by the caller (the loop accumulates into it).
__global__ void matmult1(int m, int n, int k,double *A,double *B,double *C) {
    for (int row = 0; row < m; row++)
        for (int l = 0; l < k; l++)
            for (int col = 0; col < n; col++)
                c(row,col) = c(row,col) + a(row,l) * b(l,col);
}
// C = A*B with one thread per element of C, 16x16 thread blocks.
void matmult_gpu2(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
    double *d_A, *d_B, *d_C;
    const int bytes_A = m * k * sizeof(double);
    const int bytes_B = k * n * sizeof(double);
    const int bytes_C = m * n * sizeof(double);
    cudaMalloc((void **)&d_A, bytes_A);
    cudaMalloc((void **)&d_B, bytes_B);
    cudaMalloc((void **)&d_C, bytes_C);
    // enough 16x16 blocks to cover the m x n result (ceil division)
    dim3 threads(16, 16, 1);
    dim3 blocks(ceil((double)n/threads.x), ceil((double)m/threads.y), 1);
    cudaMemcpy(d_A, h_A, bytes_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes_B, cudaMemcpyHostToDevice);
    cudaMemset(d_C, 0, bytes_C);
    matmult2<<<blocks,threads>>>(m, n, k, d_A, d_B, d_C);
    cudaDeviceSynchronize();
    cudaMemcpy(h_C, d_C, bytes_C, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// One thread per C element; the dot product accumulates in a register and
// is written to global memory once. Guarded against the partial tail blocks.
__global__ void matmult2(int m, int n, int k,double *A,double *B,double *C) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= m || col >= n)
        return;
    double acc = 0.0;
    for (int l = 0; l < k; l++)
        acc += a(row,l) * b(l,col);
    c(row,col) = acc;
}
// C = A*B where each thread computes two rows of C (matmult3), so the grid
// only needs half as many rows of blocks as matmult_gpu2.
void matmult_gpu3(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
    double *d_A, *d_B, *d_C;
    const int bytes_A = m * k * sizeof(double);
    const int bytes_B = k * n * sizeof(double);
    const int bytes_C = m * n * sizeof(double);
    cudaMalloc((void **)&d_A, bytes_A);
    cudaMalloc((void **)&d_B, bytes_B);
    cudaMalloc((void **)&d_C, bytes_C);
    dim3 threads(16, 16, 1);
    // halve the row dimension: every thread covers 2 rows
    dim3 blocks(ceil((double)n/threads.x), ceil(((double)m/threads.y) / 2), 1);
    cudaMemcpy(d_A, h_A, bytes_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes_B, cudaMemcpyHostToDevice);
    cudaMemset(d_C, 0, bytes_C);
    matmult3<<<blocks,threads>>>(m, n, k, d_A, d_B, d_C);
    cudaDeviceSynchronize();
    cudaMemcpy(h_C, d_C, bytes_C, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Two rows of C per thread: thread (i-pair, j) accumulates C(i,j) and
// C(i+1,j) in registers, so each loaded b(l,j) is reused twice.
// The `i == m-1` branch handles the unpaired last row when m is odd.
__global__ void matmult3(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l;
j = blockIdx.x * blockDim.x + threadIdx.x;
i = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
double C_reg[2] = {0.0};
if (i < m-1 && j < n) {
for (l = 0; l<k; l++) {
C_reg[0] = C_reg[0] + a(i,l) * b(l,j);
C_reg[1] = C_reg[1] + a(i+1,l) * b(l,j);
}
//Copy back to global memory
c(i,j) = C_reg[0];
c(i+1,j) = C_reg[1];
} else if (i == m-1 && j < n) {
// odd m: the final row has no partner, compute it alone
for (l = 0; l<k; l++) {
C_reg[0] = C_reg[0] + a(i,l) * b(l,j);
}
//Copy back to global memory
c(i,j) = C_reg[0];
}
/*
j = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
i = blockIdx.y * blockDim.y + threadIdx.y;
if (i < m && j < n-1) {
for (l = 0; l<k; l++) {
c(i,j) = c(i,j) + a(i,l) * b(l,j);
c(i,j+1) = c(i,j+1) + a(i,l) * b(l,j+1);
}
} else if (i < m && j == n-1) {
for (l = 0; l<k; l++) {
c(i,j) = c(i,j) + a(i,l) * b(l,j);
}
}
*/
}
// C = A*B where each thread computes num_el (= 4) rows of C (matmult4),
// shrinking the row dimension of the grid accordingly.
void matmult_gpu4(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
    double *d_A, *d_B, *d_C;
    const int bytes_A = m * k * sizeof(double);
    const int bytes_B = k * n * sizeof(double);
    const int bytes_C = m * n * sizeof(double);
    cudaMalloc((void **)&d_A, bytes_A);
    cudaMalloc((void **)&d_B, bytes_B);
    cudaMalloc((void **)&d_C, bytes_C);
    cudaMemcpy(d_A, h_A, bytes_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes_B, cudaMemcpyHostToDevice);
    dim3 threads(16, 16, 1);
    // each thread covers num_el rows, so divide the row blocks by num_el
    dim3 blocks(ceil((double)n/threads.x), ceil(((double)m/threads.y) / num_el), 1);
    cudaMemset(d_C, 0, bytes_C);
    matmult4<<<blocks,threads>>>(m, n, k, d_A, d_B, d_C);
    cudaDeviceSynchronize();
    cudaMemcpy(h_C, d_C, bytes_C, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// num_el (= 4) rows of C per thread, accumulated in registers so each
// loaded b(l,j) is reused num_el times. The else-branch handles the last
// partial group of rows when m is not a multiple of num_el.
__global__ void matmult4(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l,s;
j = blockIdx.x * blockDim.x + threadIdx.x;
i = (blockIdx.y * blockDim.y + threadIdx.y) * num_el;
double C_reg[num_el] = {0.0};
if (i < m-num_el && j < n) {
// fast path: all num_el rows i..i+3 are in range
for (l = 0; l<k; l++) {
C_reg[0] = C_reg[0] + a(i,l) * b(l,j);
C_reg[1] = C_reg[1] + a(i+1,l) * b(l,j);
C_reg[2] = C_reg[2] + a(i+2,l) * b(l,j);
C_reg[3] = C_reg[3] + a(i+3,l) * b(l,j);
}
c(i,j) = C_reg[0];
c(i+1,j) = C_reg[1];
c(i+2,j) = C_reg[2];
c(i+3,j) = C_reg[3];
} else if (i >= m-num_el && j < n) {
// tail rows: process rows i..m-1 one at a time (no-op when i >= m)
for (s = i; s<m; s++) {
for (l = 0; l<k; l++) {
C_reg[s-i] = C_reg[s-i] + a(s,l) * b(l,j);
}
c(s,j) = C_reg[s-i];
}
}
}
// C = A*B via the shared-memory-tiled kernel matmult5.
// NOTE(review): the grid is a plain ceil-division but matmult5 loads full
// BLOCK_SIZE tiles without bounds checks -- presumably m, n and k must be
// multiples of BLOCK_SIZE (16); verify against the callers.
void matmult_gpu5(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
    double *d_A, *d_B, *d_C;
    const int bytes_A = m * k * sizeof(double);
    const int bytes_B = k * n * sizeof(double);
    const int bytes_C = m * n * sizeof(double);
    cudaMalloc((void **)&d_A, bytes_A);
    cudaMalloc((void **)&d_B, bytes_B);
    cudaMalloc((void **)&d_C, bytes_C);
    cudaMemcpy(d_A, h_A, bytes_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes_B, cudaMemcpyHostToDevice);
    dim3 threads(16, 16, 1);
    dim3 blocks(ceil((double)n/threads.x), ceil((double)m/threads.y), 1);
    cudaMemset(d_C, 0, bytes_C);
    matmult5<<<blocks,threads>>>(m, n, k, d_A, d_B, d_C);
    cudaDeviceSynchronize();
    cudaMemcpy(h_C, d_C, bytes_C, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Shared-memory tiled matmul (the classic CUDA Programming Guide scheme):
// each BLOCK_SIZE x BLOCK_SIZE thread block computes one tile of C by
// staging matching tiles of A and B in shared memory.
// NOTE(review): tile loads have no bounds checks and k_blocked = k/BLOCK_SIZE
// truncates -- this presumably requires m, n, k to be multiples of
// BLOCK_SIZE and blockDim == (BLOCK_SIZE, BLOCK_SIZE); confirm with callers.
__global__ void matmult5(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l;
double Cvalue = 0;
double * Asub, *Bsub, *Csub;
i = threadIdx.y;
j = threadIdx.x;
int k_blocked = (k/BLOCK_SIZE);
//printf("k_blocked %d\n", k_blocked);
//Get Csub matrix
//k is here the A.stride
Csub = &C[n*BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x];
for (l = 0; l<k_blocked; l++) {
//Shared memory to store sub-matrices of A and B
__shared__ double as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double bs[BLOCK_SIZE][BLOCK_SIZE];
Asub = &A[k*BLOCK_SIZE * blockIdx.y +
BLOCK_SIZE * l];
Bsub = &B[n*BLOCK_SIZE * l +
BLOCK_SIZE * blockIdx.x];
as[threadIdx.y][threadIdx.x] = Asub[k*i + j];
bs[threadIdx.y][threadIdx.x] = Bsub[n*i + j];
// barrier: the whole tile must be staged before anyone reads it
__syncthreads();
//Multiply sub matrices
for(int e = 0; e < BLOCK_SIZE; ++e){
Cvalue += as[i][e] * bs[e][j];
}
// barrier: finish reading before the next iteration overwrites the tiles
__syncthreads();
}
//Write back to global memory somehow
//A.elements[row * A.stride + col] = value
Csub[i*n+j] = Cvalue;
}
|
19,826 | #include <cstdio>
using namespace std;
// Debug kernel: prints its loop index from the device
// (device printf is serialized -- debugging only).
__global__ void
foo_kernel(int step)
{
printf("loop: %d\n", step);
}
// Launch foo_kernel n_loop times on the default (NULL) stream; launches
// are asynchronous, so synchronize once at the end.
int main()
{
    const int n_loop = 5;
    for (int step = 0; step < n_loop; ++step)
        foo_kernel<<< 1, 1, 0, 0 >>>(step);
    cudaDeviceSynchronize();
    return 0;
}
19,827 | #include <stdio.h>
//Function that catches the error
//Function that catches the error
//Reports the source file/line of a failed CUDA call and aborts the
//process. Call sites normally reach it through the testCUDA(error)
//function-like macro below, which supplies __FILE__ and __LINE__.
void testCUDA(cudaError_t error, const char *file, int line){
if (error != cudaSuccess) {
printf("There is an error in file %s at line %d\n", file, line);
exit (EXIT_FAILURE);
}
}
//Has to be defined in the comppilation in order to get the correct value of the
//macro __FILE__ and __LINE__
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
//Device code
//Device code
//Empty kernel: launched once to force CUDA context/runtime initialization.
__global__ void empty_k(void){
}
//Host code
//Host code
// Print the capabilities of the last CUDA device (index count-1).
int main (void){
    int count;
    cudaDeviceProp prop;
    empty_k<<<1,1>>>();   // force runtime/context initialization
    testCUDA(cudaGetDeviceCount (&count));
    printf("The number of devices available is %i GPUs \n", count);
    testCUDA(cudaGetDeviceProperties(&prop, count -1));
    printf("Name: %s\n", prop.name);
    // totalGlobalMem / sharedMemPerBlock / totalConstMem are size_t:
    // the original printed them with %u / %ld, which is undefined
    // behaviour on LP64 platforms; %zu is the matching specifier.
    printf("Global memory size in octet (bytes): %zu\n", prop.totalGlobalMem);
    printf("Shared memeory size per block: %zu\n", prop.sharedMemPerBlock);
    printf("Number of registers per block: %i\n", prop.regsPerBlock);
    printf("Number of threads in a warp: %i\n", prop.warpSize);
    printf("Maximum number of threads that can be launched per block: %i\n", prop.maxThreadsPerBlock);
    printf("Maximum number of threads that can be launched: %i x %i x %i\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
    printf("Maximum number GridSize: %i X %i X %i\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    printf("Total constant memory size: %zu\n", prop.totalConstMem);
    printf("Clock rate; %i\n", prop.clockRate);
    return 0;
}
|
19,828 | #include <stdio.h>
#include <iostream>
#define MAX_THREADS 128
using namespace std;
const int threshold =25;
// In-place exchange sort of a[left..right] (inclusive), executed by a single
// thread. Used only for sub-arrays at or below the host-side threshold.
__global__ void bubble_sort(int *a, int left, int right)
{
    for (int i = left; i < right; ++i) {
        for (int j = i + 1; j <= right; ++j) {
            if (a[i] > a[j]) {
                int tmp = a[j];
                a[j] = a[i];
                a[i] = tmp;
            }
        }
    }
}
// GPU partition step: scatter elements of a[left..right] into al (< pivot)
// and ah (> pivot), leaving -999 sentinels in untouched slots. Elements
// EQUAL to the pivot are dropped here; the host re-inserts the pivot itself.
// NOTE(review): k1 = threadIdx.x*size + left with size = right-left+1 places
// every thread after thread 0 entirely outside the size-element al/ah
// buffers — this indexing looks broken for MAX_THREADS > 1; verify the
// intended per-thread chunking against the host-side allocation.
__global__ void partition(int *a,int left,int right,int pivot,int *al,int *ah)
{
    int l,h;
    int size=(right-left+1);
    // First/last index of this thread's chunk.
    int k1=threadIdx.x*size+left;
    int k2=k1+size-1;
    if(threadIdx.x==MAX_THREADS-1)
        k2=right;
    l=h=k1;  // write cursors into al / ah
    // Pre-fill both output ranges with the "empty slot" sentinel.
    for(int i=k1;i<=k2;i++)
    {
        al[i]=ah[i]=-999;
    }
    for(int i=k1;i<=k2;i++)
    {
        if(a[i]<pivot)
        {
            al[l++]=a[i];
        }
        else
        {
            if(a[i]>pivot)
            {
                ah[h++]=a[i];
            }
        }
    }
}
// Hybrid host/GPU quicksort over a[left..right] (inclusive).
// Small ranges are sorted by a single-thread GPU bubble sort; larger ranges
// are partitioned on the GPU around pivot = a[left], compacted on the host
// and sorted recursively. -999 is used as an "empty slot" sentinel by the
// partition kernel, so the data must not contain -999.
// NOTE: all callers pass left == 0 (the kernels index the copied buffer with
// the absolute left/right values).
// Fixes: device buffers are now cudaFree'd (they all leaked), and the
// sentinel-scan loops test the bound BEFORE dereferencing (the original
// `al_h[i]==-999 && i<count` order read one element past the end).
void quicksort(int *a, const int left, const int right)
{
    int count = right - left + 1;
    if (right - left <= threshold)
    {
        int *ad;
        cudaMalloc((void **)&ad, count * sizeof(int));
        cudaMemcpy(ad, a, count * sizeof(int), cudaMemcpyHostToDevice);
        bubble_sort<<<1,1>>>(ad, left, right);
        cudaMemcpy(a, ad, count * sizeof(int), cudaMemcpyDeviceToHost);
        cudaFree(ad);
        return;
    }
    int pivot = a[left];
    int size = count * sizeof(int);
    int *al, *ah, *ad;
    cudaMalloc((void **)&ad, size);
    cudaMalloc((void **)&al, size);
    cudaMalloc((void **)&ah, size);
    cudaMemcpy(ad, a, size, cudaMemcpyHostToDevice);
    partition<<<1,MAX_THREADS>>>(ad, left, right, pivot, al, ah);
    int al_h[count], ah_h[count]; // VLAs sized by the current range
    cudaMemcpy(al_h, al, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(ah_h, ah, size, cudaMemcpyDeviceToHost);
    cudaFree(ad); // release device buffers before recursing
    cudaFree(al);
    cudaFree(ah);
    // Compact the "< pivot" side in place, skipping sentinel slots.
    int i = 0, k = 0;
    while (i < count)
    {
        while (i < count && al_h[i] == -999)
            i++;
        while (i < count && al_h[i] != -999)
        {
            al_h[k++] = al_h[i++];
        }
    }
    quicksort(al_h, 0, k - 1);
    int p = left;
    int x = 0;
    while (x < k)
    {
        a[p++] = al_h[x++];
    }
    a[p] = pivot; // pivot sits between the two compacted halves
    // Compact the "> pivot" side the same way.
    i = 0;
    k = 0;
    while (i < count)
    {
        while (i < count && ah_h[i] == -999)
            i++;
        while (i < count && ah_h[i] != -999)
        {
            ah_h[k++] = ah_h[i++];
        }
    }
    quicksort(ah_h, 0, k - 1);
    i = 0;
    p++;
    while (i < k)
    {
        a[p++] = ah_h[i++];
    }
}
// Interactive driver: read or randomly generate `len` ints, sort them with
// the hybrid GPU quicksort, and print before/after.
// NOTE(review): a menu choice other than 1 or 2 falls through the switch and
// the program exits without doing anything.
int main()
{
    int len,x,flag,choice;
    cout<<"ENTER SIZE OF ARRAY :"<<endl;
    cin>>len;
    int input_data[len]; // VLA sized by user input
    cout<<"*********** MENU ***************"<<endl;
    cout<<"1. INPUT USER DATA "<<endl;
    cout<<"2. RANDOM GENERATOR "<<endl;
    cout<<"ENTER YOUR CHOICE.......... "<<endl;
    cin>>choice;
    switch(choice)
    {
    case 1:
        // Manual entry of each element.
        for (int i = 0 ; i < len ; i++)
        {
            cout<<"ENTER ELEMENTS :" <<endl;
            cin>>input_data[i];
        }
        cout<<"ORIGINAL ARRAY :"<<endl;
        for(int i=0;i<len;i++)
            cout<<input_data[i]<<"\t";
        cout<<endl;
        quicksort(input_data,0,len-1);
        cout<<"AFTER SORTING "<<endl;
        for(int i=0;i<len;i++)
            cout<<input_data[i]<<"\t";
        cout<<endl;
        break;
    case 2:
        // Rejection-sample len DISTINCT values in [0, len): i is decremented
        // and redrawn whenever the candidate already appears earlier, so the
        // result is a random permutation of 0..len-1.
        for (int i = 0 ; i < len ; i++)
        {
            x=rand()%len;
            flag=0;
            for(int j=0;j<i;j++)
            {
                if(input_data[j]==x)
                {
                    i--;
                    flag=1;
                    break;
                }
            }
            if(flag==0)
                input_data[i]=x;
        }
        cout<<"ORIGINAL ARRAY :"<<endl;
        for(int i=0;i<len;i++)
            cout<<input_data[i]<<"\t";
        cout<<endl;
        quicksort(input_data,0,len-1);
        cout<<"AFTER SORTING "<<endl;
        for(int i=0;i<len;i++)
            cout<<input_data[i]<<"\t";
        cout<<endl;
        break;
    }
    return 0;
}
|
19,829 | class Complex
{
public:
    float r; // real part
    float i; // imaginary part
    // All arithmetic is usable from both host and device code.
    __host__ __device__ Complex() : r(0), i(0) {}
    __host__ __device__ Complex( float a, float b ) : r(a), i(b) {}
    __host__ __device__ Complex(const Complex& x) : r(x.r), i(x.i) {}
    // Squared magnitude r^2 + i^2 (avoids the sqrt).
    __host__ __device__ float magnitude2( void ) {
        return r * r + i * i;
    }
    // Complex product (r + i*j)(a.r + a.i*j).
    __host__ __device__ Complex operator*(const Complex& a) {
        return Complex(r*a.r - i*a.i, i*a.r + r*a.i);
    }
    // Component-wise sum.
    __host__ __device__ Complex operator+(const Complex& a) {
        return Complex(r+a.r, i+a.i);
    }
    // Declared here; presumably defined elsewhere (host-only helper).
    void Print();
};
class RGB // RGB class to define R, G, B coordinates
{
public:
    RGB() : r(0), g(0), b(0) {} // black by default
    RGB(double r0, double g0, double b0) : r(r0), g(g0), b(b0) {}
public:
    double r; // red component
    double g; // green component
    double b; // blue component
};
class Memory // Class memory to store minC, maxC values after zooming in. Used in back button
{
public:
    // Saved view rectangle in the complex plane: real/imaginary parts of the
    // minimum and maximum corners.
    float minC_r, minC_i, maxC_r, maxC_i;
    Memory(float a, float b, float c, float d) : minC_r(a), minC_i(b), maxC_r(c), maxC_i(d) {}
};
struct Position // Structure for using mouse click
{
    Position() : x(0), y(0) {} // origin by default
    float x, y; // X and Y coordinates of the mouse click
}; |
19,830 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
// INSERT KERNEL(S) HERE
// Shared-memory histogram. Each block accumulates a private histogram in
// dynamic shared memory, then merges it into the global bins with atomics.
// Launch with num_bins * sizeof(unsigned int) dynamic shared memory; every
// value in buffer must be < num_bins.
__global__ void histo_kernel(unsigned int *buffer, long size, unsigned int *histo, unsigned int num_bins)
{
    extern __shared__ unsigned int histo_private[];
    const unsigned int gridThreads = blockDim.x * gridDim.x; // grid-wide stride
    const unsigned int binRounds = (num_bins - 1) / blockDim.x + 1;
    // Zero the block-private histogram (strided so any blockDim works).
    for (unsigned int r = 0; r < binRounds; ++r) {
        unsigned int bin = r * blockDim.x + threadIdx.x;
        if (bin < num_bins)
            histo_private[bin] = 0;
    }
    __syncthreads(); // private bins zeroed before anyone accumulates
    // Grid-stride accumulation of this block's share of the input.
    for (unsigned int pos = threadIdx.x + blockIdx.x * blockDim.x; pos < size; pos += gridThreads)
        atomicAdd(&(histo_private[buffer[pos]]), 1);
    __syncthreads(); // all private counts in before the merge
    // Merge the private histogram into the global one.
    for (unsigned int r = 0; r < binRounds; ++r) {
        unsigned int bin = r * blockDim.x + threadIdx.x;
        if (bin < num_bins)
            atomicAdd(&(histo[bin]), histo_private[bin]);
    }
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
// Launch the shared-memory histogram kernel: one thread per input element
// (rounded up), with one unsigned int of dynamic shared memory per bin.
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
        unsigned int num_bins) {
    const int BLOCK_SIZE = 512;
    const unsigned int gridSize = (num_elements - 1) / BLOCK_SIZE + 1;
    const size_t smemBytes = num_bins * sizeof(unsigned int);
    histo_kernel<<<gridSize, BLOCK_SIZE, smemBytes>>>(input, num_elements, bins, num_bins);
}
|
19,831 | extern "C"{
const int Tile_width = 16;
// Kernel to Perform median filter operation on a image using Shared memory
// 3x3 median filter over an 8-bit image, staged through a
// (Tile_width+2)^2 shared-memory tile with a one-pixel halo.
// Image border pixels pass through unchanged: the whole 3x3 window is
// overwritten with the centre pixel before sorting, so the median equals it.
// NOTE(review): several halo branches hard-code 15 where Tile_width-1 is
// meant, so this only works for Tile_width == 16.
// NOTE(review): the right-halo zero-fill writes images[x+2][y+1] while the
// matching global load writes images[y+1][x+2] — the indices look transposed
// in the zero-fill; verify.
// NOTE(review): every __syncthreads() below sits inside the divergent
// `if(row<height && col<width)` guard — undefined behavior whenever the image
// size is not a multiple of the tile size; confirm inputs are tile-aligned.
__global__ void median_filter_shared(unsigned char* imaged, unsigned char* outputImaged,int width,int height ){
    __shared__ unsigned char images[Tile_width+2][Tile_width+2]; // Creating a shared memory element for each block
    int row = blockIdx.y * Tile_width + threadIdx.y; //Current operating row
    int col = blockIdx.x * Tile_width + threadIdx.x; // Current operating colum
    int x = threadIdx.x; // indicates the thread id in the x direction within the block, in other words x indicates the current column within the block
    int y = threadIdx.y; // indicates the thread id in the y direction within the block, in other words y indicates the current row within the block
    unsigned char temp[9];
    if(row<height && col < width){
        // Making all borders in shared memory element to be zero
        if( x == 0 ){ //
            images [y+1][x] = 0;
            if( y == 0)
                images[y][x] = 0;
            if(y == Tile_width -1)
                images[y+2] [x]= 0;
        }
        if(x == Tile_width -1){
            images[x+2][y+1] = 0;
            if(y==0)
                images[y][x+2] = 0;
            if(y == 15)
                images[y+2][x+2] = 0;
        }
        if( y==0 )
        {
            images[y] [x+1]= 0;
        }
        if( y==15)
        {
            images[y+2][x+1]=0;
        }
        __syncthreads();
        images[y+1][x+1] = imaged[row*width + col]; // Copies the respective elemnts from global memory to shared memory.
        __syncthreads();
        // The following set of code below copies the respective border elements for each shared memory variable from the Global memory.
        if( x == 0 && col>0 && col < width ){
            images [y+1] [x]= imaged[row *width +(col -1)]; // Copies elements to row 1 to 16 in column 0 in shared memory variable from Global memory [w.r.t 3 x 3 filter and Tile_width=16].
            if( y == 0)
                images[y] [x]= imaged[(row-1)*width + (col-1)]; // Copies element to the row 0 in column 0 in shared memory variable from global memory[w.r.t 3 x 3 filter and Tile_width=16] .
            if(y == Tile_width -1)
                images[y+2][x] = imaged[(row+1)*width + (col-1)] ;// Copies element to the row 17 in column 0 in shared memory variable from global memory[w.r.t 3 x 3 filter and Tile_width=16].
        }
        if(x == Tile_width -1 && col>0 && col < width ){
            images[y+1][x+2] = imaged[row * width + (col+1)]; // Copies elements to row 1 to 16 in column 17 in shared memory variable from Global memory[w.r.t 3 x 3 filter and Tile_width=16].
            if(y==0)
                images[y] [x+2]= imaged[(row-1) * width + (col+1)];// Copies element to the row 0 in column 17 in shared memory variable from global memory[w.r.t 3 x 3 filter and Tile_width=16].
            if(y == 15)
                images[y+2][x+2] = imaged[(row+1) * width + (col+1)];// Copies element to the row 17 in column 17 in shared memory variable from global memory[w.r.t 3 x 3 filter and Tile_width=16].
        }
        if( y==0 && row >0 && row < height)
        {
            images[y][x+1] = imaged[(row-1) * width + col];// Copies elements to col 1 to 16 in row 0 in shared memory variable from Global memory[w.r.t 3 x 3 filter and Tile_width=16].
        }
        if( y==15 && row >0 && row < height )
        {
            images[y+2][x+1]= imaged[(row+1) * width + col];// Copies elements to col 1 to 16 in row 17 in shared memory variable from Global memory[w.r.t 3 x 3 filter and Tile_width=16].
        }
        __syncthreads();
        // Copies the filter values for a pixels
        temp[0] = images[y][x];
        temp[1] =images [y+1][x];
        temp[2] = images[y+2][x];
        temp[3] = images[y][x+1];
        temp[4] = images[y+1][x+1];
        temp[5] = images[y+2][x+1];
        temp[6] = images [y][x+2];
        temp[7] = images[y+1][x+2];
        temp[8] = images[y+2][x+2];
        __syncthreads();
        // Replication of border pixels (sizeof(temp) == 9 for unsigned char[9])
        if(row == 0 || row == height-1 || col == 0 || col == width -1)
        {
            for(int i=0; i < sizeof(temp); i++)
            {
                temp[i]=imaged[row*width + col];
            }
        }
        //Bubble sort to find the median value
        for (int k = 0; k < sizeof(temp); k++) {
            for (int l = k+1; l < sizeof(temp); l++) {
                if (temp[k] > temp[l]) {
                    unsigned char temp1 = temp[k];
                    temp[k] = temp[l];
                    temp[l] = temp1;
                }
            }
        }
        outputImaged[row * width +col] = temp[4]; // middle element == median
    }
}
// Kernel to Perform median filter operation on a image using Global memory
// 3x3 median filter reading directly from global memory.
// Image border pixels pass through unchanged: all nine window samples are
// set to the centre pixel, so the median equals the original value.
__global__ void median_filter_global(unsigned char* imaged, unsigned char* outputImaged,int width,int height ){
    int row = blockIdx.y * Tile_width + threadIdx.y;
    int col = blockIdx.x * Tile_width + threadIdx.x;
    if (row >= height || col >= width)
        return; // thread falls outside the image
    unsigned char window[9]; // the 3x3 neighbourhood
    if (col == 0 || row == 0 || col == width - 1 || row == height - 1) {
        // Border: replicate the centre pixel into the whole window.
        for (int i = 0; i < 9; ++i)
            window[i] = imaged[col + width * row];
    } else {
        // Interior: gather the neighbourhood column by column.
        int w = 0;
        for (int dc = -1; dc <= 1; ++dc)
            for (int dr = -1; dr <= 1; ++dr)
                window[w++] = imaged[(col + dc) + width * (row + dr)];
    }
    // Full exchange sort of the nine samples; the median is the middle one.
    for (int k = 0; k < 9; ++k) {
        for (int l = k + 1; l < 9; ++l) {
            if (window[k] > window[l]) {
                unsigned char t = window[k];
                window[k] = window[l];
                window[l] = t;
            }
        }
    }
    outputImaged[row * width + col] = window[4];
}
} |
19,832 | #ifndef VORTEX_KERNEL_CU
#define VORTEX_KERNEL_CU
#include <cuda.h>
#include <cuda_runtime.h>
// Vortex Indicator over a flat (symbol x date x time) bar array.
// device_arr packs `columns` doubles per bar; from the accesses below,
// column 0 is used as the bar high, column 1 as the low and column 2 as the
// close — TODO confirm against the caller's packing.
// For each bar the kernel walks BACKWARDS through up to `win_size` valid
// bars of the same symbol, accumulating +VM, -VM and the true range, then
// writes [trueRange, -VI, +VI] to device_outval, or *error_val sentinels
// when the inputs are invalid or there is not enough history.
__global__ void VortexIndicator(
        double *device_arr, int symbol_count, int indic_len, int dataLength,
        int time_len, int columns, double *device_outval, int win_size, double *error_val)
{
    int blockID = blockIdx.y*gridDim.x + blockIdx.x; /// find block Id in 2D grid of 1D blocks
    int idx = (threadIdx.x + (blockID*blockDim.x)); /// compute thread index for blockId
    if (idx < indic_len)
    {
        int symbol_idx = idx / dataLength; /// find symbol index from thread Idx
        int symbolOffset = symbol_idx * dataLength; /// compute each symbol start offset in 1D array
        int date_idx = (idx - symbolOffset) / time_len;
        int dateOffset = date_idx*time_len; /// compute each date start offset
        int time_idx = idx - symbolOffset - dateOffset; /// compute time Idx for symbol and date from thread Idx
        int index = symbolOffset + dateOffset + time_idx;
        int cIdx = index * columns;
        int outIdx = index * columns;
        // skip all error values and values less then average win size
        if(device_arr[cIdx] == *error_val || device_arr[cIdx+1] == *error_val ||
                device_arr[cIdx+2] == *error_val ||(win_size == 0 && time_idx == 0))
        {
            device_outval[outIdx] = *error_val;
            device_outval[outIdx+1] = *error_val;
            device_outval[outIdx+2] = *error_val;
        }
        else
        {
            int count = 0;
            double plusVM = 0;
            double minusVM = 0;
            double trueRange = 0;
            // high/low hold the values of the bar AFTER the one currently
            // visited (we iterate backwards in time).
            double high = *error_val;
            double low = *error_val;
            for(int i=index; i >= symbolOffset; i--)
            {
                cIdx = i * columns;
                if(device_arr[cIdx] != *error_val &&
                        device_arr[cIdx+1] != *error_val &&
                        device_arr[cIdx+2] != *error_val)
                {
                    if(high != *error_val && low != *error_val)
                    {
                        plusVM += abs(high - device_arr[cIdx+1]); // current high - previous low
                        minusVM += abs(low - device_arr[cIdx]); // current low - previous high
                        double val = max(high-low, abs(high-device_arr[cIdx+2]));
                        trueRange += max(val, abs(low-device_arr[cIdx+2]));
                        count++;
                    }
                    high = device_arr[cIdx];
                    low = device_arr[cIdx+1];
                    if(count == win_size)
                        break;
                }
            }
            if(count == win_size)
            {
                device_outval[outIdx] = trueRange;
                device_outval[outIdx+1] = minusVM / trueRange;
                device_outval[outIdx+2] = plusVM / trueRange;
            }
            else
            {
                // Not enough valid history inside this symbol's range.
                device_outval[outIdx] = *error_val;
                device_outval[outIdx+1] = *error_val;
                device_outval[outIdx+2] = *error_val;
            }
        }
    }
}
#endif |
19,833 | #include <stdio.h>
#include <assert.h>
const int radius = 4;
#define swap(x, y) { double t = (x); (x) = (y); (y) = (t); }
// Refresh the `radius`-wide ghost cells at both ends of u[0..n-1] so the
// stencil sees periodic boundary conditions: the leftmost interior points
// are mirrored into the right halo and the rightmost interior points into
// the left halo. Only the first `radius` threads of the grid do any work.
__inline__ __device__ void periodic_bc(double *u, const int n) {
    int t = threadIdx.x + blockDim.x * blockIdx.x;
    if (t < radius) {
        u[n - radius + t] = u[radius + t];         // left interior -> right halo
        u[t] = u[n - 2 * radius + t];              // right interior -> left halo
    }
}
// One explicit time step of the 1D variable-coefficient heat equation using
// an 8th-order (radius = 4) central difference:
//   v[i] = update * u[i] + (dt/h^2) * a[i] * D2u[i]
// Grid-stride over the interior points, then refresh the periodic ghost
// cells of the output. n is the total point count including 2*radius ghosts.
__global__ void heat_kernel(double *v, const double *u, const int n, const double update,
                            const double *a, const double dt, const double h) {
    // 8th-order central second-derivative stencil weights.
    const double c[9] = { -1.0 / 560, 8.0 / 315, -1.0 / 5, 8.0 / 5, -205.0 / 72,
                          8.0 / 5, -1.0 / 5, 8.0 / 315, -1.0 / 560 };
    const double alpha = dt / (h * h);
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockDim.x * blockIdx.x + radius; i < n - radius; i += stride) {
        double lap = 0.0;
        #pragma unroll
        for (int j = -radius; j <= radius; ++j)
            lap += c[j + radius] * u[i + j];
        v[i] = update * u[i] + alpha * a[i] * lap;
    }
    periodic_bc(v, n);
}
// Thin kernel wrapper so periodic_bc can be exercised on its own.
__global__ void test_periodic_bc(double *u, const int n) {
    periodic_bc(u, n);
}
|
19,834 |
// From a sparse matrix in 1-based COO form (h_rows/h_cols, nnz entries),
// build two per-index maps using only the FIRST stored entry of each row:
//   h_left[r] = 0-based column of the first entry seen for row r, else -1
//   h_beta[c] = largest 0-based row whose first entry lies in column c, else -1
// h_beta and h_left must each have room for m ints.
inline void create_beta_h(int *h_beta, int *h_left, int *h_rows, int *h_cols, int m, int nnz){
    int *seen = (int*)malloc( sizeof(int) * m ); // per-row "already handled" flag
    for (int r = 0; r < m; ++r) {
        h_left[r] = -1;
        h_beta[r] = -1;
        seen[r] = 0;
    }
    for (int l = 0; l < nnz; ++l) {
        int row = h_rows[l] - 1; // convert 1-based -> 0-based
        int col = h_cols[l] - 1;
        if (!seen[row]) {
            seen[row] = 1;
            h_left[row] = col;
            if (row > h_beta[col])
                h_beta[col] = row; // keep the maximum row per column
        }
    }
    free(seen);
}
// Device-side wrapper: build the beta/left maps on the host, then copy them
// into the given device buffers (each m ints).
inline void create_beta(int *d_beta, int *d_left, int *h_rows, int *h_cols, int m, int nnz){
    int *tmp_beta = (int*)malloc( sizeof(int) * m );
    int *tmp_left = (int*)malloc( sizeof(int) * m );
    create_beta_h(tmp_beta, tmp_left, h_rows, h_cols, m, nnz);
    cudaMemcpy(d_beta, tmp_beta, m * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_left, tmp_left, m * sizeof(int), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize(); // make sure the copies are done before freeing
    free(tmp_beta);
    free(tmp_left);
}
|
19,835 | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
///////////////////////////////////////////////////////////////////////////////
#include <cufft.h>
#include <math_constants.h>
//Round a / b to nearest higher integer value
// Ceiling division: smallest integer >= a/b (assumes a >= 0, b > 0).
int cuda_iDivUp(int a, int b)
{
    return (a + b - 1) / b;
}
// complex math functions
// Complex conjugate: (x, y) -> (x, -y).
__device__
float2 conjugate(float2 arg)
{
    float2 res = arg;
    res.y = -res.y;
    return res;
}
// Euler's formula: e^{i*arg} = (cos arg, sin arg).
__device__
float2 complex_exp(float arg)
{
    float2 res;
    res.x = cosf(arg);
    res.y = sinf(arg);
    return res;
}
// Component-wise complex addition.
__device__
float2 complex_add(float2 a, float2 b)
{
    float2 s;
    s.x = a.x + b.x;
    s.y = a.y + b.y;
    return s;
}
// Complex product (ab.x + i*ab.y)(cd.x + i*cd.y).
__device__
float2 complex_mult(float2 ab, float2 cd)
{
    float re = ab.x * cd.x - ab.y * cd.y;
    float im = ab.x * cd.y + ab.y * cd.x;
    return make_float2(re, im);
}
// generate wave heightfield at time t based on initial heightfield and dispersion relationship
// Time-evolve the ocean wave spectrum:
//   H(k,t) = H0(k)*e^{i w t} + conj(H0(-k))*e^{-i w t}
// with deep-water dispersion w = sqrt(g*|k|). One thread per output texel.
// NOTE(review): in_index/in_mindex are computed before the bounds test, and
// (out_height - y)*in_width + (out_width - x) reaches h0[out_height*in_width]
// when x == 0 and y == 0 — h0 must be padded by one row/column for this to
// be safe; confirm against the h0 allocation.
__global__ void generateSpectrumKernel(float2* h0,
        float2 *ht,
        unsigned int in_width,
        unsigned int out_width,
        unsigned int out_height,
        float t,
        float patchSize)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int in_index = y*in_width+x;
    unsigned int in_mindex = (out_height - y)*in_width + (out_width - x); // mirrored
    unsigned int out_index = y*out_width+x;
    // calculate wave vector (spectrum is centred: k spans +-pi*out_width/patchSize)
    float2 k;
    k.x = (-(int)out_width / 2.0f + x) * (2.0f * CUDART_PI_F / patchSize);
    k.y = (-(int)out_width / 2.0f + y) * (2.0f * CUDART_PI_F / patchSize);
    // calculate dispersion w(k)
    float k_len = sqrtf(k.x*k.x + k.y*k.y);
    float w = sqrtf(9.81f * k_len);
    if ((x < out_width) && (y < out_height)) {
        float2 h0_k = h0[in_index];
        float2 h0_mk = h0[in_mindex];
        // output frequency-space complex values
        ht[out_index] = complex_add( complex_mult(h0_k, complex_exp(w * t)), complex_mult(conjugate(h0_mk), complex_exp(-w * t)) );
        //ht[out_index] = h0_k;
    }
}
// update height map values based on output of FFT
// Take the real part of the inverse-FFT output and undo the (-1)^(x+y)
// frequency shift, producing the spatial-domain height map.
// NOTE(review): there is no x/y bounds guard, and the launch wrapper rounds
// the grid up to 8x8 blocks — this reads/writes past the buffers whenever
// width or height is not a multiple of 8; confirm the buffers are padded.
__global__ void updateHeightmapKernel(float* heightMap,
        float2* ht,
        unsigned int width)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int i = y*width+x;
    // cos(pi * (m1 + m2))
    float sign_correction = ((x + y) & 0x01) ? -1.0f : 1.0f;
    heightMap[i] = ht[i].x * sign_correction;
}
// generate slope by partial differences in spatial domain
// Slope of the height field by central differences; border texels get a
// zero slope.
// NOTE(review): slopeOut[i] is written unconditionally, so when width/height
// are not multiples of the 8x8 launch block, threads beyond the image write
// out of bounds — confirm the buffers are padded.
__global__ void calculateSlopeKernel(float* h, float2 *slopeOut, unsigned int width, unsigned int height)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int i = y*width+x;
    float2 slope = make_float2(0.0f, 0.0f);
    if ((x > 0) && (y > 0) && (x < width-1) && (y < height-1)) {
        slope.x = h[i+1] - h[i-1];        // d/dx
        slope.y = h[i+width] - h[i-width]; // d/dy
    }
    slopeOut[i] = slope;
}
// wrapper functions
extern "C"
void cudaGenerateSpectrumKernel(float2* d_h0,
float2 *d_ht,
unsigned int in_width,
unsigned int out_width,
unsigned int out_height,
float animTime,
float patchSize)
{
dim3 block(8, 8, 1);
dim3 grid(cuda_iDivUp(out_width, block.x), cuda_iDivUp(out_height, block.y), 1);
generateSpectrumKernel<<<grid, block>>>(d_h0, d_ht, in_width, out_width, out_height, animTime, patchSize);
}
extern "C"
void cudaUpdateHeightmapKernel(float* d_heightMap,
float2* d_ht,
unsigned int width,
unsigned int height)
{
dim3 block(8, 8, 1);
dim3 grid(cuda_iDivUp(width, block.x), cuda_iDivUp(height, block.y), 1);
updateHeightmapKernel<<<grid, block>>>(d_heightMap, d_ht, width);
}
extern "C"
void cudaCalculateSlopeKernel( float* hptr, float2 *slopeOut,
unsigned int width, unsigned int height)
{
dim3 block(8, 8, 1);
dim3 grid2(cuda_iDivUp(width, block.x), cuda_iDivUp(height, block.y), 1);
calculateSlopeKernel<<<grid2, block>>>(hptr, slopeOut, width, height);
}
|
19,836 | #include "includes.h"
// Move one reference vector (row `cell` of referenceVector, inputSize wide)
// towards its stored difference vector. The learning rate decays
// exponentially from youngErrorFraction towards oldErrorFraction as
// winningCount[cell] grows.
// Launch: one thread per vector component, 2D grid of 1D blocks.
__global__ void AdaptRefVectorKernel( int cell, float *referenceVector, float oldErrorFraction, float youngErrorFraction, float decayFactor, int *winningCount, float *difference, int inputSize )
{
    // Flatten the 2D-grid / 1D-block coordinates into one component index.
    int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
            + blockDim.x*blockIdx.x //blocks preceeding current block
            + threadIdx.x;
    if(threadId < inputSize)
    {
        // Exponentially decaying learning rate for this cell.
        float errorFraction = (youngErrorFraction - oldErrorFraction) * expf( - decayFactor * winningCount[cell] ) + oldErrorFraction;
        referenceVector[cell * inputSize + threadId] += errorFraction * difference[cell * inputSize + threadId];
    }
} |
19,837 | #include <iostream>
#include <fstream>
#include <numeric>
#include <cstdlib>
#include <climits>
#include <stdexcept>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/tuple.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <curand.h>
#include <curand_kernel.h>
static void CheckCudaErrorAux(const char *, unsigned, const char *,
cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
const int MAX_OPERATIONS_PER_STEP = 5;
const int MAX_STEPS_PER_JOB = 20;
const int MAX_JOBS = 20;
const int MAX_MACHINES = 20;
int POPULATION_SIZE = 2000;
int INDIVIDUAL_LEN = 20; // TODO
const int SIZE_PARENT_POOL = 7;
int TOTALTHREADS = 2048;
int BLOCKSIZE = 1024;
int total_jobs, total_machines, max_operations;
// One candidate way of executing a step: which machine, and for how long.
struct Operation {
    int id_machine;
    int processing_time;
};
// A step of a job: a set of alternative operations (machines) to run it on.
struct Step {
    int len; // number of valid entries in candidates
    Operation candidates[MAX_OPERATIONS_PER_STEP];
};
// A job: an ordered sequence of steps.
struct Job {
    int len; // number of valid entries in steps
    Step steps[MAX_STEPS_PER_JOB];
};
// Problem instance, filled by parse_input().
Job input_data[MAX_JOBS];
// One gene of a chromosome: run step id_step of job id_job with operation
// id_operation (which runs on machine id_machine).
struct Gene {
    int id_job;
    int id_step;
    // Make sure update them both.
    int id_machine;
    int id_operation;
};
// Debug formatting for a gene: "[job, step, operation]" (machine omitted).
std::ostream &operator<<(std::ostream &os, const Gene &gene) {
    os << "[" << gene.id_job << ", " << gene.id_step << ", "
            << gene.id_operation << "]";
    return os;
}
// Read a flexible-job-shop instance into the input_data global and set
// total_jobs / total_machines / max_operations / INDIVIDUAL_LEN.
// File layout: "jobs machines max_ops", then per job its step count, then
// per step its candidate count followed by (machine_id processing_time)
// pairs. Machine ids are 1-based in the file and stored 0-based.
// INDIVIDUAL_LEN ends up as the total number of steps over all jobs.
// Throws std::runtime_error when a dimension exceeds the compile-time caps,
// and std::ifstream::failure on malformed input (failbit exceptions are on).
void parse_input(const char *path) {
    auto input = std::ifstream();
    input.exceptions(std::ifstream::failbit);
    input.open(path);
    input >> total_jobs >> total_machines >> max_operations;
    if (total_jobs > MAX_JOBS) {
        throw std::runtime_error("Too many jobs");
    }
    if (total_machines > MAX_MACHINES) {
        throw std::runtime_error("Too many machines");
    }
    INDIVIDUAL_LEN = 0;
    for (int id_job = 0; id_job < total_jobs; id_job++) {
        int number_steps;
        input >> number_steps;
        if (number_steps > MAX_STEPS_PER_JOB) {
            throw std::runtime_error("Too many steps");
        }
        input_data[id_job].len = number_steps;
        for (int id_step = 0; id_step < number_steps; id_step++) {
            int number_operations;
            input >> number_operations;
            if (number_operations > MAX_OPERATIONS_PER_STEP) {
                throw std::runtime_error("Too many operations");
            }
            input_data[id_job].steps[id_step].len = number_operations;
            for (int id_operation = 0; id_operation < number_operations;
                    id_operation++) {
                int id_machine;
                int processing_time;
                input >> id_machine >> processing_time;
                // Convert the file's 1-based machine ids to 0-based.
                input_data[id_job].steps[id_step].candidates[id_operation].id_machine =
                        id_machine - 1;
                input_data[id_job].steps[id_step].candidates[id_operation].processing_time =
                        processing_time;
            }
            INDIVIDUAL_LEN++; // one gene per step
        }
    }
}
// Give every thread its own curand state: same seed, thread index as the
// subsequence, so the per-thread streams are independent.
__global__ void init_rand_kernel(curandState_t *states, int seed) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, tid, 0, &states[tid]);
}
// Fill numbers[0..len) with uniform values in [0, max_value), one thread per
// element.
// Fix: `len` was accepted but never checked, so launches whose grid
// over-covers the array wrote (and advanced RNG states) out of bounds; the
// guard makes over-provisioned launches safe.
__global__ void fill_rand_kernel(int *numbers, int len, int max_value,
        curandState_t *states) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len)
        numbers[idx] = curand(&states[idx]) % max_value;
}
// Build a random initial population. Each individual is a random
// interleaving of all jobs' steps in which every job's steps appear in
// increasing id_step order (enforced by the per-job next_step cursor).
// id_machine / id_operation are not set here — the stage-1 evaluation
// kernel fills them in.
__global__ void init_population_kernel(Gene *population, int population_size,
        int individual_len, Job *jobs, int total_jobs,
        curandState_t *rand_states) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int next_step[MAX_JOBS]; // next unscheduled step of each job
    if (index < population_size) {
        for (int i = index; i < population_size; i += stride) {
            int cursor = 0;
            Gene *me = population + i * individual_len;
            memset(next_step, 0, sizeof(next_step));
            while (cursor < individual_len) {
                // Rejection-sample a job that still has steps left.
                int id_job = curand(&rand_states[i]) % total_jobs;
                if (next_step[id_job] < jobs[id_job].len) {
                    me[cursor].id_job = id_job;
                    me[cursor].id_step = next_step[id_job];
                    next_step[id_job]++;
                    cursor++;
                }
            }
        }
    }
}
// Tournament selection: for each population slot i, examine its
// SIZE_PARENT_POOL pre-drawn candidate indices and keep the one with the
// lowest score (lower makespan == fitter). parents[i] receives the winner's
// population index.
__global__ void pick_parents_kernel(int *parents, int *parent_candidates,
        int *scores, int population_size) {
    int start = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int i = start; i < population_size; i += step) {
        int winner = -1;
        int winner_score = INT_MAX;
        for (int j = 0; j < SIZE_PARENT_POOL; j++) {
            int cand = parent_candidates[i * SIZE_PARENT_POOL + j];
            if (scores[cand] < winner_score) {
                winner_score = scores[cand];
                winner = cand;
            }
        }
        parents[i] = winner;
    }
}
// Assignment crossover: keep parent A's sequencing (job/step order) but take
// the machine/operation choice for each (job, step) pair from parent B.
// Assumes both parents contain exactly the same set of (job, step) pairs, so
// the reverse index built from B covers every pair looked up from A.
__device__ void assignment_crossover(Gene *child, Gene *parent_a,
        Gene *parent_b, int individual_len, Job *jobs) {
    // reverse_index[job][step] = position of that pair inside parent_b.
    int reverse_index[MAX_JOBS][MAX_STEPS_PER_JOB];
    for (int s = 0; s < individual_len; s++) {
        int id_job = parent_b[s].id_job;
        int id_step = parent_b[s].id_step;
        reverse_index[id_job][id_step] = s;
    }
    for (int s = 0; s < individual_len; s++) {
        int id_job = parent_a[s].id_job;
        int id_step = parent_a[s].id_step;
        int i = reverse_index[id_job][id_step];
        child[s] = parent_a[s];
        // Overwrite the assignment with parent B's choice for this pair.
        child[s].id_operation = parent_b[i].id_operation;
        child[s].id_machine = parent_b[i].id_machine;
    }
}
// Sequencing crossover: copy parent B's genes up to a random crossover
// point, then append — in parent A's relative order — the genes of every
// (job, step) pair not yet represented. Because a valid individual lists
// each job's steps in increasing order, the copied prefix holds a prefix of
// each job's steps, so the "id_step greater than last copied step" test
// selects exactly the missing pairs and the child ends up complete.
__device__ void sequencing_crossover(Gene *child, Gene *parent_a,
        Gene *parent_b, int individual_len, Job *jobs,
        curandState_t *rand_state) {
    int crossover_point = curand(rand_state) % individual_len;
    int last_step[MAX_JOBS]; // highest id_step copied from B, per job
    for (int i = 0; i < MAX_JOBS; i++) {
        last_step[i] = -1;
    }
    for (int s = 0; s < crossover_point; s++) {
        int id_job = parent_b[s].id_job;
        int id_step = parent_b[s].id_step;
        child[s] = parent_b[s];
        last_step[id_job] = id_step;
    }
    int cursor = crossover_point;
    for (int s = 0; s < individual_len && cursor<individual_len; s++) {
        int id_job = parent_a[s].id_job;
        if (last_step[id_job] < parent_a[s].id_step) {
            child[cursor] = parent_a[s];
            cursor++;
        }
    }
}
// Mutation: re-roll the operation (and hence machine) of 5 random genes,
// keeping the sequencing untouched.
__device__ void assignment_mutation(Gene *individual, int individual_len,
        Job *jobs, curandState_t *rand_state) {
    for (int round = 0; round < 5; ++round) {
        int point = curand(rand_state) % individual_len;
        int job = individual[point].id_job;
        int step = individual[point].id_step;
        int n_ops = jobs[job].steps[step].len;
        int op = curand(rand_state) % n_ops;
        // Keep id_operation and id_machine in sync.
        individual[point].id_operation = op;
        individual[point].id_machine =
                jobs[job].steps[step].candidates[op].id_machine;
    }
}
// Mutation: attempt 5 swaps of randomly chosen adjacent gene pairs. A swap
// is skipped when both genes belong to the same job, since that would break
// the job's step ordering.
__device__ void swapping_mutation(Gene *individual, int individual_len,
        Job *jobs, curandState_t *rand_state) {
    for (int round = 0; round < 5; ++round) {
        int p = curand(rand_state) % (individual_len - 1);
        if (individual[p].id_job != individual[p + 1].id_job) {
            thrust::swap(individual[p], individual[p + 1]);
        }
    }
}
// Stage-1 breeding. The first 80% of the new population comes from
// sequencing crossover of tournament winners (parents[i] with parents[i+1];
// i+1 stays in range because i < population_size*8/10 <= population_size-1);
// the remaining 20% are copies of parents[i] with a swapping mutation.
__global__ void stage_1_breed_kernel(int *parents, Gene *population,
        Gene *new_population, int population_size, int individual_len,
        Job *jobs, curandState_t *rand_states) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    if (index < population_size) {
        for (int i = index; i < population_size; i += stride) {
            if (i < population_size * 8 / 10) {
                sequencing_crossover(&new_population[i * individual_len],
                        &population[parents[i] * individual_len],
                        &population[parents[i + 1] * individual_len],
                        individual_len, jobs, &rand_states[i]);
            } else {
                // Copy the selected parent ...
                for (int s = 0; s < individual_len; s++) {
                    new_population[i * individual_len + s] =
                            population[parents[i] * individual_len + s];
                }
                // ... then mutate its sequencing.
                swapping_mutation(&new_population[i * individual_len],
                        individual_len, jobs, &rand_states[i]);
            }
        }
    }
}
// Stage-2 breeding: 40% assignment crossover, 40% sequencing crossover
// (both pairing parents[i] with parents[i+1], safely in range for
// i < population_size*8/10), then 10% assignment-mutated copies and 10%
// swap-mutated copies of parents[i].
__global__ void stage_2_breed_kernel(int *parents, Gene *population,
        Gene *new_population, int population_size, int individual_len,
        Job *jobs, curandState_t *rand_states) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    if (index < population_size) {
        for (int i = index; i < population_size; i += stride) {
            if (i < population_size * 4 / 10) {
                assignment_crossover(&new_population[i * individual_len],
                        &population[parents[i] * individual_len],
                        &population[parents[i + 1] * individual_len],
                        individual_len, jobs);
            } else if (i < population_size * 8 / 10) {
                sequencing_crossover(&new_population[i * individual_len],
                        &population[parents[i] * individual_len],
                        &population[parents[i + 1] * individual_len],
                        individual_len, jobs, &rand_states[i]);
            } else {
                // Copy the selected parent, then mutate the copy.
                for (int s = 0; s < individual_len; s++) {
                    new_population[i * individual_len + s] =
                            population[parents[i] * individual_len + s];
                }
                if (i < population_size * 9 / 10) {
                    assignment_mutation(&new_population[i * individual_len],
                            individual_len, jobs, &rand_states[i]);
                } else {
                    swapping_mutation(&new_population[i * individual_len],
                            individual_len, jobs, &rand_states[i]);
                }
            }
        }
    }
}
// Stage-1 fitness + decoding. For each individual, walk its genes in order
// and, for every (job, step), greedily pick the candidate operation whose
// machine would finish that step earliest, given the machine's current end
// time and the finish time of the job's previous step. The winning
// operation/machine are written back into the gene, machine clocks advance,
// and scores[i] receives the makespan (largest end time seen).
__global__ void stage_1_evaluate_kernel(int *scores, Gene *population,
        int population_size, int individual_len, Job *jobs) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int value; // makespan accumulator
    int machines[MAX_MACHINES]; // per-machine busy-until time
    int last_step_id_machine[MAX_JOBS]; // machine running each job's latest step
    if (index < population_size) {
        for (int i = index; i < population_size; i += stride) {
            value = 0;
            memset(machines, 0, sizeof(machines));
            Gene *me = population + i * individual_len;
            for (int s = 0; s < individual_len; s++) {
                int id_job = me[s].id_job;
                int id_step = me[s].id_step;
                int len = jobs[id_job].steps[id_step].len;
                int best_end_time = INT_MAX;
                int best_id_operation = -1;
                int best_id_machine = -1;
                // Greedy search to find best operation in this step
                for (int id_operation = 0; id_operation < len; id_operation++) {
                    int processing_time =
                            jobs[id_job].steps[id_step].candidates[id_operation].processing_time;
                    int id_machine =
                            jobs[id_job].steps[id_step].candidates[id_operation].id_machine;
                    int machine_end_time = machines[id_machine];
                    if (id_step > 0) {
                        // Cannot start before the job's previous step ends.
                        int previous_id_machine = last_step_id_machine[id_job];
                        if (machine_end_time < machines[previous_id_machine]) {
                            machine_end_time = machines[previous_id_machine];
                        }
                    }
                    machine_end_time += processing_time;
                    if (machine_end_time < best_end_time) {
                        best_end_time = machine_end_time;
                        best_id_operation = id_operation;
                        best_id_machine = id_machine;
                    }
                }
                // Commit the greedy choice back into the chromosome.
                me[s].id_operation = best_id_operation;
                me[s].id_machine = best_id_machine;
                machines[best_id_machine] = best_end_time;
                last_step_id_machine[id_job] = best_id_machine;
                if (best_end_time > value) {
                    value = best_end_time;
                }
            }
            scores[i] = value;
        }
    }
}
// Stage-2 fitness: the machine/operation assignment is already fixed in the
// genes, so just simulate the schedule and record the makespan in scores[i].
// NOTE(review): last_step_id_machine[id_job] is read before any step of that
// job has stored into it; the value is only USED under the id_step > 0
// guard, but the unconditional read of an uninitialized slot is untidy.
__global__ void stage_2_evaluate_kernel(int *scores, Gene *population,
        int population_size, int individual_len, Job *jobs) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int value; // makespan accumulator
    int machines[MAX_MACHINES]; // per-machine busy-until time
    int last_step_id_machine[MAX_JOBS]; // machine running each job's latest step
    if (index < population_size) {
        for (int i = index; i < population_size; i += stride) {
            value = 0;
            memset(machines, 0, sizeof(machines));
            Gene *me = population + i * individual_len;
            for (int s = 0; s < individual_len; s++) {
                int id_job = me[s].id_job;
                int id_step = me[s].id_step;
                int id_machine = me[s].id_machine;
                int id_operation = me[s].id_operation;
                int processing_time =
                        jobs[id_job].steps[id_step].candidates[id_operation].processing_time;
                int previous_id_machine = last_step_id_machine[id_job];
                // Start no earlier than the job's previous step finished.
                machines[id_machine] =
                        (id_step > 0
                                && machines[id_machine]
                                        < machines[previous_id_machine]) ?
                                machines[previous_id_machine] :
                                machines[id_machine];
                machines[id_machine] += processing_time;
                value = machines[id_machine] > value ?
                        machines[id_machine] : value;
                last_step_id_machine[id_job] = id_machine;
            }
            scores[i] = value;
        }
    }
}
// Genetic-algorithm driver for the flexible job-shop scheduling problem:
// reads an .fjs instance, evolves a population on the GPU in two stages
// (stage 1 re-derives machine assignments greedily; stage 2 evolves them),
// and prints the best individual found.
int main(int argc, const char *argv[]) {
    // Report basic capabilities of device 0 (informational only).
    cudaDeviceProp prop;
    CUDA_CHECK_RETURN(cudaGetDeviceProperties(&prop, 0));
    std::cout << "GPU device: " << prop.name << std::endl;
    std::cout << "Number of SM: " << prop.multiProcessorCount << std::endl;
    std::cout << "Shared memory per block: " << prop.sharedMemPerBlock / 1024.0
            << " KB" << std::endl;
    std::cout << "Max Threads per block: " << prop.maxThreadsPerBlock
            << std::endl;
    std::cout << "Max Threads per SM: " << prop.maxThreadsPerMultiProcessor
            << std::endl;
    // Instance file: first CLI argument, defaulting to ./mk01.fjs.
    const char *path = "./mk01.fjs";
    if (argc >= 2) {
        path = argv[1];
    }
    parse_input(path);
    std::cout << "total_jobs: " << total_jobs << "\n";
    std::cout << "total_machines: " << total_machines << "\n";
    std::cout << "INDIVIDUAL_LEN: " << INDIVIDUAL_LEN << "\n";
    // Echo the parsed instance: per job, per step, the candidate
    // (machine, processing_time) pairs.
    std::cout << "input data:\n";
    for (int id_job = 0; id_job < total_jobs; id_job++) {
        std::cout << "[Job " << id_job << "] ";
        for (int id_step = 0; id_step < input_data[id_job].len; id_step++) {
            std::cout << id_step << ": ";
            for (int id_operation = 0;
                    id_operation < input_data[id_job].steps[id_step].len;
                    id_operation++) {
                std::cout << "("
                        << input_data[id_job].steps[id_step].candidates[id_operation].id_machine
                        << ", "
                        << input_data[id_job].steps[id_step].candidates[id_operation].processing_time
                        << ") ";
            }
        }
        std::cout << "\n";
    }
    // Device copy of the job table, shared (read-only) by all kernels.
    Job *jobs;
    CUDA_CHECK_RETURN(cudaMalloc((void ** )&jobs, MAX_JOBS * sizeof(Job)));
    CUDA_CHECK_RETURN(
            cudaMemcpy(jobs, input_data, MAX_JOBS * sizeof(Job),
                    cudaMemcpyHostToDevice));
    // Device-resident GA state: current population, its scores, and a
    // scratch population written by the breed kernels each generation.
    thrust::device_vector<Gene> population(POPULATION_SIZE * INDIVIDUAL_LEN);
    thrust::device_vector<int> scores(POPULATION_SIZE);
    thrust::device_vector<Gene> new_population(
            POPULATION_SIZE * INDIVIDUAL_LEN);
    Gene *pop_ptr = thrust::raw_pointer_cast(&population[0]);
    Gene *new_pop_ptr = thrust::raw_pointer_cast(&new_population[0]);
    int *scores_ptr = thrust::raw_pointer_cast(&scores[0]);
    // Per-thread curand states: one per tournament slot and one per
    // individual.
    curandState_t *parent_candidates_states;
    CUDA_CHECK_RETURN(
            cudaMalloc((void ** )&parent_candidates_states,
                    POPULATION_SIZE * SIZE_PARENT_POOL
                            * sizeof(curandState_t)));
    curandState_t *population_states;
    CUDA_CHECK_RETURN(
            cudaMalloc((void ** )&population_states,
                    POPULATION_SIZE * sizeof(curandState_t)));
    // Parent candidate indexes
    int *parent_candidates;
    CUDA_CHECK_RETURN(
            cudaMalloc((void ** )&parent_candidates,
                    POPULATION_SIZE * SIZE_PARENT_POOL * sizeof(int)));
    // Picked parent indexes
    int *parents;
    CUDA_CHECK_RETURN(
            cudaMalloc((void ** )&parents, POPULATION_SIZE * sizeof(int)));
    // NOTE(review): both RNG pools are seeded with time(0) in the same
    // second and launched with 1 thread per block — confirm the sequence
    // offsets inside init_rand_kernel keep the streams independent.
    init_rand_kernel<<<POPULATION_SIZE, 1>>>(population_states, time(0));
    CUDA_CHECK_RETURN(cudaPeekAtLastError());
    init_rand_kernel<<<POPULATION_SIZE * SIZE_PARENT_POOL, 1>>>(
            parent_candidates_states, time(0));
    CUDA_CHECK_RETURN(cudaPeekAtLastError());
    // Random initial population, then an initial evaluation so the first
    // tournament selection has scores to work with.
    init_population_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(pop_ptr,
            POPULATION_SIZE, INDIVIDUAL_LEN, jobs, total_jobs,
            population_states);
    CUDA_CHECK_RETURN(cudaPeekAtLastError());
    stage_1_evaluate_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(scores_ptr, pop_ptr,
            POPULATION_SIZE, INDIVIDUAL_LEN, jobs);
    CUDA_CHECK_RETURN(cudaPeekAtLastError());
    // Stage 1: fixed number of generations of
    // select -> breed -> replace -> evaluate.
    int stage_1 = 3000;
    while (stage_1--) {
        fill_rand_kernel<<<POPULATION_SIZE * SIZE_PARENT_POOL, 1>>>(
                parent_candidates, POPULATION_SIZE * SIZE_PARENT_POOL,
                POPULATION_SIZE, parent_candidates_states);
        CUDA_CHECK_RETURN(cudaPeekAtLastError());
        pick_parents_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(parents,
                parent_candidates, scores_ptr, POPULATION_SIZE);
        CUDA_CHECK_RETURN(cudaPeekAtLastError());
        stage_1_breed_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(parents, pop_ptr,
                new_pop_ptr, POPULATION_SIZE, INDIVIDUAL_LEN, jobs,
                population_states);
        CUDA_CHECK_RETURN(cudaPeekAtLastError());
        // Device-to-device copy: the offspring become the new population.
        thrust::copy(thrust::device, new_population.begin(),
                new_population.end(), population.begin());
        stage_1_evaluate_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(scores_ptr,
                pop_ptr, POPULATION_SIZE, INDIVIDUAL_LEN, jobs);
        CUDA_CHECK_RETURN(cudaPeekAtLastError());
        // Periodic progress report (stage_1 counts down).
        if (stage_1 % 100 == 0) {
            int min_score = *thrust::min_element(scores.begin(), scores.end());
            std::cout << "stage_1: " << stage_1 << " score: " << min_score
                    << std::endl;
        }
    }
    // Stage 2: same loop shape, but with the stage-2 breed/evaluate pair
    // that works on the explicit machine assignments stored in the genes.
    int stage_2 = 2000;
    while (stage_2--) {
        fill_rand_kernel<<<POPULATION_SIZE * SIZE_PARENT_POOL, 1>>>(
                parent_candidates, POPULATION_SIZE * SIZE_PARENT_POOL,
                POPULATION_SIZE, parent_candidates_states);
        CUDA_CHECK_RETURN(cudaPeekAtLastError());
        pick_parents_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(parents,
                parent_candidates, scores_ptr, POPULATION_SIZE);
        CUDA_CHECK_RETURN(cudaPeekAtLastError());
        stage_2_breed_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(parents, pop_ptr,
                new_pop_ptr, POPULATION_SIZE, INDIVIDUAL_LEN, jobs,
                population_states);
        CUDA_CHECK_RETURN(cudaPeekAtLastError());
        thrust::copy(thrust::device, new_population.begin(),
                new_population.end(), population.begin());
        stage_2_evaluate_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(scores_ptr,
                pop_ptr, POPULATION_SIZE, INDIVIDUAL_LEN, jobs);
        CUDA_CHECK_RETURN(cudaPeekAtLastError());
        if (stage_2 % 100 == 0) {
            int min_score = *thrust::min_element(scores.begin(), scores.end());
            std::cout << "stage_2: " << stage_2 << " score: " << min_score
                    << std::endl;
        }
    }
    // Locate and print the best individual (element access on a
    // device_vector performs an implicit device-to-host copy).
    auto min_iter = thrust::min_element(scores.begin(), scores.end());
    int index = min_iter - scores.begin();
    std::cout << "Done" << std::endl;
    std::cout << "Best solution score: " << scores[index] << std::endl;
    for (int i = 0; i < INDIVIDUAL_LEN; i++) {
        std::cout << population[index * INDIVIDUAL_LEN + i] << " ";
    }
    std::cout << std::endl;
    // Release raw device allocations (thrust vectors free themselves).
    CUDA_CHECK_RETURN(cudaFree(parent_candidates_states));
    CUDA_CHECK_RETURN(cudaFree(population_states));
    CUDA_CHECK_RETURN(cudaFree(parent_candidates));
    CUDA_CHECK_RETURN(cudaFree(parents));
    CUDA_CHECK_RETURN(cudaFree(jobs));
    return 0;
}
/**
 * Abort the process with a diagnostic when a CUDA runtime call fails.
 * Reached through the CUDA_CHECK_RETURN-style macro, which supplies the
 * stringified statement and its call site.
 */
static void CheckCudaErrorAux(const char *file, unsigned line,
        const char *statement, cudaError_t err) {
    if (err != cudaSuccess) {
        std::cerr << statement << " returned " << cudaGetErrorString(err) << "("
                << err << ") at " << file << ":" << line << std::endl;
        exit(1);
    }
}
19,838 | #include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/iterator/permutation_iterator.h>
#include <iostream>
int main()
{
    // Gather indices: sum = source[3] + source[1] + source[0] + source[5].
    int map_init[4] = { 3, 1, 0, 5 };
    int source_init[6] = { 10, 20, 30, 40, 50, 60 };
    thrust::device_vector<int> map(map_init, map_init + 4);
    thrust::device_vector<int> source(source_init, source_init + 6);
    // Fuse the gather with the reduction via permutation iterators, so no
    // temporary gathered array is materialized:
    //   sum = 40 + 20 + 10 + 60 = 130
    int sum = thrust::reduce(
            thrust::make_permutation_iterator(source.begin(), map.begin()),
            thrust::make_permutation_iterator(source.begin(), map.end())
    );
    std::cout << "sum = " << sum << std::endl;
    return 0;
}
|
19,839 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
/*
 * In-place inclusive running sum (Hillis-Steele style) over d[0..blockDim.x],
 * intended for a single-block launch (main uses <<<1, count-1>>>).
 *
 * Fix: the original performed `d[tid + step] += d[tid]` with no barriers,
 * so one thread could read an element another thread was updating in the
 * same step (a data race; results were nondeterministic).  Each step now
 * snapshots its operand into a register, and the block synchronizes before
 * and after the writes.  The loop bounds are computed identically in every
 * thread, so the barriers are reached uniformly.
 */
__global__ void runningSum(int* d) {
    int threads = blockDim.x;
    int tid = threadIdx.x;
    for (int tc = threads, step = 1; tc > 0; step *= 2) {
        bool active = tid < tc;
        int operand = active ? d[tid] : 0;
        __syncthreads();          // all reads complete before anyone writes
        if (active) {
            d[tid + step] += operand;
        }
        __syncthreads();          // writes visible before the next step reads
        tc -= step;
    }
}
int main() {
    const int count = 16;
    const int size = count * sizeof(int);
    // Host input: 1, 2, ..., 16.
    int h[count];
    for (int i = 0; i < count; ++i) {
        h[i] = i + 1;
    }
    int* d;
    cudaMalloc(&d, size);
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
    // One block, count-1 threads: element 0 never receives an addition.
    runningSum<<<1, count - 1>>>(d);
    // The blocking copy-back also synchronizes with the kernel.
    cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < count; ++i) {
        cout << h[i] << '\t';
    }
    cudaFree(d);
    return 0;
}
|
19,840 | #include "includes.h"
/*
 * Fill idata[0..length-1] with Blackman window coefficients:
 *   w(n) = 0.42 - 0.5*cos(2*pi*n/(N-1)) + 0.08*cos(4*pi*n/(N-1))
 * (the standard alpha = 0.16 form: a0 = (1-alpha)/2, a1 = 1/2, a2 = alpha/2).
 *
 * Fix: the original multiplied the a0 term into the first cosine and used
 * sin() for the second harmonic, which is asymmetric and does not match
 * any Blackman-family window.  Float intrinsics (cosf) are used to avoid
 * double-precision promotion in a float kernel.
 */
__global__ void windowBlackman(float* idata, int length)
{
    int tidx = threadIdx.x + blockIdx.x*blockDim.x;
    if (tidx < length)
    {
        float phase = PI_F * tidx / (length - 1);
        idata[tidx] = 0.42f - 0.5f * cosf(2.0f * phase)
                            + 0.08f * cosf(4.0f * phase);
    }
}
19,841 | #include <stdio.h>
#include <cassert>
/*
 * Print basic capabilities of CUDA device 0.
 * Fix: prop.sharedMemPerBlock is size_t; the original printed it with %ld,
 * which is wrong on platforms where size_t != long (e.g. 64-bit Windows).
 * %zu is the portable size_t conversion.
 */
int main() {
    int n_devices;
    cudaGetDeviceCount(&n_devices);
    assert(n_devices > 0);   // bail out when no CUDA device is present
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    printf("Device name: %s\n", prop.name);
    printf("Shared memory per block (bytes): %zu\n", prop.sharedMemPerBlock);
    printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
    printf("Max threads per multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor);
    printf("Multiprocessor count: %d\n", prop.multiProcessorCount);
    return 0;
}
|
19,842 | #include "reduce.cuh"
#include "real.h"
#include "assert.h"
#include <iostream>
int main(){
    // 1024 ones as input; the reduction result itself is discarded —
    // this is a repeated-invocation stress/timing driver for reducev2.
    real summands[1024];
    for (int i = 0; i < 1024; ++i) {
        summands[i] = 1;
    }
    for (int j = 0; j < 1000; ++j) {
        reducev2(summands, 1024);
    }
}
|
19,843 | #include <stdio.h>
// Trivial device-only helper: always returns 1.  Callable from kernels and
// other __device__ functions, not from the host.
__device__ int dev1(){
    return 1;
}
// Trivial device-only helper: always returns 2.
__device__ int dev2(){
    return 2;
}
/*
 * __global__ prefix says a function is kernel,
 * Will be executed by GPU
 * runs multiple times specified by block and thread number
 * must return void
 */
__global__ void myKernel(){
    // The helpers' return values are discarded; this kernel exists only to
    // demonstrate calling __device__ functions from a kernel.
    dev1();
    dev2();
}
/**
 * __host__ prefix specifies
 * - runs once per call on CPU
 * - only callable from CPU
 * Function without prefix are host functions
 */
int main(){
    // Launch 2 blocks of 4 threads each (8 kernel instances in total).
    myKernel<<<2,4>>>();
    // Fix: kernel launches are asynchronous.  Without a synchronization
    // point the process can exit before the kernel ever executes.
    cudaDeviceSynchronize();
    printf("Hello, World!\n");
    return 0;
}
/*
* __device prefix
*/
|
19,844 | #include<iostream>
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include<ctime>
#include<curand.h>
#include<curand_kernel.h>
using namespace std;
__device__ long int n_ok[1]={3};
/*
 * Monte-Carlo pi sampling kernel: each of the first 1000 threads draws one
 * (x, y) point in [0,1)^2 and bumps the device-global counter n_ok when the
 * point lies INSIDE the unit circle (x^2 + y^2 <= 1), matching the CPU
 * reference left commented out in main.
 *
 * Fixes relative to the original:
 *  - the test was inverted (>= 1 counted points OUTSIDE the circle);
 *  - n_ok[0]++ was a data race across threads: now an atomicAdd
 *    (n_ok is long int; the cast assumes 64-bit long — TODO confirm on
 *    the target platform);
 *  - every thread wrote *x and *y (racy, arbitrary winner): only thread 0
 *    publishes its sample now, making the output deterministic per seed.
 */
__global__ void pi (int seed, float *x, float *y) {
    curandState_t state;
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if (tid < 1000)
    {
        curand_init(seed, tid, 0, &state);
        float px = curand_uniform(&state);
        float py = curand_uniform(&state);
        if (px * px + py * py <= 1.0f)
            atomicAdd((unsigned long long *)&n_ok[0], 1ULL);
        if (tid == 0) {
            *x = px;
            *y = py;
        }
    }
}
/*
 * Launch the Monte-Carlo kernel and print one sample point plus the
 * device-side hit counter.
 *
 * Fixes relative to the original:
 *  - cudaMemcpyFromSymbol was called with the string "n_ok"; the
 *    string-name overload was removed in CUDA 5.0 — pass the symbol;
 *  - n_ok1 was allocated with new but released with cudaFree (a host
 *    pointer handed to the CUDA allocator); x and y were never freed.
 */
int main () {
    long int * n_ok1 = new long int;
    float *x = new float;
    float *y = new float;
    float *dev_x, *dev_y;
    cudaMalloc( (void**)&dev_x, sizeof(float));
    cudaMalloc( (void**)&dev_y, sizeof(float));
    pi<<<128,128>>>(time(NULL), dev_x, dev_y);
    // Blocking copies also synchronize with the kernel.
    cudaMemcpy(x, dev_x, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(y, dev_y, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(n_ok1, n_ok, sizeof(long int), 0,
            cudaMemcpyDeviceToHost);
    cout<< *x<<" "<<*y<<" "<<*n_ok1 ;
    cudaFree(dev_x);
    cudaFree(dev_y);
    // Host allocations made with new are released with delete.
    delete n_ok1;
    delete x;
    delete y;
    return 0;
}
|
19,845 | /*
*
* pgm.cu
*
* Functions to load and store PGM (portable gray map) files.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "pgm.h"
/*
 * Load a binary PGM ("P5") image into a freshly malloc'd host buffer and a
 * pitched device buffer (copied H2D before returning).  When padWidth and
 * padHeight are both 0 the image's own dimensions are used.  Returns 0 on
 * success, 1 on any failure (partially acquired resources are released).
 *
 * Fix: after fscanf reads maxval, the file position sits ON the single
 * whitespace byte that the PGM format mandates between the header and the
 * raster; the original fread then consumed that byte as the first pixel,
 * shifting the whole image by one byte.  A single fgetc now consumes it.
 */
int
pgmLoad(
    const char *filename,
    unsigned char **pHostData, unsigned int *pHostPitch,
    unsigned char **pDeviceData, unsigned int *pDevicePitch,
    int *pWidth, int *pHeight, int padWidth, int padHeight)
{
    int ret = 1;
    const int hsize = 0x40;
    int w, h;
    FILE *fp = NULL;
    int maxval;
    char header[hsize];
    unsigned char *idata = NULL;
    unsigned char *ddata = NULL;
    size_t dPitch;
    fp = fopen( filename, "rb" );
    if ( fp == NULL) {
        fprintf( stderr, "Failed to open %s.\n", filename );
        goto Error;
    }
    // Magic number must be "P5" (binary graymap).
    if (NULL == fgets(header, hsize, fp)) {
        fprintf(stderr, "Invalid PGM file.\n");
        goto Error;
    }
    if ( strncmp(header, "P5", 2) ) {
        fprintf(stderr, "File is not a PGM image.\n");
        goto Error;
    }
    if ( 1 != fscanf( fp, "%d", &w ) )
        goto Error;
    if ( 1 != fscanf( fp, "%d", &h ) )
        goto Error;
    if ( 1 != fscanf( fp, "%d", &maxval ) )
        goto Error;
    // Consume the single whitespace separating the header from the raster.
    fgetc( fp );
    if ( padWidth == 0 && padHeight == 0 ) {
        padWidth = w;
        padHeight = h;
    }
    idata = (unsigned char *) malloc( padWidth*padHeight );
    if ( ! idata )
        goto Error;
    // Read w bytes per row into a padWidth-pitched host buffer.
    for ( int row = 0; row < h; row++ ) {
        if ( (size_t) w != fread( idata+row*padWidth, 1, w, fp ) )
            goto Error;
    }
    if ( cudaSuccess != cudaMallocPitch( (void **) &ddata, &dPitch, padWidth, padHeight ) )
        goto Error;
    *pWidth = padWidth;
    *pHeight = padHeight;
    *pHostPitch = padWidth;
    *pHostData = idata;
    *pDeviceData = ddata;
    *pDevicePitch = (unsigned int) dPitch;
    cudaMemcpy2D( ddata, dPitch, idata, padWidth, padWidth, padHeight, cudaMemcpyHostToDevice );
    fclose(fp);
    return 0;
Error:
    // Safe on NULL for both free() and cudaFree().
    free( idata );
    cudaFree( ddata );
    if ( fp ) {
        fclose( fp );
    }
    return ret;
}
/*
 * Write an 8-bit binary PGM ("P5") image of w x h pixels to filename.
 * Returns 0 on success, 1 on failure.
 *
 * Fix: the original jumped to the error label on a short fwrite without
 * closing the stream, leaking the FILE handle; the file is now always
 * closed once it has been opened.
 */
int
pgmSave(const char* filename, unsigned char *data, int w, int h)
{
    int ret = 1;
    FILE *fp = fopen( filename, "wb" );
    if ( NULL == fp ) {
        fprintf( stderr, "Failed to open %s\n", filename );
        return ret;
    }
    // Header: magic, width, height, maxval (255).
    fprintf( fp, "P5\n%d\n%d\n%d\n", w, h, 0xff );
    if ( w*h != fwrite(data, sizeof(unsigned char), w*h, fp) ) {
        fprintf( stderr, "Write failed\n" );
    } else {
        ret = 0;
    }
    fclose(fp);
    return ret;
}
|
19,846 | /*
STEPS
1. Allocate host memory and initialized host data e.g. malloc
2. Allocate device memory e.g cudaMalloc
3. Transfer input data from host to device memory e.g cudaMemcpy
4. Execute kernels
5. Transfer output from device memory to host
6. Free Host & CUDA memory e.g. free & cudaFree
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 10000000
#define MAX_ERR 1e-6
/*
 * Element-wise vector addition: out[i] = a[i] + b[i] for i in [0, n).
 *
 * Generalized from the original hard-coded index = 0 / stride = 1 (which
 * only worked as a single-thread loop) to a grid-stride loop: for the
 * <<<1,1>>> launch used in main the iteration order and results are
 * identical, but the kernel is now correct for any grid/block shape.
 */
__global__ void vector_add(float *out, float *a, float *b, int n) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for(int i = index; i < n; i += stride){
        out[i] = a[i] + b[i];
    }
}
/*
 * Vector-add driver following the 6-step recipe at the top of the file.
 * Fix: the file defines MAX_ERR and includes <assert.h>, but the original
 * never verified the device result — step 5 is now followed by a check.
 */
int main(){
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;
    // 1. Allocate and initialize host memory.
    a   = (float*)malloc(sizeof(float) * N);
    b   = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);
    for(int i = 0; i < N; i++){
        a[i] = 1.0f; b[i] = 2.0f;
    }
    // 2. Allocate device memory.
    cudaMalloc((void**)&d_a, sizeof(float) * N);
    cudaMalloc((void**)&d_b, sizeof(float) * N);
    cudaMalloc((void**)&d_out, sizeof(float) * N);
    // 3. Transfer inputs host -> device.
    cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
    // 4. Kernel launch (single thread; the kernel loops over all N).
    vector_add<<<1,1>>>(d_out, d_a, d_b, N);
    // 5. Transfer output device -> host (blocking copy also synchronizes).
    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
    // Verify against the host inputs within MAX_ERR.
    for(int i = 0; i < N; i++){
        assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
    }
    // 6. Free device memory, then host memory.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    free(a);
    free(b);
    free(out);
}
|
19,847 |
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <curand.h>
#include <time.h>
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d : err => %s\n",__FILE__,__LINE__,cudaGetErrorString(x));\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
/**
- memoria unificada
[10000,0.02410],
[1000000,0.12742],
[100000000,10.52038],
1- Gerar uma matriz aleatoria
2- Aplicar um blur ou filtro (gerar uma nova matriz de saida, com a media aritimetica da vizinhanca aplicada a cada elemento da matriz)
3- Testar e mandar resultados de tempo para os segintes casos:
- memoria unificada
- copia manual de memoria
- usando stream para copia CPU->GPU
- usando streams para os dois sentidos de copias (ida e volta)
4- Testar para matrizes de 100x100 , 1000x1000, 10000x10000
**/
/*
 * 3x3 box blur of an L x L matrix: interior elements get the average of
 * their 9-neighbourhood; border elements are copied through unchanged.
 *
 * Fix: when the launch grid overshoots L x L (any rounded-up grid), the
 * original's else-branch wrote result[] at out-of-range coordinates.
 * Excess threads now return before touching memory.
 */
__global__ void blur(unsigned int origData[],unsigned result[],int L) {
    int tx = threadIdx.x + blockIdx.x * blockDim.x;
    int ty = threadIdx.y + blockIdx.y * blockDim.y;
    // Tail guard: threads outside the matrix must not read or write.
    if (tx >= L || ty >= L)
        return;
    if (tx-1 >= 0 && tx+1 < L && ty-1 >= 0 && ty+1 < L)
    {
        // Interior element: sum the 3x3 neighbourhood (centre included).
        int temp = origData[(tx) + (ty)*L];
        temp += origData[(tx-1) + (ty-1)*L];
        temp += origData[(tx)   + (ty-1)*L];
        temp += origData[(tx+1) + (ty-1)*L];
        temp += origData[(tx-1) + (ty)*L];
        temp += origData[(tx+1) + (ty)*L];
        temp += origData[(tx-1) + (ty+1)*L];
        temp += origData[(tx)   + (ty+1)*L];
        temp += origData[(tx+1) + (ty+1)*L];
        result[(tx) + (ty)*L] = temp/9;
    }
    else
    {
        // Border element: pass through.
        result[(tx) + (ty)*L] = origData[(tx) + (ty)*L];
    }
}
/*
 *argumentos
 *1 - n_elementos
 *2 - threads por bloco
 */
// Unified-memory blur benchmark: fills an L x L matrix with random values,
// runs the blur kernel once, and reports the elapsed time via CUDA events.
int main(int argc, char* argv[]) {
    unsigned int L, tam, *data,*res;
    size_t size;
    cudaError_t err = cudaSuccess;
    // Matrix edge length; overridable by the first CLI argument.
    L = 40;
    if(argc > 1)
        L = atoi(argv[1]);
    tam = L*L;
    size = tam*sizeof(unsigned int);
    // Unified (managed) memory: same pointers usable on host and device.
    cudaMallocManaged(&data,size);
    cudaMallocManaged(&res,size);
    cudaEvent_t start, stop;
    CUDA_CALL(cudaEventCreate (&start));
    CUDA_CALL(cudaEventCreate (&stop));
    // Timing starts here, so it includes host-side initialization below.
    CUDA_CALL(cudaEventRecord (start, 0)); // 0 is the stream number
    dim3 block_dim(L,L,1);
    dim3 grid_dim(1,1,1);
    if(L>32)
    {
        block_dim = dim3(32,32,1);
        // NOTE(review): L/32 is INTEGER division, so ceil() is a no-op here;
        // for L not a multiple of 32 (e.g. 40, 100) the grid covers fewer
        // than L x L elements and part of res is never written.  Intended
        // fix is (L+31)/32, which also requires the kernel to bounds-check.
        grid_dim = dim3(ceil(L/32),ceil(L/32),1);
    }
    srand(time(NULL));
    for(int i=0; i<tam;i++)
        data[i]=rand();
    // do Work…
    /* Kernel Call */
    blur<<<grid_dim,block_dim>>>(data,res, L);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Managed memory requires a device sync before the host touches it again.
    CUDA_CALL(cudaDeviceSynchronize());
    for(int i=0; i<tam;i++)
        data[i]=0;
    CUDA_CALL(cudaEventRecord (stop, 0));
    CUDA_CALL(cudaEventSynchronize (stop));
    float elapsedTime;
    CUDA_CALL(cudaEventElapsedTime (&elapsedTime, start, stop));
    // Output format: [element_count, milliseconds],
    printf ("[%d,%.5f],\n", tam,elapsedTime);
    CUDA_CALL(cudaEventDestroy(start));
    CUDA_CALL(cudaEventDestroy(stop));
    /* Free device memory */
    CUDA_CALL( cudaFree(data));
    CUDA_CALL( cudaFree(res));
    return 0;
} /* main */
|
19,848 | #include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 512
// Bundle of the three vectors of c = a + b.  The same struct is used with
// host pointers (data_cpu) and device pointers (data_gpu); nothing in the
// type records which address space a given instance refers to.
typedef struct Data {
    double* a;   // first input vector
    double* b;   // second input vector
    double* c;   // output vector
} Data;
/*
 * Element-wise vector addition: data.c[i] = data.a[i] + data.b[i].
 * The struct is passed by value; its pointers must be device pointers.
 */
__global__ void add( Data data, int vector_size ) {
    // One element per thread, flat 1-D indexing.
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // Tail guard: the last block may have more threads than elements.
    if (i >= vector_size)
        return;
    data.c[i] = data.a[i] + data.b[i];
}
/*
 * CPU-vs-GPU vector-add benchmark with result comparison.
 * Fix: the host arrays are allocated with new[] but were released with
 * free(), which is undefined behaviour — they are now released with
 * delete[].
 */
int main( int argc, char* argv[] ) {
    // Usage check: exactly one argument (the vector size) is expected.
    if (argc != 2) {
        printf ("Usage: %s vector_size\n", argv[0]);
        return 1;
    }
    int vector_size = atoi(argv[1]);
    int grid_size = ((vector_size-1)/BLOCK_SIZE) + 1;   // ceil-div
    cudaSetDevice(0);
    // Timing via CUDA events (used for both CPU and GPU sections).
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate (&start);
    cudaEventCreate (&stop);
    // Host buffers: inputs, CPU result, and a landing buffer for the GPU
    // result.
    Data data_cpu;
    data_cpu.a = new double [vector_size];
    data_cpu.b = new double [vector_size];
    data_cpu.c = new double [vector_size];
    Data data_gpu_on_cpu;
    data_gpu_on_cpu.c = new double [vector_size];
    printf("Initializing input arrays.\n");
    for (int i = 0; i < vector_size; i++) {
        data_cpu.a[i] = rand()*cos(i);
        data_cpu.b[i] = rand()*sin(i);
        data_cpu.c[i] = 0.0;
    }
    // Device buffers + input upload.
    Data data_gpu;
    cudaMalloc (&data_gpu.a, vector_size*sizeof(double));
    cudaMalloc (&data_gpu.b, vector_size*sizeof(double));
    cudaMalloc (&data_gpu.c, vector_size*sizeof(double));
    cudaMemcpy (data_gpu.a, data_cpu.a, vector_size*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy (data_gpu.b, data_cpu.b, vector_size*sizeof(double), cudaMemcpyHostToDevice);
    // Sequential reference computation on the CPU.
    printf("Running sequential job.\n");
    cudaEventRecord(start,0);
    for (int i = 0; i < vector_size; i++) {
        data_cpu.c[i] = data_cpu.a[i] + data_cpu.b[i];
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("\tSequential Job Time: %.2f ms\n", time);
    // Parallel computation on the GPU.
    printf("Running parallel job.\n");
    cudaEventRecord(start,0);
    add<<<grid_size, BLOCK_SIZE>>>(data_gpu, vector_size);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("\tParallel Job Time: %.2f ms\n", time);
    // Bring the GPU result back and compare (exact equality is valid here:
    // both sides perform the same IEEE double addition).
    cudaMemcpy (data_gpu_on_cpu.c, data_gpu.c, vector_size*sizeof(double), cudaMemcpyDeviceToHost);
    int error = 0;
    for (int i = 0; i < vector_size; i++) {
        if (data_cpu.c[i] != data_gpu_on_cpu.c[i]){
            error = 1;
            printf( "Error starting element %d, %f != %f\n", i, data_gpu_on_cpu.c[i], data_cpu.c[i] );
        }
        if (error) break;
    }
    if (error == 0){
        printf ("Correct result. No errors were found.\n");
    }
    // Host arrays were allocated with new[]: release with delete[]
    // (the original called free() on them — undefined behaviour).
    delete [] data_cpu.a;
    delete [] data_cpu.b;
    delete [] data_cpu.c;
    delete [] data_gpu_on_cpu.c;
    // Release device memory.
    cudaFree (data_gpu.a);
    cudaFree (data_gpu.b);
    cudaFree (data_gpu.c);
    return 0;
}
|
19,849 | #include <cuda_runtime.h>
#include <stdio.h>
#include <iostream>
// Each thread prints its full coordinate set (threadIdx, blockIdx) together
// with the launch dimensions (blockDim, gridDim) via device-side printf.
__global__ void checkIndex(void) {
    printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) gridDim:(%d, %d, %d)\n",
    threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x,gridDim.y,gridDim.z);
}
int main(int argc, char **argv) {
    // Total number of data elements to cover.
    int nElem = 6;
    // Three threads per block; enough blocks to cover nElem (ceil-div:
    // (6+3-1)/3 = 2 blocks).
    dim3 block(3);
    dim3 grid ((nElem+block.x-1)/block.x);
    // Report the launch configuration from the host side.
    std::cout << "grid.x " << grid.x << " grid.y " << grid.y << " grid.z " << grid.z << std::endl;
    std::cout << "block.x " << block.x << " block.y " << block.y << " block.z " << block.z << std::endl;
    // Let every thread report its own coordinates from the device side.
    checkIndex <<<grid, block>>> ();
    // Tear down the device context before exiting.
    cudaDeviceReset();
    return 0;
}
|
19,850 | //
// simple_conv.cu
// Conv
//
// Created by DB on 12/27/18.
// Copyright © 2018 D Blalock. All rights reserved.
//
#include <stdio.h>
|
/*
 * C[i] = sin(A[i]) + cos(B[i]) for i in [0, N), using a fully general
 * 3-D grid / 3-D block flattened to one global thread id.
 */
__global__ void process_kernel1(float *A, float *B, float *C, int N)
{
    // Linear block id within the 3-D grid.
    int blockId = blockIdx.x
            + blockIdx.y * gridDim.x
            + blockIdx.z * (gridDim.x * gridDim.y);
    // Linear thread id within the 3-D block.
    int threadInBlock = threadIdx.x
            + threadIdx.y * blockDim.x
            + threadIdx.z * (blockDim.x * blockDim.y);
    int gid = blockId * (blockDim.x * blockDim.y * blockDim.z) + threadInBlock;
    // Tail guard: the grid may overshoot N.
    if (gid >= N)
        return;
    C[gid] = sin(A[gid]) + cos(B[gid]);
}
/*
 * C[i] = log(A[i]) for i in [0, N); same 3-D-to-linear indexing as
 * process_kernel1.
 */
__global__ void process_kernel2(float *A, float *C, int N)
{
    int blockId = blockIdx.x
            + blockIdx.y * gridDim.x
            + blockIdx.z * (gridDim.x * gridDim.y);
    int threadInBlock = threadIdx.x
            + threadIdx.y * blockDim.x
            + threadIdx.z * (blockDim.x * blockDim.y);
    int gid = blockId * (blockDim.x * blockDim.y * blockDim.z) + threadInBlock;
    if (gid >= N)
        return;
    C[gid] = log(A[gid]);
}
/*
 * C[i] = sqrt(A[i]) for i in [0, N); same 3-D-to-linear indexing as
 * process_kernel1.
 */
__global__ void process_kernel3(float *A, float *C, int N)
{
    int blockId = blockIdx.x
            + blockIdx.y * gridDim.x
            + blockIdx.z * (gridDim.x * gridDim.y);
    int threadInBlock = threadIdx.x
            + threadIdx.y * blockDim.x
            + threadIdx.z * (blockDim.x * blockDim.y);
    int gid = blockId * (blockDim.x * blockDim.y * blockDim.z) + threadInBlock;
    if (gid >= N)
        return;
    C[gid] = sqrt(A[gid]);
}
19,852 | #include <iostream>
#include <stdio.h>
#include <ctime>
#define LOG_NUM_BANKS 5
#define NUM_BANKS 32
#define BLOCK_SIZE 64
#define DEBUG
#ifdef DEBUG
// In DEBUG builds, wrap every CUDA call: print the error string with its
// call site and (by default) exit with the error code.
#define cudaCheckError(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "CUDA Error: %s at %s:%d\n",
        cudaGetErrorString(code), file, line);
        // abort=false lets a caller log the error and continue.
        if (abort) exit(code);
    }
}
#else
// Release builds: the wrapper disappears; the call's status is discarded.
#define cudaCheckError(ans) ans
#endif
// Shared-memory index mapping hook for bank-conflict padding.  Currently
// the identity; the commented-out line is the classic
// "+ (index >> LOG_NUM_BANKS)" padding scheme, disabled here.
__device__ inline size_t NoConflictIndex(size_t index) {
    return index;
    // return index + (index >> LOG_NUM_BANKS);
}
/*
 * Blelloch work-efficient exclusive scan of one tile of 2*blockDim.x
 * elements in shared memory (each thread owns two elements).  The tile's
 * total is written to block_sums[blockIdx.x] so a second pass can add
 * inter-block offsets.  Requires dynamic shared memory for at least
 * 2*blockDim.x floats (see the mapping in NoConflictIndex).
 *
 * Fix: the original returned early for threads past data_size and then
 * executed __syncthreads(), which is undefined behaviour when only part
 * of a block reaches the barrier.  All threads now run the full barrier
 * sequence; out-of-range elements are zero-padded on load and skipped on
 * store instead.
 */
__global__ void PrescanBlocks(float * out_data, const float * in_data, float * block_sums, const size_t data_size) {
    extern __shared__ float in_data_shared[];
    size_t thread_id_local = threadIdx.x;
    size_t offset = 1;
    size_t thread_id_global = blockIdx.x * blockDim.x + thread_id_local;
    size_t i0 = 2 * thread_id_global;
    size_t i1 = 2 * thread_id_global + 1;
    // Zero-pad instead of returning: every thread must reach every barrier.
    in_data_shared[NoConflictIndex(2 * thread_id_local)]     = (i0 < data_size) ? in_data[i0] : 0.0f;
    in_data_shared[NoConflictIndex(2 * thread_id_local + 1)] = (i1 < data_size) ? in_data[i1] : 0.0f;
    // Up-sweep (reduce) phase: build partial sums up the binary tree.
    for (size_t level_size = 2 * blockDim.x >> 1; level_size > 0; level_size >>= 1) {
        __syncthreads();
        if (thread_id_local < level_size) {
            size_t left_son_idx = offset * (2 * thread_id_local + 1) - 1;
            size_t parent_idx   = offset * (2 * thread_id_local + 2) - 1;
            in_data_shared[NoConflictIndex(parent_idx)] += in_data_shared[NoConflictIndex(left_son_idx)];
        }
        offset *= 2;
    }
    // Record the tile total, seed the down-sweep with the identity (0).
    if (thread_id_local == 0) {
        block_sums[blockIdx.x] = in_data_shared[NoConflictIndex(blockDim.x * 2 - 1)];
        in_data_shared[NoConflictIndex(blockDim.x * 2 - 1)] = 0;
    }
    // Down-sweep phase: propagate exclusive prefixes back down the tree.
    for (size_t level_size = 1; level_size < 2 * blockDim.x; level_size *= 2) {
        offset >>= 1;
        __syncthreads();
        if (thread_id_local < level_size) {
            size_t left_son_idx = offset * (2 * thread_id_local + 1) - 1;
            size_t parent_idx   = offset * (2 * thread_id_local + 2) - 1;
            float left_son_value = in_data_shared[NoConflictIndex(left_son_idx)];
            in_data_shared[NoConflictIndex(left_son_idx)] = in_data_shared[NoConflictIndex(parent_idx)];
            in_data_shared[NoConflictIndex(parent_idx)] += left_son_value;
        }
    }
    __syncthreads();
    // Guarded stores: only real elements are written back.
    if (i0 < data_size) out_data[i0] = in_data_shared[NoConflictIndex(2 * thread_id_local)];
    if (i1 < data_size) out_data[i1] = in_data_shared[NoConflictIndex(2 * thread_id_local + 1)];
}
/*
 * Second scan pass: add each block's scanned offset (block_sums[blockIdx.x])
 * to every element the block owns.
 *
 * Fix: the original returned early for out-of-range threads BEFORE
 * __syncthreads(), which is undefined behaviour when only part of a block
 * reaches the barrier (the launch uses 2*BLOCK_SIZE threads per block, so
 * a partially filled last block is the normal case).  The barrier is now
 * unconditional; only the final store is guarded.
 */
__global__ void AddBlockSums(float * data, const float * block_sums, const size_t data_size) {
    __shared__ float this_block_sum;
    size_t thread_id_local = threadIdx.x;
    size_t thread_id_global = blockIdx.x * blockDim.x + thread_id_local;
    // One thread stages the block's offset; everyone waits for it.
    if (thread_id_local == 0) {
        this_block_sum = block_sums[blockIdx.x];
    }
    __syncthreads();
    if (thread_id_global < data_size) {
        data[thread_id_global] += this_block_sum;
    }
}
/*
 * In-place exclusive prefix sum of the per-block totals on the host:
 * block_sums[i] becomes the sum of all entries before i.
 */
__host__ void PrescanBlockSums(float * block_sums, const size_t num_blocks) {
    float running = block_sums[0];
    block_sums[0] = 0;
    for (size_t block_id = 1; block_id < num_blocks; ++block_id) {
        float current = block_sums[block_id];
        block_sums[block_id] = running;
        running += current;
    }
}
// Full exclusive scan on the GPU: per-block Blelloch scan, host-side scan
// of the block totals, then a second kernel to add the block offsets.
// Host buffers in/out; all device memory is allocated and freed here.
void TotalPrescanGPU(const float * data, float * partial_sums, size_t data_size) {
    float * d_data;
    float * d_partial_sums;
    float * d_block_sums;
    float * block_sums;
    // Each block scans 2*BLOCK_SIZE elements (two per thread): ceil-div.
    size_t num_blocks = ((data_size + 2 * BLOCK_SIZE - 1) / (2 * BLOCK_SIZE));
    // NOTE(review): this allots noticeably more dynamic shared memory than
    // the 2*BLOCK_SIZE floats PrescanBlocks actually indexes (padding is
    // disabled in NoConflictIndex) — looks oversized; verify intent.
    size_t shared_size = ((2 * BLOCK_SIZE + NUM_BANKS - 1) / NUM_BANKS + BLOCK_SIZE) * 2 * sizeof(float);
    block_sums = (float *) malloc(num_blocks * sizeof(float));
    cudaCheckError( cudaMalloc(&d_data, data_size * sizeof(float)) );
    cudaCheckError( cudaMalloc(&d_partial_sums, data_size * sizeof(float)) );
    cudaCheckError( cudaMalloc(&d_block_sums, num_blocks * sizeof(float)) );
    cudaMemcpy(d_data, data, data_size * sizeof(float), cudaMemcpyHostToDevice);
    // Pass 1: scan each tile, collecting per-block totals.
    PrescanBlocks<<<num_blocks, BLOCK_SIZE, shared_size>>>(d_partial_sums, d_data, d_block_sums, data_size);
    // Scan the (small) block-totals array on the host.
    cudaMemcpy(block_sums, d_block_sums, num_blocks * sizeof(float), cudaMemcpyDeviceToHost);
    PrescanBlockSums(block_sums, num_blocks);
    cudaMemcpy(d_block_sums, block_sums, num_blocks * sizeof(float), cudaMemcpyHostToDevice);
    // Pass 2: add each block's offset to its elements (2*BLOCK_SIZE threads
    // per block so every element gets its own thread this time).
    AddBlockSums<<<num_blocks, 2 * BLOCK_SIZE>>>(d_partial_sums, d_block_sums, data_size);
    cudaMemcpy(partial_sums, d_partial_sums, data_size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_block_sums);
    cudaFree(d_partial_sums);
    cudaFree(d_data);
    free(block_sums);
}
/*
 * Sequential host reference: exclusive prefix sum,
 * partial_sums[i] = data[0] + ... + data[i-1] (partial_sums[0] = 0).
 */
void TotalPrescanCPU(const float * data, float * partial_sums, size_t data_size) {
    float running = 0.0;
    for (size_t idx = 0; idx < data_size; ++idx) {
        partial_sums[idx] = running;
        running += data[idx];
    }
}
/*
 * Benchmark driver: times TotalPrescanGPU and TotalPrescanCPU over
 * num_runs repetitions and prints "<n> <gpu_mean> <gpu_std> <cpu_mean>
 * <cpu_std>".  Timing uses clock(), i.e. CPU time of the whole call
 * including all the device allocation/copy traffic.
 *
 * Fixes: argv[1] was dereferenced without checking argc (undefined
 * behaviour when run with no argument) — a default is used instead; the
 * per-run cudaEventCreate calls were unused and leaked their events, so
 * they are removed.
 */
int main(int argc, char * argv[]) {
    float * data;
    float * partial_sums;
    // Problem size: 2^logsize elements (default 2^20 when no argument).
    size_t logsize = 20;
    if (argc >= 2) {
        logsize = atoi(argv[1]);
    }
    size_t num_elements = (1 << logsize);
    data = (float *) malloc(num_elements * sizeof(float));
    partial_sums = (float *) malloc(num_elements * sizeof(float));
    for (size_t idx = 0; idx < num_elements; ++idx) {
        data[idx] = 1.0 * idx;
    }
    size_t num_runs = 100;
    float runtimes[100];
    // GPU timing: mean and standard deviation over num_runs runs.
    float gpu_mean = 0.0;
    float gpu_std = 0.0;
    for (size_t run = 0; run < num_runs; ++run) {
        const clock_t begin_time = clock();
        TotalPrescanGPU(data, partial_sums, num_elements);
        float milliseconds = float(clock () - begin_time) / 1000;
        runtimes[run] = milliseconds;
        gpu_mean += milliseconds / num_runs;
    }
    for (size_t run = 0; run < num_runs; ++run) {
        gpu_std += (gpu_mean - runtimes[run]) * (gpu_mean - runtimes[run]) / num_runs;
    }
    gpu_std = sqrt(gpu_std);
    // CPU timing: same protocol with the sequential reference.
    float cpu_mean = 0.0;
    float cpu_std = 0.0;
    for (size_t run = 0; run < num_runs; ++run) {
        const clock_t begin_time = clock();
        TotalPrescanCPU(data, partial_sums, num_elements);
        float milliseconds = float(clock () - begin_time) / 1000;
        runtimes[run] = milliseconds;
        cpu_mean += milliseconds / num_runs;
    }
    for (size_t run = 0; run < num_runs; ++run) {
        cpu_std += (cpu_mean - runtimes[run]) * (cpu_mean - runtimes[run]) / num_runs;
    }
    cpu_std = sqrt(cpu_std);
    std::cout << num_elements << " " << gpu_mean << " " << gpu_std << " " << cpu_mean << " " << cpu_std << std::endl;
    free(data);
    free(partial_sums);
    return 0;
}
|
19,853 | #include "subroutines.cuh"
//-----------------------------------------------------------------------//
//* Some common small functions and constants *//
//-----------------------------------------------------------------------//
const int constSharedMemSize = 256;
const int maxThreads = 256; // number of threads per block
#define imin(a,b) (a<b?a:b)
// Returns true when x is a nonzero power of two.
// Fix: the original test `(x & (x - 1)) == 0` also returned true for x == 0,
// which is not a power of two; the added `x != 0` guard closes that edge case.
bool isPow2(unsigned int x) {
	return (x != 0) && ((x & (x - 1)) == 0);
}
// Rounds x up to the next power of two; a value that is already a power of
// two is returned unchanged. (nextPow2(0) yields 0 via unsigned wrap-around.)
// Classic bit-smearing: propagate the top set bit into every lower position,
// then add one.
unsigned int nextPow2(unsigned int x) {
	x -= 1;
	for (unsigned int shift = 1; shift <= 16; shift <<= 1)
		x |= x >> shift;
	return x + 1;
}
// Single-thread kernel (launch with <<<1, 1>>>): stores d_input[0] + d_res[1]
// into d_res[0] so the host can combine two device-resident values without a
// round trip.
// NOTE(review): despite the name, one operand comes from d_input and the
// other from d_res[1] — confirm callers really intend that mixed addressing.
__global__ void add_two_values_in_gpu(double *d_input, double *d_res)
{
	int tid = threadIdx.x;
	if (tid == 0)
		d_res[0] = d_input[0] + d_res[1];
}
// Single-thread kernel (launch with <<<1, 1>>>): copies d_input[index] into
// d_res[0] so the host can fetch one device-resident value with a small
// cudaMemcpy afterwards.
__global__ void access_value_in_gpu(double *d_input, double *d_res, int index)
{
	if (threadIdx.x == 0)
		d_res[0] = d_input[index];
}
//-----------------------------------------------------------------------//
//* Reduce from cuda book *//
//-----------------------------------------------------------------------//
// Block-wise tree reduction (CUDA-by-Example style): each block sums up to
// constSharedMemSize elements of d_data into d_result[blockIdx.x].
// Assumes blockDim.x == constSharedMemSize (a power of two).
__global__ void kernel_sum(double *d_data, double *d_result, int data_size)
{
	__shared__ double s_data[constSharedMemSize];
	long index = threadIdx.x + blockIdx.x * blockDim.x;
	int id = threadIdx.x;
	// Zero-fill first so threads past data_size contribute the identity.
	s_data[id] = 0.0;
	if (index < data_size)
		s_data[id] = d_data[index];
	__syncthreads();
	// Sequential-addressing tree: halve the active stride each pass; the
	// barrier outside the `if` keeps every pass race-free for all threads.
	int step = constSharedMemSize / 2;
	while (step != 0) {
		if (id < step)
			s_data[id] += s_data[id + step];
		__syncthreads();
		step /= 2;
	}
	if (id == 0)
		d_result[blockIdx.x] = s_data[0];
}
// Host wrapper around kernel_sum: reduces d_data per block on the GPU, then
// finishes the per-block partial sums on the CPU.
// NOTE(review): numBlocks is capped at 65536 and the kernel has no grid
// stride, so inputs larger than 65536 * constSharedMemSize elements would be
// silently truncated — confirm callers stay under that bound.
double sum_gpu(double *d_data, int data_size){
	int threadsPerBlock = constSharedMemSize;
	int numBlocks = imin(65536, (data_size + constSharedMemSize - 1) / constSharedMemSize);
	double *h_result;
	double *d_result;
	h_result = (double*)malloc(numBlocks*sizeof(double));
	cudaMalloc((void**)&d_result, numBlocks*sizeof(double));
	kernel_sum << <numBlocks, threadsPerBlock >> >(d_data, d_result, data_size);
	// blocking cudaMemcpy also synchronizes with the kernel above
	cudaMemcpy(h_result, d_result, numBlocks*sizeof(double), cudaMemcpyDeviceToHost);
	double sum = 0.0;
	for (int j = 0; j < numBlocks; j++){
		sum += h_result[j];
	}
	free(h_result);
	cudaFree(d_result);
	return sum;
}
//----------------------------------------------------------------------------//
//* Three kernels of reduce algorithms using threads matching the data size *//
//----------------------------------------------------------------------------//
// Reduction baseline ("score 10"): interleaved addressing selected with the
// modulo operator. Slow on purpose: `%` is an expensive operator and the
// active threads are scattered, producing highly divergent warps.
// Assumes blockDim.x is a power of two.
__global__ void kernel_reduce_score10(double *d_input, double *d_res, int size)
{
	__shared__ double s_data[constSharedMemSize];
	long index = threadIdx.x + blockIdx.x*blockDim.x;
	int tid = threadIdx.x;
	// zero-pad threads past the end of the data
	s_data[tid] = 0.0;
	if (index < size)
		s_data[tid] = d_input[index];
	__syncthreads();
	// every s-th thread accumulates its neighbor at distance s/2
	for (int s = 2; s <= blockDim.x; s = s * 2)
	{
		if ((tid % s) == 0)
			s_data[tid] += s_data[tid + s / 2];
		__syncthreads();
	}
	if (tid == 0){
		d_res[blockIdx.x] = s_data[0];
	}
}
// replace % operator, avoid highly divergent wraps and slow operators
// replace % operator, avoid highly divergent wraps and slow operators
// Reduction "score 20": strided indexing keeps the active threads contiguous
// (low divergence), but the strided shared-memory accesses introduce bank
// conflicts — addressed by the "score 30" variant below.
__global__ void kernel_reduce_score20(double *d_input, double *d_res, int size)
{
	__shared__ double s_data[constSharedMemSize];
	long index = threadIdx.x + blockIdx.x*blockDim.x;
	int tid = threadIdx.x;
	s_data[tid] = 0.0;
	if (index < size)
		s_data[tid] = d_input[index];
	__syncthreads();
	for (int s = 2; s <= blockDim.x; s = s * 2){
		// `index` is reused as the scratch index (global index not needed anymore)
		index = tid * s;
		if (index < blockDim.x)
			s_data[index] += s_data[index + s / 2];
		__syncthreads();
	}
	if (tid == 0)
		d_res[blockIdx.x] = s_data[0];
}
// deal with shared memory bank conflicts
// deal with shared memory bank conflicts
// Reduction "score 30": sequential addressing — the stride starts at half
// the block and halves each pass, so accesses are contiguous and
// conflict-free. Assumes blockDim.x is a power of two.
__global__ void kernel_reduce_score30(double *d_input, double *d_res, int size)
{
	__shared__ double s_data[constSharedMemSize];
	long index = threadIdx.x + blockIdx.x*blockDim.x;
	int tid = threadIdx.x;
	s_data[tid] = 0.0;
	if (index < size)
		s_data[tid] = d_input[index];
	__syncthreads();
	for (int s = blockDim.x / 2; s >= 1; s = s >> 1){
		if (tid < s)
			s_data[tid] += s_data[tid + s];
		__syncthreads();
	}
	if (tid == 0)
		d_res[blockIdx.x] = s_data[tid];
}
// Host driver for the full-thread reduction kernels: launches `kernel` over
// `size` elements, then recurses on the per-block partial sums until a
// single value remains.
//  - kernel: one of the kernel_reduce_score* kernels (same signature)
//  - flag:   kept for interface compatibility; buffer ownership is now
//            handled locally (each level frees the buffer it allocates).
// Fix: the original leaked device memory — the base case freed d_result
// only when flag was set (it is always locally owned), and intermediate
// partial-sum buffers handed to the recursion were never freed at all.
double sum_gpu_reduce_full(void(*kernel)(double*, double*, int), double *d_input, int size, int flag)
{
	int threadsPerBlock = constSharedMemSize;
	int numBlocks = imin(65536, (nextPow2(size - 1) + constSharedMemSize - 1) / constSharedMemSize);
	double *d_result;
	cudaMalloc((void**)&d_result, numBlocks * sizeof(double));
	kernel << <numBlocks, threadsPerBlock >> >(d_input, d_result, size);
	cudaDeviceSynchronize();
	double h_result;
	if (numBlocks == 1){
		cudaMemcpy(&h_result, d_result, sizeof(double), cudaMemcpyDeviceToHost);
	} else {
		// recurse on this level's partial sums
		h_result = sum_gpu_reduce_full(kernel, d_result, numBlocks, 1);
	}
	cudaFree(d_result); // always locally owned — release at every level
	return h_result;
}
//----------------------------------------------------------------------------//
//* Three kernels of reduce algorithms using half threads of data size *//
//----------------------------------------------------------------------------//
// Unrolled final warp of a tree reduction: no __syncthreads() needed because
// it relies on implicit warp-synchronous execution over a volatile pointer.
// Reads up to s_data[tid + 32], so the shared array must hold >= 64 entries
// and the caller must restrict tid to [0, 32).
// NOTE(review): this volatile idiom predates Volta's independent thread
// scheduling — __syncwarp()/shuffle intrinsics are the modern replacement.
__device__ void warpReduce(volatile double* s_data, int tid) {
	s_data[tid] += s_data[tid + 32];
	s_data[tid] += s_data[tid + 16];
	s_data[tid] += s_data[tid + 8];
	s_data[tid] += s_data[tid + 4];
	s_data[tid] += s_data[tid + 2];
	s_data[tid] += s_data[tid + 1];
}
// deal with the first loop of idea threads
// deal with the first loop of idea threads
// Reduction "score 40": each thread loads and adds a PAIR of elements
// (index and index + size/2) on its first read, so only size/2 threads are
// needed. NOTE(review): an odd `size` leaves the element at size-1 unread
// here; the host-side sum_gpu_reduce_half_wrap adds it back — but only at
// the top level (see the note in sum_gpu_reduce_half).
__global__ void kernel_reduce_half_score40(double *d_input, double *d_res, int size)
{
	__shared__ double s_data[constSharedMemSize];
	long index = threadIdx.x + blockIdx.x * blockDim.x;
	int tid = threadIdx.x;
	/*double mySum = (index < size / 2) ? d_input[index] : 0.0f;
	if (index + size / 2 < size)
		mySum += d_input[index + size / 2];
	s_data[tid] = mySum;
	__syncthreads();*/
	s_data[tid] = 0.0;
	if (index < size / 2)
		s_data[tid] = d_input[index] + d_input[index + size / 2];
	__syncthreads();
	// sequential-addressing tree reduction (as in kernel_reduce_score30)
	for (int s = blockDim.x / 2; s >= 1; s = s >> 1){
		if (tid < s)
			s_data[tid] += s_data[tid + s];
		__syncthreads();
	}
	if (tid == 0)
		d_res[blockIdx.x] = s_data[tid];
}
// deal with first loop and uproll last wrap
// deal with first loop and unroll last warp
// Reduction "score 50": pair-wise first load (as in score 40) plus an
// unrolled final warp via warpReduce once the stride drops below 64,
// removing the last six __syncthreads(). The shared array is volatile to
// make the warp-synchronous tail in warpReduce observable.
__global__ void kernel_reduce_half_score50(double *d_input, double *d_res, int size)
{
	__shared__ volatile double s_data[constSharedMemSize];
	long index = threadIdx.x + blockIdx.x * blockDim.x;
	int tid = threadIdx.x;
	/*
	double mySum = (index < size / 2) ? d_input[index] : 0.0f;
	if (index < size / 2)
		mySum += d_input[index + size / 2];
	s_data[tid] = mySum;
	__syncthreads();*/
	s_data[tid] = 0.0;
	if (index < size / 2)
		s_data[tid] = d_input[index] + d_input[index + size / 2];
	__syncthreads();
	// stop at stride 64: the remaining 32 lanes finish in warpReduce
	for (int s = blockDim.x / 2; s >= 64; s = s >> 1){
		if (tid < s)
			s_data[tid] += s_data[tid + s];
		__syncthreads();
	}
	if (tid < 32)
		warpReduce(s_data, tid);
	if (tid == 0)
		d_res[blockIdx.x] = s_data[tid];
}
// Host driver for the pair-loading ("half thread") reduction kernels.
//  - kernel: kernel_reduce_half_score40 or _score50 (same signature)
//  - flag:   kept for interface compatibility; buffer ownership is now
//            handled locally (each level frees the buffer it allocates).
// Fix: the original leaked device memory — the base case freed d_result
// only when flag was set, and intermediate partial-sum buffers handed to
// the recursion were never freed.
// NOTE(review): the recursion re-enters this function rather than
// sum_gpu_reduce_half_wrap, so an odd numBlocks at an intermediate level
// drops its last partial sum — confirm the intended sizes keep numBlocks a
// power of two (the nextPow2 sizing suggests they do).
double sum_gpu_reduce_half(void(*kernel)(double*, double*, int), double *d_input, int size, int flag)
{
	int threadsPerBlock = constSharedMemSize;
	int numBlocks = imin(65536, (nextPow2((size - 1) / 2) + constSharedMemSize - 1) / constSharedMemSize);
	double *d_result;
	cudaMalloc((void**)&d_result, numBlocks * sizeof(double));
	kernel << <numBlocks, threadsPerBlock >> >(d_input, d_result, size);
	cudaDeviceSynchronize();
	double h_result;
	if (numBlocks == 1){
		cudaMemcpy(&h_result, d_result, sizeof(double), cudaMemcpyDeviceToHost);
	} else {
		h_result = sum_gpu_reduce_half(kernel, d_result, numBlocks, 1);
	}
	cudaFree(d_result); // always locally owned — release at every level
	return h_result;
}
// Wrapper for the pair-loading reduction that handles an odd element count:
// the half kernels read elements in (i, i + size/2) pairs, so for odd size
// the last element is never touched — fetch it separately and add it in.
// Fix: the original never freed the temporary d_value buffer (device leak
// on every odd-sized call).
double sum_gpu_reduce_half_wrap(void(*kernel)(double*, double*, int), double *d_input, int size, int flag){
	double value = sum_gpu_reduce_half(kernel, d_input, size, flag);
	double value_gpu = 0.0;
	if (size & 1) // odd size: element size-1 was skipped by the pair loads
	{
		double *d_value;
		cudaMalloc((void**)&d_value, sizeof(double));
		access_value_in_gpu << <1, 1 >> >(d_input, d_value, size - 1);
		cudaMemcpy(&value_gpu, d_value, sizeof(double), cudaMemcpyDeviceToHost);
		cudaFree(d_value); // release the one-element scratch buffer
	}
	return value + value_gpu;
}
//----------------------------------------------------------------------------//
//* Complete unroll last warp, using template *//
//----------------------------------------------------------------------------//
// Template variant of warpReduce: blockSize is a compile-time constant, so
// the `if` guards fold away and only the steps valid for that block size
// remain — safe for blocks smaller than 64 threads, unlike warpReduce.
// Same volatile warp-synchronous caveat as warpReduce (pre-Volta idiom).
template <unsigned int blockSize>
__device__ void warpReduce2(volatile double *sdata, int tid) {
	if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
	if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
	if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
	if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
	if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
	if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
// Final-form reduction kernel: grid-stride pair loads into a register
// accumulator, fully unrolled shared-memory tree (blockSize is known at
// compile time), warp-level finish in warpReduce2.
// Requires dynamic shared memory for blockSize doubles — 2*blockSize when
// blockSize <= 32 (see the smemSize computation in the host driver below).
template <unsigned int blockSize> //, bool nIsPow2
__global__ void reduce_kernel(double *d_input, double *d_res, int size) {
	extern __shared__ volatile double sdata[];
	int tid = threadIdx.x;
	int i = blockIdx.x * (blockSize * 2) + tid;
	int gridSize = blockSize * 2 * gridDim.x;
	double mySum = 0.0;
	// grid-stride loop: each thread sums pairs blockSize apart until the
	// whole input is consumed
	while (i < size) {
		mySum += d_input[i];
		// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
		if (i + blockSize < size)
			mySum += d_input[i + blockSize];
		i += gridSize;
	}
	sdata[tid] = mySum;
	__syncthreads();
	/*sdata[tid] = 0;
	while (i < size) {
		sdata[tid] += d_input[i] + d_input[i + blockSize];
		i += gridSize;
	}
	__syncthreads();*/
	// compile-time unrolled tree; barriers sit outside the tid guards so
	// every thread reaches them
	if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
	if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
	if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
	if (tid < 32) warpReduce2<blockSize>(sdata, tid);
	if (tid == 0) d_res[blockIdx.x] = sdata[0];
}
// Recursively reduces d_data (size doubles in device memory) to one scalar
// using the templated reduce_kernel.
//  - flag: kept for interface compatibility; buffer ownership is now local
//    (each level frees the partial-sum buffer it allocates).
// Fix: the original leaked d_result at every recursion level — the base
// case freed it only when flag was set, and non-base levels never did.
double sum_reduce_recursive_cuda(double *d_data, int size, int flag){
	// half as many threads as elements (each thread loads a pair), capped
	// at maxThreads; rounded up to a power of two for the unrolled tree
	int threads = (size < maxThreads * 2) ? nextPow2((size + 1) / 2) : maxThreads;
	int blocks = (size + (threads * 2 - 1)) / (threads * 2);
	// when there is only one warp per block, we need to allocate two warps
	// worth of shared memory so that we don't index shared memory out of bounds
	int smemSize = (threads <= 32) ? 2 * threads * sizeof(double) : threads * sizeof(double);
	double *d_result;
	cudaMalloc((void**)&d_result, blocks*sizeof(double));
	// dispatch on the compile-time block size the template expects
	switch (threads)
	{
	case 512:
		reduce_kernel<512> << < blocks, threads, smemSize >> >(d_data, d_result, size); break;
	case 256:
		reduce_kernel<256> << < blocks, threads, smemSize >> >(d_data, d_result, size); break;
	case 128:
		reduce_kernel<128> << < blocks, threads, smemSize >> >(d_data, d_result, size); break;
	case 64:
		reduce_kernel< 64> << < blocks, threads, smemSize >> >(d_data, d_result, size); break;
	case 32:
		reduce_kernel< 32> << < blocks, threads, smemSize >> >(d_data, d_result, size); break;
	case 16:
		reduce_kernel< 16> << < blocks, threads, smemSize >> >(d_data, d_result, size); break;
	case 8:
		reduce_kernel< 8> << < blocks, threads, smemSize >> >(d_data, d_result, size); break;
	case 4:
		reduce_kernel< 4> << < blocks, threads, smemSize >> >(d_data, d_result, size); break;
	case 2:
		reduce_kernel< 2> << < blocks, threads, smemSize >> >(d_data, d_result, size); break;
	case 1:
		reduce_kernel< 1> << < blocks, threads, smemSize >> >(d_data, d_result, size); break;
	}
	cudaDeviceSynchronize();
	double h_result;
	if (blocks == 1){
		cudaMemcpy(&h_result, d_result, sizeof(double), cudaMemcpyDeviceToHost);
	} else {
		h_result = sum_reduce_recursive_cuda(d_result, blocks, 1);
	}
	cudaFree(d_result); // always locally owned — release at every level
	return h_result;
}
|
19,854 | #ifndef HashTable_Tests_CU
#define HashTable_Tests_CU
#include "../SRC/HashTable.cu"
// Sample payload type for exercising HashTable_t: hashed by id, and two
// records compare equal when their ids match (each comparison is logged).
struct Example_t
{
	char name[20]; // display name (unused by this test)
	int id;        // hash key
	int age;
	// The hash is simply the id.
	int GetHash()
	{
		return id;
	}
	// Logs the comparison, then reports whether the ids match.
	bool equal(Example_t * Node0)
	{
		const bool match = (id == Node0->id);
		std::cout << "Compare:" << id << "-" << Node0->id << "\n";
		return match;
	}
};
// Smoke test: inserts two records that share id 01 (so they collide and
// compare equal) into a single-bucket HashTable_t, then dumps the table.
// The Example_t records are intentionally not deleted here (matching the
// original test's commented-out deletes).
int main() {
	Example_t* first = new Example_t();
	first->id = 01;
	first->age = 01;
	Example_t* second = new Example_t();
	second->id = 01;
	second->age = 02;
	HashTable_t<Example_t>* table = new HashTable_t<Example_t>(1);
	table->AddGetReference(first);
	table->AddGetReference(second);
	table->displayHash();
	delete table;
	return 0;
}
#endif //HashTable_Tests_CU
|
19,855 | #include "includes.h"
// Element-wise sum of four gradient buffers:
//   output[i] = input1[i] + input2[i] + input3[i] + input4[i], i in [0, numElem)
// Rewritten as a grid-stride loop: the original gave each thread a
// contiguous chunk (numElem*pos/size .. numElem*(pos+1)/size), so adjacent
// threads in a warp touched addresses far apart (uncoalesced). The
// grid-stride form produces identical output with coalesced accesses and
// works for any launch configuration.
__global__ void sumGrad(float* output, float* input1, float* input2, float* input3, float* input4, const int numElem)
{
	size_t stride = (size_t)blockDim.x * gridDim.x;
	for (size_t i = (size_t)blockDim.x * blockIdx.x + threadIdx.x; i < (size_t)numElem; i += stride) {
		output[i] = input1[i] + input2[i] + input3[i] + input4[i];
	}
}
19,856 | #include <iostream>
#define N (256 * 256)
#define FULL_DATA_SIZE (N*20)
using namespace std;
// Averaging kernel (CUDA-by-Example streams chapter): c[idx] is the mean of
// two three-point moving averages of a and b.
__global__ void kernel(int *a, int *b, int *c)
{
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if(idx < N)
	{
		// neighbor indices wrap within the first 256 elements, so both
		// reads stay in bounds for every idx
		int idx1 = (idx + 1) % 256;
		int idx2 = (idx + 2) % 256;
		float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
		float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
		// float result is truncated back to int on store
		c[idx] = (as + bs) / 2;
	}
}
// Two-stream pipelined version: the data is processed in 2*N-element chunks,
// with H2D copies, kernel, and D2H copy of each half issued depth-first per
// stream so copy and compute of the two streams can overlap.
// Fix: the original never destroyed the timing events (cudaEventDestroy was
// missing), leaking them; everything else is unchanged.
int main(int argc, char **argv)
{
	cudaDeviceProp prop;
	int whichDevice;
	cudaGetDevice(&whichDevice);
	cout << "which Device = " << whichDevice << endl;
	cudaGetDeviceProperties(&prop, whichDevice);
	// copy/compute overlap requires device support
	if(!prop.deviceOverlap)
	{
		cout << "Device will not handle overlaps" << endl;
		return 0;
	}
	// create cuda events to time the whole pipeline
	cudaEvent_t start, stop;
	float elapsedTime;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	// initialize the streams
	cudaStream_t stream0, stream1;
	cudaStreamCreate(&stream0);
	cudaStreamCreate(&stream1);
	// device chunk buffers (one set per stream) and pinned host buffers
	int *host_a, *host_b, *host_c;
	int *dev_a0, *dev_b0, *dev_c0;
	int *dev_a1, *dev_b1, *dev_c1;
	cudaMalloc((void **)&dev_a0, N * sizeof(int));
	cudaMalloc((void **)&dev_b0, N * sizeof(int));
	cudaMalloc((void **)&dev_c0, N * sizeof(int));
	cudaMalloc((void **)&dev_a1, N * sizeof(int));
	cudaMalloc((void **)&dev_b1, N * sizeof(int));
	cudaMalloc((void **)&dev_c1, N * sizeof(int));
	// allocate page-locked memory, required for cudaMemcpyAsync
	cudaHostAlloc((void **)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
	cudaHostAlloc((void **)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
	cudaHostAlloc((void **)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
	for(int i = 0; i < FULL_DATA_SIZE; i++)
	{
		host_a[i] = rand();
		host_b[i] = rand();
	}
	// process two N-element chunks per iteration, one per stream; work is
	// issued breadth-first across streams so the copy engines stay busy
	for(int i = 0; i < FULL_DATA_SIZE; i+=N*2)
	{
		cudaMemcpyAsync(dev_a1,host_a+i + N, N*sizeof(int), cudaMemcpyHostToDevice, stream1);
		cudaMemcpyAsync(dev_b1,host_b+i + N, N*sizeof(int), cudaMemcpyHostToDevice, stream1);
		cudaMemcpyAsync(dev_a0,host_a+i, N*sizeof(int), cudaMemcpyHostToDevice, stream0);
		cudaMemcpyAsync(dev_b0,host_b+i, N*sizeof(int), cudaMemcpyHostToDevice, stream0);
		kernel<<<N/256, 256, 0, stream1>>>(dev_a1, dev_b1, dev_c1);
		kernel<<<N/256, 256, 0, stream0>>>(dev_a0, dev_b0, dev_c0);
		cudaMemcpyAsync(host_c+i + N, dev_c1, N*sizeof(int), cudaMemcpyDeviceToHost, stream1);
		cudaMemcpyAsync(host_c+i, dev_c0, N*sizeof(int), cudaMemcpyDeviceToHost, stream0);
	}
	cudaStreamSynchronize(stream0);
	cudaStreamSynchronize(stream1);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsedTime, start, stop);
	cout << "total time : " << elapsedTime << " ms" << endl;
	cudaFreeHost(host_a);
	cudaFreeHost(host_b);
	cudaFreeHost(host_c);
	cudaFree(dev_a0);
	cudaFree(dev_b0);
	cudaFree(dev_c0);
	cudaFree(dev_a1);
	cudaFree(dev_b1);
	cudaFree(dev_c1);
	// destroy events (missing in the original) and streams
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaStreamDestroy(stream0);
	cudaStreamDestroy(stream1);
	return 0;
}
/* **** with no gpu scheduling **** */
/*
int main(int argc, char **argv)
{
cudaDeviceProp prop;
int whichDevice;
cudaGetDevice(&whichDevice);
cout << "which Device = " << whichDevice << endl;
cudaGetDeviceProperties(&prop, whichDevice);
if(!prop.deviceOverlap)
{
cout << "Device will not handle overlaps" << endl;
return 0;
}
// create cuda event to calculate the time
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// initialize the streams
cudaStream_t stream0, stream1;
cudaStreamCreate(&stream0);
cudaStreamCreate(&stream1);
// malloc data
int *host_a, *host_b, *host_c;
int *dev_a0, *dev_b0, *dev_c0;
int *dev_a1, *dev_b1, *dev_c1;
cudaMalloc((void **)&dev_a0, N * sizeof(int));
cudaMalloc((void **)&dev_b0, N * sizeof(int));
cudaMalloc((void **)&dev_c0, N * sizeof(int));
cudaMalloc((void **)&dev_a1, N * sizeof(int));
cudaMalloc((void **)&dev_b1, N * sizeof(int));
cudaMalloc((void **)&dev_c1, N * sizeof(int));
// allocate page-locked memory , used to stream
cudaHostAlloc((void **)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void **)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void **)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
for(int i = 0; i < FULL_DATA_SIZE; i++)
{
host_a[i] = rand();
host_b[i] = rand();
}
for(int i = 0; i < FULL_DATA_SIZE; i+=N*2)
{
cudaMemcpyAsync(dev_a0,host_a+i, N*sizeof(int), cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(dev_b0,host_b+i, N*sizeof(int), cudaMemcpyHostToDevice, stream0);
kernel<<<N/256, 256, 0, stream0>>>(dev_a0, dev_b0, dev_c0);
cudaMemcpyAsync(host_c+i, dev_c0, N*sizeof(int), cudaMemcpyDeviceToHost, stream0);
cudaMemcpyAsync(dev_a1,host_a+i + N, N*sizeof(int), cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(dev_b1,host_b+i + N, N*sizeof(int), cudaMemcpyHostToDevice, stream1);
kernel<<<N/256, 256, 0, stream1>>>(dev_a1, dev_b1, dev_c1);
cudaMemcpyAsync(host_c+i + N, dev_c1, N*sizeof(int), cudaMemcpyDeviceToHost, stream1);
}
cudaStreamSynchronize(stream0);
cudaStreamSynchronize(stream1);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cout << "total time : " << elapsedTime << " ms" << endl;
cudaFreeHost(host_a);
cudaFreeHost(host_b);
cudaFreeHost(host_c);
cudaFree(dev_a0);
cudaFree(dev_b0);
cudaFree(dev_c0);
cudaFree(dev_a1);
cudaFree(dev_b1);
cudaFree(dev_c1);
// destroy stream
cudaStreamDestroy(stream0);
cudaStreamDestroy(stream1);
return 0;
}
*/
/* ******* only one stream ********* */
/*
int main(int argc, char **argv)
{
cudaDeviceProp prop;
int whichDevice;
cudaGetDevice(&whichDevice);
cout << "which Device = " << whichDevice << endl;
cudaGetDeviceProperties(&prop, whichDevice);
if(!prop.deviceOverlap)
{
cout << "Device will not handle overlaps" << endl;
return 0;
}
// create cuda event to calculate the time
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// initialize the stream
cudaStream_t stream;
cudaStreamCreate(&stream);
// malloc data
int *host_a, *host_b, *host_c;
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void **)&dev_a, N * sizeof(int));
cudaMalloc((void **)&dev_b, N * sizeof(int));
cudaMalloc((void **)&dev_c, N * sizeof(int));
// allocate page-locked memory , used to stream
cudaHostAlloc((void **)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void **)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void **)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
for(int i = 0; i < FULL_DATA_SIZE; i++)
{
host_a[i] = rand();
host_b[i] = rand();
}
// loop over full data, in bite-size chunks
for(int i = 0; i < FULL_DATA_SIZE; i += N)
{
cudaMemcpyAsync(dev_a, host_a+i,N*sizeof(int), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dev_b, host_b+i,N*sizeof(int), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dev_c, host_c+i,N*sizeof(int), cudaMemcpyHostToDevice, stream);
kernel<<<N/256, 256, 0, stream>>>(dev_a, dev_b, dev_c);
cudaMemcpyAsync(host_c+i, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cout << "total time : " << elapsedTime << " ms" << endl;
cudaFreeHost(host_a);
cudaFreeHost(host_b);
cudaFreeHost(host_c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// destroy stream
cudaStreamDestroy(stream);
return 0;
}
*/
|
19,857 | /**
* 2DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define NI 1024
#define NJ 1024
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
// Fills the NI x NJ matrix A (row-major) with pseudo-random values in
// [0, 1] drawn from rand().
void init(DATA_TYPE* A)
{
	for (int i = 0; i < NI; ++i)
	{
		for (int j = 0; j < NJ; ++j)
		{
			A[i*NJ + j] = (float)rand()/RAND_MAX;
		}
	}
}
// Applies a fixed 3x3 stencil to the interior of the NI x NJ image A,
// writing results into B. Border pixels (i or j equal to 0 or the last
// index) are left untouched.
// Thread mapping: x dimension -> column j (range NJ), y dimension -> row i
// (range NI).
__global__ void Convolution2D_kernel(DATA_TYPE *A, DATA_TYPE *B)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;
	DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
	// 3x3 stencil coefficients (note c33 is +0.10, not +1.0)
	c11 = +0.2; c21 = +0.5; c31 = -0.8;
	c12 = -0.3; c22 = +0.6; c32 = -0.9;
	c13 = +0.4; c23 = +0.7; c33 = +0.10;
	if ((i < NI-1) && (j < NJ-1) && (i > 0) && (j > 0))
	{
		B[i * NJ + j] = c11 * A[(i - 1) * NJ + (j - 1)] + c21 * A[(i - 1) * NJ + (j + 0)] + c31 * A[(i - 1) * NJ + (j + 1)]
		+ c12 * A[(i + 0) * NJ + (j - 1)] + c22 * A[(i + 0) * NJ + (j + 0)] + c32 * A[(i + 0) * NJ + (j + 1)]
		+ c13 * A[(i + 1) * NJ + (j - 1)] + c23 * A[(i + 1) * NJ + (j + 0)] + c33 * A[(i + 1) * NJ + (j + 1)];
	}
}
// Launches the 2D convolution over the whole NI x NJ image and blocks until
// it completes (the managed buffers are read on the host right afterwards).
// Fix: the grid was sized with NI along x and NJ along y, but the kernel
// maps the x dimension to columns (range NJ) and the y dimension to rows
// (range NI). Harmless today only because NI == NJ; swapped so the launch
// stays correct if the problem ever becomes non-square.
void convolution2DCuda(DATA_TYPE* A, DATA_TYPE* B)
{
	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	dim3 grid((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
	Convolution2D_kernel<<<grid, block>>>(A, B);
	// Wait for GPU to finish before accessing on host
	cudaDeviceSynchronize();
}
// Driver: allocates unified-memory buffers A (input) and B (output), fills A
// with random data, runs one 2D convolution, then appends every 10,000th
// output value to result_2DConv.txt.
int main(int argc, char *argv[])
{
	DATA_TYPE* A;
	DATA_TYPE* B;
	// managed memory: accessible from both host (init/fprintf) and device
	cudaMallocManaged( &A, NI*NJ*sizeof(DATA_TYPE) );
	cudaMallocManaged( &B, NI*NJ*sizeof(DATA_TYPE) );
	//initialize the arrays
	init(A);
	// synchronizes internally, so B is safe to read below
	convolution2DCuda(A, B);
	FILE *fp;
	fp = fopen("result_2DConv.txt","a+");
	for(int i = 0; i < NI*NJ; i+= 10000) {
		fprintf(fp, "%lf\n", B[i]);
	}
	fclose(fp);
	cudaFree(A);
	cudaFree(B);
	return 0;
}
|
19,858 | // Este codigo obtiene la derivada de la funcion u(x) = x*x
// utilizando diferencias finitas, donde el error es proporcional
// a dx
#include <stdio.h>
// Backward finite difference: du_h[i] = (u_h[i] - u_h[i-1]) / dx for
// i in [1, n). The loop starts at 1 because index 0 has no left neighbor;
// du_h[0] is left untouched.
void derivCPU(float* u_h, float* du_h, float dx, int n)
{
	for (int i = 1; i < n; ++i) {
		du_h[i] = (u_h[i] - u_h[i-1]) / dx;
	}
}
// Test driver: builds u(x) = x^2 on n points over [0, L], differentiates it
// with derivCPU, and checks each backward difference against the analytic
// derivative 2x within a fixed tolerance (the O(dx) truncation error is
// subtracted out before comparing).
// NOTE(review): failures exit with status 0 ("success") — confirm whether a
// non-zero exit code is wanted for scripting.
int main(int argc, char**argv)
{
	unsigned int n;
	// optional argv[1] overrides the default grid size
	if(argc == 1) {
		n = 41;
	} else if(argc == 2) {
		n = atoi(argv[1]);
	} else {
		printf("\n Parametros no validos!"
		"\n Uso: ./derivCPU # Vector of longitud 10,000"
		"\n Uso: ./derivCPU <m> # Vector of longitud m"
		"\n");
		exit(0);
	}
	float L = 1.;
	float dx = L/(n-1);
	// byte size shared by x_h, u_h, du_h
	int size = n*sizeof(float);
	// partition of the interval [0, L]
	float* x_h = (float*) malloc( size );
	for (unsigned int i=0; i < n; i++) { x_h[i] = i*dx; }
	float* u_h = (float*) malloc( size );
	for (unsigned int i=0; i < n; i++) { u_h[i] = x_h[i]*x_h[i]; }
	float* du_h = (float*) malloc( size );
	for (unsigned int i=0; i < n; i++) { du_h[i] = u_h[i]; }
	derivCPU(u_h, du_h, dx, n);
	// for(int i=0; i < n; i++) {
	// float diff = dx - (2*x_h[i] - du_h[i]);
	// printf("%f %f %f %f\n", du_h[i], 2*x_h[i], diff, diff/dx);
	// }
	const float toleranciaRelativa = 1e-4;
	// backward difference of x^2 is 2x - dx, so (deriv - du_h[i]) should be
	// close to dx; the residual must stay within the tolerance
	for(int i=1; i < n; i++) {
		float deriv = 2*x_h[i];
		float relativeError = dx - (deriv - du_h[i]);
		if (relativeError > toleranciaRelativa
		|| relativeError < -toleranciaRelativa) {
			printf("PRUEBA FALLIDA\n\n");
			exit(0);
		}
	}
	printf("PRUEBA SUPERADA\n\n");
	free(u_h);
	free(du_h);
	free(x_h);
}
19,859 | #include <cmath>
#include <cstdio>
#include <iostream>
#include "canny.cuh"
using namespace std;
// __global__ functions can't be inlined actually
// __global__ functions can't be inlined actually
// Builds a normalized 2D Gaussian filter of side blockDim.x (assumed odd),
// one thread per tap, writing into `filter` (side*side floats).
// NOTE(review): the normalization (shared sum + atomicAdd + barrier) is
// block-scoped, so this only produces a correctly normalized filter when
// launched as a single block covering the whole filter — confirm callers
// launch it that way.
__forceinline__ __global__ void generateGaussian(float *filter, float sigma) {
	int x_idx = threadIdx.x + blockDim.x * blockIdx.x;
	int y_idx = threadIdx.y + blockDim.y * blockIdx.y;
	int sz = blockDim.x; // always odd
	// arr[0] = 2*sigma^2 (denominator), arr[1] = running sum for normalization
	__shared__ float arr[2]; // Can't use "volatile" to prevent shmem data from being directly loaded onto registers
	if (threadIdx.x == 0 && threadIdx.y == 0) {
		arr[1] = 0;
		arr[0] = 2 * sigma * sigma;
	}
	__syncthreads(); // all threads need arr[0]/arr[1] populated before use
	// unnormalized Gaussian value at offset from the filter center (sz/2, sz/2)
	filter[y_idx*sz + x_idx] = 1.0/( exp( ( (y_idx-sz/2) * (y_idx-sz/2) + (x_idx-sz/2)*(x_idx-sz/2) )/arr[0] ) * (arr[0] * M_PI) );
	/* Effectively serializing the next part of code. Hurts parallelism massively */
	// atomic accumulation protects arr[1] from concurrent updates
	atomicAdd(&arr[1], filter[y_idx*sz + x_idx]);
	__syncthreads(); // wait for all threads to have updated the sum
	// normalize so the taps sum to 1
	filter[y_idx*sz + x_idx] /= arr[1];
}
// template <int sig>
// __global__ void generateGaussian(float *filter) {
// float sigma = sig/100;
// int x_idx = threadIdx.x + blockDim.x * blockIdx.x;
// int y_idx = threadIdx.y + blockDim.y * blockIdx.y;
// int sz = blockDim.x; // always odd
// __shared__ float arr[2]; // Can't use "volatile" to prevent shmem data from being directly loaded onto registers
// // float deno = arr[0];
// // float sum = arr[1];
// if (threadIdx.x == 0 && threadIdx.y == 0) {
// arr[1] = 0;
// arr[0] = 2 * sigma * sigma; // memory transaction takes place immediately since volatile
// }
// __syncthreads(); // all should get the sum and deno values populated
// filter[y_idx*sz + x_idx] = 1.0/( exp( ( (y_idx-sz/2) * (y_idx-sz/2) + (x_idx-sz/2)*(x_idx-sz/2) )/arr[0] ) * (arr[0] * M_PI) );
// /* Effectively serializing the next part of code. Hurts parallelism massively */
// // Protection against all threads trying to modify this variable
// atomicAdd(&arr[1], filter[y_idx*sz + x_idx]); // memory transaction takes place immediately since volatile
// __syncthreads(); // wiat for all threads to have updated the "sum" variable
// filter[y_idx*sz + x_idx] /= arr[1];
// }
// Non-maximum suppression for Canny: each block stages a (bdx+2) x (bdy+2)
// halo tile of the magnitude image `magn` in dynamic shared memory, then
// zeroes supp[idx] wherever the pixel is not a local maximum along its
// gradient direction (angle from `grad`, in degrees, quantized to 4
// sectors). Requires dynamic shared memory of
// (blockDim.x+2)*(blockDim.y+2)*sizeof(float); out-of-image halo cells are
// padded with 0.
__global__ void NonMaxSuppression(float *grad, float* magn, float* supp, size_t r, size_t c) {
	int j = threadIdx.x + blockDim.x * blockIdx.x;
	int i = threadIdx.y + blockDim.y * blockIdx.y;
	int bdx = blockDim.x, bdy = blockDim.y;
	int idx = i*c+j; // code motion
	int tidx = threadIdx.x, tidy = threadIdx.y;
	float avg_intensity = 0.0; // padding value for halo cells outside the image
	extern __shared__ float img[]; // Can't use "volatile" to prevent shmem data from being directly loaded onto registers
	// load image elements in-place (each thread loads its own center cell)
	if (j < c && i < r)
		img[(tidy+1)*(bdx+2) + tidx+1] = magn[idx];
	else
		img[(tidy+1)*(bdx+2) + tidx+1] = avg_intensity;
	// edge threads of the block additionally load the halo: four corners
	// first, then the left/right columns and top/bottom rows
	if (tidx == 0 && tidy == 0) { // leftmost top corner
		if (j >= 1 && i >= 1)
			img[tidy*(bdx+2) + tidx] = magn[idx-c-1];
		else
			img[tidy*(bdx+2) + tidx] = avg_intensity;
	}
	else if (tidx == 0 && tidy == bdy - 1) { // leftmost bottom corner
		if (j >= 1 && i < r-1)
			img[(tidy+2)*(bdx+2) + tidx] = magn[idx+c-1];
		else
			img[(tidy+2)*(bdx+2) + tidx] = avg_intensity;
	}
	else if (tidx == bdx - 1 && tidy == 0) { // rightmost top corner
		if (j < c -1 && i >= 1)
			img[tidy*(bdx+2) + tidx+2] = magn[idx-c+1];
		else
			img[tidy*(bdx+2) + tidx+2] = avg_intensity;
	}
	else if (tidx == bdx - 1 && tidy == bdy -1) { // rightmost bottom corner
		if (j < c -1 && i < r-1)
			img[(tidy+2)*(bdx+2) + tidx+2] = magn[idx+c+1];
		else
			img[(tidy+2)*(bdx+2) + tidx+2] = avg_intensity;
	}
	if (tidx == 0) { // leftmost col
		if (j >= 1)
			img[(tidy+1)*(bdx+2) + tidx] = magn[idx-1];
		else
			img[(tidy+1)*(bdx+2) + tidx] = avg_intensity;
	}
	else if (tidx == bdx - 1) { // rightmost col
		if (j < c-1)
			img[(tidy+1)*(bdx+2) + tidx+2] = magn[idx+1];
		else
			img[(tidy+1)*(bdx+2) + tidx+2] = avg_intensity;
	}
	if (tidy == 0) { // top row
		if (i >= 1)
			img[tidy*(bdx+2) + tidx+1] = magn[idx-c];
		else
			img[tidy*(bdx+2) + tidx+1] = avg_intensity;
	}
	else if (tidy == bdy - 1) { // bottom row
		if (i < r-1)
			img[(tidy+2)*(bdx+2) + tidx+1] = magn[idx+c];
		else
			img[(tidy+2)*(bdx+2) + tidx+1] = avg_intensity;
	}
	__syncthreads();
	// check for out of bounds (image borders are never suppressed here)
	if (i > 0 && j > 0 && j < c-1 && i < r-1) {
		float angle = grad[idx];
		int idx1 = (tidy+1)*(bdx+2) + tidx+1; // this pixel's tile index
		// horizontal sector: compare against left/right neighbors.
		// NOTE(review): `157.5 < angle && angle <= -157.5` is always false,
		// so the +/-180 deg wrap of this sector never triggers — likely
		// intended as `angle > 157.5 || angle <= -157.5`; confirm.
		if ((-22.5 < angle && angle <= 22.5) || (157.5 < angle && angle <= -157.5)) {
			// printf("%f %f %f\n", img[idx1], img[idx1-1], img[idx1+1]);
			if (img[idx1] < img[idx1+1] || img[idx1] < img[idx1-1])
				supp[idx] = 0.0;
		}
		// vertical sector: compare against up/down neighbors
		if ((-112.5 < angle && angle <= -67.5) || (67.5 < angle && angle <= 112.5)) {
			// printf("%f %f %f\n", img[idx1], img[idx1-c], img[idx1+c]);
			if (img[idx1] < img[idx1+(bdx+2)] || img[idx1] < img[idx1-(bdx+2)])
				supp[idx] = 0.0;
		}
		// 45-degree diagonal sector
		if ((-67.5 < angle && angle <= -22.5) || (112.5 < angle && angle <= 157.5)) {
			// printf("%f %f %f\n", img[idx1], img[idx1-c+1], img[idx1+c-1]);
			if (img[idx1] < img[idx1-(bdx+2)+1] || img[idx1] < img[idx1+(bdx+2)-1])
				supp[idx] = 0.0;
		}
		// 135-degree diagonal sector
		if ((-157.5 < angle && angle <= -112.5) || (22.5 < angle && angle <= 67.5)) {
			// printf("%f %f %f\n", img[idx1], img[idx1+c+1], img[idx1-c-1]);
			if (img[idx1] < img[idx1+(bdx+2)+1] || img[idx1] < img[idx1-(bdx+2)-1])
				supp[idx] = 0.0;
		}
	}
}
// Computes the gradient magnitude and direction (in degrees, range of
// atan2: (-180, 180]) from the Sobel responses Gx/Gy for an r x c image.
// Fix: the original called the double-precision sqrt/atan2 and used the
// double literal 180.0 inside an all-float kernel, forcing float->double
// round trips; replaced with sqrtf/atan2f and float arithmetic.
__global__ void mag_grad(float *Gx, float *Gy, float *magn, float *grad, size_t r, size_t c) {
	int j = threadIdx.x + blockDim.x * blockIdx.x;
	int i = threadIdx.y + blockDim.y * blockIdx.y;
	int idx = i*c+j;
	// check for out of bounds
	if (j < c && i < r) {
		magn[idx] = sqrtf(Gx[idx] * Gx[idx] + Gy[idx] * Gy[idx]);
		// branchless select keeps the warp convergent: 90 degrees when
		// Gx == 0, otherwise atan2 mapped to degrees
		grad[idx] = (Gx[idx] == 0) * 90.0f + (Gx[idx] != 0) * (atan2f(Gy[idx], Gx[idx]) * 180.0f / (float)M_PI);
	}
}
// Spinlock acquire: busy-waits until atomicCAS flips *mutex from 0 to 1.
__device__ void lock(volatile int *mutex) { // spinlock
	while (atomicCAS((int*)mutex, 0, 1) != 0);
	// other threads in the warp keep spinning, so the thread in the critical
	// section can't be scheduled to release the mutex. Warp-level semantics
}
// Spinlock release: atomically stores 0 back into *mutex.
__device__ void unlock(volatile int *mutex) {
	atomicExch((int*)mutex, 0);
}
// Seeds the hysteresis queue: marks strong pixels (supp > high) as 1.0 and
// pushes their {row, col} pairs into q, serialized by a global spinlock.
// NOTE(review): each block snapshots *back into its own shared arr[0] and
// never writes it back, so with multiple blocks the queue slots collide and
// *back is stale afterwards — confirm single-block use, or replace the
// shared counter with atomicAdd(back, 2). The printf is debug output.
__global__ void q_init(float* supp, float high, float *q, int *back, size_t r, size_t c, int* mutex) {
	int j = threadIdx.x + blockDim.x * blockIdx.x;
	int i = threadIdx.y + blockDim.y * blockIdx.y;
	int idx = i*c+j;
	// arr[0]: block-local copy of the queue tail index
	__shared__ int arr[1];
	if (i == 0 && j == 0) {
		arr[0] = *back;
	}
	__syncthreads();
	if (i < r && j < c && supp[idx] > high) {
		supp[idx] = 1.0;
		lock(mutex);
		// push {i,j} into queue if its value > high
		q[arr[0]] = i;
		q[arr[0] + 1] = j;
		printf("Value of back is %d from idx %d %d\n", arr[0], i, j);
		arr[0] += 2;
		unlock(mutex);
	}
}
// One hysteresis relaxation pass: every strong pixel (supp > high) is set to
// 1.0 and each of its 8-connected weak neighbors (> low, not yet 1.0) is
// promoted to 1.0; the number of promotions is accumulated into *ctr so the
// host can iterate until no pixel changes.
// NOTE(review): arr[0] is seeded from *ctr and written back by thread (0,0)
// of EVERY block, so with more than one block these read-modify-write
// cycles race and promotions are lost from the count — confirm single-block
// launches or switch to atomicAdd directly on *ctr.
__global__ void hysteresis(float* supp, size_t r, size_t c, float low, float high, int* ctr) {
	int j = threadIdx.x + blockDim.x * blockIdx.x;
	int i = threadIdx.y + blockDim.y * blockIdx.y;
	int idx = i*c+j;
	// block-local promotion counter (volatile so atomics see fresh values)
	volatile __shared__ int arr[1];
	if (threadIdx.x == 0 && threadIdx.y == 0)
		arr[0] = *ctr;
	__syncthreads();
	if (i < r && j < c) {
		if (supp[idx] > high) {
			supp[idx] = 1.0;
			// unroll loops
			if (i+1 < r && j+1 < c && supp[(i+1)*c+(j+1)] > low && supp[(i+1)*c+(j+1)] != 1.0) { // southeast
				supp[(i+1)*c+(j+1)] = 1.0;
				atomicAdd((int*)&arr[0], 1);
			}
			if (j+1 < c && supp[i*c+(j+1)] > low && supp[i*c+(j+1)] != 1.0) { // east
				supp[i*c+(j+1)] = 1.0;
				atomicAdd((int*)&arr[0], 1);
			}
			if (i+1 < r && supp[(i+1)*c+j] > low && supp[(i+1)*c+j] != 1.0) { // south
				supp[(i+1)*c+j] = 1.0;
				atomicAdd((int*)&arr[0], 1);
			}
			if (i-1 >= 0 && supp[(i-1)*c+j] > low && supp[(i-1)*c+j] != 1.0) { // north
				supp[(i-1)*c+j] = 1.0;
				atomicAdd((int*)&arr[0], 1);
			}
			if (j-1 >= 0 && supp[i*c+(j-1)] > low && supp[i*c+(j-1)] != 1.0) { // west
				supp[i*c+(j-1)] = 1.0;
				atomicAdd((int*)&arr[0], 1);
			}
			if (i+1 < r && j-1 >= 0 && supp[(i+1)*c+(j-1)] > low && supp[(i+1)*c+(j-1)] != 1.0) { // southwest
				supp[(i+1)*c+(j-1)] = 1.0;
				atomicAdd((int*)&arr[0], 1);
			}
			if (i-1 >= 0 && j+1 < c && supp[(i-1)*c+(j+1)] > low && supp[(i-1)*c+(j+1)] != 1.0) { // northeast
				supp[(i-1)*c+(j+1)] = 1.0;
				atomicAdd((int*)&arr[0], 1);
			}
			if (i-1 >= 0 && j-1 >= 0 && supp[(i-1)*c+(j-1)] > low && supp[(i-1)*c+(j-1)] != 1.0) { // northwest
				supp[(i-1)*c+(j-1)] = 1.0;
				atomicAdd((int*)&arr[0], 1);
			}
		}
	}
	__syncthreads(); // need all other threads in warp to increment arr[0] to get correct value of *ctr
	if (threadIdx.x == 0 && threadIdx.y == 0)
		*ctr = arr[0];
}
// template <int l, int h>
// __global__ void hysteresis(float* supp, size_t r, size_t c, int* ctr) {
// float low = l/100, high = h/100;
// int j = threadIdx.x + blockDim.x * blockIdx.x;
// int i = threadIdx.y + blockDim.y * blockIdx.y;
// int idx = i*c+j;
// volatile __shared__ int arr[1];
// if (threadIdx.x == 0 && threadIdx.y == 0)
// arr[0] = *ctr;
// __syncthreads();
// if (i < r && j < c) {
// if (supp[idx] > high) {
// supp[idx] = 1.0;
// // unroll loops
// if (i+1 < r && j+1 < c && supp[(i+1)*c+(j+1)] > low && supp[(i+1)*c+(j+1)] != 1.0) { // southeast
// supp[(i+1)*c+(j+1)] = 1.0;
// atomicAdd((int*)&arr[0], 1);
// }
// if (j+1 < c && supp[i*c+(j+1)] > low && supp[i*c+(j+1)] != 1.0) { // east
// supp[i*c+(j+1)] = 1.0;
// atomicAdd((int*)&arr[0], 1);
// }
// if (i+1 < r && supp[(i+1)*c+j] > low && supp[(i+1)*c+j] != 1.0) { // south
// supp[(i+1)*c+j] = 1.0;
// atomicAdd((int*)&arr[0], 1);
// }
// if (i-1 >= 0 && supp[(i-1)*c+j] > low && supp[(i-1)*c+j] != 1.0) { // north
// supp[(i-1)*c+j] = 1.0;
// atomicAdd((int*)&arr[0], 1);
// }
// if (j-1 >= 0 && supp[i*c+(j-1)] > low && supp[i*c+(j-1)] != 1.0) { // west
// supp[i*c+(j-1)] = 1.0;
// atomicAdd((int*)&arr[0], 1);
// }
// if (i+1 < r && j-1 >= 0 && supp[(i+1)*c+(j-1)] > low && supp[(i+1)*c+(j-1)] != 1.0) { // southwest
// supp[(i+1)*c+(j-1)] = 1.0;
// atomicAdd((int*)&arr[0], 1);
// }
// if (i-1 >= 0 && j+1 < c && supp[(i-1)*c+(j+1)] > low && supp[(i-1)*c+(j+1)] != 1.0) { // northeast
// supp[(i-1)*c+(j+1)] = 1.0;
// atomicAdd((int*)&arr[0], 1);
// }
// if (i-1 >= 0 && j-1 >= 0 && supp[(i-1)*c+(j-1)] > low && supp[(i-1)*c+(j-1)] != 1.0) { // northwest
// supp[(i-1)*c+(j-1)] = 1.0;
// atomicAdd((int*)&arr[0], 1);
// }
// }
// }
// __syncthreads(); // need all other threads in warp to increment arr[0] to get correct value of *ctr
// if (threadIdx.x == 0 && threadIdx.y == 0)
// *ctr = arr[0];
// }
// Zeroes every pixel that was not promoted to a strong edge (exactly 1.0),
// leaving a clean binary edge map.  One thread per pixel, 2-D launch.
__global__ void weak_disconnected_edge_removal(float* supp, size_t r, size_t c) {
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    const int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (row < r && col < c) {
        const int idx = row * c + col;
        if (supp[idx] != 1.0)
            supp[idx] = 0.0;
    }
}
|
19,860 | #include <cuda.h>
#include <stdio.h>
// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, n).
// One thread per element; the bounds guard handles the ragged final block.
__global__
void vecAddKernel(float *A, float *B, float *C, int n) {
    int i = threadIdx.x+blockDim.x*blockIdx.x;
    // (debug printf removed: a device-side printf per thread serializes the
    // kernel and floods stdout with one line per element)
    if(i<n)
        C[i] = A[i] + B[i];
}
// Host wrapper: copies A and B (n floats each) to the device, launches
// vecAddKernel, and copies the element-wise sum back into C.
void vecAdd(float *A, float *B, float *C, int n) {
    int s = n*sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, s);
    cudaMalloc((void**)&d_B, s);
    cudaMalloc((void**)&d_C, s);
    cudaMemcpy(d_A, A, s, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, s, cudaMemcpyHostToDevice);
    // 256 threads per block (a multiple of the 32-lane warp).  The original
    // launched 2-thread blocks, leaving 30 of every 32 warp lanes idle.
    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;   // ceil-div covers the tail
    vecAddKernel<<<blocks, threads>>>(d_A, d_B, d_C, n);
    // cudaMemcpy (device->host) synchronizes with the kernel before copying.
    cudaMemcpy(C, d_C, s, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Smoke test: add two 5-element vectors on the GPU and print the result.
int main() {
    const int n = 5;
    float A[] = {1, 2, 3, 4, 5};
    float B[] = {2, 3, 4, 5, 6};
    float C[n];
    vecAdd(A, B, C, n);
    for (int i = 0; i < n; ++i)
        printf("C[%d]=%f\n", i, C[i]);
}
|
19,861 | /*
Vector addition
*/
#include <stdio.h>
#define N 128
// Adds a[tid] and b[tid] into c[tid]; threads at or beyond N do nothing.
__global__ void add( int *a, int *b, int *c ) {
    const int tid = threadIdx.x;
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}
// Driver: fills two N-element host arrays, adds them on the GPU with a
// single N-thread block, and prints every resulting sum.
int main() {
    int host_a[N], host_b[N], host_c[N];
    int *dev_a, *dev_b, *dev_c;
    const size_t bytes = N * sizeof(int);
    for (int i = 0; i < N; ++i) {
        host_a[i] = i * i;
        host_b[i] = -i;
    }
    cudaMalloc((void**)&dev_a, bytes);
    cudaMalloc((void**)&dev_b, bytes);
    cudaMalloc((void**)&dev_c, bytes);
    cudaMemcpy(dev_a, host_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, host_b, bytes, cudaMemcpyHostToDevice);
    add<<<1, N>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(host_c, dev_c, bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; ++i)
        printf("%d + %d = %d\n", host_a[i], host_b[i], host_c[i]);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
19,862 | #include <stdlib.h>
#include <string.h>
#include <time.h>
// CPU reference: element-wise sum C[i] = A[i] + B[i] over N elements.
void sumArrayOnHost(float* A, float* B, float* C, const int N){
    for (int i = 0; i < N; ++i)
        C[i] = A[i] + B[i];
}
// Fills ip[0..size) with pseudo-random floats in [0.0, 25.5].
// BUG FIX: the generator is now seeded from the wall clock only on the first
// call.  Re-seeding on every call meant that two invocations within the same
// second (e.g. filling h_A and then h_B in main) produced identical data.
void initialData(float* ip, int size){
    static bool seeded = false;
    if (!seeded) {
        time_t t;
        srand((unsigned int)time(&t));
        seeded = true;
    }
    for(int i = 0; i<size; i++){
        // low byte of rand() scaled to one decimal place
        ip[i] = (float)(rand() & 0xFF)/10.0f;
    }
}
// Allocates three host arrays, fills A and B with random data, computes
// C = A + B on the CPU, then releases everything.
int main(int argc, char** argv){
    const int nElem = 1024;
    const size_t nBytes = nElem * sizeof(float);
    float *h_A = (float *)malloc(nBytes);
    float *h_B = (float *)malloc(nBytes);
    float *h_C = (float *)malloc(nBytes);
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    sumArrayOnHost(h_A, h_B, h_C, nElem);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
|
19,863 | /*CWM HPC Part B Assignment: Monte Carlo Method for calculating pi value on GPU
2021/5/58 Jianhao Yuan */
// reference: https://blog.csdn.net/ichocolatekapa/article/details/18960223
//import libs
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
//curand for random points generate
#include <curand.h>
#include <curand_kernel.h>
//Define constants (use 256 threads, and max trial times: 2000000)
#define MAX_THREAD 256
#define MAX_COUNT 2000000
//Kernel
// Monte-Carlo estimate of pi.  Each thread draws *count points uniformly in
// the unit square, counts those inside the quarter circle, and writes its own
// estimate 4*a/n to res[threadIdx.x]; the host averages over all threads.
__global__ void get_pi(float *res,int *count){
    int a = 0;                        // hits inside the quarter circle
    int index_x = threadIdx.x;
    int n = *count;                   // trials per thread
    float x, y;
    // Fixed seed with a per-thread subsequence: reproducible, independent streams.
    curandState s;
    curand_init(42, index_x, 0, &s);
    for (int i = 0; i < n; i++) {
        x = curand_uniform(&s);
        y = curand_uniform(&s);
        // x*x + y*y stays in float; the original pow(x,2)+pow(y,2) promoted
        // every iteration to double-precision pow().
        if (x * x + y * y <= 1.0f) {
            a++;
        }
    }
    // Write the estimate once, after the loop.  The original stored it (and
    // hit __syncthreads()) on every iteration; no shared memory is used, so
    // no barrier is needed at all.
    res[index_x] = 4.0f * (float)a / (float)n;
}
// Host driver: launches one block of MAX_THREAD Monte-Carlo workers, averages
// their per-thread pi estimates, and prints the estimate and its error.
int main(void){
    float *h_pi, *d_pi, pi = 0, err;
    int maxThread = MAX_THREAD, *h_count, *d_count, i;
    // one estimate slot per thread on the host and device
    h_pi = (float *)malloc(sizeof(float) * maxThread);
    h_count = (int *)malloc(sizeof(int));
    cudaMalloc((void **)&d_pi, sizeof(float) * maxThread);
    cudaMalloc((void **)&d_count, sizeof(int));
    // trials per thread, pushed to the device
    h_count[0] = MAX_COUNT;
    cudaMemcpy(d_count, h_count, sizeof(int), cudaMemcpyHostToDevice);
    get_pi<<<1, maxThread>>>(d_pi, d_count);
    cudaMemcpy(h_pi, d_pi, sizeof(float) * maxThread, cudaMemcpyDeviceToHost);
    // average the MAX_THREAD per-thread estimates
    for (i = 0; i < maxThread; i++)
        pi += h_pi[i];
    pi = pi / maxThread;
    // absolute error against the library constant
    err = pi - (float)M_PI;
    if (err < 0) {
        err = -err;
    }
    printf("Points: %d, Generated π: %f, Error: %.0fe-6\n",h_count[0] * maxThread, pi, err * 1000000);
    free(h_pi);
    free(h_count);
    cudaFree(d_pi);
    cudaFree(d_count);
    return 0;
}
|
19,864 | #include "includes.h"
// Flattens the 3-D grid / 3-D block coordinates into one global thread id.
__device__ unsigned int getGid3d3d(){
    int block = blockIdx.x
              + blockIdx.y * gridDim.x
              + blockIdx.z * gridDim.x * gridDim.y;
    int local = threadIdx.x
              + threadIdx.y * blockDim.x
              + threadIdx.z * blockDim.x * blockDim.y;
    return block * (blockDim.x * blockDim.y * blockDim.z) + local;
}
// Fills per-grid-point operator tables from the potential V, kinetic term K
// and gauge fields Ax/Ay/Az (presumably for a split-step evolution solver —
// confirm against the caller):
//   GV/GK     : real exponentials exp(-V*gdt/2HBAR), exp(-K*gdt/HBAR), .y = 0
//   EV/EK     : cos/sin pairs of -V*dt/(2HBAR) and -K*dt/HBAR
//   pAx/pAy/pAz and their G*/E* tables: gauge field times the axis momentum.
// HBAR comes from includes.h.  Assumes px/py/pz are indexed by the per-axis
// thread coordinates (xid/yid/zid) — TODO confirm grid dims match array sizes.
__global__ void aux_fields(double *V, double *K, double gdt, double dt, double* Ax, double *Ay, double* Az, double *px, double *py, double *pz, double* pAx, double* pAy, double* pAz, double2* GV, double2* EV, double2* GK, double2* EK, double2* GpAx, double2* GpAy, double2* GpAz, double2* EpAx, double2* EpAy, double2* EpAz){
    int gid = getGid3d3d();
    int xid = blockDim.x*blockIdx.x + threadIdx.x;
    int yid = blockDim.y*blockIdx.y + threadIdx.y;
    int zid = blockDim.z*blockIdx.z + threadIdx.z;
    // real-valued exponentials: imaginary parts explicitly zeroed
    GV[gid].x = exp(-V[gid]*(gdt/(2*HBAR)));
    GK[gid].x = exp(-K[gid]*(gdt/HBAR));
    GV[gid].y = 0.0;
    GK[gid].y = 0.0;
    // Ax and Ay will be calculated here but are used only for
    // debugging. They may be needed later for magnetic field calc
    pAx[gid] = Ax[gid] * px[xid];
    pAy[gid] = Ay[gid] * py[yid];
    pAz[gid] = Az[gid] * pz[zid];
    GpAx[gid].x = exp(-pAx[gid]*gdt);
    GpAx[gid].y = 0;
    GpAy[gid].x = exp(-pAy[gid]*gdt);
    GpAy[gid].y = 0;
    GpAz[gid].x = exp(-pAz[gid]*gdt);
    GpAz[gid].y = 0;
    // complex phases stored as (cos, sin) pairs in .x/.y
    EV[gid].x=cos(-V[gid]*(dt/(2*HBAR)));
    EV[gid].y=sin(-V[gid]*(dt/(2*HBAR)));
    EK[gid].x=cos(-K[gid]*(dt/HBAR));
    EK[gid].y=sin(-K[gid]*(dt/HBAR));
    EpAz[gid].x=cos(-pAz[gid]*dt);
    EpAz[gid].y=sin(-pAz[gid]*dt);
    EpAy[gid].x=cos(-pAy[gid]*dt);
    EpAy[gid].y=sin(-pAy[gid]*dt);
    EpAx[gid].x=cos(-pAx[gid]*dt);
    EpAx[gid].y=sin(-pAx[gid]*dt);
}
19,865 | #include "imageprocessing.cuh"
#include <stdio.h>
#include <iostream>
#include <string.h>
#include <math.h>
#include <sstream>
// TODO: read about the CUDA programming model: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#programming-model
// If everything is setup correctly, this file is compiled by the CUDA/C++ compiler (that is different from the C++ compiler).
// The CUDA/C++ compiler understands certain things that your C++ compiler doesn't understand - like '__global__', 'threadIdx', and function calls with triple-angle brackets, e.g., testArray<<<...>>>();
// do not use this method for anything else than verifying cuda compiled, linked and executed
// Setup-verification kernel: thread t writes `value` into dst[t].
// Intended for a single-block launch only (indexes by threadIdx.x alone).
__global__ void testArray(float* dst, float value) {
    dst[threadIdx.x] = value;
}
// Quick sanity check that CUDA compiles, launches and copies correctly:
// fills a 1024-element device array with -0.5f and prints three samples.
void testCudaCall() {
    const unsigned int N = 1024;
    float* device_array;
    cudaMalloc(&device_array, N * sizeof(float));
    testArray<<<1, N>>>(device_array, -0.5f);
    float x[N];
    // blocking copy — also synchronizes with the kernel above
    cudaMemcpy(x, device_array, N * sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << "quick and dirty test of CUDA setup: " << x[0] << " " << x[1] << " " << x[1023] << std::endl;
    cudaFree(device_array);
}
// TODO: implement the image processing operations using CUDA kernels
|
19,866 | //pass
//--gridDim=[32768,1,1] --blockDim=[512,1,1]
__global__ void SimpleKernel(float *src, float *dst)
{
// Just a dummy kernel, doing enough for us to verify that everything
// worked
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] * 2.0f;
}
|
19,867 | #include <stdio.h>
#include <stdlib.h>
extern int N;
// Device-side reduction of g_num[0..TotalNum) into g_sum.
// Stub: the kernel body is not implemented yet.
__global__ void gpuReduction(int *g_num,
int *g_sum,
int TotalNum) {
// TODO: implement kernel code here
}
// Host-side driver that should reduce `array` (length N) on the GPU via
// gpuReduction.  Stub: always returns 0 until implemented.
// NOTE(review): the parameter N shadows the file-level `extern int N` —
// confirm which one the eventual implementation should use.
double reduction_cuda(int *array, int N) {
// TODO: implement host code here
return 0;
}
|
19,868 | #include<stdio.h>
#define N 100
#include <math.h>
// Vector addition launched with a 2-D thread block: the (x, y) thread
// coordinates and the 1-D block index are flattened into one element index.
// NOTE: the guard uses the file-level macro N, not the parameter n.
__global__ void vector_add(float *out, float *a, float *b, int n) {
    const int threadsPerBlock = blockDim.x * blockDim.y;
    const int local = threadIdx.y * blockDim.x + threadIdx.x;
    const int id = blockIdx.x * threadsPerBlock + local;
    // guard the ragged tail
    if (id < N) {
        out[id] = a[id] + b[id];
    }
}
// Driver: builds two N-element vectors on the host, adds them on the GPU
// with 2 blocks of 2x25 threads (100 threads, one per element), prints all
// results, and frees both host and device memory.
int main(){
    float *a, *b, *out;
    float *d_a, *d_b, *d_c;
    const size_t bytes = sizeof(float) * N;
    // host buffers
    a = (float*)malloc(bytes);
    b = (float*)malloc(bytes);
    out = (float*)malloc(bytes);
    // device buffers
    cudaMalloc((void**)&d_a, bytes);
    cudaMalloc((void**)&d_b, bytes);
    cudaMalloc((void**)&d_c, bytes);
    // initialize inputs: a[i] + b[i] == 1 + sin(i) + cos(i)
    for (int i = 0; i < N; i++){
        a[i] = sin(i)*sin(i)+cos(i);
        b[i] = cos(i)*cos(i)+sin(i);
    }
    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);
    // 2-D thread block; the kernel flattens (x, y) into a global index
    dim3 threads(2, 25);
    vector_add<<<2, threads>>>(d_c, d_a, d_b, N);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(out, d_c, bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("%lf,", out[i]);
    }
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    free(a); free(b); free(out);
}
|
19,869 | #include "includes.h"
// Writes each thread's global x-index into a[index] for indices below n.
__global__ void findID(double *a, int n){
    // global thread id along x
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // bounds guard for the ragged last block
    if (gid < n){
        a[gid] = gid;
    }
}
19,870 | #include <stdio.h>
#include <assert.h>
#include <math.h>
#include "CPUconvLayer.cuh"
// Accumulates the dot product of A and B (length n) INTO *C.
// NOTE: *C is deliberately not zeroed here — convLayerCPU relies on this to
// sum contributions across input channels.  Callers must initialize *C.
__host__ void dotProduct(float* A, float* B, float* C, int n)
{
    for (int i = 0; i < n; i++) {
        *C += A[i] * B[i];
    }
}
/*
input_maps: (N, N, N, F_in, S) input feature maps
weights: (K, K, K, F_out, F_in) filter weights
output_maps: (M, M, M, F_out, S) output feature maps
N: input feature map dimension (same in x, y, z direction)
M: output map dimension
F_in: number of channels in input layer
F_out: number of channels in output layer
K: filter size (same in x, y, z direction)
S: batch size
access input_maps[i, j, k, f_in, s] by input_maps[(N*N*N*F_in)*s + (N*N*N)*f_in + (N*N)*k + (N)*j + i ]
access weights[i, j, k, f_out, f_in] by weights[(K*K*K*F_out)*f_in + (K*K*K)*f_out + (K*K)*k + (K)*j + i ]
access output_maps[i, j, k, f_out, s] by output_maps[(M*M*M*F_out)*s + (M*M*M)*f_out + (M*M)*k + (M)*j + i ]
(the formula above is corrected: the original comment mistakenly used
input_maps and the N dimension for the output tensor)
*/
// CPU reference 3-D convolution layer (valid padding, stride 1).
// Precondition: output_maps must be zero-initialized by the caller, since
// channel contributions are accumulated via dotProduct.
void convLayerCPU(float* input_maps, float* weights, float* output_maps, int N, int M, int F_in, int F_out, int K, int S)
{
    int size = K*K*K * sizeof(float);
    float *filter = (float *) malloc(size);
    float *sig = (float *) malloc(size);
    // iterate through samples
    for (int s = 0; s < S; s++) {
        // iterate through output feature maps
        for (int f_out = 0; f_out < F_out; f_out++) {
            // iterate through input feature maps
            for (int f_in = 0; f_in < F_in; f_in++) {
                // set filter for this f_out - f_in combo (same for all i, j, k)
                for (int kx = 0; kx < K; kx++) {
                    for (int ky = 0; ky < K; ky++) {
                        for (int kz = 0; kz < K; kz++) {
                            filter[(K*K)*kz + (K)*ky + kx] = weights[(K*K*K*F_out)*f_in + (K*K*K)*f_out + (K*K)*kz + (K)*ky + kx];
                        }
                    }
                } //end filter setup loop
                // go through all spatial locations of the output map
                for (int i = 0; i < M; i++) {
                    for (int j = 0; j < M; j++) {
                        for (int k = 0; k < M; k++) {
                            // gather the input window for this location
                            for (int a = 0; a < K; a++) {
                                for (int b = 0; b < K; b++) {
                                    for (int c = 0; c < K; c++) {
                                        sig[(K*K)*c + (K)*b + a] = input_maps[(N*N*N*F_in)*s + (N*N*N)*f_in + (N*N)*(k+c) + (N)*(j+b) + (i+a)];
                                    }
                                }
                            } //end signal setup loop
                            // accumulate this channel's contribution to the output
                            dotProduct(filter, sig, &output_maps[(M*M*M*F_out)*s + (M*M*M)*f_out + (M*M)*k + (M)*j + i], K*K*K);
                        }
                    }
                }
            } //end f_in loop
        } //end f_out loop
    } //end samples loop
    // BUG FIX: these scratch buffers leaked on every call
    free(filter);
    free(sig);
}
19,871 | #include "includes.h"
// One synchronous update step of an n x n periodic spin lattice under a 5x5
// neighbour weight stencil (centre excluded).  Each block stages a
// (BLOCK_SIZE+4)^2 tile of gpuG — the block's moments plus a 2-cell halo on
// every side, with torus wrap-around via (x + n) % n — into shared memory,
// computes the weighted influence for each moment and writes the next-step
// spin (+1, -1, or unchanged when the influence is within +/-0.0001) to
// gpuTempGrid.  *flag signals to the host that some spin changed.
// Assumes blockDim.x == blockDim.y == BLOCK_SIZE (from includes.h); the
// grid-stride loops let a smaller grid cover the whole lattice.
// NOTE(review): the final (-1) branch raises *flag when the OLD spin was
// already -1, i.e. when nothing changed — this looks like it was meant to
// test == 1; confirm against the host convergence loop.
__global__ void cudaKernel(int n, double* gpuWeights, int* gpuG, int* gpuTempGrid, int *flag)
{
    // Moment's coordinates in the grid //
    int momentCol = blockIdx.x*blockDim.x + threadIdx.x;
    int momentRow = blockIdx.y*blockDim.y + threadIdx.y;
    // Shared memory allocated for weights //
    __shared__ double sharedWeights[25];
    // Shared memory allocated for a block of moments //
    // Size is (BLOCK_SIZE+4)^2 //
    int sharedSize = (BLOCK_SIZE+4);
    __shared__ int sharedG[(BLOCK_SIZE+4)*(BLOCK_SIZE+4)];
    // Moment's coordinates in the shared memory //
    int sharedRow = threadIdx.y+2;
    int sharedCol= threadIdx.x+2;
    // Indexes used to read from global memory //
    int idxRow, idxCol;
    // Variable storing neighbourhood's influence //
    double weightFactor = 0.0;
    // Store the 5x5 weight stencil in shared memory //
    if(threadIdx.x<5 && threadIdx.y<5)
        sharedWeights[threadIdx.x*5+threadIdx.y] = gpuWeights[threadIdx.x*5+threadIdx.y];
    // In this double loop, moments and their necessary neighbours are //
    // passed from global to shared memory. After this data transfer //
    // each thread calculates the atomic spin of its moment, based on //
    // the spins of the moment's neighbours //
    for(int i=momentRow; i<n+2; i+=blockDim.y*gridDim.y)
    {
        for(int j=momentCol; j<n+2; j+=blockDim.x*gridDim.x)
        {
            // Store moment in shared memory (torus wrap via modulo) //
            sharedG[sharedRow*sharedSize+sharedCol] = gpuG[( (i + n)%n )*n + ( (j + n)%n )];
            // In this if statement, we also add to shared memory the -2 left neighbour, //
            // the +BLOCK_SIZE right neighbour of every moment on the 0 and 1 column. //
            // We also add the corners of the block that are necessary in order //
            // to calculate the atomic spins of the block. All this work is done //
            // by threads positioned in column 0 and 1. //
            if(threadIdx.x < 2)
            {
                // Left Boundaries //
                idxRow = (i + n)%n;
                idxCol = (-2 + j + n)%n;
                sharedG[(sharedRow)*sharedSize+sharedCol-2] = gpuG[n*idxRow+idxCol];
                // Right Boundaries //
                idxCol = (BLOCK_SIZE + j + n)%n;
                sharedG[(sharedRow)*sharedSize+sharedCol+BLOCK_SIZE] = gpuG[n*idxRow+idxCol];
                if(threadIdx.y <2)
                {
                    // Top Left Corner //
                    idxRow = (-2 + i + n)%n;
                    idxCol = (-2 + j + n)%n;
                    sharedG[(sharedRow-2)*sharedSize+sharedCol-2] = gpuG[n*idxRow+idxCol];
                    // Bottom Left Corner //
                    idxRow = (i + n + BLOCK_SIZE)%n;
                    idxCol = (-2 + j + n)%n;
                    sharedG[(sharedRow+ BLOCK_SIZE)*sharedSize+sharedCol-2] = gpuG[n*idxRow+idxCol];
                    // Top Right Corner//
                    idxRow = (-2 + i + n)%n;
                    idxCol = (j + n + BLOCK_SIZE)%n;
                    sharedG[(sharedRow-2)*sharedSize+sharedCol + BLOCK_SIZE] = gpuG[n*idxRow+idxCol];
                    // Bottom Right Corner//
                    idxRow = (i + n+BLOCK_SIZE)%n;
                    idxCol = (j + n+BLOCK_SIZE)%n;
                    sharedG[(sharedRow+BLOCK_SIZE)*sharedSize+sharedCol+BLOCK_SIZE] = gpuG[n*idxRow+idxCol];
                }
            }
            // In this if statement we also add the top and bottom neighbours of //
            // the block. This is done by threads positioned in row 0 and 1. //
            if(threadIdx.y < 2)
            {
                // Top Boundaries //
                idxRow = (-2 + i + n)%n;
                idxCol = (j + n)%n;
                sharedG[(sharedRow-2)*sharedSize+sharedCol] = gpuG[n*idxRow+idxCol];
                // Bottom Boundaries //
                idxRow = (i + n+BLOCK_SIZE)%n;
                sharedG[(sharedRow+BLOCK_SIZE)*sharedSize+sharedCol] = gpuG[n*idxRow+idxCol];
            }
            // Synchronize all threads to ensure that writing to shared memory is done. //
            __syncthreads();
            // Compute the spins of moments with coordinates within n-size //
            if(i<n && j<n)
            {
                weightFactor = 0.0;
                for(int row=0; row<5; row++)
                {
                    for(int col=0; col<5; col++)
                    {
                        if(col==2 && row==2)
                            continue;
                        // Calculate neighbourhood's total influence //
                        weightFactor+= sharedG[(sharedRow-2+row)*sharedSize+sharedCol-2+col] * sharedWeights[row*5+col];
                    }
                }
                // Determine future atomic spin value based on total influence //
                if(weightFactor < 0.0001 && weightFactor > -0.0001)
                {
                    // influence ~0: keep the current spin
                    gpuTempGrid[n*i+j] = sharedG[sharedRow*sharedSize+sharedCol];
                }else if(weightFactor > 0.00001)
                {
                    gpuTempGrid[n*i+j] = 1;
                    if (gpuG[n*i+j] == -1)
                    {
                        *flag = 1;
                    }
                }else
                {
                    gpuTempGrid[n*i+j] = -1;
                    // NOTE(review): see header — this condition likely should be == 1
                    if (gpuG[n*i+j] == -1)
                    {
                        *flag = 1;
                    }
                }
            }
            // Synchronize threads before writing again to shared memory //
            // to ensure that no one is reading data from shared memory //
            __syncthreads();
        }
    }
}
19,872 |
// Adds A and B into C; every thread handles `elements_per_thread` entries
// laid out according to `tiling_strategy`:
//   0 = contiguous chunk per thread, 1 = block-strided, 2 = grid-strided.
template<int block_size, int elements_per_thread, int tiling_strategy>
__global__ void vector_add(int n, float* C, const float* A, const float* B) {
    static_assert(
        tiling_strategy >= 0 && tiling_strategy < 3,
        "invalid tiling strategy");
    const int tid = blockIdx.x * block_size + threadIdx.x;
    for (int k = 0; k < elements_per_thread; k++) {
        int i;
        if (tiling_strategy == 0) {
            // contiguous: thread owns [tid*e, tid*e + e)
            i = tid * elements_per_thread + k;
        } else if (tiling_strategy == 1) {
            // block-strided: stride of block_size inside the block's chunk
            i = blockIdx.x * elements_per_thread * block_size
                + threadIdx.x + k * block_size;
        } else {
            // grid-strided: stride of the whole grid
            i = tid + k * (gridDim.x * block_size);
        }
        if (i < n) {
            C[i] = A[i] + B[i];
        }
    }
}
19,873 | extern "C"
// --- element-wise scalar / unary kernels -------------------------------
// One thread per element, guarded by i < n.  (The extern "C" for this first
// kernel is on the preceding line.)
// result[i] = min(a[i], b)
__global__ void capByScalar(int n, float *a, float b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] < b ? a[i] : b;
}
// result[i] = max(a[i], b)
extern "C"
__global__ void floorByScalar(int n, float *a, float b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] > b ? a[i] : b;
}
// result[i] = a[i] + b
extern "C"
__global__ void addScalar(int n, float *a, float b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] + b;
}
// result[i] = a[i] - b
extern "C"
__global__ void subScalar(int n, float *a, float b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] - b;
}
// reversed subtraction: result[i] = b - a[i]
extern "C"
__global__ void busScalar(int n, float *a, float b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = -a[i] + b;
}
// result[i] = a[i] * b
extern "C"
__global__ void multScalar(int n, float *a, float b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] * b;
}
// result[i] = a[i] / b
extern "C"
__global__ void divScalar(int n, float *a, float b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] / b;
}
// reversed division: result[i] = b / a[i]
extern "C"
__global__ void vidScalar(int n, float *a, float b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = b / a[i];
}
// result[i] = a[i]^2
extern "C"
__global__ void squared(int n, float *a, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] * a[i];
}
// result[i] = a[i]^b
extern "C"
__global__ void cuPow(int n, float *a, float b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = pow(a[i],b);
}
// result[i] = sqrt(a[i])
extern "C"
__global__ void cuSqrt(int n, float *a, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = sqrt(a[i]);
}
// result[i] = e^a[i]
extern "C"
__global__ void cuExp(int n, float *a, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = exp(a[i]);
}
// result[i] = ln(a[i])
extern "C"
__global__ void cuLog(int n, float *a, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = log(a[i]);
}
// result[i] = 1 / a[i]
extern "C"
__global__ void invert(int n, float *a, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = 1.0f / a[i];
}
// result[i] = |a[i]|
extern "C"
__global__ void cuAbs(int n, float *a, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = abs(a[i]);
}
// --- element-wise binary / fused kernels -------------------------------
// One thread per element, guarded by i < n.
// result[i] = min(a[i], b[i])
extern "C"
__global__ void cap(int n, float *a, float *b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] < b[i] ? a[i] : b[i];
}
// result[i] = max(a[i], b[i])
extern "C"
__global__ void cuFloor(int n, float *a, float *b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] > b[i] ? a[i] : b[i];
}
// result[i] = a[i] + b[i]
extern "C"
__global__ void add(int n, float *a, float *b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] + b[i];
}
// result[i] = a[i] - b[i]
extern "C"
__global__ void sub(int n, float *a, float *b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] - b[i];
}
// result[i] = a[i] * b[i]
extern "C"
__global__ void mult(int n, float *a, float *b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] * b[i];
}
// result[i] = a[i] / b[i]
extern "C"
__global__ void cuDiv(int n, float *a, float *b, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] / b[i];
}
// compound growth: result[i] = a[i] * (1 + b[i]*p)
extern "C"
__global__ void accrue(int n, float *a, float *b, float p, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] * (1.0f + b[i] * p);
}
// result[i] = a[i] / (1 + b[i]*p).  The intermediate variables keep the
// multiply and add as separate statements so the compiler does not fuse
// them into an fma (original intent preserved).
extern "C"
__global__ void discount(int n, float *a, float *b, float p, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        float scaled = b[i] * p;
        float denom = (1.0f + scaled);
        result[i] = a[i] / denom;
    }
}
// result[i] = a[i] + b[i]*c[i]
extern "C"
__global__ void addProduct(int n, float *a, float *b, float *c, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] + b[i] * c[i];
}
// result[i] = a[i] + b[i]*c (vector + vector*scalar)
extern "C"
__global__ void addProduct_vs(int n, float *a, float *b, float c, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] + b[i] * c;
}
// result[i] = a[i] + b[i]/c[i]
extern "C"
__global__ void addRatio(int n, float *a, float *b, float *c, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] + b[i] / c[i];
}
// result[i] = a[i] - b[i]/c[i]
extern "C"
__global__ void subRatio(int n, float *a, float *b, float *c, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) result[i] = a[i] - b[i] / c[i];
}
// Reduces a float vector of `size` elements to one double per block,
// written to result[blockIdx.x].
// Requires dynamic shared memory of 3 * blockDim.x * sizeof(double):
//   sdata  — running sums, s2data — scratch, cdata — error-compensation
//   terms (the residual of each addition is carried forward, an
//   error-compensated summation in the style of Kahan/Neumaier).
// blockDim.x must be a power of two for the halving loop to pair all lanes.
extern "C"
__global__ void reduceFloatVectorToDoubleScalar(int size, void *data, double *result) {
    float *fdata = (float*) data;
    extern __shared__ double sdata[];
    double* s2data = sdata + blockDim.x;
    double* cdata = s2data + blockDim.x;
    // first reduction level: each thread loads and adds two elements
    // (zero-padded past `size`); cdata captures the bits lost by the add
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    sdata[tid] = (double)(i < size ? fdata[i] : 0) + (double)(i+blockDim.x < size ? fdata[i+blockDim.x] : 0);
    cdata[tid] = sdata[tid] - (double)(i < size ? fdata[i] : 0) - (double)(i+blockDim.x < size ? fdata[i+blockDim.x] : 0);
    __syncthreads();
    // tree reduction in shared memory, propagating the compensation terms
    for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
        if (tid < s) {
            s2data[tid] = sdata[tid] + sdata[tid + s] - cdata[tid] - cdata[tid+s];
            cdata[tid] = (s2data[tid] - sdata[tid]) - sdata[tid + s];
            sdata[tid] = s2data[tid];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) result[blockIdx.x] = sdata[0];
}
/*
 * Performs a partial reduction from `data` (float vector of length `size`)
 * to `result` (float vector of length = number of blocks): one partial sum
 * per block.  Same algorithm and shared-memory layout as
 * reduceFloatVectorToDoubleScalar above (3 * blockDim.x doubles of dynamic
 * shared memory; blockDim.x must be a power of two), but the per-block
 * result is stored back as float.
 */
extern "C"
__global__ void reducePartial(int size, void *data, void *result) {
    float *fdata = (float*) data;
    float *sum = (float*) result;
    extern __shared__ double sdata[];
    double* s2data = sdata + blockDim.x;
    double* cdata = s2data + blockDim.x;
    // first reduction level: each thread loads and adds two elements
    // (zero-padded past `size`); cdata captures the bits lost by the add
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    sdata[tid] = (double)(i < size ? fdata[i] : 0) + (double)(i+blockDim.x < size ? fdata[i+blockDim.x] : 0);
    cdata[tid] = sdata[tid] - (double)(i < size ? fdata[i] : 0) - (double)(i+blockDim.x < size ? fdata[i+blockDim.x] : 0);
    __syncthreads();
    // tree reduction in shared memory, propagating the compensation terms
    for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
        if (tid < s) {
            s2data[tid] = sdata[tid] + sdata[tid + s] - cdata[tid] - cdata[tid+s];
            cdata[tid] = (s2data[tid] - sdata[tid]) - sdata[tid + s];
            sdata[tid] = s2data[tid];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) sum[blockIdx.x] = sdata[0];
}
|
19,874 | #include <cuda.h>
#include <bits/stdc++.h>
#define BLOCK_SIZE 32
#define EPSILON 0.1
#define MAX_MASK_SIZE 15
using namespace std;
__constant__ float g_mask[MAX_MASK_SIZE];
// Approximate float equality: true iff |a - b| <= EPSILON.
bool cmp_float (float a, float b){
    return fabs(a - b) <= EPSILON;
}
// Fills vec[0..size) with uniform pseudo-random floats in [0, max_size].
void fill_vector_random (float *vec, int size, float max_size = 10.0){
    const float scale = static_cast<float>(RAND_MAX) / max_size;
    for (int i = 0; i < size; ++i)
        vec[i] = static_cast<float>(rand()) / scale;
}
// True iff every pair a[i], b[i] is approximately equal (per cmp_float).
bool cmp_vector(float *a, float *b, int size){
    for (int i = 0; i < size; ++i)
        if (!cmp_float(a[i], b[i]))
            return false;
    return true;
}
// Prints the vector one element per line, followed by a separator rule.
void print_vector (float *a, int size){
    for (int i = 0; i < size; ++i)
        cout << a[i] << endl;
    cout << "--------------" << endl;
}
// CPU reference 1-D convolution with zero padding:
//   ans[i] = sum_j vec[i - mask_size/2 + j] * mask[j]  (out-of-range taps
//   contribute nothing).
void convolution_kernel_seq (float *vec, float *mask, float *ans, int vec_size, int mask_size){
    for (int i = 0; i < vec_size; i++){
        // leftmost input index touched by this output element;
        // hoisted out of the inner loop (it does not depend on j)
        const int start = i - (mask_size / 2);
        float con_val = 0.0f;
        for (int j = 0; j < mask_size; j++){
            if (start + j >= 0 && start + j < vec_size)
                con_val += vec[start + j] * mask[j];
        }
        ans[i] = con_val;
    }
}
// 1-D convolution on the GPU, one thread per output element, zero padding.
__global__ void convolution_kernel_kernel (float *vec, float *mask, float *ans, int vec_size, int mask_size){
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the launch rounds the grid up to a multiple of BLOCK_SIZE, so
    // trailing threads must not write (previously an out-of-bounds store).
    if (pos >= vec_size) return;
    float con_val = 0.0f;
    int start = pos - (mask_size / 2);
    for (int i = 0; i < mask_size; i ++){
        if (start + i >= 0 && start + i < vec_size)
            con_val += vec[start + i] * mask[i];
    }
    ans[pos] = con_val;
}
// Host wrapper for the plain global-memory convolution: moves vec and mask
// to the device, runs one thread per element, and copies the answer back.
void convolution_kernel_con (float *vec, float *mask, float *ans, int vec_size, int mask_size){
    float *d_vec, *d_mask, *d_ans;
    const size_t vec_bytes = vec_size * sizeof(float);
    const size_t mask_bytes = mask_size * sizeof(float);
    cudaMalloc(&d_vec, vec_bytes);
    cudaMalloc(&d_mask, mask_bytes);
    cudaMalloc(&d_ans, vec_bytes);
    cudaMemcpy(d_vec, vec, vec_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mask, mask, mask_bytes, cudaMemcpyHostToDevice);
    // grid rounded up so every element gets a thread
    dim3 dimGrid(ceil(vec_size / float(BLOCK_SIZE)), 1, 1);
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    convolution_kernel_kernel<<<dimGrid, dimBlock>>>(d_vec, d_mask, d_ans, vec_size, mask_size);
    cudaDeviceSynchronize();
    cudaMemcpy(ans, d_ans, vec_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_vec);
    cudaFree(d_mask);
    cudaFree(d_ans);
}
// 1-D convolution reading the mask from __constant__ memory (g_mask); one
// thread per output element, zero padding at the edges.
__global__ void convolution_kernel_kernel_constant (float *vec, float *ans, int vec_size, int mask_size){
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: guard the rounded-up grid tail (previously an OOB store).
    if (pos >= vec_size) return;
    float con_val = 0.0f;
    int start = pos - (mask_size / 2);
    for (int i = 0; i < mask_size; i ++){
        if (start + i >= 0 && start + i < vec_size)
            con_val += vec[start + i] * g_mask[i];
    }
    ans[pos] = con_val;
}
// Host wrapper for the constant-memory variant: uploads the mask to the
// __constant__ symbol g_mask and the data to global memory, then launches.
void convolution_kernel_constant(float *vec, float *mask, float *ans, int vec_size, int mask_size){
    float *d_vec, *d_ans;
    const size_t vec_bytes = vec_size * sizeof(float);
    cudaMalloc(&d_vec, vec_bytes);
    cudaMalloc(&d_ans, vec_bytes);
    cudaMemcpy(d_vec, vec, vec_bytes, cudaMemcpyHostToDevice);
    // mask lives in constant memory (broadcast-friendly, read-only)
    cudaMemcpyToSymbol(g_mask, mask, mask_size * sizeof(float));
    dim3 dimGrid(ceil(vec_size / float(BLOCK_SIZE)), 1, 1);
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    convolution_kernel_kernel_constant<<<dimGrid, dimBlock>>>(d_vec, d_ans, vec_size, mask_size);
    // the blocking device->host copy synchronizes with the kernel
    cudaMemcpy(ans, d_ans, vec_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_vec);
    cudaFree(d_ans);
}
// Tiled 1-D convolution: each block stages its BLOCK_SIZE elements plus
// `offset` halo cells on each side into shared memory, then convolves with
// the __constant__ mask g_mask.  Requires mask_size <= MAX_MASK_SIZE and
// blockDim.x == BLOCK_SIZE.
__global__ void convolution_kernel_kernel_tiled (float *vec, float *ans, int vec_size, const int mask_size){
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int last_pos = (blockIdx.x - 1) * blockDim.x + threadIdx.x;  // left-halo source
    int next_pos = (blockIdx.x + 1) * blockDim.x + threadIdx.x;  // right-halo source
    int offset = mask_size / 2;
    float sum = 0.0f;   // BUG FIX: was uninitialized (garbage accumulator)
    __shared__ float s_vec[BLOCK_SIZE + MAX_MASK_SIZE -1];
    // left halo (zero padding before the first element)
    if (threadIdx.x >= blockDim.x - offset){
        s_vec[threadIdx.x - (blockDim.x - offset)] = (last_pos < 0) ? 0.0f : vec[last_pos];
    }
    // centre element; BUG FIX: guard the rounded-up grid tail so the last
    // block does not read past the end of vec
    s_vec[threadIdx.x + offset] = (pos < vec_size) ? vec[pos] : 0.0f;
    // right halo (zero padding past the last element)
    if (threadIdx.x < offset){
        s_vec[threadIdx.x + blockDim.x + offset] = (next_pos >= vec_size) ? 0.0f : vec[next_pos];
    }
    __syncthreads();   // reached by all threads: the guards above do not branch around it
    for (int i = 0; i < mask_size; i++)
        sum += g_mask[i] * s_vec[threadIdx.x + i];
    // BUG FIX: tail threads must not store out of bounds
    if (pos < vec_size)
        ans[pos] = sum;
}
// Host wrapper for the tiled shared-memory variant: mask goes to the
// __constant__ symbol g_mask, data to global memory, then the tiled kernel
// runs with one thread per output element.
void convolution_kernel_tiled(float *vec, float *mask, float *ans, int vec_size, int mask_size){
    float *d_vec, *d_ans;
    const size_t vec_bytes = vec_size * sizeof(float);
    cudaMalloc(&d_vec, vec_bytes);
    cudaMalloc(&d_ans, vec_bytes);
    cudaMemcpy(d_vec, vec, vec_bytes, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(g_mask, mask, mask_size * sizeof(float));
    dim3 dimGrid(ceil(vec_size / float(BLOCK_SIZE)), 1, 1);
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    convolution_kernel_kernel_tiled<<<dimGrid, dimBlock>>>(d_vec, d_ans, vec_size, mask_size);
    // the blocking device->host copy synchronizes with the kernel
    cudaMemcpy(ans, d_ans, vec_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_vec);
    cudaFree(d_ans);
}
// Benchmark driver: for vector sizes 32, 64, ..., max_size, times the
// sequential CPU convolution and the three GPU variants (global memory,
// constant memory, tiled shared memory) with a 9-tap mask, writing the
// sizes and per-variant timings to *.mio files.  GPU results are checked
// against the sequential reference with cmp_vector.
// NOTE(review): clock() measures host CPU time; the GPU time is captured
// only because each wrapper ends in a synchronizing cudaMemcpy.
int main(int argc, char **argv){
    if (argc < 2){
        cout << "Usage: ./convolution max_size" << endl;
        return 0;
    }
    const int max_size = atoi(argv[1]);
    srand (time (NULL));
    // one output stream per measured variant
    ofstream x ("x.mio"),
    y_seq ("y_seq.mio"),
    y_con_g("y_con_g.mio"),
    y_con_c("y_con_c.mio"),
    y_con_t("y_con_t.mio");
    clock_t begin, end;
    double secs;
    // warm-up launch: keeps CUDA context creation out of the timings below
    float t_vec[32], t_mask[3], t_ans[32];
    fill_vector_random (t_vec, 32);
    fill_vector_random (t_mask, 3);
    convolution_kernel_con(t_vec, t_mask, t_ans, 32, 3);
    for (int i = 32; i <= max_size; i += 32){
        // VLAs (compiler extension); c holds the reference, d each GPU result
        float vec[i], mask[9], c[i], d[i];
        fill_vector_random(vec, i);
        fill_vector_random(mask, 9);
        x << i << endl;
        begin = clock();
        convolution_kernel_seq(vec, mask, c, i, 9);
        end = clock();
        secs = double(end - begin) / CLOCKS_PER_SEC;
        y_seq << secs <<endl;
        begin = clock();
        convolution_kernel_con(vec, mask, d, i, 9);
        end = clock();
        secs = double(end - begin) / CLOCKS_PER_SEC;
        y_con_g << secs <<endl;
        // "SWW" flags a mismatch against the sequential reference
        if (!cmp_vector(c, d, i))
            cout << "SWW" << endl;
        begin = clock();
        convolution_kernel_constant(vec, mask, d, i, 9);
        end = clock();
        secs = double(end - begin) / CLOCKS_PER_SEC;
        y_con_c << secs <<endl;
        if (!cmp_vector(c, d, i))
            cout << "SWW" << endl;
        begin = clock();
        convolution_kernel_tiled(vec, mask, d, i, 9);
        end = clock();
        secs = double(end - begin) / CLOCKS_PER_SEC;
        y_con_t << secs <<endl;
        if (!cmp_vector(c, d, i))
            cout << "SWW" << endl;
    }
    return 0;
}
|
19,875 | /*******************
* npCase1: Matrix Multiplication
* Author : Fanny Nina-Paravecino
* Date : October 2016
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#define COL 1024
#define ROW 1024
double wallS0, wallS1, wallP0, wallP1;
float sum;
// Wall-clock time in seconds with microsecond resolution; returns 0 and
// prints a message if gettimeofday fails.
double getWallTime(){
    struct timeval tv;
    if (gettimeofday(&tv, NULL)) {
        printf("Error getting time\n");
        return 0;
    }
    return (double)tv.tv_sec + (double)tv.tv_usec * .000001;
}
// Computes C = A * B for row-major matrices: A is (row x k), B is (k x col),
// C is (row x col).  One thread per output element, guarded for partial
// blocks at the grid edges.
__global__ void matrixMulKernel( float *devA, float *devB, float *devC, int row, int col, const int k){
    int txID = blockIdx.x * blockDim.x + threadIdx.x; // column of devC
    int tyID = blockIdx.y * blockDim.y + threadIdx.y; // row of devC
    if ((txID < col) && (tyID < row)){
        float Pvalue = 0.0f;
        for(int w=0; w<k; w++){
            // BUG FIX: B and C must be indexed with their own row width
            // (col), not k.  The original only worked because ROW==COL==k.
            Pvalue += devA[tyID*k+w] * devB[w*col+txID];
        }
        devC[tyID*col+txID] = Pvalue;
    }
}
// Allocates device buffers, copies A and B over, launches matrixMulKernel
// with a (blockX x blockY) block shape, and copies C back.  Also times the
// whole device-side job with CUDA events and prints block/grid shapes.
void matrixMultiplication(float *a, float *b, float *c, int row, int col, int k, int blockX, int blockY)
{
    // Buffer sizes in bytes.
    int sizeA = row*k*sizeof(float);
    int sizeB = k*col*sizeof(float);
    int sizeC = row*col*sizeof(float);
    float *devA, *devB, *devC;
    // Event-based timing around allocation, transfer and kernel execution.
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMalloc((void**)&devA, sizeA);
    cudaMalloc((void**)&devB, sizeB);
    cudaMalloc((void**)&devC, sizeC);
    // Copy [A] and [B] from host memory to device memory.
    cudaMemcpy(devA, a, sizeA, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, b, sizeB, cudaMemcpyHostToDevice);
    dim3 dimBlock(blockX, blockY, 1);
    // BUG FIX: size the grid from the row/col parameters, not the global
    // COL/ROW macros, so the function works for any matrix shape.
    dim3 dimGrid((col+dimBlock.x-1)/dimBlock.x, (row+dimBlock.y-1)/dimBlock.y, 1);
    printf("\tBlock(%d, %d, %d)\n", dimBlock.x, dimBlock.y, dimBlock.z);
    printf("\tGrid(%d, %d, %d)\n", dimGrid.x, dimGrid.y, dimGrid.z);
    matrixMulKernel<<<dimGrid, dimBlock>>>(devA, devB, devC, row, col, k);
    // Transfer [C] back; this memcpy synchronizes with the kernel.
    cudaMemcpy(c, devC, sizeC, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("\tParallel Job time: %.2f ms", time);
    // BUG FIX: destroy the timing events (they were leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
}
// Compares the reference matrix `test` against `c` element by element.
// Prints the first mismatch found in each row (then moves on to the next
// row); returns true only when every element matches exactly.
bool checkResults(float *test, float *c, int row, int col){
    bool allEqual = true;
    for(int r = 0; r < row; r++){
        for(int q = 0; q < col; q++){
            const int idx = r * col + q;
            if(test[idx] != c[idx]){
                allEqual = false;
                printf("test[%d, %d] = %.2f \t c[%d, %d] = %.2f\n", r, q, test[idx], r, q, c[idx]);
                break;  // abandon this row, keep scanning the others
            }
        }
    }
    return allEqual;
}
// Fills A and B with random digits, computes the sequential reference
// product, runs the GPU version with an optional block shape taken from the
// command line (argv[1]=blockX, argv[2]=blockY, both default 32), and
// verifies the two results agree.
int main(int argC, char** argV)
{
    float *a, *b, *c, *test;
    // Matrix dimensions: A is (row x k), B is (k x col), C is (row x col).
    int row = ROW;
    int col = COL;
    int k = COL;
    a = (float *) malloc(row*k*sizeof(float));
    b = (float *) malloc(k*col*sizeof(float));
    c = (float *) malloc(row*col*sizeof(float));
    test = (float *) malloc(row*col*sizeof(float));
    // Initialize [A] and [B] with random values from 0 to 9.
    for(int i=0; i<row; i++){
        for(int j=0; j<k; j++){
            a[i*k+j] = rand()%10;
        }
    }
    for(int i=0; i<k; i++){
        for(int j=0; j<col; j++){
            b[i*col+j] = rand()%10;
        }
    }
    printf("Matrix Multiplication: \nA[%d, %d] * B[%d, %d] = C[%d, %d]\n", row, k, k, col, row, col);
    // Sequential reference implementation (timed with wall-clock time).
    wallS0 = getWallTime();
    for(int i=0; i<row; i++){
        for(int j=0; j<col; j++){
            sum = 0;
            for(int w=0; w<k; w++){
                sum += a[i*k+w] * b[w*col+j];
            }
            test[i*col+j]=sum;
        }
    }
    wallS1 = getWallTime();
    printf("\tSequential Job Time: %f ms\n", (wallS1-wallS0)*1000);
    // BUG FIX: guard argv accesses with argC -- the original read argV[2]
    // unconditionally, which is out of bounds when only one argument is
    // supplied (argv has argC+1 entries, the last being NULL).
    int blockX = 32;
    int blockY = 32;
    if (argC > 1 && argV[1] != NULL)
        blockX = atoi(argV[1]);
    if (argC > 2 && argV[2] != NULL)
        blockY = atoi(argV[2]);
    if (!blockX)
        blockX = 32;
    if (!blockY)
        blockY = 32;
    matrixMultiplication(a, b, c, row, col, k, blockX, blockY);
    // Verify the GPU result against the sequential reference.
    if(checkResults(test, c, row, col)){
        printf("\nResults are correct.\n");
    }
    // BUG FIX: release host allocations.
    free(a);
    free(b);
    free(c);
    free(test);
    return 0;
}
19,876 | #include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
#include <string.h>
#define MAXWORDS 20000
//#define MAXWORDS 10
// Writes every non-empty histogram bin to "assignment3_out".
// Each line is the N base-20 digits of the bin index (printed 1-based,
// decoded most-significant digit first) followed by the bin count.
void print_to_file(int *h_hist, int N)
{
    const char *fname = "assignment3_out";
    FILE *f = fopen(fname, "w");
    if (f == NULL) {  // BUG FIX: fopen was unchecked; a failure crashed in fprintf
        fprintf(stderr, "print_to_file: cannot open %s\n", fname);
        return;
    }
    int loop, loop1, a, b;
    for(loop = 0; loop < pow(20,N); loop++){
        if (h_hist[loop]>0){
            // a = current (leading) base-20 digit, b = remaining suffix.
            a=loop/(int)pow(20,N-1);
            b=loop;
            fprintf(f,"%d ",a+1);
            for (loop1=1;loop1 < N; loop1++){
                a=b-a*(int)pow(20,N-loop1);
                b=a;
                a/=(int)pow(20,N-loop1-1);
                fprintf(f,"%d ", a+1);
            }
            fprintf(f,"%d \n",h_hist[loop]);
        }
    }
    fclose(f);
}
int checkWord(char* word,char* words,int* count_array,int offset){
    // Pre-processes one token from fscanf into the packed word table.
    // Args:
    //   word        >> token read by fscanf
    //   words       >> flat array where every 20 chars holds one word
    //   count_array >> per-word character counts (indexed like `words`)
    //   offset      >> which 20-char slot to start writing at
    // Behavior: hyphens split the token into multiple words; a trailing
    // punctuation character is dropped.  Returns the new offset.
    size_t len = strlen(word);
    if (len == 0)
        // BUG FIX: an empty token made `strlen(word)-1` wrap around (size_t
        // underflow), causing an effectively unbounded out-of-bounds loop.
        return offset;
    int loop=0;
    int count=0;
    for (loop=0;loop<(int)len-1;loop++)
    {
        if (word[loop]=='-')
        {
            // Terminate the current word and start a new slot.
            // NOTE(review): the count is stored at the *new* offset here,
            // unlike the non-hyphen paths -- confirm this is intentional.
            words[offset*20+loop]=0;
            offset+=1;
            count_array[offset]=count;
            count=0;
        }
        else{
            /* Copy character */
            words[offset*20+loop]=word[loop];
            count+=1;
        }
    }
    if (ispunct((unsigned char)word[len-1]))
    {
        /* Skip this trailing punctuation character */
        words[offset*20+len-1]=0;
        count_array[offset]=count;
        offset+=1;
    }
    else{
        words[offset*20+len-1]=word[len-1];
        count+=1;
        words[offset*20+len]=0;
        count_array[offset]=count;
        offset+=1;
    }
    return offset;
}
// Histogram of N-grams of word lengths, partitioned variant for large N:
// the full 20^N-bin histogram is split into sub-histograms of
// `sub_hist_size` bins, and blockIdx.y selects which sub-range this block
// accumulates into its shared-memory copy before folding it into d_hist
// with atomics.  Requires sub_hist_size ints of dynamic shared memory and a
// zero-initialized d_hist of 20^N ints.
__global__ void nCountGram_optimal(int* d_count, int* d_hist, int N, int totalWordCount, int sub_hist_size){
extern __shared__ int buffer[];
int *temp = &buffer[0];
//__shared__ int temp[1024];
// Helper var
int index, j, p;
int a, b;
// a = 20^N, the total number of histogram bins.
a=1;
for (p=0;p<N;p++){
a*=20;
}
// Zero the shared sub-histogram (strided by the 1024-thread block).
for (p=0; p<sub_hist_size/1024 +1; p++){
if (threadIdx.x + p*1024 < sub_hist_size){
temp[threadIdx.x + p*1024] = 0;
}
}
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x ;//blockIdx.y*gridDim.y;
int offset = blockDim.x * gridDim.x*blockIdx.y*gridDim.y;
while (i < totalWordCount - N + 1)
{
// Build the base-20 bin index from N consecutive word lengths.
// Since 0,0 is invalid
index=-1;
b=a/20;
for (j = 0;j < N; j++){
index+=(d_count[i+j])*b;
b/=20;
}
// Only count indices falling in this block's sub-range.
// NOTE(review): the lower bound uses strict `>`, which appears to drop
// index == sub_hist_size*blockIdx.y for y > 0 -- confirm.
if ((index<sub_hist_size*(blockIdx.y+1)) && (index > sub_hist_size*blockIdx.y)){
//printf("Index %d",index);
atomicAdd( &temp[index - blockIdx.y*sub_hist_size], 1);
}
i += offset;
}
__syncthreads();
// Fold the shared sub-histogram into the global histogram.
for (p=0;p<sub_hist_size/1024+1;p++){
if (threadIdx.x + p*1024 < sub_hist_size){
atomicAdd( &(d_hist[threadIdx.x + sub_hist_size*blockIdx.y + p*1024]), temp[threadIdx.x + p*1024] );
if (d_hist[threadIdx.x+ sub_hist_size*blockIdx.y + p*1024]>0){
//printf("Hist val at %d is %d \n",threadIdx.x+sub_hist_size*blockIdx.y+p*1024,d_hist[threadIdx.x +sub_hist_size*blockIdx.y+ p*1024]);
}
}
}
__syncthreads();
}
// Histogram of N-grams of word lengths (lengths 1..20).  Each block keeps a
// full 20^N-bin sub-histogram in dynamic shared memory and folds it into the
// global d_hist with atomics at the end.  Requires 20^N ints of dynamic
// shared memory and a zero-initialized d_hist; only suitable for small N.
__global__ void nCountGram(int* d_count, int* d_hist, int N, int totalWordCount){
extern __shared__ int buffer[];
int *temp = &buffer[0];
//__shared__ int temp[1024];
// Helper var
int index, j, p;
int a, b;
// a = 20^N, the total number of histogram bins.
a=1;
for (p=0;p<N;p++){
a*=20;
}
//printf("t %d",threadIdx.x + 1*1024);
// Zero the shared histogram (strided by the 1024-thread block).
for (p=0; p<a/1024+1; p++){
if (threadIdx.x + p*1024< a){
temp[threadIdx.x + p*1024] = 0;
}
}
__syncthreads();
// Grid-stride walk over all N-gram start positions.
int i = threadIdx.x + blockIdx.x * blockDim.x;
int offset = blockDim.x * gridDim.x;
//printf("Offset %d",offset);
while (i < totalWordCount - N + 1)
{
// Build the base-20 bin index from N consecutive word lengths.
// Since 0,0 is invalid
index=-1;
b=a/20;
for (j = 0;j < N; j++){
index+=(d_count[i+j])*b;
b/=20;
}
atomicAdd( &temp[index], 1);
i += offset;
//printf("Index %d",index);
}
__syncthreads();
// Fold the block-local histogram into the global one.
for (p=0;p<a/1024+1;p++){
if (threadIdx.x + p*1024< a){
atomicAdd( &(d_hist[threadIdx.x + p*1024]), temp[threadIdx.x + p*1024] );
if (temp[threadIdx.x+p*1024]>0){
//printf("Hist val at %d is %d \n",threadIdx.x+p*1024,d_hist[threadIdx.x + p*1024]);
}
}
}
__syncthreads();
}
// Reads words from a text file, converts them to per-word character counts,
// and builds a histogram of N-grams of word lengths on the GPU.
// Usage: ./prog N filename
int main(int argc,char **argv) {
    // BUG FIX: argv[1]/argv[2] were read without checking argc.
    if (argc < 3) {
        fprintf(stderr, "Usage: %s N filename\n", argv[0]);
        return 1;
    }
    float time_spent;
    int N = atoi(argv[1]);
    char *filename = argv[2];
    int count_array[MAXWORDS];   // per-word character counts
    char words[MAXWORDS * 20];   // flat word table, 20 chars per word
    char curWord[20];            // fscanf scratch buffer
    int totalWordCount = 0;
    FILE *ipf = fopen(filename, "r");
    if (ipf == NULL) {           // BUG FIX: fopen was unchecked
        fprintf(stderr, "Cannot open %s\n", filename);
        return 1;
    }
    while (fscanf(ipf, "%s ", curWord) != EOF && totalWordCount < MAXWORDS) {
        totalWordCount = checkWord(curWord, words, count_array, totalWordCount);
    }
    fclose(ipf);
    int histBins = (int)pow(20, N);
    // Host-side result histogram.
    int *h_hist = (int*)malloc(histBins * sizeof(int));
    // Device arrays; copy the count array over.
    int *d_count; cudaMalloc(&d_count, MAXWORDS * sizeof(int));
    cudaMemcpy(d_count, count_array, MAXWORDS * sizeof(int), cudaMemcpyHostToDevice);
    int *d_hist; cudaMalloc(&d_hist, histBins * sizeof(int));
    // BUG FIX: cudaMalloc does not zero memory, and the kernels accumulate
    // into d_hist with atomicAdd -- it must start from zero.
    cudaMemset(d_hist, 0, histBins * sizeof(int));
    // GPU timing.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    if (N < 5) {
        // Small N: the whole histogram fits in one shared-memory copy.
        int threadsPerBlock = 1024;
        int blocksPerGrid = ((pow(20, N) + threadsPerBlock - 1) / threadsPerBlock);
        cudaEventRecord(start, 0);
        nCountGram<<<blocksPerGrid, threadsPerBlock, (unsigned int)pow(20,N)*sizeof(int)>>>(d_count, d_hist, N, totalWordCount);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time_spent, start, stop);
        printf("\nTime spent in hist binning %f\n", time_spent);
    }
    else {
        // Large N: split the histogram into sub-histograms selected by
        // blockIdx.y.
        int sub_hist_size = 160000;
        int sub_hists = histBins / sub_hist_size;
        int threadsPerBlock = 1024;
        // BUG FIX: the original `dim3 g = ((x), y);` used the comma operator,
        // producing a (y, 1, 1) grid and silently dropping the x dimension.
        dim3 blocksPerGrid((unsigned)((histBins / sub_hists + threadsPerBlock - 1) / threadsPerBlock), sub_hists);
        cudaEventRecord(start, 0);
        nCountGram_optimal<<<blocksPerGrid, threadsPerBlock, sub_hist_size*sizeof(int)>>>(d_count, d_hist, N, totalWordCount, sub_hist_size);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time_spent, start, stop);
        printf("\nTime spent in hist binning %f\n", time_spent);
    }
    // Copy the result back and dump it to the output file.
    cudaMemcpy(h_hist, d_hist, histBins * sizeof(int), cudaMemcpyDeviceToHost);
    print_to_file(h_hist, N);
    // Cleanup.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_count);
    cudaFree(d_hist);
    free(h_hist);
    return 0;
}
|
19,877 | #include <stdio.h>
#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <cuda_runtime.h>
// Prints name and PCI location of every visible CUDA device, tagged with the
// caller's (MPI) rank.  Tolerates machines with no CUDA device.
extern "C" void listdev( int rank ){
    int deviceCount = 0;
    cudaError_t status = cudaGetDeviceCount( &deviceCount );
    assert( status == cudaSuccess || status == cudaErrorNoDevice );
    //printf( "rank %d, cnt %d\n", rank, deviceCount );
    for (int d = 0; d < deviceCount; ++d) {
        cudaDeviceProp properties;
        status = cudaGetDeviceProperties( &properties, d );
        assert( status == cudaSuccess );
        printf( "rank %d, dev %d, prop %s, pci %d, %d, %d\n",
                rank, d,
                properties.name,
                properties.pciBusID,
                properties.pciDeviceID,
                properties.pciDomainID );
    }
}
|
19,878 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
// RG*RG*MAXN must fit within mytype
#define MAXN 100000
#define RG 10
#define USECPSEC 1000000ULL
#define nTPB 256
#define DSIZE 8192
//cuda error checking macros
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
typedef float mytype;
// host function to compute convolution reference results
// Host reference: full (non-circular) 1D convolution, C[j] = sum A[i]*B[j-i]
// for j in [0, 2N-2].  The branch keeps (j - i) inside [0, N-1].
void conv(const mytype *A, const mytype *B, mytype* C, int N) {
    for (int j = 0; j < (2*N) - 1; ++j) {          // each output column
        mytype acc = 0;
        for (int i = 0; i < N; ++i) {              // walk down the column
            const bool lowerHalf = (j < N) && (i <= j);
            const bool upperHalf = (j >= N) && (i > (j - N));
            if (lowerHalf || upperHalf)
                acc += A[i] * B[j - i];
        }
        C[j] = acc;
    }
}
// host function - alternate realization
// Alternate host reference: scatter formulation of the same convolution.
// C must be zero-initialized by the caller; each product A[i]*B[j] is
// accumulated into output position i+j.
void conv2(const mytype *A, const mytype *B, mytype* C, int N) {
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; ++j) {
            C[i + j] += A[i] * B[j];
        }
    }
}
// timing measurement function
// Microsecond timer: returns the current wall-clock time in microseconds
// minus `prev`.  Call with 0 to get a start stamp, then with that stamp to
// get an elapsed interval.
unsigned long long dtime_usec(unsigned long long prev){
    timeval now;
    gettimeofday(&now, 0);
    const unsigned long long stamp = (now.tv_sec * USECPSEC) + now.tv_usec;
    return stamp - prev;
}
// convolution GPU kernel - not using constant memory
// Task 1
// Full 1D convolution on the GPU: one thread per output element idx in
// [0, 2N-2], C[idx] = sum A[i]*B[idx-i] with the condition keeping idx-i
// inside [0, N-1].  A and B are read from global memory (no constant cache).
__global__ void conv_Kernel(const mytype * __restrict__ A, const mytype * __restrict__ B, mytype *C, const int N){
int idx = threadIdx.x+blockDim.x*blockIdx.x;
if (idx < (2*N)-1){
mytype my_sum = 0;
for (int i = 0; i < N; i++)
if (((idx < N) && (i <= idx)) || ((idx >= N) && (i > (idx-N)))) my_sum += A[i]*B[idx-i];
C[idx] = my_sum;
}
}
#if (DSIZE < 8193)
__constant__ mytype cA[DSIZE];
// convolution GPU kernel - using constant memory
// Task 1 Optimized
// Same convolution as conv_Kernel, but A is read from constant memory (cA,
// populated with cudaMemcpyToSymbol) -- only compiled when DSIZE fits the
// 64 KB constant-memory limit.
__global__ void conv_const_Kernel(const mytype * __restrict__ B, mytype *C, const int N){
int idx = threadIdx.x+blockDim.x*blockIdx.x;
if (idx < (2*N)-1){
mytype my_sum = 0;
for (int i = 0; i < N; i++)
if (((idx < N) && (i <= idx)) || ((idx >= N) && (i > (idx-N)))) my_sum += cA[i]*B[idx-i];
C[idx] = my_sum;
}
}
#endif
// Benchmarks the GPU convolution against the host reference: fills A and B
// with random digits, times both implementations, validates element-wise
// equality and prints the speedup.
int main(int argc, char *argv[]){
  mytype *d_A, *A, *d_B, *B, *d_C, *C, *h_C;
  int my_N = DSIZE;
  if ((my_N < 1) || (my_N > MAXN)) {printf("N out of range\n"); return 1;}
  // allocate host data
  A   = (mytype *)malloc(my_N*sizeof(mytype));
  B   = (mytype *)malloc(my_N*sizeof(mytype));
  C   = (mytype *)malloc(((2*my_N)-1)*sizeof(mytype));
  h_C = (mytype *)malloc(((2*my_N)-1)*sizeof(mytype));
  // allocate device data
  CUDA_CALL(cudaMalloc(&d_A, my_N*sizeof(mytype)));
  CUDA_CALL(cudaMalloc(&d_B, my_N*sizeof(mytype)));
  CUDA_CALL(cudaMalloc(&d_C, ((2*my_N)-1)*sizeof(mytype)));
  // initialize host input data with small integers (exact in float)
  for (int i=0; i < my_N; i++){
    A[i] = rand()%RG;
    B[i] = rand()%RG;}
  // zero out host result data
  for (int i=0; i < (2*my_N)-1; i++){
    C[i] = 0;
    h_C[i] = 0;}
  // BUG FIX: dtime_usec returns unsigned long long; storing it in plain
  // unsigned long truncates on 32-bit platforms.  Formats updated to %llu.
  unsigned long long cpu_time = dtime_usec(0);
  conv(A, B, C, my_N);
  cpu_time = dtime_usec(cpu_time);
  // initialize device result data
  CUDA_CALL(cudaMemset(d_C, 0, ((2*my_N)-1)*sizeof(mytype)));
  unsigned long long gpu_time = dtime_usec(0);
  // copy host input data to device (constant memory when it fits)
#if (DSIZE < 8193)
  CUDA_CALL(cudaMemcpyToSymbol(cA, A, my_N*sizeof(mytype)));
#else
  CUDA_CALL(cudaMemcpy(d_A, A, my_N*sizeof(mytype), cudaMemcpyHostToDevice));
#endif
  CUDA_CALL(cudaMemcpy(d_B, B, my_N*sizeof(mytype), cudaMemcpyHostToDevice));
  // run the convolution kernel on the GPU
#if (DSIZE < 8193)
  conv_const_Kernel<<<(((2*my_N)-1)+nTPB-1)/nTPB,nTPB>>>(d_B, d_C, my_N);
#else
  conv_Kernel<<<(((2*my_N)-1)+nTPB-1)/nTPB,nTPB>>>(d_A, d_B, d_C, my_N);
#endif
  CUDA_CHECK();
  // copy results from device to host (synchronizes with the kernel)
  CUDA_CALL(cudaMemcpy(h_C, d_C, ((2*my_N)-1)*sizeof(mytype), cudaMemcpyDeviceToHost));
  gpu_time = dtime_usec(gpu_time);
  // check validity of results
  for (int i = 0; i < ((2*my_N)-1); i++) if (C[i] != h_C[i]) {printf("FAIL at %d, cpu: %f, gpu %f\n", i, C[i], h_C[i]); return 1;}
  // print timing and speed comparison
  printf("PASS. cpu time: %llu us, gpu time: %llu us\n", cpu_time, gpu_time);
  printf("Speedup: cpu/gpu = %f\n", cpu_time/(float)gpu_time);
  // BUG FIX: release host and device allocations explicitly.
  free(A); free(B); free(C); free(h_C);
  CUDA_CALL(cudaFree(d_A));
  CUDA_CALL(cudaFree(d_B));
  CUDA_CALL(cudaFree(d_C));
  return 0;
}
|
19,879 | #include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<limits.h>
#include<algorithm>
using namespace std;
#define INF INT_MAX-1
int tilesize[2] = {2, INT_MAX};
int rowSize;
// Prints the distance matrix `d` as a rowSize x rowSize tab-separated grid.
// BUG FIX: both loop bounds were hard-coded to 32 while indexing used the
// global rowSize -- now the full matrix is printed for any rowSize.
void print_matrix(float *d)
{
    int i, j;
    for (i = 0; i < rowSize; i++)
    {
        for (j = 0; j < rowSize; j++)
            printf("%0.1f\t", d[i * rowSize + j]);
        puts("");
    }
}
// D-step kernel of tiled Floyd-Warshall: relaxes one full row per block
// (row = blockIdx.y) against pivot k:
//   d(row, col) = min(d(row, col), d(row, k) + d(k, col)).
__global__ void Dloop_FW(float *d_a, int k, int rowSize)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: intermed was an int (truncating float distances) and
    // __syncthreads() sat inside the `threadIdx.x == 0` branch, which is a
    // divergent barrier (UB); the out-of-range threads also returned before
    // the barrier.  Thread 0 now publishes d(row, k), everyone syncs, then
    // the guarded threads update their element.
    __shared__ float intermed;
    if (threadIdx.x == 0) {
        intermed = d_a[rowSize * blockIdx.y + k];
    }
    __syncthreads();
    if (col >= rowSize)
        return;
    d_a[blockIdx.y * rowSize + col] = fmin(d_a[blockIdx.y * rowSize + col], intermed + d_a[k * rowSize + col]);
}
// Flat D-step driver: launches one grid per pivot k in the tile's k-range;
// each launch relaxes all rows against that pivot.
void FW_D_loop(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int size)
{
    const int threadsPerBlock = 256;
    dim3 grid((rowSize + threadsPerBlock - 1) / threadsPerBlock, rowSize);
    const int kEnd = vRowStart + size;
    for (int k = vRowStart; k < kEnd; ++k)
    {
        Dloop_FW<<<grid, threadsPerBlock>>>(d_a, k, rowSize);
        cudaThreadSynchronize();
    }
}
// Recursive D-step of the tiled Floyd-Warshall decomposition: once the tile
// is small enough run the flat GPU loop, otherwise recurse over r x r x r
// sub-tiles at the next tiling depth d+1.
void DFW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d)
{
    const int r = tilesize[d];
    if (r >= currSize) {
        FW_D_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize);
        return;
    }
    const int sub = currSize / r;
    for (int k = 0; k < r; ++k)
        for (int i = 0; i < r; ++i)
            for (int j = 0; j < r; ++j)
                DFW(d_a, i * sub, j * sub, i * sub, k * sub, k * sub, j * sub, sub, d + 1);
}
// C-step kernel: relaxes column j using pivot k, one thread per row:
//   d(t, j) = min(d(t, j), d(t, k) + d(k, j))  for t = threadIdx.x.
// Launched as <<<1, rowSize>>>.
__global__ void Cloop_FW(float *d_a, int j, int k, int rowSize)
{
    // BUG FIX: intermed was an int (truncating float distances) and
    // __syncthreads() sat inside the divergent `threadIdx.x == k` branch;
    // other threads could read intermed before thread k wrote it.  Thread k
    // now publishes d(k, j), everyone syncs, then all rows update once.
    __shared__ float intermed;
    if (threadIdx.x == k) {
        intermed = d_a[k * rowSize + j];
    }
    __syncthreads();
    d_a[threadIdx.x * rowSize + j] = fmin(d_a[threadIdx.x * rowSize + j], d_a[threadIdx.x * rowSize + k] + intermed);
}
// Flat C-step driver: for every pivot k and every column j in the tile,
// launch one single-block kernel that relaxes that whole column.
void FW_C_loop(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int size)
{
    const int kEnd = vRowStart + size;
    const int jEnd = xColStart + size;
    for (int k = vRowStart; k < kEnd; ++k)
    {
        for (int j = xColStart; j < jEnd; ++j)
        {
            Cloop_FW<<<1, rowSize>>>(d_a, j, k, rowSize);
            cudaThreadSynchronize();
        }
    }
}
// Recursive C-step: at the base tile size run the flat C loop; otherwise
// for each pivot block k first recurse on the C sub-problems, then clean up
// the remaining tiles with D-steps.
void CFW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d)
{
    const int r = tilesize[d];
    if (r >= currSize) {
        FW_C_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize);
        return;
    }
    const int sub = currSize / r;
    for (int k = 0; k < r; ++k) {
        for (int i = 0; i < r; ++i)
            CFW(d_a, i * sub, k * sub, i * sub, k * sub, k * sub, k * sub, sub, d + 1);
        for (int i = 0; i < r; ++i)
            for (int j = 0; j < r; ++j)
                if (j != k)
                    DFW(d_a, i * sub, j * sub, i * sub, k * sub, k * sub, j * sub, sub, d + 1);
    }
}
// B-step kernel: relaxes row i using pivot k, one thread per column:
//   d(i, t) = min(d(i, t), d(i, k) + d(k, t))  for t = threadIdx.x.
// Launched as <<<1, rowSize>>>.
__global__ void Bloop_FW(float *d_a, int i, int k, int colSize)
{
    // BUG FIX: intermed was an int (truncating float distances) and
    // __syncthreads() sat inside the divergent `threadIdx.x == k` branch;
    // other threads could read intermed before thread k wrote it.  Thread k
    // now publishes d(i, k), everyone syncs, then all columns update once.
    __shared__ float intermed;
    if (threadIdx.x == k) {
        intermed = d_a[i * colSize + k];
    }
    __syncthreads();
    d_a[i * colSize + threadIdx.x] = fmin(intermed + d_a[k * colSize + threadIdx.x], d_a[i * colSize + threadIdx.x]);
}
// Flat B-step driver: for every pivot k and every row i in the tile,
// launch one single-block kernel that relaxes that whole row.
void FW_B_loop(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int size)
{
    const int kEnd = vRowStart + size;
    const int iEnd = xRowStart + size;
    for (int k = vRowStart; k < kEnd; ++k)
    {
        for (int i = xRowStart; i < iEnd; ++i)
        {
            Bloop_FW<<<1, rowSize>>>(d_a, i, k, rowSize);
            cudaThreadSynchronize();
        }
    }
}
// Recursive B-step: at the base tile size run the flat B loop; otherwise
// for each pivot block k first recurse on the B sub-problems, then clean up
// the remaining tiles with D-steps.
void BFW(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d)
{
    const int r = tilesize[d];
    if (r >= currSize) {
        FW_B_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize);
        return;
    }
    const int sub = currSize / r;
    for (int k = 0; k < r; ++k) {
        for (int j = 0; j < r; ++j)
            BFW(d_a, k * sub, j * sub, k * sub, k * sub, k * sub, j * sub, sub, d + 1);
        for (int i = 0; i < r; ++i)
            for (int j = 0; j < r; ++j)
                if (i != k)
                    DFW(d_a, i * sub, j * sub, i * sub, k * sub, k * sub, j * sub, sub, d + 1);
    }
}
// Relaxation of a single matrix cell: d(i,j) = min(d(i,j), d(i,k) + d(k,j)).
__global__ void Aloop_FW(float *d_a, int i, int j, int k, int rowSize)
{
    const int ij = i * rowSize + j;
    const float viaK = d_a[i * rowSize + k] + d_a[k * rowSize + j];
    d_a[ij] = fmin(viaK, d_a[ij]);
}
// Flat A-step driver: plain triple-nested Floyd-Warshall over the tile,
// launching a single-thread kernel per cell update (slow but simple).
void FW_A_loop(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int size)
{
    const int kEnd = vRowStart + size;
    const int iEnd = xRowStart + size;
    const int jEnd = xColStart + size;
    for (int k = vRowStart; k < kEnd; ++k)
        for (int i = xRowStart; i < iEnd; ++i)
            for (int j = xColStart; j < jEnd; ++j)
            {
                Aloop_FW<<<1, 1>>>(d_a, i, j, k, rowSize);
                cudaThreadSynchronize();
            }
}
// Recursive A-step (top level of the tiled Floyd-Warshall): for each pivot
// block k, recurse on the diagonal tile (A), then its row (B), its column
// (C), and finally the remaining tiles (D).
void AFW(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d)
{
    const int r = tilesize[d];
    if (r >= currSize) {
        FW_A_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize);
        return;
    }
    const int sub = currSize / r;
    for (int k = 0; k < r; ++k) {
        AFW(d_a, k * sub, k * sub, k * sub, k * sub, k * sub, k * sub, sub, d + 1);
        for (int j = 0; j < r; ++j)
            if (j != k)
                BFW(d_a, k * sub, j * sub, k * sub, k * sub, k * sub, j * sub, sub, d + 1);
        for (int i = 0; i < r; ++i)
            if (i != k)
                CFW(d_a, i * sub, k * sub, i * sub, k * sub, k * sub, k * sub, sub, d + 1);
        for (int i = 0; i < r; ++i)
            for (int j = 0; j < r; ++j)
                if (i != k && j != k)
                    DFW(d_a, i * sub, j * sub, i * sub, k * sub, k * sub, j * sub, sub, d + 1);
    }
}
// Builds a 32x32 test distance matrix, runs the tiled Floyd-Warshall on the
// GPU, and prints the resulting shortest-path matrix.
int main(int argc, char *argv[])
{
    float *d_a;
    float *a;
    rowSize = 32;
    int colSize = rowSize;
    int i, j;
    cudaError_t err = cudaSuccess;
    size_t totalSize = rowSize * colSize * sizeof(float);
    a = (float *) malloc(totalSize);
    if (!a)
    {
        printf("Unable to allocate memory for host array\n");
        return 1;
    }
    // BUG FIX: the original used cudaMallocPitch but every kernel and both
    // memcpys below index d_a as a dense rowSize-wide array and ignore the
    // returned pitch (it only worked because a 128-byte row needs no
    // padding).  A plain cudaMalloc matches the actual access pattern.
    err = cudaMalloc(&d_a, totalSize);
    if (!d_a)
    {
        printf("memory failed for cudaMalloc");
        free(a);
        return 1;
    }
    if (err != 0) {
        printf("%s-%d", cudaGetErrorString(err), 3);
    }
    // Diagonal = 0; off-diagonal weights from a small deterministic pattern.
    for (i = 0; i < rowSize; i++)
        for (j = 0; j < colSize; j++)
        {
            if (i == j) {
                a[i * rowSize + j] = 0;
            }
            else {
                a[i * rowSize + j] = (i + j) % 5 ? (i + j) : (i + j) % 7;
            }
        }
    err = cudaMemcpy(d_a, a, totalSize, cudaMemcpyHostToDevice);
    AFW(d_a, 0, 0, 0, 0, 0, 0, rowSize, 0);
    err = cudaMemcpy(a, d_a, totalSize, cudaMemcpyDeviceToHost);
    print_matrix(a);
    // BUG FIX: release host and device allocations.
    cudaFree(d_a);
    free(a);
    return 0;
}
|
19,880 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (see file header: "Do not
// modify").  Runs single-threaded (launched <<<1,1>>>), performs an
// intentionally arbitrary chain of float operations driven by the inputs,
// and prints the final value of `comp` for comparison across compilers.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float* var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27) {
float tmp_1 = (var_2 / cosf(var_3 + +1.2924E-37f - +0.0f - (var_4 * var_5 + var_6)));
comp = tmp_1 * -1.9477E34f / var_7 / (+1.6999E-2f - sinf((var_8 - +1.5392E-24f)));
comp = (+1.7508E36f * (+1.9787E13f + var_9));
for (int i=0; i < var_1; ++i) {
float tmp_2 = cosf((var_11 - -1.7486E23f));
var_10[i] = +0.0f;
comp += var_10[i] / tmp_2 - -1.8188E-35f - (-1.6092E-36f + (var_12 - sqrtf((+1.5480E-44f + (-1.8010E26f / (var_13 / (+0.0f + var_14)))))));
}
if (comp > var_15 - (var_16 - (-0.0f / var_17 + var_18))) {
float tmp_3 = -1.2566E-16f;
comp = tmp_3 * +0.0f + var_19 + (-0.0f + +1.6479E-36f + var_20);
comp += ldexpf(acosf(-1.1331E-12f * (-1.3463E-37f * +1.6486E-37f - +1.2464E-41f)), 2);
}
if (comp >= -1.9416E-5f / (-1.7141E-9f - (var_21 / ceilf(-0.0f)))) {
comp += (+1.0532E-37f * (var_22 - -0.0f + sinf(var_23 - (var_24 - -1.7583E-26f))));
float tmp_4 = (var_25 + +1.3359E35f + (var_26 - var_27 * -1.7313E-35f));
comp = tmp_4 / +1.7720E36f * (+1.3877E-9f / ldexpf(+1.8074E-36f, 2));
}
printf("%.17g\n", comp);
}
/* Allocates a 10-element float array on the host and fills it with v.
 * Returns NULL if the allocation fails (BUG FIX: malloc was unchecked,
 * so a failure dereferenced a null pointer in the fill loop). */
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  if (ret == NULL)
    return NULL;
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}
/* Auto-generated driver (file header says "Do not modify"): parses 28
 * positional command-line values (floats, one int at argv[2], and a
 * 10-element array seeded from argv[11]) and forwards them to the generated
 * kernel.  NOTE(review): argc is never validated -- running with fewer than
 * 28 arguments reads past argv -- and tmp_11 is never freed. */
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float* tmp_11 = initPointer( atof(argv[11]) );
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28);
cudaDeviceSynchronize();
return 0;
}
|
// --------------------------------------------------------------------------
// Fixture kernels exercising __restrict__ pointer usage.
// NOTE(review): these appear to be deliberately minimal inputs for a
// restrict-qualifier analysis/checker -- bare expression statements such as
// `a[0];` and empty branches are intentional; do not "fix" them.
// --------------------------------------------------------------------------
__global__ void test_restrict() {
int a[5];
int* __restrict__ pa = a;
pa[0] = 1;
a[0];
}
__global__ void test_restrict_args(int* __restrict__ a, int* __restrict__ b, int n) {
for (int i = 0; i < n; i++) {
a[i] = b[i];
}
}
__global__ void test_no_restrict_violation(int* __restrict__ a, int* __restrict__ b, int* __restrict__ c, int n) {
// b == c, but only reads happen
for (int i = 0; i < n; i++) {
a[i] = b[i] + c[i];
}
}
// Read patterns: unconditional, conditionally dead, conditionally live,
// inside for/while loops, and in branch/loop conditions.
__global__ void test_restrict_no_reads(int* __restrict__ a, int* __restrict__ b) {
a[3] = 42;
a[0] = 5;
if (a[0] == 6) {
b[2];
b[3];
}
}
__global__ void test_restrict_conditional_reads(int* __restrict__ a, int* __restrict__ b) {
a[3] = 42;
a[0] = 5;
if (a[0] == 5) {
b[2];
b[3];
}
}
__global__ void test_restrict_reads_in_for(int* __restrict__ a, int* __restrict__ b) {
a[3] = 42;
for (int i = 1; i < 5; i++) {
b[i];
}
}
__global__ void test_restrict_reads_in_while(int* __restrict__ a, int* __restrict__ b) {
a[3] = 42;
int i = 0;
while (i++ < 4) {
b[i];
}
}
__global__ void test_restrict_read_in_condition(int* __restrict__ a, int* __restrict__ b) {
a[3] = 42;
if (b[3] == 42) {
}
}
// Write patterns mirroring the read cases above.
__global__ void test_restrict_no_writes(int* __restrict__ a, int* __restrict__ b) {
a[3];
a[0];
int x = 5;
if (x == 6) {
b[2] = 42;
b[3] = 42;
}
}
__global__ void test_restrict_conditional_writes(int* __restrict__ a, int* __restrict__ b) {
a[3];
a[0];
int x = 5;
if (x == 5) {
b[2] = 42;
b[3] = 42;
}
}
__global__ void test_restrict_writes_in_for(int* __restrict__ a, int* __restrict__ b) {
a[3];
for (int i = 1; i < 5; i++) {
b[i] = 42;
}
}
__global__ void test_restrict_writes_in_while(int* __restrict__ a, int* __restrict__ b) {
a[3];
int i = 0;
while (i++ < 4) {
b[i] = 42;
}
}
__global__ void test_restrict_read_in_while_condition(int* __restrict__ a, int* __restrict__ b) {
a[3] = 42;
while (b[3] == 42) {
b[3] = 0;
}
}
// Accesses indexed by a built-in thread variable.
__global__ void test_restrict_builtin_var_read(int* __restrict__ a, int* __restrict__ b) {
a[3] = 42;
b[threadIdx.x];
}
__global__ void test_restrict_builtin_var_write(int* __restrict__ a, int* __restrict__ b) {
a[3];
b[threadIdx.x] = 42;
}
// Reads through one restrict pointer feeding a write through another.
__global__ void test_restrict_sum(int* __restrict__ a, int* __restrict__ b, int* __restrict__ c) {
*a = 5;
*b = 6;
*c = *a + *b;
}
__global__ void test_restrict_self_assignment(int* __restrict__ a, int* __restrict__ b) {
*a = *b;
} |
19,882 | #include "includes.h"
// Element-wise vector addition, c[i] = a[i] + b[i], indexed by threadIdx.x
// only -- so a single block of >= n threads is assumed by the launcher.
__global__ void add_thread(int *a, int *b, int *c){
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
} |
19,883 | #include "knn.cuh"
#include "morton.cuh"
#include "coords.cuh"
#include "float3math.cuh"
#define PI 3.145927
// Quantizes a float3 into a uint4, scaling each component by 2^30 and
// carrying the point's original index in .w.
// NOTE(review): assumes each component is non-negative and < 4 so the scaled
// value fits in 32 bits -- confirm with callers.
__host__ __device__
uint4 toUint4(float3 value, uint32_t index) {
    const uint32_t scale = (1u << 30);
    // BUG FIX: float -> uint32_t narrowing inside a braced initializer is
    // ill-formed in C++11; make the conversions explicit.
    uint4 result = {static_cast<uint32_t>(value.x * scale),
                    static_cast<uint32_t>(value.y * scale),
                    static_cast<uint32_t>(value.z * scale),
                    index};
    return result;
}
// Builds the 4x4 matrix that re-expresses coordinates given in basis `from`
// in terms of basis `to`.  Each source vector t (from.x, from.y, from.z,
// then the zero/origin row) is decomposed as t = e1*u + e2*v + e3*w via
// Cramer's rule using scalar triple products; the coefficients become one
// row of the row-major output matrix (last column is 0,0,0,1).
__device__
void calculateTransformMatrixKNN(CoordinateSystem from, CoordinateSystem to, float *matrix) {
float3 u = to.x;
float3 v = to.y;
float3 w = to.z;
for (int i = 0; i < 4; i++) {
// Pick the source vector for this row (row 3 uses the zero vector).
float3 t;
if(i == 0) {
t = from.x;
} else if(i == 1) {
t = from.y;
} else if(i == 2) {
t = from.z;
} else {
float3 tmp = {0,0,0};
t = tmp;
}
// Cramer determinants; d0 = det[u v w] is loop-invariant.
float d0 = dot(u, cross(v,w));
float d1 = dot(t, cross(v,w));
float d2 = dot(u, cross(t,w));
float d3 = dot(u, cross(v,t));
float e1 = d1/d0;
float e2 = d2/d0;
float e3 = d3/d0;
matrix[i*4+0] = e1;
matrix[i*4+1] = e2;
matrix[i*4+2] = e3;
if(i == 3) {
matrix[i*4+3] = 1;
} else {
matrix[i*4+3] = 0;
}
}
}
// Transforms point `a` by the 4x4 matrix: treats `a` as the homogeneous
// point (x, y, z, 1), multiplies it as a row vector against the matrix, and
// returns the first three components of the product.
__host__ __device__
float3 multiply4x4x3KNN(float *matrix, float3 a) {
    float3 result;
    result.x = a.x * matrix[0] + a.y * matrix[4] + a.z * matrix[8]  + matrix[12];
    result.y = a.x * matrix[1] + a.y * matrix[5] + a.z * matrix[9]  + matrix[13];
    result.z = a.x * matrix[2] + a.y * matrix[6] + a.z * matrix[10] + matrix[14];
    return result;
}
// Returns an (unnormalized) vector perpendicular to `normal`: two
// components are fixed to 1 and the third is solved from
// dot(normal, tangent) = 0, choosing a denominator component that is
// safely non-zero.
__host__ __device__
float3 findArbitraryTangent(float3 normal) {
    // BUG FIX: the solved component was missing its minus sign, so the
    // returned vector was NOT perpendicular to the normal
    // (e.g. dot(n, t) came out as 2*(n.x + n.y) instead of 0).
    if(fabs(normal.z) > EPSILON) {
        float x = 1.0;
        float y = 1.0;
        float z = -(normal.x + normal.y)/normal.z;
        float3 result = {x,y,z};
        return result;
    } else if(fabs(normal.y) > EPSILON) {
        float x = 1.0;
        float z = 1.0;
        float y = -(normal.x + normal.z)/normal.y;
        float3 result = {x,y,z};
        return result;
    } else {
        float y = 1.0;
        float z = 1.0;
        float x = -(normal.z + normal.y)/normal.x;
        float3 result = {x,y,z};
        return result;
    }
}
// Aborts the program when a CUDA call failed, reporting the source line and
// the human-readable error string.
void handleError(cudaError_t err, int line) {
    if (err != cudaSuccess) {
        // BUG FIX: the message previously omitted the actual error (err was
        // unused) and had no trailing newline.
        fprintf(stderr, "Cuda Error at line %d: %s\n", line, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// Renders `value` as a binary string, most-significant bit first.  When
// skip_leading_zeroes is set, zeros before the first 1 are omitted (an
// all-zero value therefore yields an empty string).
template<typename T>
string convert_to_binary_string(const T value, bool skip_leading_zeroes = false)
{
    const int total_bits = sizeof(T) * 8;
    string out;
    bool seen_one = false;
    for (int bit = total_bits - 1; bit >= 0; bit--)
    {
        const bool is_set = (value & (1ULL << bit)) != 0;
        if (is_set)
        {
            seen_one = true;
            out += '1';
        }
        else if (!skip_leading_zeroes || seen_one)
        {
            out += '0';
        }
    }
    return out;
}
// Binary search over an ascending-sorted array: returns the number of
// elements that are <= input, i.e. the index of the first element strictly
// greater than input (len if none is greater).
__device__ __host__ inline
uint32_t binarySearch(uint64_t *values, uint64_t input, uint32_t len) {
    int32_t lo = 0;
    int32_t hi = (int32_t)len - 1;
    while (lo <= hi) {
        const int32_t mid = lo + (hi - lo) / 2;
        if (values[mid] <= input)
            lo = mid + 1;
        else
            hi = mid - 1;
    }
    return (uint32_t) lo;
}
// Splits the morton-sorted interleaved array of data + query points into a
// dense data array and per-query packed entries.  The least-significant
// morton bit tags a point as a query ("check LSD" below);
// prefixQueryIndex[i] is the running count of queries up to i, which gives
// stable compaction offsets for both partitions.  For queries, the packed
// value is (qi << 32 | i) where i is the sorted position.
// NOTE(review): qi = i - nthQ + 1 looks like the count of data points at or
// left of this query -- confirm the +1 convention against consumers.
__global__
void compactPoints(float3idx *values, uint64_t *mortons, uint32_t *prefixQueryIndex, uint32_t *reverseIndices, float3idx *data, uint64_t *queryIndices, int numData, int numElements) {
uint64_t i = (uint64_t) blockIdx.x * blockDim.x + threadIdx.x;
if (i < numElements) {
int isQuery = mortons[i] & 1; //check LSD
if(isQuery) {
uint32_t nthQ = prefixQueryIndex[i];
uint64_t qi = i-nthQ+1;
// Map the point's original index back through reverseIndices; queries
// are numbered after the numData data points.
uint32_t rev = reverseIndices[values[i].i];
uint32_t queryindex = rev - numData;
//printf("%d -> (%d - %d) = %d \n", values[i].i, rev, numData, queryindex);
queryIndices[queryindex] = (qi << 32) | i;
} else {
// Data point: compact it left by the number of queries before it.
uint32_t numQueriesToLeft = prefixQueryIndex[i];
uint32_t di = i-numQueriesToLeft;
data[di] = values[i];
}
}
}
__global__
void compactPointsOld(uint4 *values, uint64_t *mortons, uint32_t *prefixQueryIndex, uint4 *data, uint64_t *queryIndices, int numData, int numElements) {
// Same compaction as compactPoints, but for the uint4 point layout
// whose .w field directly stores the original index.
uint64_t idx = (uint64_t) blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= numElements) {
    return;
}
uint32_t queriesUpToHere = prefixQueryIndex[idx];
if (mortons[idx] & 1) { // LSB flags a query point
    uint64_t dataRank = idx - queriesUpToHere + 1;
    queryIndices[values[idx].w - numData] = (dataRank << 32) | idx;
} else {
    data[idx - queriesUpToHere] = values[idx];
}
}
__device__
uint32_t intSqrt(int64_t remainder) {
// Integer square root via the classic digit-by-digit (base-4) method.
// Start from the highest power of four representable in 64 bits.
uint64_t bit = (uint64_t) 1 << (sizeof(uint64_t) * 8 - 2);
// Shrink to the largest power of four not exceeding the input.
while (bit > remainder) {
    bit >>= 2;
}
uint64_t result = 0;
while (bit != 0) {
    if (remainder >= result + bit) {
        remainder -= result + bit;
        result += bit << 1;
    }
    result >>= 1;
    bit >>= 2;
}
return (uint32_t) result;
}
__device__
uint64_t distanceSq(uint4 a, uint4 b) {
// Euclidean distance between two integer lattice points.
// NOTE(review): despite the name, this returns intSqrt(...) — i.e. the
// distance itself, not the squared distance. Kept as-is for callers.
int64_t dx = (int64_t) b.x - (int64_t) a.x;
int64_t dy = (int64_t) b.y - (int64_t) a.y;
int64_t dz = (int64_t) b.z - (int64_t) a.z;
return (uint64_t) intSqrt(dx * dx + dy * dy + dz * dz);
}
__device__
float fdist(float3 a, float3 b) {
// Euclidean distance between two points. Uses the single-precision
// sqrtf explicitly so the computation never round-trips through double.
float3 d = {b.x - a.x, b.y - a.y, b.z - a.z};
return sqrtf(dot(d, d));
}
__device__ inline
void Comparator(uint64_t &a, uint64_t &b, uint32_t dir) {
// Conditionally exchange a/b so the pair matches sort direction `dir`
// (dir == 1: keep a <= b; dir == 0: keep a >= b).
if (((uint32_t) (a > b)) == dir) {
    uint64_t tmp = a;
    a = b;
    b = tmp;
}
}
// In-place bitonic sort of `length` shared-memory entries, ascending.
// `i` is the calling thread's index; each thread owns two elements, so
// length must be 2 * blockDim.x (a power of two). Contains __syncthreads
// barriers, so EVERY thread of the block must call this function.
// ASCENDING is a project-wide constant defined elsewhere in the file.
__device__
void bitonicSort(uint64_t *values, uint32_t length, uint32_t i)
{
const uint32_t dir = ASCENDING;
// Build bitonic sequences of growing size, alternating direction per half.
for (uint32_t size = 2; size < length; size <<= 1) {
// Bitonic merge: direction depends on which half of `size` this thread is in.
uint32_t ddd = dir ^ ((i & (size / 2)) != 0);
for (uint32_t stride = size / 2; stride > 0; stride >>= 1)
{
__syncthreads();
// Each thread compares one pair; pos skips over the partner element.
uint32_t pos = 2 * i - (i & (stride - 1));
Comparator(
values[pos + 0],
values[pos + stride],
ddd
);
}
}
// Final merge of the full-length bitonic sequence into sorted order.
for (uint32_t stride = length / 2; stride > 0; stride >>= 1) {
__syncthreads();
uint32_t pos = 2 * i - (i & (stride - 1));
Comparator(
values[pos + 0],
values[pos + stride],
dir
);
}
}
// Exchange two 64-bit values (helper for bitonicSort2).
// The `register` storage class was dropped: it is deprecated and removed
// in C++17 and has no effect on modern compilers.
__device__ inline void swap(uint64_t & a, uint64_t & b)
{
uint64_t tmp = a;
a = b;
b = tmp;
}
// In-place ascending bitonic sort of `size` shared-memory entries, one
// element per thread (`tid` = thread index, size = blockDim.x, must be a
// power of two). Contains __syncthreads barriers, so every thread of the
// block must call this function.
__device__
void bitonicSort2(uint64_t *values, uint32_t size, uint32_t tid) {
// Parallel bitonic sort.
for (int k = 2; k <= size; k *= 2) {
// Bitonic merge:
for (int j = k / 2; j > 0; j /= 2) {
// Partner index; only the lower thread of each pair performs the swap.
int ixj = tid ^ j;
if (ixj > tid) {
// (tid & k) selects the sort direction of this k-sized subsequence.
if ((tid & k) == 0) {
if (values[tid] > values[ixj]) {
swap(values[tid], values[ixj]);
}
}
else {
if (values[tid] < values[ixj]) {
swap(values[tid], values[ixj]);
}
}
}
__syncthreads();
}
}
}
// Initial k-NN candidate pass: one block per query. Each of the first
// `lambdak` threads fetches one data point on each side of the query's
// position in the morton order, packs (fixed-point distance << 32 | index)
// into shared memory (2 slots per thread), the block bitonic-sorts all
// 2*numThreads slots, and the k smallest are written to currentNearest.
// Shared memory: 2 * blockDim.x uint64_t (sized by the launcher).
// Assumes numData > lambdak so the clamped window stays in bounds —
// TODO confirm with callers.
__global__
void approxNearest(uint64_t *sortIndices, uint4 *values, float3 *floatvalues, uint4 *data, uint64_t *currentNearest, uint32_t k, uint32_t lambdak, int numQueries, int numData) {
extern __shared__ uint64_t candidates[];
uint32_t q = (uint32_t) blockIdx.x;
uint32_t numThreads = (uint32_t) blockDim.x;
// Padding key: larger than any real packed distance.
const uint64_t maxDist = (uint64_t) 0xffffffff;
// q comes from blockIdx, so this branch is uniform per block and the
// barriers inside are safe.
if(q < numQueries) {
uint64_t iq_both = queryIndices[q]; // data point right to query point
uint32_t iq = (uint32_t) (iq_both >> 32);
uint32_t index = (uint32_t) iq_both;
uint4 querypoint = values[index];
float3 queryp = floatvalues[querypoint.w];
uint32_t i = (uint32_t) threadIdx.x;
if(i < lambdak) {
// Clamp so iq-i and iq+i+1 stay inside data[0 .. numData-1].
iq = min(numData-lambdak-1, max(lambdak-1, iq));
uint32_t leftidx = iq-i;
uint32_t rightidx = iq+i+1;
uint4 left = data[leftidx];
uint4 right = data[rightidx];
float3 leftp = floatvalues[left.w];
float3 rightp = floatvalues[right.w];
float leftd = fdist(queryp, leftp);
float rightd = fdist(queryp, rightp);
// Distances are converted to 2.30 fixed point so they sort as integers.
uint64_t prec = (uint64_t) (1 << 30);
uint64_t leftdist = (uint64_t) (leftd * prec);
uint64_t rightdist = (uint64_t) (rightd * prec);
candidates[2*i] = (uint64_t) leftdist << 32 | left.w;
candidates[2*i+1] = (uint64_t) rightdist << 32 | right.w;
} else if(i < numThreads) {
// Pad unused slots with the sentinel so the sort pushes them to the end.
candidates[2*i] = (maxDist << 32) | 0;
candidates[2*i+1] = (maxDist << 32) | 0;
}
__syncthreads();
if(i < numThreads) {
bitonicSort(candidates, 2*numThreads, i);
}
//Write to global memory
if(i < k) {
//printf("%u %u %u\n", q, i, (uint32_t) (candidates[i] >> 32) );
currentNearest[q*k+i] = candidates[i];
}
}
}
__device__
float3 toFloat3(uint4 value) {
// Map 21-bit fixed-point integer coordinates back to floats in [0, 1).
// The scale literal is 1.0f (the original 1.0 promoted the division to
// double before truncating back to float).
const float scale = 1.0f / (float) (1 << 21);
float3 result = {value.x * scale, value.y * scale, value.z * scale};
return result;
}
// Component-wise division a / b.
__host__ __device__
float3 div(float3 a, float3 b) {
float3 out;
out.x = a.x / b.x;
out.y = a.y / b.y;
out.z = a.z / b.z;
return out;
}
// Component-wise product a * b (Hadamard product).
__host__ __device__
float3 mult(float3 a, float3 b) {
float3 out;
out.x = a.x * b.x;
out.y = a.y * b.y;
out.z = a.z * b.z;
return out;
}
// Initial k-NN candidate pass under the per-query ellipsoid metric.
// One block per query. Thread 0 builds the 4x4 bucket-to-ellipsoid
// transform, then each of the first `lambdak` threads fetches one data
// point on each side of the query's morton rank, computes its distance in
// ellipsoid space, and packs (fixed-point distance << 32 | original index)
// into shared memory. The block bitonic-sorts all 2*numThreads slots and
// writes the k smallest to currentNearest.
// Shared memory: 2*blockDim.x uint64_t candidates + 16 floats (matrix).
__global__
void approxNearestEllipsoid(uint64_t *queryIndices, float3idx *values, float3idx *data, uint64_t *currentNearest, uint32_t k, uint32_t lambdak, int numQueries, int numData,
float3 normalScaling, float3 tangentScaling,
CoordinateSystem bucketSpace, float3 querynormals[]) {
extern __shared__ uint64_t candidates[];
uint32_t q = (uint32_t) blockIdx.x;
uint32_t numThreads = (uint32_t) blockDim.x;
// FIX(review): the matrix used to live at &candidates[2*lambdak], but the
// bitonic sort pads and sorts 2*numThreads >= 2*lambdak slots, so whenever
// lambdak was not a power of two the padding writes raced with / clobbered
// the matrix. Placing it after all 2*numThreads slots matches the launch's
// shared-memory allocation (2*threadsPerBlock*sizeof(uint64_t) + 16 floats).
float *toEllipsoid = (float *) &candidates[2*numThreads];
const uint64_t maxDist = (uint64_t) UINT32_MAX;
uint64_t iq_both;
uint32_t iq,index;
float3idx querypoint;
// q is uniform per block, so the barriers below are not divergent.
if(q < numQueries) {
    iq_both = queryIndices[q]; // data point right to query point
    iq = (uint32_t) (iq_both >> 32);
    index = (uint32_t) iq_both;
    querypoint = values[index];
}
if(q < numQueries) {
    float3 querynormal = querynormals[querypoint.i - numData];
    float3 tangent0 = normalize(findArbitraryTangent(querynormal));
    float3 tangent1 = cross(querynormal, tangent0);
    uint32_t i = (uint32_t) threadIdx.x;
    if(i < 1) {
        // Thread 0 derives the per-query ellipsoid frame and the transform.
        CoordinateSystem ellipsoidSpace = {
            mult(tangent0, tangentScaling),
            mult(querynormal, normalScaling),
            mult(tangent1, tangentScaling)
        };
        calculateTransformMatrixKNN(bucketSpace, ellipsoidSpace, toEllipsoid);
    }
    __syncthreads();
    if(i < lambdak) {
        // Clamp so iq-i and iq+i+1 stay inside data[0 .. numData-1].
        iq = min(numData-lambdak-1, max(lambdak-1, iq));
        uint32_t leftidx = iq-i;
        uint32_t rightidx = iq+i+1;
        float3idx left = data[leftidx];
        float3idx right = data[rightidx];
        //printf("%u : %u - %u : %u \n", leftidx, rightidx, left.i, right.i);
        // move to ellipsoid coords
        float3 query = multiply4x4x3KNN(toEllipsoid, querypoint.value);
        float3 fleft = multiply4x4x3KNN(toEllipsoid, left.value);
        float3 fright = multiply4x4x3KNN(toEllipsoid, right.value);
        float leftd = fdist(query, fleft);
        float rightd = fdist(query, fright);
        //printf("%f %f\n", leftd, rightd);
        // 2.30 fixed point so distances sort as integers in the high word.
        uint64_t prec = (uint64_t) (1 << 30);
        uint64_t leftdist = (uint64_t) (leftd * prec);
        uint64_t rightdist = (uint64_t) (rightd * prec);
        candidates[2*i] = (leftdist << 32) | left.i;
        candidates[2*i+1] = (rightdist << 32) | right.i;
    } else if(i < numThreads) {
        // Sentinel padding; 0xdeadbeef marks a slot holding no real point.
        candidates[2*i] = (maxDist << 32) | 0xdeadbeef;
        candidates[2*i+1] = (maxDist << 32) | 0xdeadbeef;
    }
    __syncthreads();
    if(i < numThreads) {
        bitonicSort(candidates, 2*numThreads, i);
    }
    __syncthreads();
    //Write to global memory
    if(i < k) {
        uint32_t queryIndex = querypoint.i-numData;
        uint32_t idx = queryIndex*k+i;
        currentNearest[idx] = candidates[i];
    }
}
}
// Floor of the base-2 logarithm for x >= 1 (returns 0 for x <= 1).
uint32_t log2(uint32_t x) {
uint32_t bits = 0;
while (x > 1) {
    x >>= 1;
    ++bits;
}
return bits;
}
// Launch the initial candidate search: one block per query, thread count
// rounded up to the smallest power of two >= lambda*k, with two shared
// candidate slots per thread.
void findCandidates(uint64_t *queryIndices, uint4 *values, float3* floatvalues, uint4 *data, uint64_t *nearest, const uint32_t k, int numQueries, int numData, uint32_t lambda) {
uint32_t lambdak = k * lambda;
int threadsPerBlock = 1 << (log2(lambdak - 1) + 1);
size_t sharedBytes = (size_t) 2 * threadsPerBlock * sizeof(uint64_t);
approxNearest<<<numQueries, threadsPerBlock, sharedBytes>>>(queryIndices, values, floatvalues, data, nearest, k, lambdak, numQueries, numData);
handleError(cudaGetLastError(), __LINE__);
}
// Launch the initial candidate search in the ellipsoid metric: one block
// per query; shared memory holds two candidate slots per thread plus a
// 16-float transform matrix.
void findCandidatesEllipsoid(uint64_t *queryIndices, float3idx *values, float3idx *data, uint64_t *nearest, const uint32_t k, int numQueries, int numData,
float3 normalScaling, float3 tangentScaling,
CoordinateSystem bucketSpace, float3 querynormals[], uint32_t lambda) {
uint32_t lambdak = k * lambda;
int threadsPerBlock = 1 << (log2(lambdak - 1) + 1);
size_t sharedBytes = (size_t) 2 * threadsPerBlock * sizeof(uint64_t) + 16 * sizeof(float);
approxNearestEllipsoid<<<numQueries, threadsPerBlock, sharedBytes>>>(
    queryIndices, values, data, nearest, k, lambdak, numQueries, numData,
    normalScaling, tangentScaling,
    bucketSpace, querynormals
);
handleError(cudaGetLastError(), __LINE__);
}
// Host wrapper: launch scalePointsOld over all elements (256 threads per
// block) and check the launch for errors.
void scaleValues(float3 *devValues, uint4 *devIntValues,
uint32_t shift,
int numElements,
float3 mins, float maxlen,
bool revert) {
const int threadsPerBlock = 256;
const int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
scalePointsOld<<<blocksPerGrid, threadsPerBlock>>>(devValues, devIntValues, numElements, shift, mins, maxlen, revert);
handleError(cudaGetLastError(), __LINE__);
}
// Host wrapper: launch scalePoints over all elements (256 threads per
// block) and check the launch for errors.
void scaleValues(float3idx *devValues, float3idx *output,
int numElements,
float3 mins, float maxlen,
bool revert) {
const int threadsPerBlock = 256;
const int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
scalePoints<<<blocksPerGrid, threadsPerBlock>>>(devValues, output, numElements, mins, maxlen, revert);
handleError(cudaGetLastError(), __LINE__);
}
// Host wrapper: launch computeMortons over all data + query points.
// Returns EXIT_SUCCESS (handleError aborts on failure).
int getMortonsOld(uint4 *devIntValues, uint64_t *devMortons, const int numData, const int numQueries) {
const int total = numData + numQueries;
const int threadsPerBlock = 256;
const int blocksPerGrid = (total + threadsPerBlock - 1) / threadsPerBlock;
computeMortons<<<blocksPerGrid, threadsPerBlock>>>(devIntValues, devMortons, numData, numQueries);
handleError(cudaGetLastError(), __LINE__);
return EXIT_SUCCESS;
}
// Host wrapper: launch computeMortons (float3idx variant, with a grid
// shift) over all data + query points. Returns EXIT_SUCCESS.
int getMortons(float3idx *values, uint32_t shift, uint64_t *devMortons, const int numData, const int numQueries) {
const int total = numData + numQueries;
const int threadsPerBlock = 256;
const int blocksPerGrid = (total + threadsPerBlock - 1) / threadsPerBlock;
computeMortons<<<blocksPerGrid, threadsPerBlock>>>(values, shift, devMortons, numData, numQueries);
handleError(cudaGetLastError(), __LINE__);
return EXIT_SUCCESS;
}
// Host wrapper: launch compactPoints over all elements and check the
// launch. Returns EXIT_SUCCESS (handleError aborts on failure).
int pointCompaction(float3idx *values, uint64_t *devMortons, uint32_t *devPrefixQueryIndex, uint32_t *reverseIndices, float3idx *devData, uint64_t *devQueryIndices, int numData, int numElements) {
const int threadsPerBlock = 256;
const int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
compactPoints<<<blocksPerGrid, threadsPerBlock>>>(values, devMortons, devPrefixQueryIndex, reverseIndices, devData, devQueryIndices, numData, numElements);
handleError(cudaGetLastError(), __LINE__);
return EXIT_SUCCESS;
}
// Host wrapper: launch compactPointsOld (uint4 layout) over all elements
// and check the launch. Returns EXIT_SUCCESS.
int pointCompactionOld(uint4 *devIntValues, uint64_t *devMortons, uint32_t *devPrefixQueryIndex, uint4 *devData, uint64_t *devQueryIndices, int numData, int numElements) {
const int threadsPerBlock = 256;
const int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
compactPointsOld<<<blocksPerGrid, threadsPerBlock>>>(devIntValues, devMortons, devPrefixQueryIndex, devData, devQueryIndices, numData, numElements);
handleError(cudaGetLastError(), __LINE__);
return EXIT_SUCCESS;
}
__global__
void prefixList(uint4 *values, uint32_t *prefix, int numData, int numElements) {
// Flag query points (original index .w >= numData) with 1 and data
// points with 0, so a later inclusive scan yields, at each position,
// the number of queries up to and including it.
uint32_t i = (uint32_t) blockIdx.x * blockDim.x + threadIdx.x;
if (i < numElements) {
    prefix[i] = (values[i].w < numData) ? 0 : 1;
}
}
__global__
void prefixList(float3idx *values, uint32_t *prefix, int numData, int numElements) {
// float3idx overload: flag query points (original index .i >= numData)
// with 1, data points with 0, feeding the query-count prefix scan.
uint32_t i = (uint32_t) blockIdx.x * blockDim.x + threadIdx.x;
if (i < numElements) {
    prefix[i] = (values[i].i < numData) ? 0 : 1;
}
}
// Build the 0/1 query-flag list, then convert it into a running query
// count with an in-place inclusive scan. Returns EXIT_SUCCESS.
int createPrefixList(uint4 *devIntValues, uint32_t *devPrefixQueryIndex, int numData, int numElements) {
const int threadsPerBlock = 256;
const int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
prefixList<<<blocksPerGrid, threadsPerBlock>>>(devIntValues, devPrefixQueryIndex, numData, numElements);
handleError(cudaGetLastError(), __LINE__);
// in-place prefix sum on list
thrust::inclusive_scan(thrust::device, devPrefixQueryIndex, devPrefixQueryIndex + numElements, devPrefixQueryIndex);
handleError(cudaGetLastError(), __LINE__);
return EXIT_SUCCESS;
}
// float3idx overload: build the 0/1 query-flag list, then convert it into
// a running query count with an in-place inclusive scan.
int createPrefixList(float3idx *values, uint32_t *devPrefixQueryIndex, int numData, int numElements) {
const int threadsPerBlock = 256;
const int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
prefixList<<<blocksPerGrid, threadsPerBlock>>>(values, devPrefixQueryIndex, numData, numElements);
handleError(cudaGetLastError(), __LINE__);
// in-place prefix sum on list
thrust::inclusive_scan(thrust::device, devPrefixQueryIndex, devPrefixQueryIndex + numElements, devPrefixQueryIndex);
handleError(cudaGetLastError(), __LINE__);
return EXIT_SUCCESS;
}
// Merge 2*lambdak fresh candidates (taken symmetrically around the query's
// morton rank) into the query's current k nearest. One block per query;
// q = blockIdx.x is uniform per block, so the barriers inside the
// "if(q < numQueries)" branches are reached by all threads.
// Shared layout (sized by mergeStep):
//   [currentNN: k u64][counter: 2k u32][counter_scan: 2k u32][updatedNN: k u64]
// counter interleaves even slots (new candidates per insertion gap) and
// odd slots (the existing entry), so an exclusive scan assigns every
// surviving entry its slot in the merged order.
__global__
void mergeNearest(uint64_t *nearest, uint4 *values, float3 *floatvalues, uint4 *data, uint64_t *queryIndices, const uint32_t k, uint32_t lambdak, int numQueries, int numData) {
extern __shared__ uint64_t shared[];
uint32_t numThreads = (uint32_t) blockDim.x;
uint64_t *currentNN = (uint64_t *) shared;
// FIX(review): "&currentNN[k]" had been corrupted to "¤tNN[k]"
// ("&curren" turned into the ¤ HTML entity); restored so the shared
// memory carving compiles again. Same fix applied to mergeNearestEllipsoid.
uint32_t *counter = (uint32_t *) &currentNN[k];
uint32_t *counter_scan = (uint32_t *) &counter[2*k];
uint64_t *updatedNN = (uint64_t *) &counter_scan[2*k];
uint32_t i = (uint32_t) threadIdx.x;
uint32_t q = (uint32_t) blockIdx.x;
if(i < k && q < numQueries) {
    currentNN[i] = nearest[q*k+i];
}
if(i < 2*k && q < numQueries) {
    counter[i] = i & 1; //1 if odd: each odd slot owns existing entry i/2
}
__syncthreads();
uint32_t offset = 0;
uint64_t candidate;
uint32_t loc;
bool active = true;
if(q < numQueries) {
    if(i < 2*lambdak) {
        uint64_t iq_both = queryIndices[q]; // data point right to query point
        uint32_t iq = iq_both >> 32;
        uint32_t index = (uint32_t) iq_both;
        uint4 querypoint = values[index];
        float3 queryp = floatvalues[querypoint.w];
        // Clamp so iq-(i/2) and iq+(i/2)+1 stay inside the data array.
        iq = min(numData-lambdak-1, max(lambdak-1, iq));
        bool odd = (i & 1);
        uint32_t idx;
        if(odd) {
            idx = iq+(i/2)+1;  // candidate to the right of the query
        } else {
            idx = iq-(i/2);    // candidate to the left
        }
        uint4 datapoint = data[idx];
        float3 p = floatvalues[datapoint.w];
        // 2.30 fixed-point distance in the high word, index in the low word.
        uint64_t prec = (uint64_t) (1 << 30);
        float d = fdist(queryp, p);
        uint64_t dist = d * prec;
        //uint64_t dist = distanceSq(querypoint,datspoint);
        candidate = (dist << 32) | datapoint.w;
        // Insertion position of the candidate within the sorted currentNN.
        loc = binarySearch(currentNN, candidate, k);
        if(loc == k) {
            active = false;  // farther than everything already kept
        } else {
            uint32_t index = (loc != 0) ? (loc-1) : 0;
            uint64_t current = currentNN[index];
            if(current == candidate) {
                active = false;  // exact duplicate of an existing entry
            } else {
                // Claim a slot among the candidates inserted at this gap.
                offset = atomicAdd(&counter[loc*2], (uint32_t) 1);
            }
        }
    } else if(i < numThreads) {
        candidate = UINT64_MAX;
        active = false;
        offset = 0;
    }
}
// Do a block-wise parallel exclusive prefix sum over counter
__syncthreads();
if(i == 0) {
    // NOTE(review): executed by thread 0 only — relies on thrust's
    // sequential device-side execution of this scan; verify intended.
    thrust::exclusive_scan(thrust::device, counter, counter + (2*k), counter_scan);
}
__syncthreads();
// for all current nearest
if(q < numQueries) {
    if(i < k) {
        // Scatter the surviving existing entries to their merged slots.
        uint32_t index = counter_scan[2*i + 1];
        if(index < k) {
            updatedNN[index] = currentNN[i];
        }
    }
    __syncthreads();
    // for all new candidate nearest
    if(i < 2*lambdak && active == true) {
        uint32_t index = counter_scan[2*loc] + offset;
        if(index < k) {
            updatedNN[index] = candidate;
        }
    }
    __syncthreads();
    if(i < k && q < numQueries) {
        nearest[q*k+i] = updatedNN[i];
    }
    __syncthreads();
}
}
// Ellipsoid-metric variant of mergeNearest: merges 2*lambdak fresh
// candidates into each query's current k nearest, measuring distances in
// the query's ellipsoid coordinate frame. One block per query; q is
// uniform per block, so the barriers below are not divergent.
// Shared layout (sized by mergeStepEllipsoid):
//   [currentNN: k u64][counter: 2k u32][counter_scan: 2k u32]
//   [updatedNN: k u64][toEllipsoid: 16 floats]
__global__
void mergeNearestEllipsoid(uint64_t *nearest, float3idx *values, float3idx *data, uint64_t *queryIndices, const uint32_t k, uint32_t lambdak, int numQueries, int numData,
float3 normalScaling, float3 tangentScaling,
CoordinateSystem bucketSpace, float3 querynormals[], uint32_t intShift) {
extern __shared__ uint64_t shared[];
uint32_t numThreads = (uint32_t) blockDim.x;
uint64_t *currentNN = (uint64_t *) shared;
// FIX(review): "&currentNN[k]" had been corrupted to "¤tNN[k]"
// (mangled "&curren" HTML entity); restored so this compiles again.
uint32_t *counter = (uint32_t *) &currentNN[k];
uint32_t *counter_scan = (uint32_t *) &counter[2*k];
uint64_t *updatedNN = (uint64_t *) &counter_scan[2*k];
float *toEllipsoid = (float *) &updatedNN[k];
uint32_t i = (uint32_t) threadIdx.x;
uint32_t q = (uint32_t) blockIdx.x;
uint64_t iq_both;
uint32_t iq,index;
float3idx querypoint;
if(q < numQueries) {
    iq_both = queryIndices[q]; // data point right to query point
    iq = (uint32_t) (iq_both >> 32);
    index = (uint32_t) iq_both;
    querypoint = values[index];
}
if(i < k && q < numQueries) {
    uint32_t idx = (querypoint.i-numData)*k + i;
    currentNN[i] = nearest[idx];
}
if(i < 2*k && q < numQueries) {
    counter[i] = i & 1; //1 if odd: each odd slot owns existing entry i/2
}
__syncthreads();
uint32_t offset = 0;
uint64_t candidate;
uint32_t loc;
bool active = true;
if(q < numQueries) {
    float3 query = querypoint.value;
    // Clamp so iq-(i/2) and iq+(i/2)+1 stay inside the data array.
    iq = min(numData-lambdak-1, max(lambdak-1, iq));
    float3 querynormal = querynormals[querypoint.i - numData];
    float3 tangent0 = normalize(findArbitraryTangent(querynormal));
    float3 tangent1 = cross(querynormal, tangent0);
    if(i < 1) {
        // Thread 0 derives the per-query ellipsoid frame and transform.
        CoordinateSystem ellipsoidSpace = {
            mult(tangent0, tangentScaling),
            mult(querynormal, normalScaling),
            mult(tangent1, tangentScaling)
        };
        calculateTransformMatrixKNN(bucketSpace, ellipsoidSpace, toEllipsoid);
    }
    __syncthreads();
    if(i < 2*lambdak) {
        bool odd = (i & 1);
        uint32_t idx;
        if(odd) {
            idx = iq+(i/2)+1;  // candidate to the right of the query
        } else {
            idx = iq-(i/2);    // candidate to the left
        }
        float3idx datapoint = data[idx];
        float3 point = datapoint.value;
        // move to ellipsoid coords
        query = multiply4x4x3KNN(toEllipsoid, query);
        point = multiply4x4x3KNN(toEllipsoid, point);
        float d = fdist(query, point);
        // 2.30 fixed-point distance in the high word, index in the low word.
        uint64_t prec = (uint64_t) (1 << 30);
        uint64_t dist = d * prec;
        candidate = (dist << 32) | datapoint.i;
        // Insertion position of the candidate within the sorted currentNN.
        loc = binarySearch(currentNN, candidate, k);
        if(loc == k) {
            active = false;  // farther than everything already kept
        } else {
            uint32_t index = (loc != 0) ? (loc-1) : 0;
            uint64_t current = currentNN[index];
            uint64_t lowend = 0xffffffff;
            // Compare only the index word: the same point may carry a
            // different distance this pass, and must not be duplicated.
            if((current & lowend) == (candidate & lowend)) {
                active = false;
            } else {
                // Claim a slot among candidates inserted at this gap.
                offset = atomicAdd(&counter[loc*2], (uint32_t) 1);
            }
        }
    } else if(i < numThreads) {
        candidate = UINT64_MAX;
        active = false;
        offset = 0;
    }
}
// Do a block-wise parallel exclusive prefix sum over counter
__syncthreads();
if(i == 0) {
    // NOTE(review): executed by thread 0 only — relies on thrust's
    // sequential device-side execution of this scan; verify intended.
    thrust::exclusive_scan(thrust::device, counter, counter + (2*k), counter_scan);
}
__syncthreads();
// for all current nearest
if(q < numQueries) {
    if(i < k) {
        // Scatter the surviving existing entries to their merged slots.
        uint32_t index = counter_scan[2*i + 1];
        if(index < k) {
            updatedNN[index] = currentNN[i];
        }
    }
    __syncthreads();
    // for all new candidate nearest
    if(i < 2*lambdak && active == true) {
        uint32_t index = counter_scan[2*loc] + offset;
        if(index < k) {
            updatedNN[index] = candidate;
        }
    }
    __syncthreads();
    if(i < k) {
        uint32_t idx = (querypoint.i-numData)*k + i;
        nearest[idx] = updatedNN[i];
    }
    __syncthreads();
}
}
// Re-sort each query's k merged candidates by distance (one block per
// query). Slots beyond k are padded with UINT64_MAX so the power-of-two
// bitonic sort leaves the real entries in the first k positions.
// FIX(review): the original issued __syncthreads() inside the divergent
// "i < k" / "else if" branches, which is undefined behaviour when threads
// of one block take different branches; the barriers are hoisted so every
// thread reaches the same ones (q is uniform per block, so the outer
// guard is safe).
__global__
void sortMerged(uint64_t *nearest, uint32_t k, uint32_t numQueries, uint32_t numData, uint4 *values, uint64_t *queryIndices) {
extern __shared__ uint64_t toSort[];
uint32_t i = (uint32_t) threadIdx.x;
uint32_t q = (uint32_t) blockIdx.x;
uint32_t numThreads = (uint32_t) blockDim.x;
if(q < numQueries) {
    uint64_t iq_both = queryIndices[q]; // data point right to query point
    uint4 querypoint = values[(uint32_t) iq_both];
    uint32_t base = (querypoint.w-numData)*k;
    if(i < k) {
        toSort[i] = nearest[base+i];
    } else if(i < numThreads) {
        toSort[i] = UINT64_MAX; // padding sorts to the tail
    }
    __syncthreads();
    bitonicSort2(toSort, numThreads, i);
    __syncthreads();
    if(i < k) {
        nearest[base+i] = toSort[i];
    }
}
}
// float3idx overload of sortMerged: re-sort each query's k merged
// candidates by distance (one block per query), padding with UINT64_MAX.
// FIX(review): __syncthreads() was issued inside divergent branches
// (undefined behaviour); the barriers are hoisted so all threads of the
// block reach the same ones (q is uniform per block).
__global__
void sortMerged(uint64_t *nearest, uint32_t k, uint32_t numQueries, uint32_t numData, float3idx *values, uint64_t *queryIndices) {
extern __shared__ uint64_t toSort[];
uint32_t i = (uint32_t) threadIdx.x;
uint32_t q = (uint32_t) blockIdx.x;
uint32_t numThreads = (uint32_t) blockDim.x;
if(q < numQueries) {
    uint64_t iq_both = queryIndices[q]; // data point right to query point
    float3idx querypoint = values[(uint32_t) iq_both];
    uint32_t base = (querypoint.i-numData)*k;
    if(i < k) {
        toSort[i] = nearest[base+i];
    } else if(i < numThreads) {
        toSort[i] = UINT64_MAX; // padding sorts to the tail
    }
    __syncthreads();
    bitonicSort2(toSort, numThreads, i);
    __syncthreads();
    if(i < k) {
        nearest[base+i] = toSort[i];
    }
}
}
// Merge 2*lambda*k new candidates per query into its current k nearest,
// then re-sort each query's k entries by distance.
void mergeStep(uint64_t *nearest, uint4 *values, float3 *floatvalues, uint4 *data, uint64_t *queryIndices, const uint32_t k, int numQueries, int numData, uint32_t lambda) {
uint32_t lambdak = k * lambda;
uint32_t logn = log2(lambdak - 1);
int threadsPerBlock = 1 << (logn + 2); // smallest power of two >= 2*lambdak
int blocksPerGrid = numQueries;
// currentNN(k u64) + counter(2k u32) + counter_scan(2k u32) + updatedNN(k u64)
size_t sharedMemorySize = k*sizeof(uint64_t) + 2*k*sizeof(uint32_t) + 2*k*sizeof(uint32_t) + k*sizeof(uint64_t);
mergeNearest<<< blocksPerGrid, threadsPerBlock, sharedMemorySize >>>(nearest, values, floatvalues, data, queryIndices, k, lambdak, numQueries, numData);
handleError(cudaGetLastError(), __LINE__);
// nearest now unsorted - sort each block (data points for a query) by distance from q
uint32_t logn2 = log2(k - 1);
int sortSize = 1 << (logn2 + 1);
// FIX(review): launch the sort with sortSize (power-of-two) threads, not
// k — the bitonic sort requires a power-of-two thread count and only
// sortSize shared slots are allocated/padded. This also matches the
// launch configuration used by mergeStepEllipsoid.
sortMerged<<< blocksPerGrid, sortSize, sortSize*sizeof(uint64_t) >>>(nearest, k, numQueries, numData, values, queryIndices);
handleError(cudaGetLastError(), __LINE__);
}
// Ellipsoid-metric merge pass followed by a per-query re-sort.
void mergeStepEllipsoid(uint64_t *nearest, float3idx *values, float3idx *data, uint64_t *queryIndices, const uint32_t k, int numQueries, int numData,
float3 normalScaling, float3 tangentScaling,
CoordinateSystem bucketSpace, float3 querynormals[], uint32_t intShift, uint32_t lambda) {
uint32_t lambdak = k * lambda;
int threadsPerBlock = 1 << (log2(lambdak - 1) + 2); // >= 2*lambdak, power of two
int blocksPerGrid = numQueries;
// currentNN(k u64) + counter(2k u32) + scan(2k u32) + updatedNN(k u64) + 4x4 matrix
size_t sharedMemorySize = k*sizeof(uint64_t) + 2*k*sizeof(uint32_t) + 2*k*sizeof(uint32_t) + k*sizeof(uint64_t) + 16 * sizeof(float);
mergeNearestEllipsoid<<< blocksPerGrid, threadsPerBlock, sharedMemorySize >>>(
    nearest, values, data, queryIndices, k, lambdak, numQueries, numData,
    normalScaling, tangentScaling, bucketSpace, querynormals, intShift);
handleError(cudaGetLastError(), __LINE__);
// nearest now unsorted - sort each block (data points for a query) by distance from q
int sortSize = 1 << (log2(k - 1) + 1); // power of two for the bitonic sort
sortMerged<<< blocksPerGrid, sortSize, sortSize*sizeof(uint64_t) >>>(nearest, k, numQueries, numData, values, queryIndices);
handleError(cudaGetLastError(), __LINE__);
}
__global__
void copyAllValues(float3 *values, uint32_t numElements, float3 *result) {
// Plain element-wise device copy: result[i] = values[i].
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < numElements) {
    result[idx] = values[idx];
}
}
// Orders float3 values by their x component (for thrust algorithms).
struct compare_x {
__host__ __device__
bool operator()(float3 lhs, float3 rhs)
{
    return lhs.x < rhs.x;
}
};
// Orders float3 values by their y component (for thrust algorithms).
struct compare_y {
__host__ __device__
bool operator()(float3 lhs, float3 rhs)
{
    return lhs.y < rhs.y;
}
};
// Orders float3 values by their z component (for thrust algorithms).
struct compare_z {
__host__ __device__
bool operator()(float3 lhs, float3 rhs)
{
    return lhs.z < rhs.z;
}
};
// Orders index-tagged points by the x component of their value.
struct compare_x_idx {
__host__ __device__
bool operator()(float3idx lhs, float3idx rhs)
{
    return lhs.value.x < rhs.value.x;
}
};
// Orders index-tagged points by the y component of their value.
struct compare_y_idx {
__host__ __device__
bool operator()(float3idx lhs, float3idx rhs)
{
    return lhs.value.y < rhs.value.y;
}
};
// Orders index-tagged points by the z component of their value.
struct compare_z_idx {
__host__ __device__
bool operator()(float3idx lhs, float3idx rhs)
{
    return lhs.value.z < rhs.value.z;
}
};
__global__
void markBucketKernel(int numData, int numQueries, float3 *querynormals, float3* bucketnormals, int buckets, int bucket, uint32_t *marks) {
// Classify each element: 0 = data point, 1 = query whose normal maps to
// `bucket`, 2 = query belonging to some other bucket.
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < numData) {
    marks[idx] = 0;
    return;
}
if (idx >= (numData + numQueries)) {
    return;
}
// Assign the query to the bucket normal with the largest dot product
// (ties resolved in favour of the later bucket, as before).
float3 normal = querynormals[idx - numData];
float best = -2.0;
uint32_t bestBucket = buckets;
for (uint32_t b = 0; b < buckets; b++) {
    float d = dot(normal, bucketnormals[b]);
    if (d >= best) {
        best = d;
        bestBucket = b;
    }
}
marks[idx] = (bestBucket == bucket) ? 1 : 2;
}
// Host wrapper: launch markBucketKernel over data + query points.
// Adds the launch-error check that every other wrapper in this file
// performs (it was missing here).
void markInBucket(int numData, int numQueries, float3 *querynormals, float3 *bucketnormals, int buckets, int bucket, uint32_t *marks) {
int numElements = numData + numQueries;
int threadsPerBlock = 256;
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
markBucketKernel<<<blocksPerGrid, threadsPerBlock>>>(numData, numQueries, querynormals, bucketnormals, buckets, bucket, marks);
handleError(cudaGetLastError(), __LINE__);
}
__global__
void notInBucketKernel(uint32_t *marks, int numElements) {
// Collapse the 3-way marks into a 0/1 "outside the current bucket" flag
// (2 -> 1, everything else -> 0), ready for a reduction.
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < numElements) {
    marks[idx] = (marks[idx] == 2) ? 1 : 0;
}
}
// Host wrapper: launch notInBucketKernel over all marks.
// Adds the launch-error check used by the other wrappers in this file.
void notInBucket(uint32_t *marks, int numElements) {
int threadsPerBlock = 256;
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
notInBucketKernel<<<blocksPerGrid, threadsPerBlock>>>(marks, numElements);
handleError(cudaGetLastError(), __LINE__);
}
__global__
void originalIndices(int numElements, float3 *values, float3idx *output) {
// Tag each point with its original array index so later sorts can be
// undone.
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < numElements) {
    float3idx tagged = {values[idx], idx};
    output[idx] = tagged;
}
}
// Host wrapper: launch originalIndices over all elements.
// Adds the launch-error check used by the other wrappers in this file.
void storeOriginalIndices(int numElements, float3 *values, float3idx *output) {
int threadsPerBlock = 256;
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
originalIndices<<<blocksPerGrid, threadsPerBlock>>>(numElements, values, output);
handleError(cudaGetLastError(), __LINE__);
}
__global__
void bucketIndices(uint4 *intValues, int numElements, uint32_t *bucketIndices) {
// Scatter pass: record the position at which each original point
// (identified by .w) currently lives.
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < numElements) {
    bucketIndices[intValues[idx].w] = idx;
}
}
// Host wrapper: launch bucketIndices over all elements.
// Adds the launch-error check used by the other wrappers in this file.
void storeBucketIndices(uint4 *intValues, int numElements, uint32_t *bucketIdx) {
int threadsPerBlock = 256;
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
bucketIndices<<<blocksPerGrid, threadsPerBlock>>>(intValues, numElements, bucketIdx);
handleError(cudaGetLastError(), __LINE__);
}
__global__
void calculateReverseIndices(float3idx *values, int numElements, uint32_t *revIndices) {
// Scatter pass: for each original index (.i), record where that point
// currently sits in the (possibly re-ordered) array.
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < numElements) {
    revIndices[values[idx].i] = idx;
}
}
// Host wrapper: launch calculateReverseIndices over all elements.
// Adds the launch-error check used by the other wrappers in this file.
void reverseIndices(float3idx *values, int numElements, uint32_t *revIndices) {
int threadsPerBlock = 256;
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
calculateReverseIndices<<<blocksPerGrid, threadsPerBlock>>>(values, numElements, revIndices);
handleError(cudaGetLastError(), __LINE__);
}
int nearestNeighborsEllipsoid(int numData, int numQueries, uint32_t k, float3 *values, float3 *querynormals, uint64_t *nearest, const uint32_t lambda, const float compressionRate) {
const int buckets = 18;
/*
Create icosahedron (even distribution) and
add the octahedron to create one bucket for each axis.
This results in slightly unveven buckets but points on the
same surface are likely to be in the same bucket.
*/
float d = sinf(PI/4.0);
float3 bucketnormals[buckets] = {
{0,0,1}, {0,1,0}, {1,0,0},
{0,0,-1}, {0,-1,0}, {-1,0,0},
{0, d, d}, {0, d, -d}, {0, -d, d}, {0, -d, -d},
{d, 0, d}, {d, 0, -d}, {-d, 0, d}, {-d, 0, -d},
{d, d, 0}, {d, -d, 0}, {-d, d, 0}, {-d, -d, 0}
};
size_t bucketNormalSize = buckets * sizeof(float3);
int numElements = numData + numQueries;
size_t indexedSize = numElements * sizeof(float3idx);
size_t dataSize = numData * sizeof(float3idx);
size_t qiSize = numQueries * sizeof(uint64_t);
size_t nearestSize = numQueries * k * sizeof(uint64_t);
size_t queryNormalSize = numQueries * sizeof(float3);
size_t mortonSize = numElements * sizeof(uint64_t);
size_t prefixSize = numElements * sizeof(uint32_t);
cudaError_t err = cudaSuccess;
size_t valueSize = numElements * sizeof(float3);
float3 *devValues = NULL;
err = cudaMalloc((void **) &devValues, valueSize);
handleError(err, __LINE__);
uint64_t *devMortons = NULL;
err = cudaMalloc((void **) &devMortons, mortonSize);
handleError(err, __LINE__);
uint32_t *devPrefixQueryIndex = NULL;
err = cudaMalloc((void **) &devPrefixQueryIndex, prefixSize);
handleError(err, __LINE__);
uint32_t *devMarks = NULL;
err = cudaMalloc((void **) &devMarks, prefixSize);
handleError(err, __LINE__);
uint64_t *devQueryIndices = NULL;
err = cudaMalloc((void **) &devQueryIndices, qiSize);
handleError(err, __LINE__);
float3idx *devIndexed = NULL;
err = cudaMalloc((void **) &devIndexed, indexedSize);
handleError(err, __LINE__);
float3idx *devData = NULL;
err = cudaMalloc((void **) &devData, dataSize);
handleError(err, __LINE__);
uint64_t *devNearest = NULL;
err = cudaMalloc((void **) &devNearest, nearestSize);
handleError(err, __LINE__);
float3 *devQueryNormals = NULL;
err = cudaMalloc((void **) &devQueryNormals, queryNormalSize);
handleError(err, __LINE__);
float3 *devBucketNormals = NULL;
err = cudaMalloc((void **) &devBucketNormals, bucketNormalSize);
handleError(err, __LINE__);
err = cudaMemcpy(devValues, values, valueSize, cudaMemcpyHostToDevice);
handleError(err, __LINE__);
err = cudaMemcpy(devQueryNormals, querynormals, queryNormalSize, cudaMemcpyHostToDevice);
handleError(err, __LINE__);
err = cudaMemcpy(devBucketNormals, bucketnormals, bucketNormalSize, cudaMemcpyHostToDevice);
handleError(err, __LINE__);
struct timeval tval_before, tval_after, tval_result;
gettimeofday(&tval_before, NULL);
CoordinateSystem unit = unitSystem();
float bucketScale = fmin(1.0, 2.0/compressionRate);
float3 bucketNormalScaling = {bucketScale, bucketScale, bucketScale};
float3 normalScaling = {1.0/compressionRate, 1.0/compressionRate, 1.0/compressionRate};
float3 tangentScaling = {1.0, 1.0, 1.0};
for (int bucket = 0; bucket < buckets; bucket++) {
storeOriginalIndices(numElements, devValues, devIndexed);
markInBucket(numData, numQueries, devQueryNormals, devBucketNormals, buckets, bucket, devMarks);
thrust::sort_by_key(thrust::device, devMarks, devMarks + numElements, devIndexed);
notInBucket(devMarks, numElements);
int numOutside = thrust::reduce(thrust::device, devMarks, devMarks + numElements);
int numQueriesInside = numQueries - numOutside;
if(numQueriesInside == 0) {
continue;
}
int numElementsInside = numData + numQueriesInside;
float3 bucketnormal = normalize(bucketnormals[bucket]);
float3 tangent0 = normalize(findArbitraryTangent(bucketnormal));
float3 tangent1 = cross(bucketnormal, tangent0);
CoordinateSystem bucketSpace = {
mult(tangent0, tangentScaling),
mult(bucketnormal, bucketNormalScaling),
mult(tangent1, tangentScaling)
};
moveToCoordSpace(unit, bucketSpace, devIndexed, numElements, devIndexed);
thrust::device_ptr<float3idx> dev_ptr = thrust::device_pointer_cast(devIndexed);
float3pairidx xpair = thrust::minmax_element(dev_ptr, dev_ptr + numElementsInside, compare_x_idx());
float3pairidx ypair = thrust::minmax_element(dev_ptr, dev_ptr + numElementsInside, compare_y_idx());
float3pairidx zpair = thrust::minmax_element(dev_ptr, dev_ptr + numElementsInside, compare_z_idx());
float3idx xmin = *(xpair.first);
float3idx xmax = *(xpair.second);
float minx = xmin.value.x;
float maxx = xmax.value.x;
float3idx ymin = *(ypair.first);
float3idx ymax = *(ypair.second);
float miny = ymin.value.y;
float maxy = ymax.value.y;
float3idx zmin = *(zpair.first);
float3idx zmax = *(zpair.second);
float minz = zmin.value.z;
float maxz = zmax.value.z;
float3 mins = {minx, miny, minz};
float maxlen = fmax(maxx - minx, fmax(maxy - miny, maxz - minz));
scaleValues(devIndexed, devIndexed, numElements, mins, maxlen, false);
uint32_t *devReverseIndices = devMarks;
reverseIndices(devIndexed, numElements, devReverseIndices);
for (int j = 0; j < 5; ++j) {
float shift = j*0.05;
uint32_t intShift = (uint32_t) (shift * (1 << 21));
getMortons(devIndexed, intShift, devMortons, numData, numQueriesInside);
thrust::sort_by_key(thrust::device, devMortons, devMortons + numElementsInside, devIndexed);
createPrefixList(devIndexed, devPrefixQueryIndex, numData, numElementsInside);
pointCompaction(devIndexed, devMortons, devPrefixQueryIndex, devReverseIndices, devData, devQueryIndices, numData, numElementsInside);
if(j == 0) {
findCandidatesEllipsoid(
devQueryIndices,
devIndexed,
devData,
devNearest,
k,
numQueriesInside,
numData,
normalScaling,
tangentScaling,
bucketSpace,
devQueryNormals,
lambda
);
} else {
mergeStepEllipsoid(
devNearest,
devIndexed,
devData,
devQueryIndices,
k,
numQueriesInside,
numData,
normalScaling,
tangentScaling,
bucketSpace,
devQueryNormals,
intShift,
lambda
);
}
}
scaleValues(devIndexed, devIndexed, numElements, mins, maxlen, true);
moveToCoordSpace(bucketSpace, unit, devIndexed, numElements, devIndexed);
}
gettimeofday(&tval_after, NULL);
timersub(&tval_after, &tval_before, &tval_result);
int64_t seconds = (int64_t) tval_result.tv_sec;
int64_t micros = (int64_t) tval_result.tv_usec;
uint64_t ms = seconds * 1000 + (micros / 1000);
if(ms == 0) {
ms = 1;
}
uint64_t qsperms = numQueries / ms;
printf("%d,%d,%u,%lu\n", numData, numQueries, k, qsperms);
err = cudaMemcpy(nearest, devNearest, nearestSize, cudaMemcpyDeviceToHost);
handleError(err, __LINE__);
// for(int i = 0; i < numQueries; i++){
// float3 query = values[i+numData];
// fprintf(stderr, "%u ", i);
// for(int j = 0; j< k; j++){
// uint32_t valueIndex = (uint32_t) nearest[i*k+j];
// //if(valueIndex > numData) {
// fprintf(stderr, "%u ", (uint32_t) nearest[k*i+j]);
// //}
// //float3 value = values[valueIndex];
// //fprintf(stderr,"%u(%u) (%f,%f,%f) - ", (uint32_t) nearest[i*k+j], (uint32_t) (nearest[i*k+j] >> 32), value.x, value.y, value.z);
// }
// fprintf(stderr,"\n");
// }
fprintf(stderr, "Time elapsed: %ld.%06ld\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);
// Free device memory
err = cudaFree(devValues);
handleError(err, __LINE__);
err = cudaFree(devMortons);
handleError(err, __LINE__);
err = cudaFree(devPrefixQueryIndex);
handleError(err, __LINE__);
err = cudaFree(devQueryIndices);
handleError(err, __LINE__);
err = cudaFree(devData);
handleError(err, __LINE__);
err = cudaFree(devIndexed);
handleError(err, __LINE__);
err = cudaFree(devNearest);
handleError(err, __LINE__);
err = cudaFree(devMarks);
handleError(err, __LINE__);
// Free host memory
err = cudaDeviceReset();
handleError(err, __LINE__);
return EXIT_SUCCESS;
}
// Computes candidate k-nearest-neighbor ids for each query point among the
// data points. `values` holds numData data points followed by numQueries
// query points (numElements total); packed results (k uint64 entries per
// query) are written to `nearest`, which must hold numQueries * k elements.
// Runs 5 passes over progressively shifted Morton-code grids: the first pass
// gathers candidates, later passes merge improvements found under the shift.
// handleError() is expected to abort on any CUDA failure; returns EXIT_SUCCESS.
int nearestNeighbors(int numData, int numQueries, uint32_t k, float3 *values, uint64_t *nearest, const uint32_t lambda) {
    int numElements = numData + numQueries;
    // Host-side scratch buffers (allocated here, freed at the end).
    size_t dataSize = numData * sizeof(uint4);
    uint4 *data = (uint4 *) malloc(dataSize);
    size_t qiSize = numQueries * sizeof(uint64_t);
    uint64_t *queryIndices = (uint64_t *) malloc(qiSize);
    size_t nearestSize = numQueries * k * sizeof(uint64_t);
    size_t mortonSize = numElements * sizeof(uint64_t);
    uint64_t *mortons = (uint64_t *) malloc(mortonSize);
    size_t prefixSize = numElements * sizeof(uint32_t);
    size_t intSize = numElements * sizeof(uint4);
    cudaError_t err = cudaSuccess;
    // Device buffers.
    size_t valueSize = numElements * sizeof(float3);
    float3 *devValues = NULL;
    err = cudaMalloc((void **) &devValues, valueSize);
    handleError(err, __LINE__);
    float3 *uniqueValues = NULL;
    err = cudaMalloc((void **) &uniqueValues, valueSize);
    handleError(err, __LINE__);
    uint4 *devIntValues = NULL;
    err = cudaMalloc((void **) &devIntValues, intSize);
    handleError(err, __LINE__);
    uint64_t *devMortons = NULL;
    err = cudaMalloc((void **) &devMortons, mortonSize);
    handleError(err, __LINE__);
    uint64_t *uniqueMortons = NULL;
    err = cudaMalloc((void **) &uniqueMortons, mortonSize);
    handleError(err, __LINE__);
    uint32_t *devPrefixQueryIndex = NULL;
    err = cudaMalloc((void **) &devPrefixQueryIndex, prefixSize);
    handleError(err, __LINE__);
    uint64_t *devQueryIndices = NULL;
    err = cudaMalloc((void **) &devQueryIndices, qiSize);
    handleError(err, __LINE__);
    uint4 *devData = NULL;
    err = cudaMalloc((void **) &devData, dataSize);
    handleError(err, __LINE__);
    uint64_t *devNearest = NULL;
    err = cudaMalloc((void **) &devNearest, nearestSize);
    handleError(err, __LINE__);
    err = cudaMemcpy(devValues, values, valueSize, cudaMemcpyHostToDevice);
    handleError(err, __LINE__);
    struct timeval tval_before, tval_after, tval_result;
    gettimeofday(&tval_before, NULL);
    // Global bounding box of all points, used to normalize into Morton space.
    thrust::device_ptr<float3> dev_ptr = thrust::device_pointer_cast(devValues);
    float3pair xpair = thrust::minmax_element(dev_ptr, dev_ptr + numElements, compare_x());
    float3pair ypair = thrust::minmax_element(dev_ptr, dev_ptr + numElements, compare_y());
    float3pair zpair = thrust::minmax_element(dev_ptr, dev_ptr + numElements, compare_z());
    float3 xmin = *(xpair.first);
    float3 xmax = *(xpair.second);
    float minx = xmin.x;
    float maxx = xmax.x;
    float3 ymin = *(ypair.first);
    float3 ymax = *(ypair.second);
    float miny = ymin.y;
    float maxy = ymax.y;
    float3 zmin = *(zpair.first);
    float3 zmax = *(zpair.second);
    float minz = zmin.z;
    float maxz = zmax.z;
    float3 mins = {minx, miny, minz};
    float maxlen = fmax(maxx - minx, fmax(maxy - miny, maxz - minz));
    printf("%f\n", maxlen);
    // Five shifted-grid passes.
    for(int j = 0; j < 5; ++j) {
        fprintf(stderr, "Iteration: %d\n",j);
        float shift = j*0.05;
        uint32_t intShift = (uint32_t) (shift * (1 << 21));
        scaleValues(devValues, devIntValues, intShift, numElements, mins, maxlen, false);
        getMortonsOld(devIntValues, devMortons, numData, numQueries);
        uint64_t *result_end = thrust::unique_copy(thrust::device, devMortons, devMortons + numElements, uniqueMortons);
        fprintf(stderr, "Num duplicates %lu\n", numElements - (result_end - uniqueMortons));
        //sort values in morton code order
        thrust::sort_by_key(thrust::device, devMortons, devMortons + numElements, devIntValues);
        createPrefixList(devIntValues, devPrefixQueryIndex, numData, numElements);
        pointCompactionOld(devIntValues, devMortons, devPrefixQueryIndex, devData, devQueryIndices, numData, numElements);
        if(j == 0) {
            findCandidates(devQueryIndices, devIntValues, devValues, devData, devNearest, k, numQueries, numData, lambda);
        } else {
            mergeStep(devNearest, devIntValues, devValues, devData, devQueryIndices, k, numQueries, numData, lambda);
        }
        scaleValues(devValues, devIntValues, intShift, numElements, mins, maxlen, true);
    }
    gettimeofday(&tval_after, NULL);
    timersub(&tval_after, &tval_before, &tval_result);
    int64_t seconds = (int64_t) tval_result.tv_sec;
    int64_t micros = (int64_t) tval_result.tv_usec;
    uint64_t ms = seconds * 1000 + (micros / 1000);
    // BUG FIX: sub-millisecond runs made ms == 0 and divided by zero below.
    // The ellipsoid variant of this routine already carried this guard.
    if(ms == 0) {
        ms = 1;
    }
    uint64_t qsperms = numQueries / ms;
    printf("%d,%d,%u,%lu\n", numData, numQueries, k, qsperms);
    err = cudaMemcpy(nearest, devNearest, nearestSize, cudaMemcpyDeviceToHost);
    handleError(err, __LINE__);
    fprintf(stderr, "Time elapsed: %ld.%06ld\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);
    // Free device memory. uniqueValues and uniqueMortons were previously
    // allocated but never released (leak until cudaDeviceReset).
    err = cudaFree(devValues);
    handleError(err, __LINE__);
    err = cudaFree(uniqueValues);
    handleError(err, __LINE__);
    err = cudaFree(devIntValues);
    handleError(err, __LINE__);
    err = cudaFree(devMortons);
    handleError(err, __LINE__);
    err = cudaFree(uniqueMortons);
    handleError(err, __LINE__);
    err = cudaFree(devPrefixQueryIndex);
    handleError(err, __LINE__);
    err = cudaFree(devQueryIndices);
    handleError(err, __LINE__);
    err = cudaFree(devData);
    handleError(err, __LINE__);
    err = cudaFree(devNearest);
    handleError(err, __LINE__);
    // Free host memory
    free(data);
    free(queryIndices);
    free(mortons);
    err = cudaDeviceReset();
    handleError(err, __LINE__);
    return EXIT_SUCCESS;
}
/*
void calculateBounds(float3 *values, int numData, int numQuery, int dataElems, int end, float &minx, float &miny, float &minz, float &maxlen) {
minx = FLT_MAX;
miny = FLT_MAX;
minz = FLT_MAX;
float maxx = FLT_MIN;
float maxy = FLT_MIN;
float maxz = FLT_MIN;
for(int i = 0; i < numData; ++i) {
if(i >= dataElems) {
break;
}
if(values[i].x < minx) {
minx = values[i].x;
}
if(values[i].x > maxx) {
maxx = values[i].x;
}
if(values[i].y < miny) {
miny = values[i].y;
}
if(values[i].y > maxy) {
maxy = values[i].y;
}
if(values[i].z < minz) {
minz = values[i].z;
}
if(values[i].z > maxz) {
maxz = values[i].z;
}
}
for(int j = 0; j < numQuery; ++j) {
int i = j + dataElems;
if(i >= end) {
break;
}
if(values[i].x < minx) {
minx = values[i].x;
}
if(values[i].x > maxx) {
maxx = values[i].x;
}
if(values[i].y < miny) {
miny = values[i].y;
}
if(values[i].y > maxy) {
maxy = values[i].y;
}
if(values[i].z < minz) {
minz = values[i].z;
}
if(values[i].z > maxz) {
maxz = values[i].z;
}
}
float xl, yl, zl;
xl = maxx-minx;
yl = maxy-miny;
zl = maxz-minz;
maxlen = fmax(xl,fmax(yl, zl));
}
*/
|
19,884 | #include <unistd.h>
#include <stdio.h>
#include<cmath>
#include <iostream>
#include <fstream>
#include <curand.h>
#include <curand_kernel.h>
using namespace std;
#define N 100000
#define MAX 2000
#define two_pi 2.0*3.14159265358979323846
void streamOut (float *uniform_hostNums, float *gaussian_hostNums1 , float *gaussian_hostNums2);
// kernel takes array of states and seed and change in the device array of random numbers
// Initializes one curand state per block (block id as the sequence number so
// each block gets an independent stream) and draws a single uniform sample.
// Expects a launch of <<<N, 1>>>: one block per output element.
__global__ void uniform_randoms(unsigned int seed, curandState_t* states, float* uniform_random_numbers) {
    const int id = blockIdx.x;
    // Seed must differ between runs for a different sequence of numbers.
    curand_init(seed, id, 0, &states[id]);
    // curand_uniform returns a float in (0, 1].
    uniform_random_numbers[id] = curand_uniform(&states[id]);
}
// Splits the N uniform samples into two halves: the first N/2 go to
// uniform_deviceNums1, the second N/2 to uniform_deviceNums2.
// Expects a launch of <<<N, 1>>>: one block per input element.
__global__ void uniform_random_distribution(float* uniform_random_numbers, float *uniform_deviceNums1 , float *uniform_deviceNums2)
{
    const int id = blockIdx.x;
    if (id < N / 2) {
        uniform_deviceNums1[id] = uniform_random_numbers[id];
    } else if (id < N) {
        uniform_deviceNums2[id - N / 2] = uniform_random_numbers[id];
    }
}
// Box-Muller transform: turns two independent uniform samples into two
// independent standard-normal samples (cos branch for the first half of
// blocks, sin branch for the second half).
// Expects a launch of <<<N, 1>>>. Inputs come from curand_uniform, which is
// in (0, 1], so logf never receives 0.
// FIX: the original used the double-precision sqrt/log/cos/sin on float
// operands, forcing a silent float->double->float round trip on every lane;
// the single-precision intrinsics are the idiomatic choice here.
__global__ void gaussian_random_distribution(float *uniform_deviceNums1,float *uniform_deviceNums2, float * gaussian_random_numbers1, float * gaussian_random_numbers2)
{
    if (blockIdx.x < N/2){
        gaussian_random_numbers1[blockIdx.x] =
            sqrtf(-2.0f * logf(uniform_deviceNums1[blockIdx.x])) * cosf((float)(two_pi) * uniform_deviceNums2[blockIdx.x]);
    }
    else if (blockIdx.x < N){
        gaussian_random_numbers2[blockIdx.x - N/2] =
            sqrtf(-2.0f * logf(uniform_deviceNums1[blockIdx.x - N/2])) * sinf((float)(two_pi) * uniform_deviceNums2[blockIdx.x - N/2]);
    }
}
// Draws N uniform random numbers on the GPU, splits them into two halves,
// applies the Box-Muller transform to get N/2 + N/2 gaussian samples, then
// writes all results to randomNumbers.csv via streamOut.
int main() {
    const size_t half = N / 2;
    // Per-block RNG state on the device.
    curandState_t* rngStates;
    cudaMalloc((void**) &rngStates, N * sizeof(curandState_t));
    // Host result buffers.
    float *hostUniform = (float*) malloc(sizeof(float) * N);
    float *hostGauss1  = (float*) malloc(sizeof(float) * half);
    float *hostGauss2  = (float*) malloc(sizeof(float) * half);
    // Device buffers: full uniform array, its two halves, and the two
    // gaussian output halves.
    float *devUniform = NULL;
    cudaMalloc((void**) &devUniform, N * sizeof(float));
    float *devUniform1;
    cudaMalloc((void**) &devUniform1, half * sizeof(float));
    float *devUniform2;
    cudaMalloc((void**) &devUniform2, half * sizeof(float));
    float *devGauss1;
    cudaMalloc((void**) &devGauss1, half * sizeof(float));
    float *devGauss2;
    cudaMalloc((void**) &devGauss2, half * sizeof(float));
    // One block per sample: draw uniforms, split them, Box-Muller them.
    uniform_randoms<<<N,1>>>(time(0), rngStates, devUniform);
    uniform_random_distribution<<<N,1>>>(devUniform, devUniform1, devUniform2);
    gaussian_random_distribution<<<N,1>>>(devUniform1, devUniform2, devGauss1, devGauss2);
    // Copy everything back (blocking copies also synchronize the kernels).
    cudaMemcpy(hostUniform, devUniform, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(hostGauss1, devGauss1, half * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(hostGauss2, devGauss2, half * sizeof(float), cudaMemcpyDeviceToHost);
    streamOut(hostUniform, hostGauss1, hostGauss2);
    // Release device then host resources.
    cudaFree(rngStates);
    cudaFree(devUniform);
    cudaFree(devGauss1);
    cudaFree(devGauss2);
    cudaFree(devUniform2);
    cudaFree(devUniform1);
    free(hostUniform);
    free(hostGauss1);
    free(hostGauss2);
    return 0;
}
// Writes the N uniform samples followed by the two N/2-long gaussian halves
// to randomNumbers.csv, one value per line. Prints a message on open failure.
void streamOut(float *uniform_hostNums, float *gaussian_hostNums1 , float *gaussian_hostNums2)
{
    std::ofstream resultFile;
    resultFile.open("randomNumbers.csv");
    if (!resultFile.is_open())
    {
        std::cout << "Unable to open file";
        return;
    }
    for (int i = 0; i < N; i++)
        resultFile << uniform_hostNums[i] << endl;
    for (int i = 0; i < N/2; i++)
        resultFile << gaussian_hostNums1[i] << endl;
    for (int i = 0; i < N/2; i++)
        resultFile << gaussian_hostNums2[i] << endl;
    resultFile.close();
}
|
19,885 | #include "includes.h"
// Doubles every even element of v in place; odd elements are untouched.
// One element per thread; the kernel uses only threadIdx.x, so it assumes a
// single-block launch.
// FIX: the `size` parameter was accepted but never used — any launch with
// more threads than elements read and wrote past the end of v. Guard added.
__global__ void ifpairmabite( int * v, std::size_t size )
{
    // Thread id within the block ( 0 -> blockDim.x-1 ).
    auto tid = threadIdx.x;
    // Bounds check: extra threads in the tail do nothing.
    if (tid >= size)
        return;
    // Each thread handles a single element of the array.
    if (!(v[tid] % 2))
        v[ tid ] *= 2;
}
19,886 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <cuda.h>
#include <chrono>
#include <vector>
#define BLOCK_SIZE 16
using namespace std::chrono;
using std::cout;
using std::endl;
//Matrix struct from NIVDIA's CUDA programming guide
typedef struct{
int width;
int height;
float* elements;
}Matrix;
//printer helper function I used to verfiy the output is correct
// Debug helper: prints an N x N row-major matrix, one row per line,
// each entry formatted as "%5.1f ".
void printMatrix(float* toPrint,int N){
    for (int row = 0; row < N; ++row){
        for (int col = 0; col < N; ++col)
            printf("%5.1f ", toPrint[row * N + col]);
        printf("\n");
    }
}
// Debug helper: prints a vector of `length` floats on a single line,
// each entry formatted as "%5.1f ".
void printVector(float* toPrint, int length){
    for (int idx = 0; idx < length; ++idx)
        printf("%5.1f ", toPrint[idx]);
    printf("\n");
}
// CPU reference: out = A * B, where A is a length x length row-major matrix
// and B is a vector of `length` floats.
void matrixVector(float* A, float* B, float* out, int length){
    for (int row = 0; row < length; ++row){
        float acc = 0;
        for (int col = 0; col < length; ++col)
            acc += A[row * length + col] * B[col];
        out[row] = acc;
    }
}
// Benchmarks the CPU matrix-vector multiply: builds an N x N matrix of ones
// and a ones vector, times matrixVector, and prints the elapsed nanoseconds.
int main(){
    // Matrix dimension; assignment calls for 16, 128, 1024, 2048, 8192.
    int N = 16 * 512;
    // Host matrix (N x N, row-major) and input vector, both filled with ones.
    float *A = (float*)malloc(N * N * sizeof(float));
    float *B = (float*)malloc(N * sizeof(float));
    for (int i = 0; i < N * N; i++){
        A[i] = 1.0f;
        if (i < N){
            B[i] = 1.0f;
        }
    }
    // Result vector for A * B.
    float *out = (float*)malloc(N * sizeof(float));
    // Time only the multiply itself, in nanoseconds.
    auto start = high_resolution_clock::now();
    matrixVector(A, B, out, N);
    auto stop = high_resolution_clock::now();
    auto duration = duration_cast<nanoseconds>(stop - start);
    cout << duration.count() << endl;
    //printMatrix(A,N);
    //printVector(B,N);
    //printVector(out,N);
    free(A);
    free(B);
    free(out);
}
|
19,887 | #include <iostream>
// includes CUDA Runtime
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
// maybe you need also helpers
/*
written by George Strauch on 4/19/2020
c++ program for matrix multiply using 1d arrays on the GPU
the GPU makes use of parallelism to make processes like this much faster
This implementation only uses square matrices as they are much
easier to debug, calculate and work with, however all functions can work with
non-square matrices too.
this implementation uses a block for every column and every element is computed
by a different thread
This program uses shared memory between the host CPU and the GPU.
using dedicated device memory can make the program run faster however its
make it much more difficult to work with certain datatypes such as the
struct to represent a matrix.
Execution syntax:
$ ./exec {int matrix_size} {int print_option}
where the print option can be:
1: Prints the whole of each matrix for debugging
and best used with smaller matrices <= 10.
2: Shows only the first and last element of the result.
other or no option: does not print anything.
Example run:
$ nvcc gpu_mm_block_as_colvec.cu -arch='sm_35' -rdc=true -lineinfo -lcudadevrt -o gpumm
$ time ./gpumm 10 1
$ time ./gpumm 1000 2
$ nvprof ./gpu 500 2
*/
typedef long long int lli;
// struct to make working with matrices much easier
struct Matrix
{
lli *values;
int rows;
int cols;
};
// fills a matrix with values
// Fills the matrix so that each row holds 0, 1, ..., cols-1.
__host__
void fillMat(Matrix m)
{
    const size_t total = (size_t)m.rows * m.cols;
    for (size_t idx = 0; idx < total; ++idx)
        m.values[idx] = idx % m.cols;
}
// get a Matrix object with shared memory that can be accessed by the device
// Builds a rows x cols Matrix whose element storage is unified (managed)
// memory, accessible from both host and device.
// FIX: the original also cudaMallocManaged'd the Matrix descriptor itself,
// copied it out by value, and dropped the pointer — leaking one descriptor
// per call. Only the element array needs managed memory; kernels receive the
// Matrix struct by value.
__host__
Matrix get_shared(int rows, int cols)
{
    Matrix m;
    m.cols = cols;
    m.rows = rows;
    cudaMallocManaged(&m.values, (size_t)rows * cols * sizeof(lli));
    return m;
}
// calculate a single element of the matrix result of m1*m2
// res_x = res cols = m2 cols max
// res_y = res rows = m1 rows max
// common = m1_cols and m2_rows
// Computes one element of res = m1 * m2 per thread: blockIdx.x selects the
// output column, threadIdx.x the output row. Launched with res.cols blocks
// of res.rows threads (see main).
__global__
void matmul(Matrix m1, Matrix m2, Matrix res)
{
// Flat output index: row-major with threadIdx.x as the row.
// NOTE(review): this uses blockDim.x as the row stride for both res and m1,
// which is only correct when blockDim.x == res.cols == m1.cols — true for
// the square N x N launch in main, but not in general. Confirm before
// reusing with non-square shapes or different launch configs.
int id = threadIdx.x*blockDim.x + blockIdx.x;
res.values[id] = 0;
// Dot product of m1's row (threadIdx.x) with m2's column (blockIdx.x).
for (size_t i = 0; i < m1.cols; i++) {
res.values[id] += m1.values[threadIdx.x*blockDim.x+i] * m2.values[(i*m2.cols)+blockIdx.x];
}
}
// host side function to display matrix
// Prints the matrix row by row (space-separated entries), followed by a
// blank line.
__host__
void displayMatrix(Matrix mat)
{
    for (size_t r = 0; r < mat.rows; r++) {
        for (size_t c = 0; c < mat.cols; c++) {
            std::cout << mat.values[r*mat.cols + c] << ' ';
        }
        std::cout << '\n';
    }
    std::cout << '\n';
}
// frees memory
// Releases the matrix's element storage (allocated by get_shared); the
// Matrix descriptor itself is owned by the caller.
__host__
void free_matrix(Matrix mat)
{
cudaFree(mat.values);
}
// returns a copy of a matrix
// Deep copy: allocates a fresh shared matrix of the same shape and copies
// every element.
__host__
Matrix copyMatrix(Matrix m)
{
    Matrix dup = get_shared(m.rows, m.cols);
    const size_t count = (size_t)m.cols * m.rows;
    for (size_t i = 0; i < count; ++i)
        dup.values[i] = m.values[i];
    return dup;
}
// host side function to transpose
// Transposes mat in place: allocates a cols x rows matrix, copies elements
// transposed, frees the old storage, and rebinds the reference.
// FIX: the transposed matrix has mat.rows columns, so its row stride is
// mat.rows. The original indexed it with mat.cols (`b*mat.cols + a`), which
// is only correct for square matrices and corrupts the result otherwise.
__host__
void transpose(Matrix &mat)
{
    Matrix new_mat = get_shared(mat.cols, mat.rows);
    for (size_t a = 0; a < mat.rows; a++) {
        for (size_t b = 0; b < mat.cols; b++) {
            // Element (b, a) of new_mat; row stride is new_mat.cols == mat.rows.
            new_mat.values[b*mat.rows + a] = mat.values[a*mat.cols + b];
        }
    }
    free_matrix(mat);
    mat = new_mat;
}
// Driver: builds an N x N matrix, multiplies it by its transpose on the GPU
// (one block per result column, one thread per element), and optionally
// prints the result. See the file header for usage and print options.
int main(int argc, char const *argv[])
{
    // FIX: the matrix size is a required argument; the original dereferenced
    // argv[1] unconditionally and crashed when it was missing.
    if (argc < 2) {
        std::cout << "usage: " << argv[0] << " {matrix_size} [print_option]" << '\n';
        return 1;
    }
    int N = atoi(argv[1]);
    std::cout << "N: " << N << '\n';
    // cudaProfilerStart();
    Matrix t1 = get_shared(N, N);
    fillMat(t1);
    Matrix t2 = copyMatrix(t1);
    transpose(t2);
    Matrix res = get_shared(t1.rows, t2.cols);
    // Launch layout: one block per result column, one thread per result row.
    //------------------------------------------
    int threads_in_block = res.rows;
    int blocks = res.cols;
    //------------------------------------------
    std::cout << "\nstart" << '\n';
    matmul<<<blocks, threads_in_block>>>(t1, t2, res);
    // Wait for the GPU before touching the managed result on the host.
    cudaDeviceSynchronize();
    std::cout << "done\n" << '\n';
    // Optional output; see print options in the file header.
    if (argc > 2) {
        if (atoi(argv[2]) == 1) {
            std::cout << "matrix 1: " << '\n';
            displayMatrix(t1);
            std::cout << "matrix 2: " << '\n';
            displayMatrix(t2);
            std::cout << "result: " << '\n';
            displayMatrix(res);
        }
        else if (atoi(argv[2]) == 2) {
            std::cout << "first: " << res.values[0] << '\n';
            std::cout << "last: " << res.values[N*N-1] << '\n';
            std::cout << '\n';
        }
    }
    free_matrix(t1);
    free_matrix(t2);
    free_matrix(res);
    // cudaProfilerStop();
    return 0;
}
//
|
19,888 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
// Probes cudaMalloc3D over a sweep of extents and checks the pitch the
// driver chooses against an expected 512-byte rounding. Exits with -1 on an
// allocation failure or an unexpected pitch; reports whenever the pitch
// changes between consecutive allocations.
// NOTE(review): the expected-pitch formula (width/512 + 1)*512 over-rounds
// when width is an exact multiple of 512; the widths generated below
// (i+1 with i stepping by 100) never hit that case — confirm before
// changing the sweep.
int main(int argc,char* argv[]){
cudaError_t res;
int i,j,k;
// prevpitch starts at 512 so the first allocation only reports if the
// driver picks something other than the minimal 512-byte pitch.
size_t width,height,depth,prevpitch = 512;
size_t prevwidth=0,prevheight=0,prevdepth=0;
struct cudaPitchedPtr pitchedDevPtr;
struct cudaExtent extent;
// Sweep widths (bytes), heights, and depths.
for(i = 0 ; i < 10000 ; i += 100){
for(j = 0 ; j < 2000 ; j += 10){
for(k = 0 ; k < 2000 ; k += 10){
width = 1*(i+1);
height = 1*(j+1);
depth = 1*(k+1);
extent.width = width;
extent.height = height;
extent.depth = depth;
res = cudaMalloc3D(&pitchedDevPtr,extent);
if(res != cudaSuccess){
// Allocation failed: dump the extent and bail out.
printf("extent.width : %lu\n",extent.width);
printf("extent.height : %lu\n",extent.height);
printf("extent.depth : %lu\n",extent.depth);
printf("What ...?(%d)\n",res);
exit(-1);
}else{
// Report whenever the driver-selected pitch changes, along with the
// previous and current extents for comparison.
if(prevpitch != pitchedDevPtr.pitch){
printf("comfirm differing pitch\n");
printf("width:%lu\theight:%lu\tdepth:%lu\tpitch%lu\n",prevwidth,prevheight,prevdepth,pitchedDevPtr.pitch);
printf("width:%lu\theight:%lu\tdepth:%lu\tpitch%lu\n",width,height,depth,pitchedDevPtr.pitch);
prevpitch = pitchedDevPtr.pitch;
}
}
prevwidth = width;
prevheight = height;
prevdepth = depth;
cudaFree(pitchedDevPtr.ptr);
// Validate the pitch against 512-byte rounding of the row width.
size_t expected_pitch;
expected_pitch = (width/512 + 1)*512;
if(expected_pitch != pitchedDevPtr.pitch){
printf("Oh ...\n");
exit(-1);
}
/*
size_t expected_pitch;
// expected_pitch = extent.width*extent.height*extent.depth != 0 ? ((extent.width*extent.height*extent.depth-1)/(5120000)+1)*512 : 0;
expected_pitch = extent.width*extent.height != 0 ? ((extent.width*extent.height-1)/(51200*extent.depth)+1)*512 : 0;
if(expected_pitch != pitchedDevPtr.pitch){
// printf("extent.width : %lu\n",extent.width);
// printf("extent.height : %lu\n",extent.height);
// printf("extent.depth : %lu\n",extent.depth);
printf("width*height*depth : %lu\n",extent.width*extent.height*extent.depth);
// printf("\txsize : %lu\n",pitchedDevPtr.xsize);
// printf("\tysize : %lu\n",pitchedDevPtr.ysize);
printf("\tpitch : %lu\n",pitchedDevPtr.pitch);
// printf("\tptr : %p \n",pitchedDevPtr.ptr);
// printf("expected_pitch : %lu\n",expected_pitch);
// exit(1);
}else{
printf("OK *** i,j,k : %d,%d,%d\n",i,j,k);
}
*/
}
}
}
return 0;
}
|
19,889 | #include "includes.h"
// Forward pass of an RBM input layer: copies the input activation to the
// output, optionally adding the per-neuron bias. Indexed over a 2D grid of
// 1D blocks; threads past thisLayerSize do nothing.
// FIX: the original computed `result` (input + bias) but then wrote
// inputPtr[i] to the output, silently discarding the bias.
__global__ void RBMInputForwardKernel( float *inputPtr, float *outputPtr, float *biasPtr, bool applyBias, int thisLayerSize )
{
    // i: current neuron id
    int i = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
        + blockDim.x * blockIdx.x //blocks preceeding current block
        + threadIdx.x;
    if (i < thisLayerSize)
    {
        float result = inputPtr[i];
        if (applyBias)
            result += biasPtr[i];
        outputPtr[i] = result;
    }
}
19,890 |
#define BLOCK_SIZE 1024
//numRows = numCols since L is square
// y,x
#define L_Matrix(row,col) matL[((row)*numRows + (col))]
// One forward-substitution step for L*x = b (L lower triangular, row-major,
// accessed via the L_Matrix macro). Called once per row i with a 1D launch
// covering at least numRows threads: every thread folds the x solved in the
// previous iteration into its b entry, and the thread with idx == i then
// solves x[i] = b[i] / L[i][i].
__global__ void gpu_simple_solver_kernel(double* matL, double* vecX, double* vecB, int numRows, int i)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx >= numRows)
return;
//update the B value for every thread by subtracting off the known x (which was calculating last iteration)
//multiplied by the corresponding L element
if (i != 0)
vecB[idx] = vecB[idx] - L_Matrix(idx,i-1)*vecX[i-1];
if (idx == i)
{
vecX[i] = vecB[i] / L_Matrix(i,i);
}
}
// Normalizes the system by 2x2 diagonal tiles: each thread owns the 2x2 tile
// at (y, x) = (2*idy, 2*idx). The first thread column (x == 0) eliminates the
// sub-diagonal entry within its row pair of b and divides by the diagonal;
// strictly-lower tiles apply the same row operations to L.
// FIX: the original updated vecB *before* the bounds check, so threads with
// idy >= numRows/2 wrote past the end of vecB. The bounds check now comes
// first; in-bounds behavior is unchanged (threads with idx >= numRows/2
// never have x == 0, so no in-bounds vecB update is lost).
__global__ void gpu_square_update_kernel(double* matL, double* vecX, double* vecB, int numRows)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    int idy = blockIdx.y*blockDim.y+threadIdx.y;
    int y = idy*2;
    int x = idx*2;
    int top_tri_idx = y;
    // Bounds check must precede any global write (see FIX note above).
    if (idx >= numRows/2 || idy >= numRows/2)
        return;
    // First thread column normalizes the right-hand side for its row pair.
    if (x == 0)
    {
        vecB[y+1] = (vecB[y+1] - L_Matrix(top_tri_idx+1,top_tri_idx)/L_Matrix(top_tri_idx,top_tri_idx)*vecB[y])/L_Matrix(top_tri_idx+1,top_tri_idx+1);
        vecB[y] = vecB[y]/L_Matrix(top_tri_idx,top_tri_idx);
    }
    // Only strictly-lower 2x2 tiles update the matrix itself.
    if(idy <= idx)
        return;
    //element 1,0 (y,x) (row,col)
    L_Matrix(y+1,x) = (L_Matrix(y+1,x) - L_Matrix(top_tri_idx+1,top_tri_idx)/L_Matrix(top_tri_idx,top_tri_idx)*L_Matrix(y,x))/L_Matrix(top_tri_idx+1,top_tri_idx+1);
    //element 1,1 (y,x) (row,col)
    L_Matrix(y+1,x+1) = (L_Matrix(y+1,x+1) - L_Matrix(top_tri_idx+1,top_tri_idx)/L_Matrix(top_tri_idx,top_tri_idx)*L_Matrix(y,x+1))/L_Matrix(top_tri_idx+1,top_tri_idx+1);
    //element 0,0 (y,x) (row,col)
    L_Matrix(y,x) = L_Matrix(y,x)/L_Matrix(top_tri_idx,top_tri_idx);
    //element 0,1 (y,x) (row,col)
    L_Matrix(y,x+1) = L_Matrix(y,x+1)/L_Matrix(top_tri_idx,top_tri_idx);
}
// Substitution step i of the 2x2-tile solver: subtracts the contribution of
// the solved column pair (2i, 2i+1) of b from every row strictly below the
// solved tile. Expects a 1D launch covering at least numRows threads.
__global__ void gpu_square_solve_kernel_simple(double* matL, double* vecX, double* vecB, int numRows, int i)
{
    const int row = blockIdx.x*blockDim.x + threadIdx.x;
    const int col = i*2;
    // The column pair must exist and the row must lie below the solved tile.
    if (col >= numRows || row < (i+1)*2 || row >= numRows)
        return;
    const double contribution =
        L_Matrix(row, col)*vecB[col] + L_Matrix(row, col+1)*vecB[col + 1];
    vecB[row] -= contribution;
}
// Log-step elimination: with a solved band of width i, each surviving thread
// folds i already-solved columns into its element in one launch (i doubles
// between launches, see gpu_complex_solver_optimized). The last thread
// column (idx == numRows-1) updates vecB; all others update L.
// NOTE(review): the bounds check uses && (`idy >= numRows && idx >= numRows`),
// which only rejects threads out of range in *both* dimensions — the earlier
// masking conditions appear to cover the rest for power-of-two-friendly
// sizes, but confirm for numRows not divisible by the block size.
__global__ void gpu_square_solve_kernel_optimized(double* matL, double* vecX, double* vecB, int numRows, int i)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int idy = blockIdx.y*blockDim.y+threadIdx.y;
//eliminates all of the upper elements in matrix (amount increases as i increases)
if (idx >= (idy-(idy%i)-i) && idx < numRows-1)
return;
//eliminates the rows that we should not be modifying
if (idy % (i*2) < i)
return;
//bounds check for if a block goes outside of bounds
if (idy >= numRows && idx >= numRows)
return;
//update vecB using the last column of threads
if (idx == numRows-1)
{
double value = vecB[idy];
int offset = idy % i;
// Fold the i solved entries directly above this row's band.
for (int j = 0; j < i; j++)
{
int column = idy-offset-j-1;
value = value - vecB[column]*L_Matrix(idy,column);
//L_Matrix(idy,column) = 0;
}
vecB[idy] = value;
}
else //update the L matrix values
{
double value = L_Matrix(idy,idx);
int offset = idy % i;
for (int j = 0; j < i; j++)
{
int column = idy-offset-j-1;
value = value - L_Matrix(column,idx)*L_Matrix(idy,column);
}
L_Matrix(idy,idx) = value;
}
}
// Shared-memory variant of gpu_square_solve_kernel_optimized for band widths
// i >= 32: the i solved columns are processed in 32-wide tiles staged through
// two 32x32 shared buffers (row multipliers from L, and either b entries or
// a 32x32 tile of L). The last 32 thread columns handle vecB (only
// idx == numRows-1 actually writes); all other threads update L.
// NOTE(review): threads rejected by the early returns above never reach the
// __syncthreads() calls inside the loops below — this is only safe if those
// predicates are uniform across each 32x32 block for the launch configs
// used. Verify with compute-sanitizer's synccheck before changing grids.
__global__ void gpu_square_solve_kernel_optimized_sh(double* matL, double* vecX, double* vecB, int numRows, int i)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int idy = blockIdx.y*blockDim.y+threadIdx.y;
if (idx >= (idy-(idy%i)-i) && idx < numRows-32)
return;
if (idy % (i*2) < i)
return;
if (idy >= numRows && idx >= numRows)
return;
// Staging tiles: one for the operand (b entries or an L tile), one for the
// row multipliers taken from L.
__shared__ double dsb_row_elements[32][32];
__shared__ double dsb_row_multipliers[32][32];
int offset = idy % i;
//int sh_offset = idx % i;
if (idx >= numRows-32) //threads that do not modify the matrix
{
double value;
if (idx == numRows - 1)
value = vecB[idy];
// Walk the solved band in 32-column tiles.
for (int k = 0; k < i; k +=32)
{
__syncthreads();
//load values from shared memory
dsb_row_multipliers[threadIdx.y][threadIdx.x] = L_Matrix(idy, idy - offset - (32+k) + threadIdx.x);
if (idx == numRows - 1)
dsb_row_elements[threadIdx.y][0] = vecB[idy - offset - (32+k) + threadIdx.y];
__syncthreads();
if (idx == numRows - 1) // only use the last thread to update vec values
{
for (int j = 0; j < 32; j++)
{
value = value - dsb_row_multipliers[threadIdx.y][j]*dsb_row_elements[j][0];
}
}
}
if (idx == numRows - 1)
vecB[idy] = value;
}
else
{
//loop through tiles
double value = L_Matrix(idy,idx);
for (int k = 0; k < i; k +=32)
{
__syncthreads();
//load values from shared memory
dsb_row_elements[threadIdx.y][threadIdx.x] = L_Matrix(idy - offset - (32+k) + threadIdx.y, idx);
dsb_row_multipliers[threadIdx.y][threadIdx.x] = L_Matrix(idy, idy - offset - (32+k) + threadIdx.x);
__syncthreads();
// 32-wide inner product against the staged tile.
for (int j = 0; j < 32; j++)
value -= dsb_row_multipliers[threadIdx.y][j]*dsb_row_elements[j][threadIdx.x];
}
L_Matrix(idy,idx) = value;
}
}
// Sequential CPU reference: forward substitution for L*x = b, where L is
// lower triangular and accessed through the L_Matrix macro.
void cpu_solver(double* matL, double* vecX, double* vecB, int numRows)
{
    for (int row = 0; row < numRows; row++)
    {
        // Start from b and subtract the contributions of solved unknowns.
        double acc = vecB[row];
        for (int col = 0; col < row; col++)
            acc -= L_Matrix(row, col)*vecX[col];
        vecX[row] = acc / L_Matrix(row, row);
    }
}
// Naive GPU solver: one kernel launch per unknown; each launch folds the
// previously solved x into b and solves the next row.
void gpu_simple_solver(double* matL, double* vecX, double* vecB, int numRows)
{
    const unsigned int tpb = BLOCK_SIZE;
    const unsigned int nBlocks = (numRows - 1)/tpb + 1;
    for (int row = 0; row < numRows; row++)
        gpu_simple_solver_kernel<<<nBlocks, tpb>>>(matL, vecX, vecB, numRows, row);
}
// Two-phase GPU solver: normalize each 2x2 diagonal tile (and b), then run
// one substitution launch per 2x2 column pair. The solution accumulates in
// vecB and is copied to vecX for the verification code in main.cu.
void gpu_complex_solver(double* matL, double* vecX, double* vecB, int numRows)
{
    dim3 updateGrid((numRows/2-1)/32+1, (numRows/2-1)/32+1, 1);
    dim3 updateBlock(32, 32, 1);
    gpu_square_update_kernel<<<updateGrid, updateBlock>>>(matL, vecX, vecB, numRows);
    const unsigned int tpb = BLOCK_SIZE;
    const unsigned int nBlocks = (numRows - 1)/tpb + 1;
    for (int step = 0; step < (numRows / 2); step++)
    {
        gpu_square_solve_kernel_simple<<<nBlocks, tpb>>>(matL, vecX, vecB, numRows, step);
    }
    cudaMemcpy(vecX, vecB, numRows * sizeof(double), cudaMemcpyDeviceToDevice);
}
// Log-step GPU solver: after the 2x2 tile normalization, each elimination
// launch doubles the width of the solved band, so only O(log numRows)
// launches are needed. The solution accumulates in vecB and is copied to
// vecX for the verification code in main.cu.
void gpu_complex_solver_optimized(double* matL, double* vecX, double* vecB, int numRows)
{
    dim3 updateGrid((numRows/2-1)/32+1, (numRows/2-1)/32+1, 1);
    dim3 updateBlock(32, 32, 1);
    gpu_square_update_kernel<<<updateGrid, updateBlock>>>(matL, vecX, vecB, numRows);
    dim3 solveGrid((numRows-1)/32+1, (numRows-1)/32+1, 1);
    dim3 solveBlock(32, 32, 1);
    for (int band = 2; band < numRows; band *= 2)
    {
        gpu_square_solve_kernel_optimized<<<solveGrid, solveBlock>>>(matL, vecX, vecB, numRows, band);
    }
    cudaMemcpy(vecX, vecB, numRows * sizeof(double), cudaMemcpyDeviceToDevice);
}
// Same log-step schedule as gpu_complex_solver_optimized, but switches to
// the shared-memory elimination kernel once the band width reaches 32 (the
// tile size the shared kernel is written for). Solution ends up in vecB and
// is copied to vecX for the verification code in main.cu.
void gpu_complex_solver_optimized_sh(double* matL, double* vecX, double* vecB, int numRows)
{
    dim3 updateGrid((numRows/2-1)/32+1, (numRows/2-1)/32+1, 1);
    dim3 updateBlock(32, 32, 1);
    gpu_square_update_kernel<<<updateGrid, updateBlock>>>(matL, vecX, vecB, numRows);
    dim3 solveGrid((numRows-1)/32+1, (numRows-1)/32+1, 1);
    dim3 solveBlock(32, 32, 1);
    for (int band = 2; band < numRows; band *= 2)
    {
        if (band < 32)
            gpu_square_solve_kernel_optimized<<<solveGrid, solveBlock>>>(matL, vecX, vecB, numRows, band);
        else
            gpu_square_solve_kernel_optimized_sh<<<solveGrid, solveBlock>>>(matL, vecX, vecB, numRows, band);
    }
    cudaMemcpy(vecX, vecB, numRows * sizeof(double), cudaMemcpyDeviceToDevice);
}
#define L_Matrix_t(col,row) matL[((row)*numRows + (col))]
// Transposed-layout twin of gpu_square_update_kernel: identical 2x2 diagonal
// tile normalization, but L is accessed through the column-major L_Matrix_t
// accessor.
// FIX: like the row-major version, the original wrote vecB before the bounds
// check, so threads with idy >= numRows/2 wrote past the end of vecB. The
// bounds check now comes first; in-bounds behavior is unchanged.
__global__ void gpu_square_update_kernel_transposed(double* matL, double* vecX, double* vecB, int numRows)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    int idy = blockIdx.y*blockDim.y+threadIdx.y;
    int y = idy*2;
    int x = idx*2;
    int top_tri_idx = y;
    // Bounds check must precede any global write (see FIX note above).
    if (idx >= numRows/2 || idy >= numRows/2)
        return;
    // First thread column normalizes the right-hand side for its row pair.
    if (x == 0)
    {
        vecB[y+1] = (vecB[y+1] - L_Matrix_t(top_tri_idx+1,top_tri_idx)/L_Matrix_t(top_tri_idx,top_tri_idx)*vecB[y])/L_Matrix_t(top_tri_idx+1,top_tri_idx+1);
        vecB[y] = vecB[y]/L_Matrix_t(top_tri_idx,top_tri_idx);
    }
    // Only strictly-lower 2x2 tiles update the matrix itself.
    if(idy <= idx)
        return;
    //element 1,0 (y,x) (row,col)
    L_Matrix_t(y+1,x) = (L_Matrix_t(y+1,x) - L_Matrix_t(top_tri_idx+1,top_tri_idx)/L_Matrix_t(top_tri_idx,top_tri_idx)*L_Matrix_t(y,x))/L_Matrix_t(top_tri_idx+1,top_tri_idx+1);
    //element 1,1 (y,x) (row,col)
    L_Matrix_t(y+1,x+1) = (L_Matrix_t(y+1,x+1) - L_Matrix_t(top_tri_idx+1,top_tri_idx)/L_Matrix_t(top_tri_idx,top_tri_idx)*L_Matrix_t(y,x+1))/L_Matrix_t(top_tri_idx+1,top_tri_idx+1);
    //element 0,0 (y,x) (row,col)
    L_Matrix_t(y,x) = L_Matrix_t(y,x)/L_Matrix_t(top_tri_idx,top_tri_idx);
    //element 0,1 (y,x) (row,col)
    L_Matrix_t(y,x+1) = L_Matrix_t(y,x+1)/L_Matrix_t(top_tri_idx,top_tri_idx);
}
// Same substitution step as gpu_square_solve_kernel_simple, but reads L
// through the transposed (column-major) L_Matrix_t accessor.
__global__ void gpu_square_solve_kernel_simple_transposed(double* matL, double* vecX, double* vecB, int numRows, int i)
{
    const int row = blockIdx.x*blockDim.x + threadIdx.x;
    const int col = i*2;
    // The column pair must exist and the row must lie below the solved tile.
    if (col >= numRows || row < (i+1)*2 || row >= numRows)
        return;
    const double contribution =
        L_Matrix_t(row, col)*vecB[col] + L_Matrix_t(row, col+1)*vecB[col + 1];
    vecB[row] -= contribution;
}
// Transposed-layout twin of gpu_complex_solver: same two-phase schedule
// using the transposed kernels. Solution ends up in vecB and is copied to
// vecX for the verification code in main.cu.
void gpu_complex_solver_transposed(double* matL, double* vecX, double* vecB, int numRows)
{
    dim3 updateGrid((numRows/2-1)/32+1, (numRows/2-1)/32+1, 1);
    dim3 updateBlock(32, 32, 1);
    gpu_square_update_kernel_transposed<<<updateGrid, updateBlock>>>(matL, vecX, vecB, numRows);
    const unsigned int tpb = BLOCK_SIZE;
    const unsigned int nBlocks = (numRows - 1)/tpb + 1;
    for (int step = 0; step < (numRows / 2); step++)
    {
        gpu_square_solve_kernel_simple_transposed<<<nBlocks, tpb>>>(matL, vecX, vecB, numRows, step);
    }
    cudaMemcpy(vecX, vecB, numRows * sizeof(double), cudaMemcpyDeviceToDevice);
}
|
19,891 | #include <stdio.h>
#define N 1024000
// Increments one element of data per thread, with a guard for the tail.
__global__ void add(int *data) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N) {
        data[idx] += 1;
    }
}
// Increments every element of an N-int array 100 times on the GPU and
// prints the results.
// FIXES: (1) `int data[N]` put ~4 MB on the stack, overflowing the default
// stack limit on most systems — the buffer is now heap-allocated.
// (2) the original fixed grid of 32 blocks x 1024 threads covered only
// 32,768 of the N = 1,024,000 elements; the grid is now sized to cover N.
int main() {
    int *data = (int*)malloc(N * sizeof(int));
    int *dev_data;
    int i;
    if (data == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    // Allocate memory on the GPU.
    cudaMalloc((void**)&dev_data, N * sizeof(int));
    // Initialize data.
    for (i=0; i<N; i++) {
        data[i] = 0;
    }
    // Copy data to the GPU.
    cudaMemcpy(dev_data, data, N * sizeof(int), cudaMemcpyHostToDevice);
    // Enough blocks to cover all N elements (ceil division).
    int threads = 1024;
    int blocks = (N + threads - 1) / threads;
    for (i=0; i<100; i++) {
        add<<<blocks, threads>>>(dev_data);
    }
    cudaDeviceSynchronize();
    // Copy data from the GPU.
    cudaMemcpy(data, dev_data, N * sizeof(int), cudaMemcpyDeviceToHost);
    // Free memory allocated on the GPU.
    cudaFree(dev_data);
    for (i=0; i<N; i++) {
        printf("%d\n", data[i]);
    }
    free(data);
    return 0;
}
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#ifdef LARGE
#define RENDER_WIDTH 1024
#define RENDER_HEIGHT 1024
#else
#define RENDER_WIDTH 128
#define RENDER_HEIGHT 128
#endif
#define TILE_SIZE 16
#define STACK_CAPACITY 128
#define SHARED_MEM_CAP STACK_CAPACITY * RENDER_WIDTH * RENDER_HEIGHT
#define SPP 1024
#define RR_RATE 0.9
#define PI 3.1415926
// BMP Operation
// BMP file header. The 2-byte "BM" magic is written separately by
// save_image(), which keeps this struct naturally aligned (12 bytes) without
// packing pragmas.
typedef struct
{
unsigned int bfSize; // total file size in bytes (file bytes 2-5)
unsigned short bfReserved1; // reserved, must be 0 (bytes 6-7)
unsigned short bfReserved2; // reserved, must be 0 (bytes 8-9)
unsigned int bfOffBits; // offset from file start to pixel data (bytes 10-13)
} _BITMAPFILEHEADER;
// BMP info header (BITMAPINFOHEADER layout, 40 bytes).
typedef struct
{
unsigned int biSize; // size of this struct: 40 (file bytes 14-17)
int biWidth; // image width in pixels (bytes 18-21)
int biHeight; // image height in pixels (bytes 22-25)
unsigned short biPlanes; // number of planes, always 1 (bytes 26-27)
unsigned short biBitCount; // bits per pixel, 24 here (bytes 28-29)
unsigned int biCompression; // compression type, 0 = uncompressed (bytes 30-33)
unsigned int biSizeImage; // pixel data size; equals bfSize - bfOffBits (bytes 34-37)
int biXPelsPerMeter; // horizontal resolution, pixels/meter; 0 here (bytes 38-41)
int biYPelsPerMeter; // vertical resolution, pixels/meter; 0 here (bytes 42-45)
unsigned int biClrUsed; // palette colors used; 0 = all (bytes 46-49)
unsigned int biClrImportant; // important palette colors; 0 = all (bytes 50-53)
} _BITMAPINFOHEADER;
// Write a 24-bit uncompressed BMP named "RenderResult.bmp" from a packed RGB
// buffer of width*height*3 bytes.
// NOTE(review): BMP rows must be padded to a multiple of 4 bytes; this writer
// emits no padding, so it is only correct when width*3 is divisible by 4
// (true for the 128 / 1024 render widths used in this file).
__host__ void save_image(unsigned char* target_img, int width, int height)
{
    FILE* file_ptr = fopen("RenderResult.bmp", "wb+");
    // Fail loudly instead of passing NULL to fwrite (undefined behavior).
    if (file_ptr == NULL) {
        fprintf(stderr, "save_image: cannot open RenderResult.bmp for writing\n");
        return;
    }
    unsigned short fileType = 0x4d42; // "BM" magic, written separately for alignment
    _BITMAPFILEHEADER fileHeader;
    _BITMAPINFOHEADER infoHeader;
    fileHeader.bfSize = (width) * (height) * 3 + 54; // 54 = 2 + 12 + 40 header bytes
    fileHeader.bfReserved1 = 0;
    fileHeader.bfReserved2 = 0;
    fileHeader.bfOffBits = 54;
    infoHeader.biSize = 40;
    infoHeader.biWidth = width;
    infoHeader.biHeight = height;
    infoHeader.biPlanes = 1;
    infoHeader.biBitCount = 24;
    infoHeader.biCompression = 0;
    infoHeader.biSizeImage = (width) * (height) * 3;
    infoHeader.biXPelsPerMeter = 0;
    infoHeader.biYPelsPerMeter = 0;
    infoHeader.biClrUsed = 0;
    infoHeader.biClrImportant = 0;
    fwrite(&fileType, sizeof(unsigned short), 1, file_ptr);
    fwrite(&fileHeader, sizeof(_BITMAPFILEHEADER), 1, file_ptr);
    fwrite(&infoHeader, sizeof(_BITMAPINFOHEADER), 1, file_ptr);
    fwrite(target_img, sizeof(unsigned char), (height) * (width) * 3, file_ptr);
    fclose(file_ptr);
}
// 3D resources
// Scene triangle. (Note: the "Trianle" spelling is used consistently
// throughout this file and cannot be renamed without touching every user.)
struct Trianle {
float3 tri_a; // vertex A
float3 tri_b; // vertex B
float3 tri_c; // vertex C
float3 normal_line; // face normal (values in the scene tables appear pre-normalized)
bool is_light; // true for emitter triangles
float brdf_rate; // diffuse reflectance used by shade()
};
/*
// test scene
// Light triagles
#define LIGHT_TRI_COUNT 2
__constant__ float d_light_irradiance = 40;
// object triagles
// No BVH
#define BRDF_rate 0.6
#define OBJ_TRI_COUNT 24
Trianle h_scene_objects[] = {
// light tri
Trianle{float3{110, 110, 300}, float3{110, 190, 300}, float3{190, 110, 300}, float3{0, 0, -1}, true, BRDF_rate},
Trianle{float3{190, 110, 300}, float3{110, 190, 300}, float3{190, 190, 300}, float3{0, 0, -1}, true, BRDF_rate},
// Trianle{float3{110, 110, 301}, float3{110, 190, 301}, float3{190, 110, 301}, float3{0, 0, 1}, true, BRDF_rate},
// Trianle{float3{190, 110, 301}, float3{110, 190, 301}, float3{190, 190, 301}, float3{0, 0, 1}, true, BRDF_rate},
// internal box 100 * 100 * 30
// top
Trianle{float3{100, 100, 100}, float3{200, 100, 100}, float3{100, 200, 100}, float3{0, 0, 1}, false, BRDF_rate},
Trianle{float3{200, 100, 100}, float3{200, 200, 100}, float3{100, 200, 100}, float3{0, 0, 1}, false, BRDF_rate},
// bottom
Trianle{float3{100, 100, 70}, float3{200, 100, 70}, float3{100, 200, 70}, float3{0, 0, -1}, false, BRDF_rate},
Trianle{float3{200, 100, 70}, float3{200, 200, 70}, float3{100, 200, 70}, float3{0, 0, -1}, false, BRDF_rate},
// front
Trianle{float3{100, 100, 100}, float3{200, 100, 100}, float3{100, 100, 70}, float3{0, -1, 0}, false, BRDF_rate},
Trianle{float3{100, 100, 70}, float3{200, 100, 70}, float3{200, 100, 100}, float3{0, -1, 0}, false, BRDF_rate},
// behind
Trianle{float3{100, 200, 100}, float3{200, 200, 100}, float3{100, 200, 70}, float3{0, 1, 0}, false, BRDF_rate},
Trianle{float3{100, 200, 70}, float3{200, 200, 70}, float3{200, 200, 100}, float3{0, 1, 0}, false, BRDF_rate},
// left
Trianle{float3{100, 100, 100}, float3{100, 200, 100}, float3{100, 100, 70}, float3{-1, 0, 0}, false, BRDF_rate},
Trianle{float3{100, 100, 70}, float3{100, 200, 70}, float3{100, 200, 100}, float3{-1, 0, 0}, false, BRDF_rate},
// right
Trianle{float3{200, 100, 100}, float3{200, 200, 100}, float3{200, 100, 70}, float3{1, 0, 0}, false, BRDF_rate},
Trianle{float3{200, 100, 70}, float3{200, 200, 70}, float3{200, 200, 100}, float3{1, 0, 0}, false, BRDF_rate},
// general box 300 * 300 * 300.001
// top
Trianle{float3{0, 0, 300.001}, float3{0, 300, 300.001}, float3{300, 0, 300.001}, float3{0, 0, -1}, false, BRDF_rate},
Trianle{float3{0, 300, 300.001}, float3{300, 0, 300.001}, float3{300, 300, 300.001}, float3{0, 0, -1}, false, BRDF_rate},
// bottom
Trianle{float3{0, 0, 0}, float3{0, 300, 0}, float3{300, 0, 0}, float3{0, 0, 1}, false, BRDF_rate},
Trianle{float3{0, 300, 0}, float3{300, 0, 0}, float3{300, 300, 0}, float3{0, 0, 1}, false, BRDF_rate},
// left
Trianle{float3{0, 0, 0}, float3{0, 0, 300.001}, float3{0, 300, 300.001}, float3{1, 0, 0}, false, BRDF_rate},
Trianle{float3{0, 300, 300.001}, float3{0, 300, 0}, float3{0, 0, 0}, float3{1, 0, 0}, false, BRDF_rate},
// right
Trianle{float3{300, 0, 0}, float3{300, 0, 300.001}, float3{300, 300, 300.001}, float3{-1, 0, 0}, false, BRDF_rate},
Trianle{float3{300, 300, 300.001}, float3{300, 300, 0}, float3{300, 0, 0}, float3{-1, 0, 0}, false, BRDF_rate},
// behind
Trianle{float3{0, 300, 0}, float3{0, 300, 300.001}, float3{300, 300, 0}, float3{0, -1, 0}, false, BRDF_rate},
Trianle{float3{300, 300, 0}, float3{300, 300, 300.001}, float3{0, 300, 300.001}, float3{0, -1, 0}, false, BRDF_rate}
};
__constant__ Trianle d_scene_objects[OBJ_TRI_COUNT];
// camera position
__constant__ float3 d_camera_position = float3{150, -400, 150};
__constant__ float3 d_camera_direction = float3{0, 1, 0};
__constant__ float3 d_camera_up_direction = float3{0, 0, 1};
__constant__ float3 d_camera_left_direction = float3{1, 0, 0};
// 浮点精度考虑,设置较大焦距和成像平面
__constant__ float d_camera_focal_length = 200;
__constant__ float d_camera_width = 150;
__constant__ float d_camera_height = 150;
__constant__ float d_camera_pixel_width = 150.0 / RENDER_WIDTH;
__constant__ float d_camera_pixel_height= 150.0 / RENDER_HEIGHT;
*/
// Cornell box
// Scene tables. shade() treats the FIRST LIGHT_TRI_COUNT entries of the
// object array as the emitters, so light triangles must stay at the front.
#define LIGHT_TRI_COUNT 2
__constant__ float d_light_irradiance = 42;
#define BRDF_rate 0.74
#define OBJ_TRI_COUNT 32
// Trianle{float3{}, float3{}, float3{}, float3{}, false, BRDF_rate},
Trianle h_scene_objects[] = {
// Light triagles
// y = 548.799: presumably kept just below the ceiling plane (548.8) so the
// light is not coplanar with it -- TODO confirm.
Trianle{float3{343.0, 548.799, 227.0}, float3{343.0, 548.799, 332.0}, float3{213.0, 548.799, 332.0}, float3{0, -1, 0}, true, BRDF_rate},
Trianle{float3{343.0, 548.799, 227.0}, float3{213.0, 548.799, 227.0}, float3{213.0, 548.799, 332.0}, float3{0, -1, 0}, true, BRDF_rate},
// Floor
Trianle{float3{552.8, 0.0, 0.0}, float3{0.0, 0.0, 0.0}, float3{0.0, 0.0, 559.2}, float3{0, 1, 0}, false, BRDF_rate},
Trianle{float3{552.8, 0.0, 0.0}, float3{549.6, 0.0, 559.2}, float3{0.0, 0.0, 559.2}, float3{0, 1, 0}, false, BRDF_rate},
// Ceiling
Trianle{float3{556.0, 548.8, 0.0}, float3{556.0, 548.8, 559.2}, float3{0.0, 548.8, 559.2}, float3{0, -1, 0}, false, BRDF_rate},
Trianle{float3{556.0, 548.8, 0.0}, float3{0.0, 548.8, 0.0}, float3{0.0, 548.8, 559.2}, float3{0, -1, 0}, false, BRDF_rate},
// Back wall
Trianle{float3{549.6, 0.0, 559.2}, float3{0.0, 0.0, 559.2}, float3{0.0, 548.8, 559.2}, float3{0, 0, -1}, false, BRDF_rate},
Trianle{float3{549.6, 0.0, 559.2}, float3{556.0, 548.8, 559.2}, float3{0.0, 548.8, 559.2}, float3{0, 0, -1}, false, BRDF_rate},
// Right wall
Trianle{float3{0.0, 0.0, 559.2}, float3{0.0, 0.0, 0.0}, float3{0.0, 548.8, 0.0}, float3{1, 0, 0}, false, BRDF_rate},
Trianle{float3{0.0, 0.0, 559.2}, float3{0.0, 548.8, 559.2}, float3{0.0, 548.8, 0.0}, float3{1, 0, 0}, false, BRDF_rate},
// Left wall
Trianle{float3{552.8, 0.0, 0.0}, float3{549.6, 0.0, 559.2}, float3{556.0, 548.8, 559.2}, float3{-1, 0, 0}, false, BRDF_rate},
Trianle{float3{552.8, 0.0, 0.0}, float3{556.0, 548.8, 0.0}, float3{556.0, 548.8, 559.2}, float3{-1, 0, 0}, false, BRDF_rate},
// Short block
// Top
Trianle{float3{130.0, 165.0, 65.0}, float3{82.0, 165.0, 225.0}, float3{240.0, 165.0, 272.0}, float3{0, 1, 0}, false, BRDF_rate},
Trianle{float3{130.0, 165.0, 65.0}, float3{290.0, 165.0, 114.0}, float3{240.0, 165.0, 272.0}, float3{0, 1, 0}, false, BRDF_rate},
// Left
Trianle{float3{290.0, 0.0, 114.0}, float3{290.0, 165.0, 114.0}, float3{240.0, 165.0, 272.0}, float3{-0.9534, 0, -0.301709}, false, BRDF_rate},
Trianle{float3{290.0, 0.0, 114.0}, float3{240.0, 0.0, 272.0}, float3{240.0, 165.0, 272.0}, float3{-0.9534, 0, -0.301709}, false, BRDF_rate},
// Front
Trianle{float3{130.0, 0.0, 65.0}, float3{130.0, 165.0, 65.0}, float3{290.0, 165.0, 114.0}, float3{-0.292826, 0, -0.956166}, false, BRDF_rate},
Trianle{float3{130.0, 0.0, 65.0}, float3{290.0, 0.0, 114.0}, float3{290.0, 165.0, 114.0}, float3{-0.292826, 0, -0.956166}, false, BRDF_rate},
// Right
Trianle{float3{82.0, 0.0, 225.0}, float3{82.0, 165.0, 225.0}, float3{130.0, 165.0, 65.0}, float3{-0.957826, 0, -0.287348}, false, BRDF_rate},
Trianle{float3{82.0, 0.0, 225.0}, float3{130.0, 0.0, 65.0}, float3{130.0, 165.0, 65.0}, float3{-0.957826, 0, -0.287348}, false, BRDF_rate},
// Behind
Trianle{float3{240.0, 0.0, 272.0}, float3{240.0, 165.0, 272.0}, float3{82.0, 165.0, 225.0}, float3{-0.285121, 0, -0.958492}, false, BRDF_rate},
Trianle{float3{240.0, 0.0, 272.0}, float3{82.0, 0.0, 225.0}, float3{82.0, 165.0, 225.0}, float3{-0.285121, 0, -0.958492}, false, BRDF_rate},
// Tall block
// Top
Trianle{float3{423.0, 330.0, 247.0}, float3{265.0, 330.0, 296.0}, float3{314.0, 330.0, 456.0}, float3{0, 1, 0}, false, BRDF_rate},
Trianle{float3{423.0, 330.0, 247.0}, float3{472.0, 330.0, 406.0}, float3{314.0, 330.0, 456.0}, float3{0, 1, 0}, false, BRDF_rate},
// Left
Trianle{float3{423.0, 0.0, 247.0}, float3{423.0, 330.0, 247.0}, float3{472.0, 330.0, 406.0}, float3{0.955649, 0, -0.294508}, false, BRDF_rate},
Trianle{float3{423.0, 0.0, 247.0}, float3{472.0, 0.0, 406.0}, float3{472.0, 330.0, 406.0}, float3{0.955649, 0, -0.294508}, false, BRDF_rate},
// Behind
Trianle{float3{472.0, 0.0, 406.0}, float3{472.0, 330.0, 406.0}, float3{314.0, 330.0, 456.0}, float3{-0.301709, 0, -0.953400}, false, BRDF_rate},
Trianle{float3{472.0, 0.0, 406.0}, float3{314.0, 0.0, 456.0}, float3{314.0, 330.0, 456.0}, float3{-0.301709, 0, -0.953400}, false, BRDF_rate},
// Right
Trianle{float3{314.0, 0.0, 456.0}, float3{314.0, 330.0, 456.0}, float3{265.0, 330.0, 296.0}, float3{0.956166, 0, -0.292826}, false, BRDF_rate},
Trianle{float3{314.0, 0.0, 456.0}, float3{265.0, 0.0, 296.0}, float3{265.0, 330.0, 296.0}, float3{0.956166, 0, -0.292826}, false, BRDF_rate},
// Front
// NOTE(review): this is the only face whose two triangles carry OPPOSITE
// normals ({0.296209,0,0.955123} vs {-0.296209,0,-0.955123}); every other
// face pair shares one normal. shade() uses fabs() of the normal dot products
// for direct light, but the hemisphere-flip test reads the sign -- likely a
// sign typo in one row; confirm intended orientation.
Trianle{float3{265.0, 0.0, 296.0}, float3{265.0, 330.0, 296.0}, float3{423.0, 330.0, 247.0}, float3{0.296209, 0, 0.955123}, false, BRDF_rate},
Trianle{float3{265.0, 0.0, 296.0}, float3{423.0, 0.0, 247.0}, float3{423.0, 330.0, 247.0}, float3{-0.296209, 0, -0.955123}, false, BRDF_rate}
};
// Device-side copy of the scene, filled by cudaMemcpyToSymbol in main().
__constant__ Trianle d_scene_objects[OBJ_TRI_COUNT];
// camera position
__constant__ float3 d_camera_position = float3{278, 273, -800};
__constant__ float3 d_camera_direction = float3{0, 0, 1}; // view axis (+z)
__constant__ float3 d_camera_up_direction = float3{0, 1, 0};
__constant__ float3 d_camera_left_direction = float3{-1, 0, 0};
__constant__ float d_camera_focal_length = 3.5;
__constant__ float d_camera_width = 2.5;
__constant__ float d_camera_height = 2.5;
// Physical size of one pixel on the image plane.
__constant__ float d_camera_pixel_width = 2.5 / RENDER_WIDTH;
__constant__ float d_camera_pixel_height= 2.5 / RENDER_HEIGHT;
// Scalar triple product a . (b x c): positive/negative sign encodes the
// orientation of the (b, c) pair as seen along a.
__device__ inline float mixed_product(float3 vec_a, float3 vec_b, float3 vec_c)
{
    const float cross_x = vec_b.y * vec_c.z - vec_b.z * vec_c.y;
    const float cross_y = vec_b.z * vec_c.x - vec_b.x * vec_c.z;
    const float cross_z = vec_b.x * vec_c.y - vec_b.y * vec_c.x;
    return vec_a.x * cross_x + vec_a.y * cross_y + vec_a.z * cross_z;
}
// Component-wise difference opr1 - opr2.
__device__ inline float3 sub_float3(float3 opr1, float3 opr2)
{
    float3 diff;
    diff.x = opr1.x - opr2.x;
    diff.y = opr1.y - opr2.y;
    diff.z = opr1.z - opr2.z;
    return diff;
}
// Scale every component of vec by scalar.
__device__ inline float3 scalar_mult_float3(float3 vec, float scalar)
{
    float3 scaled;
    scaled.x = vec.x * scalar;
    scaled.y = vec.y * scalar;
    scaled.z = vec.z * scalar;
    return scaled;
}
// Dot product of two 3-vectors.
// Marked inline for consistency with the sibling helpers (sub_float3,
// add_float3, scalar_mult_float3, mixed_product), which all carry it.
__device__ inline float dot(float3 opr1, float3 opr2)
{
    return opr1.x * opr2.x + opr1.y * opr2.y + opr1.z * opr2.z;
}
// Component-wise sum opr1 + opr2.
__device__ inline float3 add_float3(float3 opr1, float3 opr2)
{
    float3 total;
    total.x = opr1.x + opr2.x;
    total.y = opr1.y + opr2.y;
    total.z = opr1.z + opr2.z;
    return total;
}
// Area of a triangle: |AB x AC| / 2.
__device__ float size(Trianle triangle)
{
    const float3 edge_ab = sub_float3(triangle.tri_b, triangle.tri_a);
    const float3 edge_ac = sub_float3(triangle.tri_c, triangle.tri_a);
    const float cross_x = edge_ab.y * edge_ac.z - edge_ab.z * edge_ac.y;
    const float cross_y = edge_ab.z * edge_ac.x - edge_ab.x * edge_ac.z;
    const float cross_z = edge_ab.x * edge_ac.y - edge_ab.y * edge_ac.x;
    return 0.5 * norm3df(cross_x, cross_y, cross_z);
}
// Cast a ray from src_point along 'direction' and find the nearest scene
// triangle it hits, excluding src_tri_idx (pass -1 to test all triangles).
// On return hit_obj_idx is the index of the nearest hit triangle, or -1.
// The returned point is only meaningful when hit_obj_idx > -1; it is left
// uninitialized otherwise, so callers must check the index first.
//
// Method: each triangle is projected ("shadowed") onto the plane through
// src_point perpendicular to the ray; a signed triple-product test decides
// whether the ray axis lies inside the projected triangle; the projection's
// barycentric coordinates are then reused on the ORIGINAL triangle to
// recover the 3D hit point and its distance along the ray.
__device__ float3 check_obj_hit(int src_tri_idx, float3 src_point, float3 direction, int& hit_obj_idx)
{
// normalize direction
float div_length = 1 / norm3df(direction.x, direction.y, direction.z);
float3 normal_direction = make_float3(direction.x * div_length, direction.y * div_length, direction.z * div_length);
hit_obj_idx = -1;
float3 hit_point;
float min_distance = 2147483647;
for (int i = 0; i < OBJ_TRI_COUNT; ++i) {
if (i == src_tri_idx) {
continue;
}
// make shadow
// Project the triangle's vertices onto the plane through src_point that is
// perpendicular to the (normalized) ray direction.
Trianle shadow_tri = Trianle{sub_float3(d_scene_objects[i].tri_a, scalar_mult_float3(normal_direction, dot(normal_direction, sub_float3(d_scene_objects[i].tri_a, src_point)))),
sub_float3(d_scene_objects[i].tri_b, scalar_mult_float3(normal_direction, dot(normal_direction, sub_float3(d_scene_objects[i].tri_b, src_point)))),
sub_float3(d_scene_objects[i].tri_c, scalar_mult_float3(normal_direction, dot(normal_direction, sub_float3(d_scene_objects[i].tri_c, src_point)))),
normal_direction};
// check in center
// All three triple products share a sign exactly when src_point lies inside
// the projected triangle.
float3 vec_pa = sub_float3(shadow_tri.tri_a, src_point);
float3 vec_pb = sub_float3(shadow_tri.tri_b, src_point);
float3 vec_pc = sub_float3(shadow_tri.tri_c, src_point);
float papb = mixed_product(normal_direction, vec_pa, vec_pb);
float pbpc = mixed_product(normal_direction, vec_pb, vec_pc);
float pcpa = mixed_product(normal_direction, vec_pc, vec_pa);
if ((papb > 0 && pbpc > 0 && pcpa > 0) || (papb < 0 && pbpc < 0 && pcpa < 0)) {
// in center
// get hit point
// get coordinary, reuse vec_pb ,vec_pc
vec_pb = sub_float3(shadow_tri.tri_b, shadow_tri.tri_a);
vec_pc = sub_float3(shadow_tri.tri_c, shadow_tri.tri_a);
vec_pa = sub_float3(src_point, shadow_tri.tri_a);
// NOTE(review): the barycentric solve below uses only the x/y components,
// so 'divider' can approach 0 when the projected triangle is (near)
// degenerate in the xy plane -- confirm this never occurs for this scene.
float divider = vec_pb.x * vec_pc.y - vec_pb.y * vec_pc.x;
float rate_a = (vec_pc.y * vec_pa.x - vec_pc.x * vec_pa.y) / divider;
float rate_b = (-vec_pb.y * vec_pa.x + vec_pb.x * vec_pa.y) / divider;
// Apply the same barycentric weights on the original triangle.
vec_pb = sub_float3(d_scene_objects[i].tri_b, d_scene_objects[i].tri_a);
vec_pc = sub_float3(d_scene_objects[i].tri_c, d_scene_objects[i].tri_a);
vec_pa.x = d_scene_objects[i].tri_a.x + rate_a * vec_pb.x + rate_b * vec_pc.x;
vec_pa.y = d_scene_objects[i].tri_a.y + rate_a * vec_pb.y + rate_b * vec_pc.y;
vec_pa.z = d_scene_objects[i].tri_a.z + rate_a * vec_pb.z + rate_b * vec_pc.z;
// Signed distance of the recovered point along the ray; <= 0 means the
// triangle lies behind the ray origin.
float distance = dot(sub_float3(vec_pa, src_point), normal_direction);
// printf("Rate : %f %f %f\n", rate_a, rate_b, distance / norm3df(vec_pa.x - src_point.x, vec_pa.y - src_point.y, vec_pa.z - src_point.z));
if (distance > 0) {
// printf("In Center : %f, %f, %f %f\n", papb, pbpc, pcpa, distance);
// ray will hit object
if (distance < min_distance) {
min_distance = distance;
hit_point = vec_pa;
hit_obj_idx = i;
}
}
}
}
// printf("Src : %d Dst : %d Direction : %f, %f, %f\n", src_tri_idx, hit_obj_idx, direction.x, direction.y, direction.z);
return hit_point;
}
// Same as check_obj_hit, but reports a hit only when the NEAREST surface
// along the ray is an emitter; otherwise hit_obj_idx is forced to -1.
__device__ float3 check_light_hit(int src_tri_idx, float3 src_point, float3 direction, int& hit_obj_idx)
{
    float3 hit_point = check_obj_hit(src_tri_idx, src_point, direction, hit_obj_idx);
    const bool hit_something = (hit_obj_idx > -1);
    if (hit_something && d_scene_objects[hit_obj_idx].is_light == false) {
        // Nearest hit is a regular surface: the light is occluded.
        hit_obj_idx = -1;
    }
    return hit_point;
}
/*
__device__ float shade_recurse(int object_idx, float3 src_point, float3 direction, curandState* curand_state)
{
// Contribution from the light source.
float l_dir = 0;
for (int i = 0; i < LIGHT_TRI_COUNT; ++i) {
// random select a point on light triangle
float rand_x = curand_uniform(curand_state);
float rand_y = curand_uniform(curand_state);
if (rand_x + rand_y > 1) {
rand_x = 1 - rand_x;
rand_y = 1 - rand_y;
}
float3 random_point = add_float3(d_scene_objects[i].tri_a, add_float3(scalar_mult_float3(sub_float3(d_scene_objects[i].tri_b, d_scene_objects[i].tri_a), rand_x), scalar_mult_float3(sub_float3(d_scene_objects[i].tri_c, d_scene_objects[i].tri_a), rand_y)));
// test block
float3 obj_light_direction = sub_float3(random_point, src_point);
int test_block_idx;
check_obj_hit(-1, src_point, obj_light_direction, test_block_idx);
// printf("Direction %f %f %f %d\n", obj_light_direction.x, obj_light_direction.y, obj_light_direction.z, test_block_idx);
if (test_block_idx == i) {
// printf("Hit Light!\n");
float direction_length_square = obj_light_direction.x * obj_light_direction.x + obj_light_direction.y * obj_light_direction.y + obj_light_direction.z * obj_light_direction.z;
l_dir += d_light_irradiance * BRDF_rate * dot(d_scene_objects[object_idx].normal_line, obj_light_direction) * -1 * dot(d_scene_objects[i].normal_line, obj_light_direction)
/ direction_length_square / direction_length_square * size(d_scene_objects[i]);
// printf("Shade %d %f %f\n", i, dot(d_light_triangle[i].normal_line, obj_light_direction), l_dir);
}
}
return l_dir;
// Contribution from other reflectors.
float l_indir = 0;
// test Russian Roulette
float rr_result = curand_uniform(curand_state);
if (rr_result < RR_RATE) {
// random select a ray from src_point
float cosine_theta = 2 * (curand_uniform(curand_state) - 0.5);
float sine_theta = sqrtf(1 - cosine_theta * cosine_theta);
float fai_value = 2 * PI * curand_uniform(curand_state);
float3 ray_direction = make_float3(sine_theta * cosf(fai_value), sine_theta * sinf(fai_value), cosine_theta);
if (dot(ray_direction, d_scene_objects[object_idx].normal_line) < 0) {
ray_direction.x *= -1;
ray_direction.y *= -1;
ray_direction.z *= -1;
cosine_theta *= -1;
}
int hit_obj_idx;
float3 hit_point = check_obj_hit(object_idx, src_point, ray_direction, hit_obj_idx);
if (hit_obj_idx > -1 && !d_scene_objects[hit_obj_idx].is_light) {
// printf("Hit Object!\n");
ray_direction.x *= -1;
ray_direction.y *= -1;
ray_direction.z *= -1;
l_indir = shade(hit_obj_idx, hit_point, ray_direction, curand_state) * BRDF_rate * dot(ray_direction, d_scene_objects[hit_obj_idx].normal_line) * 2 * PI / RR_RATE;
}
}
// printf("Shade %f\n", l_dir + l_indir);
return l_dir + l_indir;
}
*/
// Per-pixel path stacks in GLOBAL memory (too large for shared memory:
// SHARED_MEM_CAP = STACK_CAPACITY * RENDER_WIDTH * RENDER_HEIGHT floats).
// Each pixel owns a disjoint STACK_CAPACITY-slot window; see shade().
__device__ float stack_dir[SHARED_MEM_CAP];
__device__ float stack_indir_rate[SHARED_MEM_CAP];
// Iterative (stack-based) path tracing for one sample: returns the radiance
// leaving triangle object_idx at src_point toward 'direction'.
//
// Per bounce it computes next-event-estimation direct lighting (l_dir) and,
// with Russian-roulette survival RR_RATE, continues the walk while pushing
// (l_dir, indir_rate) onto the pixel's global-memory stack. After the walk
// ends, the stack is folded back front-to-back:
//   radiance = radiance * indir_rate[i] + l_dir[i].
// NOTE(review): there is no guard against stack_offset exceeding the pixel's
// STACK_CAPACITY window; with RR_RATE = 0.9 a path longer than 128 bounces
// is extremely unlikely but not impossible -- confirm this is acceptable.
__device__ float shade(int object_idx, float3 src_point, float3 direction, curandState* curand_state)
{
// __shared__ float stack_dir[SHARED_MEM_CAP];
// __shared__ float stack_indir_rate[SHARED_MEM_CAP];
// int stack_size = 0;
float l_dir = 0;
// Start of this pixel's private window in the global stacks.
int stack_offset = ((blockIdx.y * TILE_SIZE + threadIdx.y) * RENDER_WIDTH + (blockIdx.x * TILE_SIZE + threadIdx.x)) * STACK_CAPACITY;
int stack_ori = stack_offset;
float3 out_direction = direction; // use in BRDF, here is ignored.
float3 ray_src = src_point;
int src_object_idx = object_idx;
while (true) {
// Contribution from the light source.
l_dir = 0;
for (int i = 0; i < LIGHT_TRI_COUNT; ++i) {
// random select a point on light triangle
float rand_x = curand_uniform(curand_state);
float rand_y = curand_uniform(curand_state);
if (rand_x + rand_y > 1) {
// fold the sample back into the triangle (uniform over its area)
rand_x = 1 - rand_x;
rand_y = 1 - rand_y;
}
float3 random_point = add_float3(d_scene_objects[i].tri_a, add_float3(scalar_mult_float3(sub_float3(d_scene_objects[i].tri_b, d_scene_objects[i].tri_a), rand_x), scalar_mult_float3(sub_float3(d_scene_objects[i].tri_c, d_scene_objects[i].tri_a), rand_y)));
// test block
float3 obj_light_direction = sub_float3(random_point, ray_src);
int test_block_idx;
check_obj_hit(-1, ray_src, obj_light_direction, test_block_idx);
// printf("Direction %f %f %f %d\n", obj_light_direction.x, obj_light_direction.y, obj_light_direction.z, test_block_idx);
if (test_block_idx == i) {
// printf("Hit Light!\n");
// Unoccluded light sample: accumulate the area-form direct term
// (both cosines over squared distance, times the light's area).
float direction_length_square = obj_light_direction.x * obj_light_direction.x + obj_light_direction.y * obj_light_direction.y + obj_light_direction.z * obj_light_direction.z;
l_dir += d_light_irradiance * d_scene_objects[src_object_idx].brdf_rate * fabs(dot(d_scene_objects[src_object_idx].normal_line, obj_light_direction) * dot(d_scene_objects[i].normal_line, obj_light_direction))
/ direction_length_square / direction_length_square * size(d_scene_objects[i]);
}
}
// Contribution from other reflectors.
// test Russian Roulette
float rr_result = curand_uniform(curand_state);
if (rr_result < RR_RATE) {
float indir_rate = 0;
// random select a ray from src_point
float cosine_theta = 2 * (curand_uniform(curand_state) - 0.5);
float sine_theta = sqrtf(1 - cosine_theta * cosine_theta);
float fai_value = 2 * PI * curand_uniform(curand_state);
float3 ray_direction = make_float3(sine_theta * cosf(fai_value), sine_theta * sinf(fai_value), cosine_theta);
// Flip the uniform-sphere sample into the hemisphere on the same side of
// the surface as the outgoing direction.
if (dot(ray_direction, d_scene_objects[src_object_idx].normal_line) * dot(out_direction, d_scene_objects[src_object_idx].normal_line) < 0) {
ray_direction.x *= -1;
ray_direction.y *= -1;
ray_direction.z *= -1;
cosine_theta *= -1;
}
int hit_obj_idx;
float3 hit_point = check_obj_hit(src_object_idx, ray_src, ray_direction, hit_obj_idx);
if (hit_obj_idx > -1 && !d_scene_objects[hit_obj_idx].is_light) {
// printf("Hit Object!\n");
// Continue the walk from the hit point; push this bounce's terms.
ray_direction.x *= -1;
ray_direction.y *= -1;
ray_direction.z *= -1;
indir_rate = d_scene_objects[hit_obj_idx].brdf_rate * fabs(dot(ray_direction, d_scene_objects[hit_obj_idx].normal_line)) / RR_RATE;
src_object_idx = hit_obj_idx;
ray_src = hit_point;
out_direction = ray_direction;
stack_dir[stack_offset] = l_dir;
stack_indir_rate[stack_offset] = indir_rate;
++stack_offset;
}
else {
// stack_dir[stack_offset] = l_dir;
// stack_indir_rate[stack_offset] = indir_rate;
// ++stack_offset;
break;
}
}
else {
break;
}
}
// calc final irradiance
// Fold the stack back toward the first bounce; on loop entry l_dir holds the
// direct term of the last (unpushed) bounce.
for (int i = stack_offset - 1; i >= stack_ori; --i) {
// printf("%f %f\n", stack_indir_rate[i], stack_dir[i]);
l_dir *= stack_indir_rate[i];
l_dir += stack_dir[i];
}
return l_dir;
}
// Average SPP jittered camera rays through the pixel centered at
// pixel_center_position; returns the pixel's scalar radiance estimate.
// NOTE(review): curand_states is indexed by threadIdx.x ONLY, so threads that
// differ only in threadIdx.y share (and race on) the same curandState --
// confirm whether per-thread states were intended.
// NOTE(review): the sub-pixel jitter is added on the world x/z axes
// (make_float3(width_bias, 0, height_bias)), but this scene's camera up axis
// is +y -- looks like height_bias should follow d_camera_up_direction;
// confirm against the camera setup.
__device__ __forceinline__ float ray_generation(float3 pixel_center_position, curandState* curand_states)
{
float pixel_radiance = 0;
for (int i = 0; i < SPP; ++i) {
// Jitter uniformly within the pixel footprint.
float width_bias = d_camera_pixel_width * (curand_uniform(&curand_states[threadIdx.x]) - 0.5);
float height_bias = d_camera_pixel_height * (curand_uniform(&curand_states[threadIdx.x]) - 0.5);
int hit_obj_idx;
// printf("Pixel bias : %f %f\n", width_bias, height_bias);
float3 ray_direction = sub_float3(add_float3(pixel_center_position, make_float3(width_bias, 0, height_bias)), d_camera_position);
float3 hit_light_point = check_light_hit(-1, d_camera_position, ray_direction, hit_obj_idx);
if (hit_obj_idx > -1) {
// printf("Ray Hit!\n");
// Camera ray sees an emitter directly: add its radiance for this sample.
pixel_radiance += 1.0 / SPP * d_light_irradiance;
}
else {
float3 hit_point = check_obj_hit(-1, d_camera_position, ray_direction, hit_obj_idx);
if (hit_obj_idx > -1) {
// printf("Obj Hit!\n");
// Path-trace from the first surface hit, looking back toward the camera.
float3 reverse_ray_direction = make_float3(-ray_direction.x, -ray_direction.y, -ray_direction.z);
pixel_radiance += 1.0 / SPP * shade(hit_obj_idx, hit_point, reverse_ray_direction, &curand_states[threadIdx.x]);
// printf("Ray Obj General : %f\n", pixel_radiance);
}
}
}
// printf("Ray General : %f\n", 1.0 / SPP * d_light_irradiance);
return pixel_radiance;
}
// One thread per pixel: build the primary ray through the pixel center,
// estimate its radiance, normalize/clamp/gamma-correct, and store the
// grayscale RGB triple. Expects a 2D launch of TILE_SIZE x TILE_SIZE blocks
// covering RENDER_WIDTH x RENDER_HEIGHT.
__global__ void render_pixel(unsigned char* target_img, curandState* curand_states)
{
    const int px = blockIdx.x * TILE_SIZE + threadIdx.x;
    const int py = blockIdx.y * TILE_SIZE + threadIdx.y;
    // World-space offset of this pixel's center from the camera position.
    float3 delta_left = scalar_mult_float3(d_camera_left_direction, (px + 0.5 - RENDER_WIDTH / 2.0) * d_camera_pixel_width);
    float3 delta_up = scalar_mult_float3(d_camera_up_direction, (py + 0.5 - RENDER_HEIGHT / 2.0) * d_camera_pixel_height);
    float3 delta = add_float3(delta_left, add_float3(delta_up, scalar_mult_float3(d_camera_direction, d_camera_focal_length)));
    float3 pixel_center = make_float3(d_camera_position.x + delta.x, d_camera_position.y + delta.y, d_camera_position.z + delta.z);
    float radiance = ray_generation(pixel_center, curand_states);
    // Normalize against the emitter radiance, clamp to 1, then apply gamma
    // correction (exponent 1/2.2).
    radiance /= d_light_irradiance;
    if (radiance > 1) {
        radiance = 1;
    }
    radiance = powf(radiance, 0.454545454545);
    const unsigned char rgb_value = (unsigned char)(radiance * 255);
    const int base_idx = 3 * (py * RENDER_WIDTH + px);
    target_img[base_idx] = rgb_value;
    target_img[base_idx + 1] = rgb_value;
    target_img[base_idx + 2] = rgb_value;
}
// Initialize one curandState per launched thread, using the global thread id
// as the curand subsequence so every state draws an independent stream.
// Backward compatible: with the original <<<1, TILE_SIZE>>> launch,
// blockIdx.x == 0 and this reduces to the old threadIdx.x indexing, but the
// kernel now also stays correct for larger / multi-block launches.
__global__ void init_curand(curandState* curand_states, int seed)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, id, 0, &(curand_states[id]));
}
/*
 * Render entry point: set up RNG states, upload the scene into constant
 * memory, launch the per-pixel render kernel, and save the image as a BMP.
 *
 * NOTE(review): render_pixel/ray_generation index curand_states[threadIdx.x]
 * only, so the TILE_SIZE rows of a tile share 16 RNG states concurrently (a
 * data race on curandState). TILE_SIZE*TILE_SIZE states are allocated and
 * initialized here (instead of TILE_SIZE) so the kernels can move to
 * per-thread indexing without another allocation change; the extra states
 * are harmless to the current kernels.
 */
int main()
{
    dim3 grid{RENDER_WIDTH / TILE_SIZE, RENDER_HEIGHT / TILE_SIZE, 1};
    dim3 block{TILE_SIZE, TILE_SIZE, 1};
    unsigned char* d_target_img;
    cudaError_t cudaStatus = cudaMalloc(&d_target_img, RENDER_WIDTH * RENDER_HEIGHT * 3);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "image allocation failed: %s\n", cudaGetErrorString(cudaStatus));
        return 1;
    }
    curandState* curand_states;
    const int num_rng_states = TILE_SIZE * TILE_SIZE; // one per thread of a block
    cudaStatus = cudaMalloc(&curand_states, num_rng_states * sizeof(curandState));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "rng allocation failed: %s\n", cudaGetErrorString(cudaStatus));
        return 1;
    }
    init_curand <<<1, num_rng_states>>> (curand_states, 0);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "curand init launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    // Upload the host scene table into __constant__ memory.
    cudaMemcpyToSymbol(d_scene_objects, h_scene_objects, sizeof(h_scene_objects));
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "before render launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    render_pixel <<<grid, block>>> (d_target_img, curand_states);
    unsigned char* h_target_img = (unsigned char*)malloc(RENDER_WIDTH * RENDER_HEIGHT * 3);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "render launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    if (h_target_img == NULL)
    {
        fprintf(stderr, "host image allocation failed\n");
    }
    else
    {
        cudaMemcpy(h_target_img, d_target_img, RENDER_WIDTH * RENDER_HEIGHT * 3, cudaMemcpyDeviceToHost);
        cudaStatus = cudaGetLastError();
        if (cudaStatus != cudaSuccess)
        {
            fprintf(stderr, "copy launch failed: %s\n", cudaGetErrorString(cudaStatus));
        }
        save_image(h_target_img, RENDER_WIDTH, RENDER_HEIGHT);
        free(h_target_img);
    }
    cudaFree(d_target_img);
    cudaFree(curand_states);
    cudaDeviceReset();
    return 0;
}
#include <iostream>
#include <string.h>
#include <stdio.h>
#include <math.h>
using namespace std;
namespace GPUfunc{
static int nb = 64; //1024*1024*64*2; // max 1024*1024*64*2
static int nthre = 1; // max 65535
static int nthre_total = nb*nthre;
static int nword = 1024*1024*8;
static int mem_size = sizeof(double) * nword;
static int mem_size_o = nthre_total*sizeof(double);
static double* hmem_i;
static double* hmem_o;
static double* dmem_i;
static double* dmem_o;
#define NLOOP (1000)
#define NX (14705)
// Four byte-for-byte identical busy-work device functions. They appear to be
// kept as separate symbols so kernel() below can dispatch to DISTINCT
// functions per thread (presumably a call/divergence benchmark -- TODO
// confirm intent with the author). Each ignores both arguments, fills a
// large thread-local array (NX doubles) with 1.0, and returns the sum of
// NLOOP passes over it, i.e. NLOOP * NX.
__device__ double myDeviceFunc_0(double* in, int nword)
{
double z=0.0;
double x[NX];
for(int i=0; i<NX; i++) x[i] = 1.0;
for(int j=0; j<NLOOP; j++) for(int i=0; i<NX; i++) z += x[i];
return (z);
}
// identical to myDeviceFunc_0 (see note above)
__device__ double myDeviceFunc_1(double* in, int nword)
{
double z=0.0;
double x[NX];
for(int i=0; i<NX; i++) x[i] = 1.0;
for(int j=0; j<NLOOP; j++) for(int i=0; i<NX; i++) z += x[i];
return (z);
}
// identical to myDeviceFunc_0 (see note above)
__device__ double myDeviceFunc_2(double* in, int nword)
{
double z=0.0;
double x[NX];
for(int i=0; i<NX; i++) x[i] = 1.0;
for(int j=0; j<NLOOP; j++) for(int i=0; i<NX; i++) z += x[i];
return (z);
}
// identical to myDeviceFunc_0 (see note above)
__device__ double myDeviceFunc_3(double* in, int nword)
{
double z=0.0;
double x[NX];
for(int i=0; i<NX; i++) x[i] = 1.0;
for(int j=0; j<NLOOP; j++) for(int i=0; i<NX; i++) z += x[i];
return (z);
}
// Each thread dispatches to one of the four (identical) device functions,
// chosen by its global id modulo 4, and stores the returned value.
__global__ void kernel(double* in, double* out, int nword)
{
    const int global_id = blockDim.x * blockIdx.x + threadIdx.x;
    double z;
    switch (global_id % 4) {
    case 1:
        z = myDeviceFunc_1(in, nword);
        break;
    case 2:
        z = myDeviceFunc_2(in, nword);
        break;
    case 3:
        z = myDeviceFunc_3(in, nword);
        break;
    case 0:
    default:
        // case 0 and the (unreachable for non-negative ids) fallback both
        // call func_0, matching the original switch.
        z = myDeviceFunc_0(in, nword);
        break;
    }
    out[global_id] = z;
}
// One-time allocation/initialization of the host and device buffers used by
// run(); subsequent calls return immediately. Allocation failures are now
// reported and abort setup (is_first stays true so a later call can retry
// instead of running with NULL buffers, which the original would have done).
void initialize()
{
    static bool is_first = true;
    if (false == is_first) return;
    // input buffer (Host)
    hmem_i = (double*) malloc(mem_size);
    if (NULL == hmem_i) {
        fprintf(stderr, "initialize: host input malloc(%d) failed\n", mem_size);
        return;
    }
    for (int i = 0; i < nword; i++) hmem_i[i] = (double)i;
    // input buffer (GPU)
    if (cudaMalloc((void**) &dmem_i, mem_size) != cudaSuccess) {
        fprintf(stderr, "initialize: cudaMalloc (input) failed\n");
        return;
    }
    cudaMemcpy(dmem_i, hmem_i, mem_size, cudaMemcpyHostToDevice);
    // output buffer (Host/GPU)
    if (cudaMalloc((void**) &dmem_o, mem_size_o) != cudaSuccess) {
        fprintf(stderr, "initialize: cudaMalloc (output) failed\n");
        return;
    }
    hmem_o = (double*) malloc(mem_size_o);
    if (NULL == hmem_o) {
        fprintf(stderr, "initialize: host output malloc(%d) failed\n", mem_size_o);
        return;
    }
    printf("# threads: %d \n", nthre_total);
    printf("mem_size: %d MB\n", mem_size >> 20);
    printf("mem_size_o: %d kB\n", mem_size_o >> 10);
    is_first = false;
}
// Launch the benchmark kernel over all configured threads, copy the results
// back, and print the last thread's value as a spot check.
void run()
{
    kernel<<< nb, nthre>>>(dmem_i, dmem_o, nword);
    cudaMemcpy(hmem_o, dmem_o, mem_size_o, cudaMemcpyDeviceToHost);
    const int last = nthre_total - 1;
    printf("%d, %e\n", last, hmem_o[last]);
}
// Release all host and device buffers. Pointers are reset to NULL so an
// accidental second call is a harmless no-op (free(NULL) and cudaFree(NULL)
// are both defined) instead of a double-free.
void finalize(){
    free(hmem_i);     hmem_i = NULL;
    free(hmem_o);     hmem_o = NULL;
    cudaFree(dmem_i); dmem_i = NULL;
    cudaFree(dmem_o); dmem_o = NULL;
}
}
|
#include <stdio.h>
#include <stdlib.h>
#define block_size 32
// Computes c[i] = (a[i-1] - a[i+1]) * b[i] + a[i] * constant with circular
// wrap-around at the vector ends, matching the sequential reference in
// main(). Uses a shared-memory tile of 'a' with a one-element halo on each
// side. Expects a 1-D launch with blockDim.x == block_size.
// (The original shipped as an empty homework skeleton that wrote nothing.)
__global__ void calculation( int *a,
                             int *b,
                             int *c,
                             int constant,
                             int vector_size ) {
    // Declare shared memory: block_size centers plus left/right halo cells.
    __shared__ int sh_a[block_size + 2];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bring in the data from global memory into shared memory.
    if (i < vector_size) {
        sh_a[threadIdx.x + 1] = a[i];
        if (threadIdx.x == 0) {
            // left halo, wrapping to the last element when i == 0
            sh_a[0] = a[i > 0 ? i - 1 : (vector_size - 1)];
        }
        if (threadIdx.x == blockDim.x - 1 || i == vector_size - 1) {
            // right halo (loaded by the last ACTIVE thread of the block),
            // wrapping to element 0 at the vector end
            sh_a[threadIdx.x + 2] = a[i < (vector_size - 1) ? i + 1 : 0];
        }
    }
    // Synchronize: the barrier sits outside the guard so every thread of the
    // block reaches it, including tail threads with i >= vector_size.
    __syncthreads();
    // Do calculation using the values in shared memory, then write output.
    if (i < vector_size) {
        int prev_a = sh_a[threadIdx.x];
        int curr_a = sh_a[threadIdx.x + 1];
        int post_a = sh_a[threadIdx.x + 2];
        c[i] = (prev_a - post_a) * b[i] + curr_a * constant;
    }
}
// Host driver: fills two random vectors, computes the circular stencil
// c[i] = (a[i-1]-a[i+1])*b[i] + a[i]*constant on both CPU and GPU, times
// both runs with CUDA events and compares the results element by element.
int main( int argc, char* argv[] ) {
    // Parse input arguments (we only receive command + vector size).
    if (argc != 2) {
        // "Usage messages" are a conventional way of telling the user
        // how to run a program if they enter the command incorrectly.
        printf ("Usage: %s vector_size\n", argv[0]);
        return 1;
    }
    // Set GPU variables based on input arguments.
    int vector_size = atoi(argv[1]);
    int grid_size = ((vector_size-1)/block_size) + 1;  // ceil-div: cover every element
    // Set device that we will use for our cuda code (either 0 or 1).
    cudaSetDevice(0);
    // Timing variables.
    cudaEvent_t start_cpu, start_gpu;
    cudaEvent_t stop_cpu, stop_gpu;
    cudaEventCreate (&start_cpu);
    cudaEventCreate (&start_gpu);
    cudaEventCreate (&stop_cpu);
    cudaEventCreate (&stop_gpu);
    float time;
    // Input arrays and variables.
    int *a = new int [vector_size];
    int *b = new int [vector_size];
    int *c_cpu = new int [vector_size];
    int *c_gpu = new int [vector_size];
    int constant = 4;
    // Pointers in GPU memory.
    int *dev_a;
    int *dev_b;
    int *dev_c;
    // Fill the arrays 'a' and 'b' on the CPU.
    printf("Filling up input arrays with random values between 1 and 10.\n");
    for (int i = 0; i < vector_size; i++) {
        a[i] = rand()%10;
        b[i] = rand()%10;
    }
    //
    // CPU Calculation
    //////////////////
    printf("Running sequential job.\n");
    cudaEventRecord(start_cpu,0);
    // Calculate C on the CPU, wrapping the neighbour indices around the ends.
    for (int i = 0; i < vector_size; i++) {
        int prev_a = a[i>0?i-1:(vector_size-1)];
        int curr_a = a[i];
        int post_a = a[i<(vector_size-1)?i+1:0];
        int curr_b = b[i];
        int output_c = (prev_a-post_a)*curr_b + curr_a*constant;
        c_cpu[i] = output_c;
    }
    cudaEventRecord(stop_cpu,0);
    cudaEventSynchronize(stop_cpu);
    cudaEventElapsedTime(&time, start_cpu, stop_cpu);
    printf("\tSequential Job Time: %.2f ms\n", time);
    //
    // GPU Calculation
    //////////////////
    printf("Running parallel job.\n");
    cudaEventRecord(start_gpu,0);
    // Allocate the memory on the GPU.
    cudaMalloc( (void**)&dev_a, vector_size * sizeof(int) );
    cudaMalloc( (void**)&dev_b, vector_size * sizeof(int) );
    cudaMalloc( (void**)&dev_c, vector_size * sizeof(int) );
    // Set arrays to 0.
    cudaMemset(dev_a, 0, vector_size * sizeof(int));
    cudaMemset(dev_b, 0, vector_size * sizeof(int));
    cudaMemset(dev_c, 0, vector_size * sizeof(int));
    // Copy the arrays 'a' and 'b' to the GPU.
    cudaMemcpy( dev_a, a, vector_size * sizeof(int),
                cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, vector_size * sizeof(int),
                cudaMemcpyHostToDevice );
    // Run kernel.
    calculation<<<grid_size,block_size>>>( dev_a,
                                           dev_b,
                                           dev_c,
                                           constant,
                                           vector_size );
    // Bug fix: launch-configuration errors are silent unless queried.
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess) {
        printf("Kernel launch failed: %s\n", cudaGetErrorString(launch_err));
    }
    // Copy the array 'c' back from the GPU to the CPU
    // (this blocking copy also synchronizes with the kernel).
    cudaMemcpy( c_gpu, dev_c, vector_size * sizeof(int),
                cudaMemcpyDeviceToHost );
    cudaEventRecord(stop_gpu,0);
    cudaEventSynchronize(stop_gpu);
    cudaEventElapsedTime(&time, start_gpu, stop_gpu);
    printf("\tParallel Job Time: %.2f ms\n", time);
    // Compare the results; report the first mismatch and stop.
    int error = 0;
    for (int i = 0; i < vector_size; i++) {
        if (c_cpu[i] != c_gpu[i]){
            error = 1;
            printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
        }
        if (error) break;
    }
    if (error == 0){
        printf ("Correct result. No errors were found.\n");
    }
    // Free the memory allocated on the GPU.
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );
    // Free cuda events.
    cudaEventDestroy (start_cpu);
    cudaEventDestroy (start_gpu);
    cudaEventDestroy (stop_cpu);
    cudaEventDestroy (stop_gpu);
    // Bug fix: these buffers were allocated with new[] but released with
    // free(), which is undefined behaviour — use delete[] instead.
    delete[] a;
    delete[] b;
    delete[] c_cpu;
    delete[] c_gpu;
    return 0;
}
|
// Logistic function 1/(1 + e^-a), evaluated in double precision on the device.
__device__ __forceinline__ double sigmoid (double a) { double e = exp (-a); return 1.0 / (1.0 + e); }
// Row-major flattening of element (x, y) in a matrix with 'width' columns.
__device__ __forceinline__ int idx_2d(int x, int y, int width) { return y + width * x; }
// One thread computes one element (m, n) of the M x N gate pre-activation
// matrix: out_gates = [x_t ; h_t] * weights + bias, where the K-wide "A"
// row is the concatenation of the timestep's input (first input_size
// columns) and the previous hidden state (remaining hidden_size columns),
// both stored timestep-major. Expects a 2-D launch with x covering M (batch)
// and y covering N (4*hidden_size gates).
__global__ void lstm_gemm(float *input,
                          float *initial_hiddens,
                          float *weights,
                          float *bias,
                          float *out_gates,
                          int M, int K, int N,
                          int input_size, int hidden_size, int timestep) {
    int m = threadIdx.x + blockIdx.x * blockDim.x;
    int n = threadIdx.y + blockIdx.y * blockDim.y;
    // Bug fix: guard the ragged edge — without this, a grid that overshoots
    // M x N reads and writes out of bounds.
    if (m >= M || n >= N) return;
    int c_wr_idx = idx_2d(m, n, N);
    float acc = 0.f;   // float literal: keep the accumulation in float
    for (int k = 0; k < K; k++) {
        int b_rd_idx = idx_2d(k, n, N);
        // Columns [0, input_size) come from the input; the rest from the
        // previous hidden state.
        float a_matrix_elem = k < input_size
            ? input[idx_2d(m, k, input_size) + M * input_size * timestep]
            : initial_hiddens[idx_2d(m, k - input_size, hidden_size) + M * hidden_size * timestep];
        acc += a_matrix_elem * weights[b_rd_idx];
    }
    out_gates[c_wr_idx] = acc + bias[n];
}
// One thread updates one (batch m, unit n) LSTM cell for 'timestep':
//   c_t = sigmoid(f) * c_{t-1} + sigmoid(i) * tanh(g)
//   h_t = sigmoid(o) * tanh(c_t)
// out_gates holds the four pre-activation gates [i | f | g | o] side by
// side, each hidden_size wide. The previous cell state is read from the
// 'timestep' slot and both new cell and hidden are written to the
// 'timestep+1' slot of their timestep-major buffers.
__global__ void lstm_eltwise(float* inout_cell,
                             float *out_gates,
                             float*hidden_out,
                             int hidden_size,
                             int batch_size,
                             int timestep) {
    int m = threadIdx.x + blockIdx.x * blockDim.x;
    int n = threadIdx.y + blockIdx.y * blockDim.y;
    // Bug fix: guard the ragged edge — a grid larger than batch x hidden
    // would otherwise write out of bounds.
    if (m >= batch_size || n >= hidden_size) return;
    int i_idx = idx_2d(m, 0*hidden_size+n, 4*hidden_size);
    int f_idx = idx_2d(m, 1*hidden_size+n, 4*hidden_size);
    int g_idx = idx_2d(m, 2*hidden_size+n, 4*hidden_size);
    int o_idx = idx_2d(m, 3*hidden_size+n, 4*hidden_size);
    float i = out_gates[i_idx];
    float f = out_gates[f_idx];
    float g = out_gates[g_idx];
    float o = out_gates[o_idx];
    // tanhf instead of tanh: the operands are float, no need to round-trip
    // through double.
    float cell = sigmoid(f) * inout_cell[idx_2d(m, n, hidden_size) + batch_size * hidden_size * timestep]
               + sigmoid(i) * tanhf(g);
    float hidden = sigmoid(o) * tanhf(cell);
    // Cell and hidden share the same (m, n, t+1) write index.
    int wr_idx = idx_2d(m, n, hidden_size) + batch_size * hidden_size * (timestep+1);
    hidden_out[wr_idx] = hidden;
    inout_cell[wr_idx] = cell;
}
|
19,896 | // Using CUDA device to calculate pi
#include <stdio.h>
#include <cuda.h>
extern "C" double getTime(void);
#define NBIN 1000000000 // Number of bins
#define NUM_BLOCK atoi(argv[1]) // Number of thread blocks
#define NUM_THREAD atoi(argv[2]) // Number of threads per block
// Kernel that executes on the CUDA device
// Midpoint-rule integration of 4/(1+x^2) over [0,1]: the bins are dealt out
// round-robin across all nthreads*nblocks threads (grid-stride style), and
// each thread folds its share into sum[idx]. The host reduces the partial
// sums and multiplies by 'step' to obtain pi.
__global__ void cal_pi(double *sum, int nbin, double step, int nthreads, int nblocks) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;   // global thread index
    int stride = nthreads * nblocks;               // total thread count
    double acc = sum[idx];                         // host pre-zeroes this
    for (int bin = idx; bin < nbin; bin += stride) {
        double x = (bin + 0.5) * step;             // bin midpoint
        acc += 4.0 / (1.0 + x * x);
    }
    sum[idx] = acc;
}
// Main routine that executes on the host
// Host driver: spreads NBIN bins of the pi quadrature across
// NUM_BLOCK x NUM_THREAD device threads, reduces the per-thread partial
// sums on the host, prints the result, and appends the timing to
// timing_plot.out.
int main(int argc, char *argv[]) {
    printf("NUM_BLOCK: %d NUMTHREAD_THREAD: %d, ", NUM_BLOCK, NUM_THREAD);
    dim3 dimGrid(NUM_BLOCK,1,1);    // Grid dimensions
    dim3 dimBlock(NUM_THREAD,1,1);  // Block dimensions
    double *sumHost, *sumDev;       // Pointer to host & device arrays
    double pi = 0;
    int tid;
    double step = 1.0/NBIN;         // Step size
    size_t size = NUM_BLOCK*NUM_THREAD*sizeof(double); // Array memory size
    sumHost = (double *)malloc(size);     // Allocate array on host
    cudaMalloc((void **) &sumDev, size);  // Allocate array on device
    double start = getTime();
    // Initialize array in device to 0.
    cudaMemset(sumDev, 0, size);
    // Do calculation on device.
    cal_pi <<<dimGrid, dimBlock>>> (sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK); // call CUDA kernel
    // Retrieve result from device and store it in host array
    // (the blocking copy also synchronizes with the kernel).
    cudaMemcpy(sumHost, sumDev, size, cudaMemcpyDeviceToHost);
    // Reduce the per-thread partial sums on the host.
    for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++)
        pi += sumHost[tid];
    pi *= step;
    // Print results.
    FILE *fp;
    double delta = getTime() - start;
    printf("PI = %.16g computed in %.4g seconds\n", pi, delta);
    // Bug fix: the stream was opened but never closed, leaking the handle
    // and risking unflushed output; also guard against fopen failure.
    fp = fopen("timing_plot.out", "a");
    if (fp != NULL) {
        fprintf(fp, "%d, %d, %.4g\n", NUM_BLOCK, NUM_THREAD, delta);
        fclose(fp);
    }
    // Cleanup.
    free(sumHost);
    cudaFree(sumDev);
    return 0;
}
|
19,897 | #include <stdio.h>
// Device-side greeter: every thread in the launch prints one line.
// (Device printf output is buffered and only appears at a synchronizing
// call on the host.)
__global__
void hello(void){
printf("Hello from the GPU\n");
}
// Host driver: prints one greeting from the CPU, then launches hello() on
// a 2x3x1 grid of 10x2x3 blocks (6 blocks * 60 threads = 360 greetings).
int main(void){
    printf("Hello from the CPU\n");
    // 2*3*1 = 6 blocks in the grid.
    dim3 gridDim(2,3,1);
    // Fix of a wrong comment: this is 10*2*3 = 60 THREADS PER BLOCK,
    // not another block setup.
    dim3 blockDim(10,2,3);
    hello<<<gridDim,blockDim>>>();
    // Bug fix: kernel launches are asynchronous — wait for completion so
    // the device printf buffer is flushed before tearing the context down.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
19,898 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <ctype.h>
#include <vector>
#include <string>
typedef std::vector<double> double_vec;
// Reads one stock closing price per stdin line (stopping at EOF or the
// first non-numeric line), computes the day-to-day differences on the GPU
// with Thrust, and prints the average daily gain.
int main()
{
    double_vec stocks;
    std::string value;
    // Robustness fix: check getline for EOF/failure and guard value[0]
    // against an empty line before classifying the first character.
    while (std::getline(std::cin, value))
    {
        if (value.empty() || !isdigit((unsigned char)value[0]))
        {
            break;
        }
        stocks.push_back(std::stod(value));
    }
    // Robustness fix: with fewer than two prices there are no daily gains
    // (the original would construct a vector of size -1 and crash).
    if (stocks.size() < 2)
    {
        std::cout << "O ganho médio foi de: US$ " << 0.0 << std::endl;
        return 0;
    }
    thrust::host_vector<double> host(stocks.begin(), stocks.end());
    thrust::device_vector<double> dev(host);
    thrust::device_vector<double> ganho_diario(int(stocks.size()) - 1, 0);
    // Daily gain: price[i+1] - price[i].
    thrust::transform(
        dev.begin() + 1,
        dev.end(),
        dev.begin(),
        ganho_diario.begin(),
        thrust::minus<double>()
    );
    double sumDiffs = thrust::reduce(ganho_diario.begin(), ganho_diario.end(), 0.0, thrust::plus<double>());
    // Bug fix (off-by-one): average over the number of gains (N-1 diffs),
    // not the number of prices (N).
    double avgDiffs = sumDiffs / double(ganho_diario.size());
    std::cout << "O ganho médio foi de: US$ " << avgDiffs << std::endl;
}
19,899 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
/* Capture the current wall-clock time into *tstart; pair with
 * ellapsed_time() to measure an interval. */
void init_timing(struct timeval* tstart)
{
    gettimeofday(tstart, NULL);
}
/* Seconds of wall-clock time elapsed since 'tstart' was captured
 * (e.g. by init_timing()). Microsecond resolution, returned as float. */
float ellapsed_time(struct timeval tstart)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    long long us = (long long)(now.tv_sec - tstart.tv_sec) * 1000000
                 + (now.tv_usec - tstart.tv_usec);
    return ((float)us * 1.0e-6);
}
int cu_threads = 128;
// Elementwise update tab[i] = tab[i] * prev / next, where the neighbours
// outside the array are taken as 0.
// NOTE(review): next == 0 at i == size-1 divides by zero (IEEE inf/nan);
// the sequential reference in main() has the same edge, so this mirrors it.
__global__ void op_kernel(float *tab, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;  // flat global index
    if (idx >= size)
        return;   // guard the ragged last block
    float prev = (idx == 0)        ? 0.f : tab[idx - 1];
    float next = (idx == size - 1) ? 0.f : tab[idx + 1];
    tab[idx] = tab[idx] * prev / next;
}
// Host driver: fills a float table, runs op_kernel on it, then runs a
// sequential version of the same update, timing both, and prints the table.
int main()
{
    int i;
    int N = 65536;
    float *table, *device_table;
    float prev, next;
    struct timeval timer;
    int cu_blocks;

    // Host input: table[i] = i.
    table = (float*) malloc(N*sizeof(float));
    for(i = 0; i < N; i++)
    {
        table[i] = i;
    }
    cudaMalloc(&device_table, N*sizeof(float));
    cudaMemcpy(device_table, table, N*sizeof(float), cudaMemcpyHostToDevice);
    cu_blocks = (N + cu_threads - 1) / cu_threads;  // ceil-div

    init_timing(&timer);
    op_kernel<<< cu_blocks, cu_threads >>>(device_table, N);
    cudaDeviceSynchronize();  // launches are async: wait before reading the timer
    printf("Kernel time : %f\n", ellapsed_time(timer));
    cudaMemcpy(table, device_table, N*sizeof(float), cudaMemcpyDeviceToHost);

    // Sequential version.
    // NOTE(review): this loop updates 'table' in place, so table[i-1] has
    // already been overwritten when it is read — unlike the kernel, which
    // reads only original values. It also runs on the GPU RESULT (copied
    // back just above), not on fresh input; and next == 0 at i == N-1
    // divides by zero. Kept as-is — confirm against the exercise intent.
    init_timing(&timer);
    for(i = 0; i < N; i++)
    {
        if(i == 0)
            prev = 0;
        else
            prev = table[i-1];
        if(i == N-1)
            next = 0;
        else
            next = table[i+1];
        table[i] = table[i] * prev / next;
    }
    printf("CPU time : %f\n", ellapsed_time(timer));

    // Bug fix: 'table' holds floats; printing them with %d is undefined
    // behaviour — use %f.
    for(i = 0; i < N; i++)
        printf("%f\n",table[i]);

    free(table);
    cudaFree(device_table);
    exit(EXIT_SUCCESS);
}
|
19,900 |
#include <type_traits>
using tt = std::true_type;
using ft = std::false_type;
// Host-side helper returning x squared plus a compile-time constant (17),
// exercising a C++11 <type_traits> feature from CUDA host code.
int __host__ static_cuda11_func(int x)
{
    constexpr int offset = std::integral_constant<int, 17>::value;
    return x * x + offset;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.