serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
10,801 | #include <cstdio>
#include <cuda.h>
static const int ThreadsPerBlock = 512;
static int* d_maxlen;
// Computes the Collatz sequence length for each odd value in [start, stop]
// (start assumed odd; stop processed when it is odd), one thread per value,
// recording the global maximum length in *maxlen via atomicMax.
static __global__ void collatz(const long start, const long stop, int* const maxlen)
{
  // Thread i handles the i-th odd value: start, start+2, start+4, ...
  const long i = threadIdx.x + blockIdx.x * (long)blockDim.x;
  const long first = start + 2 * i;
  // Fixed: the original guard (i + start < stop) compared the thread index,
  // not the value handled, so surplus threads processed values beyond stop.
  if (first <= stop)
  {
    long val = first;
    int len = 1;
    while (val != 1) {
      len++;
      if ((val % 2) == 0) // even
        val = val / 2;
      else                // odd
        val = 3 * val + 1;
    }
    // Racy pre-read skips most atomics; atomicMax keeps the result correct.
    if (len > *maxlen) { atomicMax(maxlen, len); }
  }
}
// Allocates the device-side running-maximum slot and zero-initializes it.
void GPU_Init()
{
  const int zero = 0;
  if (cudaMalloc((void **)&d_maxlen, sizeof(int)) != cudaSuccess) {
    fprintf(stderr, "ERROR: could not allocate memory\n");
    exit(-1);
  }
  if (cudaMemcpy(d_maxlen, &zero, sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) {
    fprintf(stderr, "ERROR: copying to device failed\n");
    exit(-1);
  }
}
// Launches the collatz kernel with one thread per odd value in [start, stop].
void GPU_Exec(const long start, const long stop)
{
  if (start > stop) return;
  const long oddCount = (stop - start + 2) / 2;
  const long blocks = (oddCount + ThreadsPerBlock - 1) / ThreadsPerBlock;
  collatz<<<blocks, ThreadsPerBlock>>>(start, stop, d_maxlen);
}
// Copies the maximum sequence length back to the host, frees the device
// slot, and returns the result.
int GPU_Fini()
{
  int maxlen = 0;
  if (cudaMemcpy(&maxlen, d_maxlen, sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) {
    fprintf(stderr, "Error: copying to host failed\n");
    exit(-1);
  }
  cudaFree(d_maxlen);
  return maxlen;
}
|
10,802 | #include <stdlib.h>
#include <stdio.h>
#define N 10
// Element-wise vector add (one thread per element, single block):
// C[tid] = A[tid] + B[tid]. The printf is a per-thread debug trace.
__global__ void VecAdd(float* A, float* B, float* C) {
  int tid = threadIdx.x;
  printf("tid: x=%d\n", tid);
  C[tid] = A[tid] + B[tid];
}
// Demo driver: adds two fixed 10-element vectors on the GPU and prints the
// result. CUDA return codes are not checked.
int main() {
// Host inputs (N == 10).
float A[N] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
float B[N] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
float *C = (float*)malloc(sizeof(float) * N);
float *dA, *dB, *dC;
cudaMalloc((void**)&dA, N * sizeof(float));
cudaMalloc((void**)&dB, N * sizeof(float));
cudaMalloc((void**)&dC, N * sizeof(float));
cudaMemcpy(dA, A, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, N * sizeof(float), cudaMemcpyHostToDevice);
// Kernel invocation with N threads
VecAdd<<<1, N>>>(dA, dB, dC);
// Blocking D2H copy also synchronizes with the kernel above.
cudaMemcpy(C, dC, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
for(int i=0; i<N; i++) {
printf("%d: %f\n", i, *(C+i));
}
free(C);
// Keeps the console window open until a key is pressed.
getchar();
return 0;
}
|
10,803 | #include "cuda.h"
// Kernel: b[i] = 2*a[i] for every i < n, one thread per element.
__global__ void return_double_(int n, double *b, const double*a){
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  if (idx < n) {
    b[idx] = 2 * a[idx];
  }
}
// Host wrapper: doubles a[0..n) into b using 256-thread blocks.
void return_double(int n, double *b, const double*a){
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  return_double_<<<blocks, threads>>>(n, b, a);
} |
10,804 | /*** Original Question : https://stackoverflow.com/questions/13215614/
I am new to CUDA. I am trying to parallelize the following code. Right now it's sitting on kernel but is not using threads at all, thus slow. I tried to use this answer but to no avail so far.
The kernel is supposed to generate first n prime numbers, put them into device_primes array and this array is later accessed from host. The code is correct and works fine in serial version but I need to speed it up, perhaps with use of shared memory.
//CUDA kernel code
__global__ void generatePrimes(int* device_primes, int n)
{
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//int j = blockIdx.y * blockDim.y + threadIdx.y;
int counter = 0;
int c = 0;
for (int num = 2; counter < n; num++)
{
for (c = 2; c <= num - 1; c++)
{
if (num % c == 0) //not prime
{
break;
}
}
if (c == num) //prime
{
device_primes[counter] = num;
counter++;
}
}
}
My current, preliminary, and definitely wrong attempt to parallelize this looks like the following:
//CUDA kernel code
__global__ void generatePrimes(int* device_primes, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int num = i + 2;
int c = j + 2;
int counter = 0;
if ((counter >= n) || (c > num - 1))
{
return;
}
if (num % c == 0) //not prime
{
}
if (c == num) //prime
{
device_primes[counter] = num;
counter++;
}
num++;
c++;
}
But this code populates the array with data that does not make sense. In addition, many values are zeroes. Thanks in advance for any help, it's appreciated.
***/
// Finds primes in parallel: device_primes[0] doubles as a 1-based count and
// the loop sentinel; primes themselves are stored from index 1 upward (in
// whatever order the atomics land, i.e. NOT sorted). Each thread tests
// num = thread_id, thread_id + stride, ... by trial division.
// NOTE(review): the read of device_primes[0] in the while-condition is not
// atomic, so threads may overshoot n slightly and write past n-1 entries --
// the buffer should be sized with slack; confirm against the caller.
// NOTE(review): __syncthreads() only orders threads within one block; with
// multiple blocks the initialization by thread 0 is not globally visible
// before other blocks start -- confirm this is launched with one block or
// that device_primes[0] is pre-initialized from the host.
__global__ void getPrimes(int *device_primes,int n)
{
int c = 0;
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int num = thread_id;
// Slot 0 starts at 1 so the first prime goes to index 1.
if (thread_id == 0) device_primes[0] = 1;
__syncthreads();
while(device_primes[0] < n)
{
// Trial division; num < 2 never satisfies c == num below, so 0 and 1
// are correctly rejected.
for (c = 2; c <= num - 1; c++)
{
if (num % c == 0) //not prime
{
break;
}
}
if (c == num) //prime
{
// Atomically claim the next output slot and bump the count.
int pos = atomicAdd(&device_primes[0],1);
device_primes[pos] = num;
}
num += blockDim.x * gridDim.x; // Next number for this thread
}
}
|
10,805 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include<curand_kernel.h>
//
// Patch distance: sum of per-pixel RGB Euclidean differences over a
// (2r+1)x(2r+1) window centred at (target_row, target_col) in the target
// image and (source_row, source_col) in the source image. Both images are
// interleaved 3-channel row-major buffers. Centres are clamped so the
// window stays inside each image.
__device__ float calcDistance(int target_row ,int target_col,int source_row, int source_col , double * target_block ,double * source_block,int target_rows,int target_cols ,int source_rows,int source_cols)
{
  const int r = 3; // patch radius
  double dif = 0;
  double dif0 = 1, dif1 = 1, dif2 = 1;
  // Clamp the window centres to keep the patch in-bounds on every side.
  if (target_row - r < 0) target_row = r;
  if (target_col - r < 0) target_col = r;
  if (source_row - r < 0) source_row = r;
  if (source_col - r < 0) source_col = r;
  // Fixed: the two target clamps below originally used the swapped
  // dimension (row clamped with target_cols and col with target_rows),
  // which reads out of bounds whenever the target is not square. The
  // source clamps were already correct and establish the intended pattern.
  if (target_row + r >= target_rows) target_row = target_rows - 1 - r;
  if (target_col + r >= target_cols) target_col = target_cols - 1 - r;
  if (source_row + r >= source_rows) source_row = source_rows - 1 - r;
  if (source_col + r >= source_cols) source_col = source_cols - 1 - r;
  for (int i = -r; i <= r; i++) {
    for (int j = -r; j <= r; j++) {
      int temp = 3 * ((source_row + i) * source_cols + source_col + j);
      int temp2 = 3 * ((target_row + i) * target_cols + target_col + j);
      dif0 = source_block[temp + 0] - target_block[temp2 + 0];
      dif1 = source_block[temp + 1] - target_block[temp2 + 1];
      dif2 = source_block[temp + 2] - target_block[temp2 + 2];
      dif += sqrt(dif0 * dif0 + dif1 * dif1 + dif2 * dif2);
    }
  }
  return dif;
}
// Compares the target patch against three candidate source patches and
// returns 1, 2 or 3 for the closest one (earlier candidate wins ties).
__device__ int calcDistance(int target_row ,int target_col,int source_row1, int source_col1,int source_row2, int source_col2 ,int source_row3, int source_col3,double * target_block , double *source_block,int target_rows,int target_cols ,int source_rows,int source_cols)
{
  float d1 = calcDistance(target_row, target_col, source_row1, source_col1, target_block, source_block, target_rows, target_cols, source_rows, source_cols);
  float d2 = calcDistance(target_row, target_col, source_row2, source_col2, target_block, source_block, target_rows, target_cols, source_rows, source_cols);
  float d3 = calcDistance(target_row, target_col, source_row3, source_col3, target_block, source_block, target_rows, target_cols, source_rows, source_cols);
  int best = 1;
  float bestDist = d1;
  if (d2 < bestDist) { best = 2; bestDist = d2; }
  if (d3 < bestDist) { best = 3; }
  return best;
}
// Two-candidate variant: returns 1 if the first source patch is at least
// as close as the second, otherwise 2.
__device__ int calcDistance(int target_row,int target_col ,int source_row1, int source_col1,int source_row2, int source_col2 ,double * target_block ,double * source_block,int target_rows,int target_cols ,int source_rows,int source_cols)
{
  float d1 = calcDistance(target_row, target_col, source_row1, source_col1, target_block, source_block, target_rows, target_cols, source_rows, source_cols);
  float d2 = calcDistance(target_row, target_col, source_row2, source_col2, target_block, source_block, target_rows, target_cols, source_rows, source_cols);
  return (d1 <= d2) ? 1 : 2;
}
// Propagation pass of a PatchMatch-style nearest-neighbour search: each
// target pixel (x, y) compares its current match against the matches
// stored at (y+1, x) and (y, x+1) (shifted back by one), keeping the
// closest. relation_block holds (row, col) pairs, 2 ints per pixel.
// NOTE(review): pixels on the last row/column read relation_block out of
// bounds via (y+1)/(x+1) -- confirm the caller pads or guards the borders.
__global__ void extern PropagationGPU(double * target_block ,double * source_block , int * relation_block , int target_rows , int target_cols ,int source_rows , int source_cols)
{
// One block per target row, one thread per target column.
int y = blockIdx.x;
int x = threadIdx.x;
// Current match (r0,c0) and the two propagated candidates.
int c_r0 = relation_block[ 2*(y*target_cols+x) + 0 ];
int c_c0 = relation_block[ 2*(y*target_cols+x) + 1];
int c_r1 = relation_block[ 2*((y+1)*target_cols+x) + 0 ]-1;
int c_c1 = relation_block[ 2*((y+1)*target_cols+x) + 1 ];
int c_r2 = relation_block[ 2*(y*target_cols+x+1) + 0];
int c_c2 = relation_block[ 2*(y*target_cols+x+1) + 1]-1;
// 1 = keep current, 2 = take below-neighbour's match, 3 = take right's.
int patchNumber = calcDistance(y , x , c_r0 , c_c0 , c_r1, c_c1 , c_r2 , c_c2 , target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
switch(patchNumber)
{
case 2:
relation_block[ 2*(y*target_cols+x) + 0 ]= c_r1;
relation_block[ 2*(y*target_cols+x) + 1 ]= c_c1;
break;
case 3:
relation_block[ 2*(y*target_cols+x) + 0 ] = c_r2;
relation_block[ 2*(y*target_cols+x) + 1 ] = c_c2;
break;
}
}
// Second refinement pass: like PropagationGPU but the candidates come from
// the pixels two rows up and two columns left (shifted forward by 2).
// NOTE(review): despite the name, there is no randomness here -- this is
// another propagation step with offset 2; confirm against the algorithm.
// NOTE(review): pixels with y < 2 or x < 2 read relation_block out of
// bounds via (y-2)/(x-2) -- confirm the caller guards the borders.
__global__ void extern RandomSearchGPU(double * target_block ,double * source_block ,int * relation_block,int target_rows,int target_cols ,int source_rows,int source_cols){
// One block per target row, one thread per target column.
int y = blockIdx.x;
int x = threadIdx.x;
// Current match and the two offset candidates.
int c_r0 = relation_block[ 2*(y*target_cols+x) + 0 ];
int c_c0 = relation_block[ 2*(y*target_cols+x) + 1];
int c_r1 = relation_block[ 2*((y-2)*target_cols+x) + 0 ]+2;
int c_c1 = relation_block[ 2*((y-2)*target_cols+x) + 1 ];
int c_r2 = relation_block[ 2*(y*target_cols+x-2) + 0];
int c_c2 = relation_block[ 2*(y*target_cols+x-2) + 1]+2;
int patchNumber = calcDistance(y , x , c_r0 , c_c0 , c_r1, c_c1 , c_r2 , c_c2 , target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
switch(patchNumber)
{
case 2:
relation_block[ 2*(y*target_cols+x) + 0 ]= c_r1;
relation_block[ 2*(y*target_cols+x) + 1 ]= c_c1;
break;
case 3:
relation_block[ 2*(y*target_cols+x) + 0 ] = c_r2;
relation_block[ 2*(y*target_cols+x) + 1 ] = c_c2;
break;
}
}
// Brute-force ("baoli") match: for each target pixel, scan a fixed 12x12
// window of source positions and keep the smallest patch distance.
// NOTE(review): on improvement the relation is set to the constant (1,1)
// instead of the winning (i,j) -- looks like a bug; confirm intent (this
// kernel is only referenced from a commented-out launch in bridge()).
__global__ void extern baoli(double * target_block ,double * source_block ,int * relation_block,int target_rows,int target_cols ,int source_rows,int source_cols,double *distance){
int y = threadIdx.y;
int x = threadIdx.x;
for(int i = 0 ; i<12;i++){
for(int j = 0 ;j< 12 ; j++){
double c = calcDistance(y , x , i , j , target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
if( c < distance[ y*target_cols+x ]){
relation_block[ 2*(y*target_cols+x) + 0 ]= 1;
relation_block[ 2*(y*target_cols+x) + 1 ]= 1;
distance[ y*target_cols+x ] = c;
}
}
}
}
// Host driver: runs 130 alternating propagation / offset-propagation passes
// over the relation field, one block per target row and one thread per
// target column (so target_cols must not exceed the device's max block size).
// The brute-force baoli pass is kept available but disabled.
void extern bridge(double * target_block ,double * source_block ,int * relation_block,int target_rows,int target_cols ,int source_rows,int source_cols, double * distance){
    for (int i = 0; i < 130; i++) {
        PropagationGPU<<<target_rows ,target_cols>>>(target_block, source_block, relation_block , target_rows , target_cols , source_rows , source_cols);
        // Fixed: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
        // is the supported equivalent.
        cudaDeviceSynchronize();
        RandomSearchGPU<<<target_rows ,target_cols>>>(target_block, source_block, relation_block,target_rows,target_cols,source_rows, source_cols);
        cudaDeviceSynchronize();
    }
    //baoli<<<target_rows ,target_cols>>>(target_block, source_block, relation_block , target_rows , target_cols , source_rows , source_cols ,distance);
} |
10,806 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#define N 9
// Block-wide tree reduction of a[0..N-1] into o[0] (a is modified in place).
// Launched as sum<<<1, N/2>>>: one block, one thread per element pair.
__global__ void sum(int *a,int *o)
{
int of;
int id=threadIdx.x;
for(of=N/2 ; of > 0 ;of=of/2)
{
if(id<of)
{
a[id]+=a[id+of];
}
// Fixed: without a barrier each reduction step could read partial sums
// from the previous step before they are written.
__syncthreads();
}
// Fixed: the odd-N fixup and the output write were executed (and raced)
// by every thread; a single thread must perform them exactly once.
if(id==0)
{
if(N%2==1)
{
// Odd N: a[N-1] was never paired in the tree above.
a[0]=a[0]+a[N-1];
}
o[0]=a[0];
}
}
// Fills an array with N random values, reduces it on the GPU, and prints
// the sum and arithmetic mean. CUDA return codes are not checked.
int main()
{
int *h_a,*d_a,*oh_a,*od_a;
int size= N * sizeof(int);
h_a=(int*)malloc(size);
oh_a=(int*)malloc(size);
cudaMalloc(&d_a,size);
cudaMalloc(&od_a,size);
int i;
// Random inputs in [0, N).
for(i=0 ;i<N ;i++)
{
h_a[i] = random() % N;
}
printf("\n\nNumbers =>");
for(i=0 ;i<N ;i++)
{
printf("%d ",h_a[i]);
}
cudaMemcpy(d_a, h_a,size,cudaMemcpyHostToDevice);
// One block of N/2 threads (N == 9 -> 4 threads).
sum<<<1, N/2>>>(d_a,od_a);
// Blocking copy synchronizes with the kernel; only od_a[0] is meaningful.
cudaMemcpy(oh_a, od_a,size,cudaMemcpyDeviceToHost);
printf("\n\nSum => %d",oh_a[0]);
float arithmeticMean=(float)oh_a[0]/N;
printf("\n\nArithmetic Mean => %f",arithmeticMean);
cudaFree(d_a);
cudaFree(od_a);
free(h_a);
free(oh_a);
return 0;
}
|
10,807 | #include "matmul.hh"
#include "../runtime/node.hh"
namespace gpu
{
namespace
{
// Naive dense matmul: out (arows x bcols) = a (arows x acols) * b (acols x bcols).
// One thread per output element; row comes from the x launch dimension.
__global__
void matmul(const dbl_t* a, const dbl_t* b, dbl_t* out,
            std::size_t arows, std::size_t acols, std::size_t bcols)
{
    const std::size_t r = blockIdx.x * blockDim.x + threadIdx.x;
    const std::size_t c = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= arows || c >= bcols)
        return;
    dbl_t acc = 0;
    for (std::size_t k = 0; k < acols; ++k)
        acc += a[r * acols + k] * b[k * bcols + c];
    out[r * bcols + c] = acc;
}
// Adds row vector b (length cols) to every row of matrix a (rows x cols).
__global__
void mvrow_add(const dbl_t* a, const dbl_t* b, dbl_t* out,
               std::size_t rows, std::size_t cols)
{
    const std::size_t r = blockIdx.x * blockDim.x + threadIdx.x;
    const std::size_t c = blockIdx.y * blockDim.y + threadIdx.y;
    if (r < rows && c < cols)
        out[r * cols + c] = a[r * cols + c] + b[c];
}
// Fused matmul plus row-vector bias: out = a * b + broadcast(c).
__global__
void matmul_add(const dbl_t* a, const dbl_t* b, const dbl_t* c, dbl_t* out,
                std::size_t arows, std::size_t acols, std::size_t bcols)
{
    const std::size_t r = blockIdx.x * blockDim.x + threadIdx.x;
    const std::size_t j = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= arows || j >= bcols)
        return;
    dbl_t acc = 0;
    for (std::size_t k = 0; k < acols; ++k)
        acc += a[r * acols + k] * b[k * bcols + j];
    out[r * bcols + j] = acc + c[j];
}
// out (acols x bcols) = transpose(a) * b, where a is stored arows x acols.
__global__
void tmat_mat_mul(const dbl_t* a, const dbl_t* b, dbl_t* out,
                  std::size_t acols, std::size_t arows, std::size_t bcols)
{
    const std::size_t r = blockIdx.x * blockDim.x + threadIdx.x;
    const std::size_t c = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= acols || c >= bcols)
        return;
    dbl_t acc = 0;
    for (std::size_t k = 0; k < arows; ++k)
        acc += a[k * acols + r] * b[k * bcols + c];
    out[r * bcols + c] = acc;
}
// out (arows x brows) = a * transpose(b), where b is stored brows x acols.
__global__
void mat_tmat_mul(const dbl_t* a, const dbl_t* b, dbl_t* out,
                  std::size_t arows, std::size_t acols, std::size_t brows)
{
    const std::size_t r = blockIdx.x * blockDim.x + threadIdx.x;
    const std::size_t c = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= arows || c >= brows)
        return;
    dbl_t acc = 0;
    for (std::size_t k = 0; k < acols; ++k)
        acc += a[r * acols + k] * b[c * acols + k];
    out[r * brows + c] = acc;
}
}
// Launches matmul for node: out1 = in1 (len1 x len2) * in2 (len2 x len3).
void kernel_mat_mat_mul(rt::Node* node)
{
    const std::size_t arows = node->len1;
    const std::size_t acols = node->len2;
    const std::size_t bcols = node->len3;
    const std::size_t bs = 32;
    dim3 block(bs, bs);
    dim3 grid((arows + bs - 1) / bs, (bcols + bs - 1) / bs);
    matmul<<<grid, block>>>(node->in1, node->in2, node->out1, arows, acols, bcols);
}
// Launches mvrow_add: out1 = in1 (len1 x len2) + broadcast row in2 (len2).
void kernel_mat_rvect_add(rt::Node* node)
{
    const std::size_t rows = node->len1;
    const std::size_t cols = node->len2;
    const std::size_t bs = 32;
    dim3 block(bs, bs);
    dim3 grid((rows + bs - 1) / bs, (cols + bs - 1) / bs);
    mvrow_add<<<grid, block>>>(node->in1, node->in2, node->out1, rows, cols);
}
// Launches matmul_add: out1 = in1 * in2 + broadcast row in3.
void kernel_mat_mul_add(rt::Node* node)
{
    const std::size_t arows = node->len1;
    const std::size_t acols = node->len2;
    const std::size_t bcols = node->len3;
    const std::size_t bs = 32;
    dim3 block(bs, bs);
    dim3 grid((arows + bs - 1) / bs, (bcols + bs - 1) / bs);
    matmul_add<<<grid, block>>>(node->in1, node->in2, node->in3, node->out1,
                                arows, acols, bcols);
}
// Launches tmat_mat_mul: out1 = transpose(in1) * in2.
void kernel_tmat_mat_mul(rt::Node* node)
{
    const std::size_t acols = node->len1;
    const std::size_t arows = node->len2;
    const std::size_t bcols = node->len3;
    const std::size_t bs = 32;
    dim3 block(bs, bs);
    dim3 grid((acols + bs - 1) / bs, (bcols + bs - 1) / bs);
    tmat_mat_mul<<<grid, block>>>(node->in1, node->in2, node->out1,
                                  acols, arows, bcols);
}
// Launches mat_tmat_mul: out1 = in1 * transpose(in2).
void kernel_mat_tmat_mul(rt::Node* node)
{
    const std::size_t arows = node->len1;
    const std::size_t acols = node->len2;
    const std::size_t brows = node->len3;
    const std::size_t bs = 32;
    dim3 block(bs, bs);
    dim3 grid((arows + bs - 1) / bs, (brows + bs - 1) / bs);
    mat_tmat_mul<<<grid, block>>>(node->in1, node->in2, node->out1,
                                  arows, acols, brows);
}
}
|
10,808 | #include "includes.h"
// For each point: if it is flagged as a projection, the nearest-neighbour
// index is initialized to the point's own index, otherwise to -1.
__global__ void kernel_cudaPrepareProjectionIndexes(char *d_v_is_projection, int *d_nearest_neighbour_indexes, int number_of_points)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= number_of_points)
        return;
    d_nearest_neighbour_indexes[idx] = (d_v_is_projection[idx] == 0) ? -1 : idx;
} |
10,809 | /*
@Author: 3sne ( Mukur Panchani )
@FileName: q2MatrixSummer.cu
@Task: CUDA program compute sums of two matrices, using different parallelism techniques.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// One thread per row: thread t computes row t of c = a + b (m x n, row-major).
__global__ void addMatRowThreads(int *a, int *b, int *c, int m, int n) {
  int row = threadIdx.x;
  int base = row * n;
  for (int j = 0; j < n; j++) {
    c[base + j] = a[base + j] + b[base + j];
  }
}
// One thread per column: thread t computes column t of c = a + b (m x n).
__global__ void addMatColThreads(int *a, int *b, int *c, int m, int n) {
  int col = threadIdx.x;
  for (int row = 0; row < m; row++) {
    int ind = row * n + col;
    c[ind] = a[ind] + b[ind];
  }
}
// One thread per element. The launch uses block shape (n, m), so
// threadIdx.x is the column (0..n-1) and threadIdx.y the row (0..m-1).
__global__ void addMatElementThread(int *a, int *b, int *c, int m, int n) {
  int ci = threadIdx.x;
  int ri = threadIdx.y;
  // Fixed: the row-major flat index is row * n + col; the original used
  // ri * m + ci, which addresses the wrong element whenever m != n.
  int id = ri * n + ci;
  c[id] = a[id] + b[id];
}
// Interactive driver: reads an m x n matrix pair, then adds them on the GPU
// using one of three parallelization granularities chosen by the user.
// CUDA return codes are not checked.
int main() {
int *matA, *matB, *matC;
int *da, *db, *dc;
int m, n;
printf("== Enter Dimension of Matrix A and B (m x n) ==\n");
printf("m >> "); scanf("%d", &m);
printf("n >> "); scanf("%d", &n);
matA = (int*)malloc(sizeof(int) * m * n);
matB = (int*)malloc(sizeof(int) * m * n);
matC = (int*)malloc(sizeof(int) * m * n);
printf("== Matrix A Elements ==\n");
for(int i = 0; i < m * n; i++) {
scanf("%d", &matA[i]);
}
printf("== Matrix B Elements ==\n");
for(int i = 0; i < m * n; i++) {
scanf("%d", &matB[i]);
}
cudaMalloc((void **) &da, sizeof(int) * m * n);
cudaMalloc((void **) &db, sizeof(int) * m * n);
cudaMalloc((void **) &dc, sizeof(int) * m * n);
cudaMemcpy(da, matA, sizeof(int) * m * n, cudaMemcpyHostToDevice);
cudaMemcpy(db, matB, sizeof(int) * m * n, cudaMemcpyHostToDevice);
printf("\nChoose a degree of parallelism >> \n");
printf("1. Thread handles row\n");
printf("2. Thread handles column\n");
printf("3. Thread handles element\nChoice >> ");
int choice = 0;
scanf("%d", &choice);
// Block shape (x = columns, y = rows) for the per-element kernel.
dim3 block_conf (n, m);
switch(choice) {
case 1://Part A: 1 Thread handles 1 row >>
printf("Chose: Thread handles row\n");
addMatRowThreads<<<1,m>>>(da, db, dc, m, n);
break;
case 2://Part B: 1 Thread handles 1 column >>
printf("Chose: Thread handles column\n");
addMatColThreads<<<1,n>>>(da, db, dc, m, n);
break;
case 3://Part C: 1 Thread handles 1 element >>
printf("Chose: Thread handles element\n");
addMatElementThread<<<1, block_conf>>>(da, db, dc, m, n);
break;
default:
printf("Bad Option, exiting ...\n");
exit(EXIT_FAILURE);
break;
}
// Blocking copy also synchronizes with the chosen kernel.
cudaMemcpy(matC, dc, sizeof(int) * m * n, cudaMemcpyDeviceToHost);
printf("== Matrix C Elements (computed by choice %d)==\n", choice);
for ( int i = 0; i < m; i++ ) {
for ( int j = 0; j < n; j++ ) {
printf("%d ", matC[i * n + j]);
}
printf("\n");
}
cudaFree(da);
cudaFree(db);
cudaFree(dc);
free(matA);
free(matB);
free(matC);
return 0;
} |
10,810 | #include<stdlib.h>
#include<stdio.h>
const int N = 32;
// C = A * B for N x N row-major int matrices; one thread per element of C.
__global__ void mul(int* A, int* B, int* C){
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col < N && row < N){
    int acc = 0;
    for (int k = 0; k < N; ++k){
      acc += A[row*N + k] * B[k*N + col];
    }
    C[row*N + col] = acc;
  }
}
// Prints a z x z row-major int matrix, one row per line.
__host__ void affiche(int *A, int z){
  for (int r = 0; r < z; r++){
    for (int c = 0; c < z; c++){
      printf(" %d ", A[r*z + c]);
    }
    printf("\n");
  }
}
// Multiplies two all-ones N x N matrices on the GPU and prints the result
// (every entry should be N). CUDA return codes are not checked.
int main(void){
    int *A, *B, *C, *da, *db, *dc;
    int size = N * N * sizeof(int);
    cudaMalloc((void **) & da, size);
    cudaMalloc((void **) & db, size);
    cudaMalloc((void **) & dc, size);
    A = (int *)malloc(size);
    B = (int *)malloc(size);
    C = (int *)malloc(size);
    for (int i=0; i<N * N; ++i){
        A[i]=1; B[i]=1;
    }
    cudaMemcpy(da, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, B, size, cudaMemcpyHostToDevice);
    // Fixed: the original launched mul<<<dimGrid, dimGrid>>> with
    // dimGrid(N, N), i.e. N*N blocks of N*N threads -- 1023 of the 1024
    // blocks were fully masked out by the kernel's bounds check. One
    // N x N block covers the whole matrix.
    dim3 dimBlock(N, N);
    mul<<<1, dimBlock>>>(da, db, dc);
    cudaMemcpy(C, dc, size, cudaMemcpyDeviceToHost);
    affiche(C, N);
    free(A); free(B); free(C);
    cudaFree(da); cudaFree(db); cudaFree(dc);
    return 0;
}
|
10,811 | #include <stdio.h>
// Micro-benchmark kernel: every thread performs 87 add/subtract pairs on
// x[0] with y[0], followed by one final add. The loop below executes the
// exact same sequence of floating-point operations as the original
// straight-line code (87 pairs + 1 add), so the result is bit-identical;
// the net effect is a single x[0] += y[0] worth of rounding behavior.
__global__ void array_sum(double *x, double *y) {
    int i = 0;
    for (int rep = 0; rep < 87; rep++) {
        x[i] += y[i];
        x[i] -= y[i];
    }
    x[i] += y[i];
}
|
10,812 | #include "includes.h"
// Block-wise sum reduction: each block sums THEAD_MAX inputs in shared
// memory and writes its partial sum to out[blockIdx.x]. Out-of-range lanes
// contribute 0, so n need not be a multiple of the block size.
// Baseline "v0" variant: the tx % (2*s) == 0 test makes alternate threads
// idle (divergent) at every step -- correct, but the slowest layout.
__global__ void reduce_v0(float* in,float* out, int n){
int tx = threadIdx.x;
int bx = blockIdx.x;
int BX = blockDim.x; //same as THEAD_MAX
int i = bx*BX+tx;
__shared__ float S[THEAD_MAX];
S[tx] = i < n ? in[i] : 0;
__syncthreads();
// Pairwise tree: stride doubles each round; barrier keeps rounds ordered.
for(int s=1; s<BX ;s*=2){
if(tx%(2*s)==0)
S[tx] += S[tx+s];
__syncthreads();
}
// Thread 0 publishes the block's partial sum.
if(tx==0)
out[bx] = S[0];
} |
10,813 | /*
*
* Carlos Roman Rivera - A01700820
*
* Programming Languages - Cuda Quiz
*
*/
#include <stdio.h>
#define N 9
#define K N/3
#define ThreadsPerBlock K
#define NumBlocks K
// Produces a k x k "compressed" matrix where each entry is the sum of a
// k x k window of the n x n input anchored at (row, col).
// NOTE(review): the window is anchored at (row, col) -- i.e. windows of
// adjacent outputs overlap. If the intent was to sum disjoint k x k tiles
// (9x9 -> 3x3), the index should be (col*k + j_col) + (row*k + i_row) * n;
// confirm against the assignment spec.
__global__ void compress(float *mat, int n, float *comp, int k){
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
if (row < k && col < k) {
comp[col + row * k] = 0;
for (int i_row = 0 ; i_row < k ; i_row++) {
for (int j_col = 0 ; j_col < k ; j_col++) {
comp[col + row * k] += mat[(col + j_col) + (row + i_row) * n];
}
}
}
}
// Prints an n x n row-major float matrix (one decimal, tab-separated),
// followed by a blank line.
void print_mat(float *mat, int n){
  for (int r = 0; r < n; r++){
    for (int c = 0; c < n; c++){
      printf("%.1f\t", mat[r*n+c]);
    }
    printf("\n");
  }
  printf("\n");
}
// Fills an n x n row-major matrix with the sequence 0, 1, 2, ... n*n-1.
void fill_mat(float *mat, int n){
  int next = 0;
  for (int r = 0; r < n; r++){
    for (int c = 0; c < n; c++){
      mat[r*n+c] = next++;
    }
  }
}
// Fills an N x N matrix, "compresses" it to K x K on the GPU, and prints
// both. CUDA return codes are not checked.
int main(){
    float *h_compress, *h_matrix;
    float *d_compress, *d_matrix;
    h_compress = (float *)malloc(sizeof(float) * K * K);
    h_matrix = (float *)malloc(sizeof(float) * N * N);
    fill_mat(h_matrix, N);
    fill_mat(h_compress, K);
    printf("Input matrix:\n");
    print_mat(h_matrix, N);
    // Fixed: d_matrix and d_compress were never allocated -- the original
    // passed uninitialized pointers straight to cudaMemcpy.
    cudaMalloc((void **)&d_matrix, sizeof(float) * N * N);
    cudaMalloc((void **)&d_compress, sizeof(float) * K * K);
    cudaMemcpy(d_matrix, h_matrix, sizeof(float) * N * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_compress, h_compress, sizeof(float) * K * K, cudaMemcpyHostToDevice);
    dim3 Blocks(K,K);
    dim3 Threads(K,K);
    compress<<<Blocks, Threads>>>(d_matrix, N, d_compress, K);
    cudaMemcpy(h_compress, d_compress, sizeof(float) * K * K, cudaMemcpyDeviceToHost);
    printf("Compressed matrix:\n");
    print_mat(h_compress, K);
    free(h_matrix);
    free(h_compress);
    cudaFree(d_matrix);
    cudaFree(d_compress);
}
|
10,814 | #include "includes.h"
// Returns the smaller of a and b (a on ties).
__device__ double min2(double a, double b)
{
  return (b < a) ? b : a;
}
// Returns the larger of a and b (a on ties).
__device__ double max2(double a, double b)
{
  return (b > a) ? b : a;
}
// CFL time-step condition on a polar (nrad x nsec) grid: for each interior
// cell, combines sound-speed, radial-advection, azimuthal-advection and
// artificial-viscosity inverse time-scales into the cell's allowed dt,
// stored in DT2D. Uses the PI, CVNR and CFLSECURITY constants from the
// shared header.
__global__ void ConditionCFLKernel2D1 (double *Rsup, double *Rinf, double *Rmed, int nsec, int nrad, double *Vresidual, double *Vtheta, double *Vmoy, int FastTransport, double *SoundSpeed, double *Vrad, double *DT2D)
{
// j = azimuthal index, i = radial index (i == 0 ring is skipped).
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double dxrad, dxtheta, invdt1, invdt2, invdt3, invdt4, dvr, dvt, dt;
if (i > 0 && i<nrad && j<nsec){
// Cell extents: radial width and azimuthal arc length at this ring.
dxrad = Rsup[i]-Rinf[i];
dxtheta = Rmed[i]*2.0*PI/(double)nsec;
if (FastTransport) Vresidual[i*nsec + j] = Vtheta[i*nsec + j]-Vmoy[i]; /* Fargo algorithm */
else Vresidual[i*nsec + j] = Vtheta[i*nsec + j]; /* Standard algorithm */
//Vresidual[i*nsec + nsec] = Vresidual[i*nsec];
// Inverse time-scales: sound crossing, radial and azimuthal advection.
invdt1 = SoundSpeed[i*nsec + j]/(min2(dxrad,dxtheta));
invdt2 = fabs(Vrad[i*nsec + j])/dxrad;
invdt3 = fabs(Vresidual[i*nsec + j])/dxtheta;
// Velocity jumps feed the artificial-viscosity constraint; note that
// Vrad at i+1 is read, so the last ring reads one row beyond i < nrad-1
// only if callers size Vrad with nrad+1 rows -- presumably a staggered
// grid; confirm against the host-side allocation.
dvr = Vrad[(i+1)*nsec + j]-Vrad[i*nsec + j];
dvt = Vtheta[i*nsec + (j+1)%nsec]-Vtheta[i*nsec + j];
// Only compressive (negative) jumps matter; floor at a tiny positive value.
if (dvr >= 0.0) dvr = 1e-10;
else dvr = -dvr;
if (dvt >= 0.0) dvt = 1e-10;
else dvt = -dvt;
invdt4 = max2(dvr/dxrad, dvt/dxtheta);
invdt4*= 4.0*CVNR*CVNR;
// Combine all constraints with a safety factor.
dt = CFLSECURITY/sqrt(invdt1*invdt1+invdt2*invdt2+invdt3*invdt3+invdt4*invdt4);
DT2D[i*nsec + j] = dt; // array nrad*nsec size dt
}
} |
10,815 | #include "scan.h"
// Naive O(length) inclusive prefix sum: thread idx accumulates
// d_input[idx], d_input[idx-1], ..., d_input[0] (in that order) and writes
// the total to d_output[idx].
__global__ void
scan_v1_kernel(float *d_output, float *d_input, int length)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    float acc = 0.f;
    for (int offset = 0; offset < length; offset++) {
        int src = idx - offset;
        if (src < 0)
            continue;
        acc += d_input[src];
    }
    d_output[idx] = acc;
}
// Host wrapper: one thread per element, BLOCK_DIM threads per block.
void scan_v1(float *d_output, float *d_input, int length)
{
    int nblocks = (length + BLOCK_DIM - 1) / BLOCK_DIM;
    scan_v1_kernel<<<nblocks, BLOCK_DIM>>>(d_output, d_input, length);
} |
10,816 | #include <stdio.h>
struct City {
int x, y;
char name;
};
// Prints a readable diagnostic when a CUDA call fails and (by default)
// aborts with the error code. Used via the GPUerrchk macro below.
// Fixed: `file` is now const char* -- __FILE__ expands to a string literal,
// which must not bind to a mutable char* in C++.
inline void GPUassert(cudaError_t code, const char * file, int line, bool Abort=true)
{
    if (code != 0) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),file,line);
        if (Abort) exit(code);
    }
}
#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }
#define NUMCITIES 9
#define CITYSIZE sizeof(struct City)
// #define CITYSIZE 1
// Prints a city's coordinates (its name is intentionally not printed).
__host__ __device__ void printCity(struct City city){
  printf("[x=%i, y=%i]\n", city.x, city.y);
}
// Exchanges the cities at indices x and y of array a.
__host__ __device__ void swap(struct City *a, int x, int y){
  struct City held = a[y];
  a[y] = a[x];
  a[x] = held;
}
// Euclidean distance between two cities.
__device__ double get_distance(struct City c1, struct City c2){
  double dx = double(c1.x - c2.x);
  double dy = double(c1.y - c2.y);
  return sqrt((dx*dx) + (dy*dy));
}
// Sums the leg distances along path[0..NUMCITIES-1].
// NOTE(review): the accumulator is a long, so every fractional leg length
// is truncated as it is added -- presumably intentional coarse scoring,
// but confirm; accumulating in double and casting once would differ.
__device__ long total_distance(struct City *path){
long distance = 0;
for(int i = 0; i < NUMCITIES - 1; i++){
distance += get_distance(path[i], path[i+1]);
}
return (long)distance;
}
// Debug helper: prints the first NUMCITIES-1 cities of the path.
// Fixed: removed the unused best_path_idx local (dead code).
__device__ void shortest_path(struct City *path){
  for(int i = 0; i < NUMCITIES - 1; i++){
    printCity(path[i]);
  }
}
// Prints the path as "A>B>...>" followed by a newline.
__device__ void print_path(struct City *path){
  for(int i = 0; i < NUMCITIES; i++)
    printf("%c>", path[i].name);
  printf("\n");
}
// Serializes the path into str as "A>B>...>X" -- name/'>' pairs with the
// final '>' overwritten by the NUL terminator.
__device__ void format_path(struct City *path, char *str){
  char *out = str;
  for(int i = 0; i < NUMCITIES; i++){
    out[0] = path[i].name;
    out[1] = '>';
    out += 2;
  }
  out[-1] = 0;
}
// Recursively generates every permutation of a[i..length-1] by swap-in /
// recurse / swap-back; at each full permutation it evaluates the tour
// length and bumps count[0]. The per-permutation path formatting is
// disabled (commented out), so currently only the distance call and the
// counter have any effect.
// NOTE(review): device-side recursion depth here is NUMCITIES (9); stack
// usage should be checked with nvcc's local-memory report.
__device__ void permutations_kernel(struct City *a, char **paths, double *distances, int i, int length, int tid, int *count) {
if (length == i){
long distance = total_distance(a);
//format_path(a, paths[count[0]]);
count[0] = count[0] + 1;
} else {
for (int j = i; j < length; j++) {
swap(a, i, j);
// CUDA
// permutations(a, i+1, length, tid, count);
permutations_kernel(a, paths, distances, i+1, length, tid, count);
swap(a, i, j);
}
}
}
// Entry kernel: each thread copies the city list into registers/local
// memory and enumerates all permutations via permutations_kernel.
// NOTE(review): every thread enumerates the SAME full permutation set --
// the commented-out swap that was meant to give each thread a distinct
// first city is disabled, so the work is duplicated, not partitioned.
__global__ void permute_kernel(struct City *dev_cities, char **paths, double *distances, int size) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Per-thread permutation counter (passed by pointer into the recursion).
int count[1];
count[0] = 0;
struct City local_array[NUMCITIES];
for (int i=0; i<size; i++){
local_array[i] = dev_cities[i];
}
//swap(local_array + threadIdx.x, local_array);
//swap(local_array, threadIdx.x, 0);
permutations_kernel(local_array, paths, distances, 0, NUMCITIES, tid, count);
}
// Iterative factorial; returns 1 for i <= 0.
long factorial(int i) {
    long result = 1;
    for (; i > 1; --i)
        result *= i;
    return result;
}
// TSP brute-force driver: generates NUMCITIES random cities, launches the
// permutation kernel, and reports the elapsed time.
// NOTE(review): host_paths and host_distances are declared with size 0
// (the real sizes are commented out, presumably because 9! entries blew
// the stack) -- so the memcpys below copy 0 bytes and the kernel's paths/
// distances outputs are effectively unused; confirm before relying on them.
// NOTE(review): device_paths is a char** but the allocation is one flat
// char buffer -- dereferencing paths[i] on the device would be invalid;
// currently harmless only because format_path is commented out.
int main(){
struct City host_cities[NUMCITIES];
// Random city layout; rand() is unseeded, so the layout is deterministic.
for(int c = 0; c < NUMCITIES; c++){
host_cities[c].name = 'A' + c;
host_cities[c].x = rand() % 20 + 5;
host_cities[c].y = rand() % 20 + 5;
}
//char host_paths [ factorial(NUMCITIES) ][ NUMCITIES*NUMCITIES ];
char host_paths [0][0];
char **device_paths;
//double host_distances[ factorial(NUMCITIES) ];
double host_distances[0];
double *device_distances;
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
struct City *device_cities;
cudaMalloc((void**) &device_cities, sizeof(host_cities));
//cudaMalloc((void**) &device_paths, sizeof(host_paths));
cudaMalloc((void**) &device_paths, sizeof(char) * NUMCITIES * NUMCITIES * factorial(NUMCITIES));
cudaMalloc((void**) &device_distances, sizeof(host_distances));
GPUerrchk(cudaMemcpy(device_distances, host_distances, sizeof(host_distances), cudaMemcpyHostToDevice));
GPUerrchk(cudaMemcpy(device_cities, host_cities, sizeof(host_cities), cudaMemcpyHostToDevice));
GPUerrchk(cudaMemcpy(device_paths, host_paths, sizeof(host_paths), cudaMemcpyHostToDevice));
cudaEventRecord(start,0);
permute_kernel<<<1, NUMCITIES>>>(device_cities, device_paths, device_distances, NUMCITIES);
cudaEventRecord(stop,0);
GPUerrchk(cudaPeekAtLastError());
// Device-wide sync also makes the stop event safe to query below.
GPUerrchk(cudaDeviceSynchronize());
cudaEventElapsedTime( &time, start, stop );
printf("\nTiempo de Ejecucion: %f mSeg\n\n", time);
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaFree(device_cities);
return 0;
}
|
10,817 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<unistd.h>
#include<time.h>
// Sparse matrix-vector multiply over a blocked sliced-coordinate layout:
// thread `tid` accumulates cols[tid/2] products, reading values from scval
// and their column indices from sccol starting at rowptr[tid].
// NOTE(review): colidx = tid/2 suggests two threads share one cols[] entry
// (block size 2 in the slicing scheme) -- confirm against the host-side
// matrix construction, which is not visible in main() here.
// The printf blocks are debug traces (tid 1 only) left in by the author.
__global__ void multiply(int *scval, int *sccol, int *vec, int *result, int *cols, int *rowptr)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int sum=0;
int i;
int colidx=tid/2;
// printf("\n tid=%d", tid);
// printf("\ncols[%d]=%d",colidx,cols[colidx]);
for(i=0;i<cols[colidx];i++)
{
// Gather: value * vector element selected by the stored column index.
sum += vec[sccol[rowptr[tid]+i]]*scval[rowptr[tid]+i];
if(tid==1)
{
printf("\n HAHAHAHA");
printf("\nrowptr[%d]=%d",tid, rowptr[tid]);
// printf("\ntid=%d, %d*%d=%d",tid, scval[rowptr[tid]+i],vec[sccol[rowptr[tid]+i]],vec[sccol[rowptr[tid]+i]]*scval[rowptr[tid]+i]);
// printf("\nsccol[%d]=%d",rowptr[tid]+i, sccol[rowptr[tid]+i]);
// printf("\nvec[%d]=%d",sccol[rowptr[tid]+i], vec[sccol[rowptr[tid]+i]]);
printf("\nSum=%d", sum);
}
printf("\n");
}
// __syncthreads();
result[tid]=sum;
}
// Device-side debug print of the first N ints of mat.
__global__ void printmatscreen(int* mat, int N)
{
  for (int k = 0; k < N; k++)
  {
    printf("%d ", mat[k]);
  }
  printf("\n");
}
// Allocates an arraySizeX x arraySizeY int matrix as an array of row
// pointers and zero-initializes every element.
int** Make2DIntArray(int arraySizeX, int arraySizeY)
{
    int** rows = (int**) malloc(arraySizeX*sizeof(int*));
    for (int r = 0; r < arraySizeX; r++)
    {
        rows[r] = (int*) malloc(arraySizeY*sizeof(int));
        for (int c = 0; c < arraySizeY; c++)
        {
            rows[r][c] = 0;
        }
    }
    return rows;
}
/* Allocates a variable-width (Sell-C style) matrix: `blocks` slices of
 * `blocksize` rows each, where every row of slice i has columns[i] ints,
 * zero-initialized.  The pointer table has `rows` entries; entries beyond
 * blocks*blocksize are left unset, exactly as before.
 * calloc replaces the original malloc + separate zeroing pass. */
int** Make2DVariableIntArray(int rows, int blocks, int blocksize, int* columns)
{
int** theArray = (int**) malloc(rows * sizeof(int*));
if (theArray == NULL)
return NULL;
for (int i = 0; i < blocks; i++)
{
int width = columns[i];          /* slice width shared by its rows */
for (int j = 0; j < blocksize; j++)
theArray[i * blocksize + j] = (int*) calloc(width, sizeof(int));
}
return theArray;
}
/* Copies the populated part of theArray into a freshly allocated
 * variable-width array (slice i contributes blocksize rows of columns[i]
 * ints each) and returns the new array. */
int** Changeto2DVariableIntArray(int** theArray,int rows, int blocks, int blocksize, int* columns)
{
int** copy = Make2DVariableIntArray(rows, blocks, blocksize, columns);
for (int b = 0; b < blocks; ++b)
{
int width = columns[b];
for (int r = 0; r < blocksize; ++r)
{
int rowIdx = b * blocksize + r;
for (int c = 0; c < width; ++c)
copy[rowIdx][c] = theArray[rowIdx][c];
}
}
printf("changed to multiple matrixes");
return copy;
}
/* Zeroes every entry of an N x N matrix stored as an array of row pointers. */
void init_zeros(int** matrix, int N)
{
for (int r = 0; r < N; ++r)
for (int c = 0; c < N; ++c)
matrix[r][c] = 0;
}
/* Prints an N x Nj matrix, one row per line.
 * Fix: the column loop previously used N and ignored Nj entirely; every
 * existing caller passes Nj == N, so their output is unchanged. */
void printmat(int** matrix, int N, int Nj)
{
int i, j;
for (i = 0; i < N; i++)
{
printf("\n");
for (j = 0; j < Nj; j++)
{
printf("%d ", matrix[i][j]);
}
}
printf("\n");
}
/*
Prints original 2D matrices to file (K x K, tab-separated, one row per line).
Fixes: the FILE* was never closed (handle leak / possibly unflushed data)
and a failed fopen would have been dereferenced.
*/
void printtofile(int** matrix, int K, char* filename)
{
FILE *fp = fopen(filename, "wt");
if (fp == NULL)
return;  /* best-effort: silently skip if the file cannot be opened */
int i, j;
for (i = 0; i < K; i++)
{
fprintf(fp, "\n");
for (j = 0; j < K; j++)
{
fprintf(fp, "%d\t", matrix[i][j]);
}
}
fclose(fp);
}
/*
Prints resultant matrix to a file: the first K*K entries of a flat array,
written as a K x K matrix.
Fixes: missing fclose (handle leak) and a NULL-fp dereference on fopen
failure; the unused variable j was removed.
*/
void printtofile1D(int* matrix, int K, char* filename)
{
FILE *fp = fopen(filename, "wt");
if (fp == NULL)
return;  /* best-effort: silently skip if the file cannot be opened */
int counters = 0;
for (int i = 0; i < K; i++)
{
fprintf(fp, "\n");
for (int j = 0; j < K; j++)
{
fprintf(fp, "%d \t", matrix[counters]);
counters++;
}
}
fclose(fp);
}
/* Allocates a zero-initialized int array of length arraySizeX.
 * calloc replaces the original malloc + manual zeroing loop and avoids
 * writing through a null pointer when allocation fails. */
int* Make1DIntArray(int arraySizeX)
{
return (int*) calloc(arraySizeX, sizeof(int));
}
/* Frees a 2D array of sizeX row pointers, then the pointer table itself.
 * sizeY is unused; it is kept only for interface compatibility. */
void freese(int sizeX, int sizeY, double** ptr)
{
for (int r = 0; r < sizeX; ++r)
free(ptr[r]);
free(ptr);
}
// Driver: reads a dense N x N matrix and an N-vector from files, converts
// the matrix to a sliced (Sell-C, C=2) sparse format, multiplies on the
// GPU, and prints the result reordered back to original row order.
int main()
{
int N=6;
// const int Dsize=1000;
FILE *arr, *vec;
int i,j,maxrowwidth=0,tint=0;
int** a=Make2DIntArray(N,N);
// int* val=Make1DIntArray(Dsize);
// int* col=Make1DIntArray(Dsize);
// int* row=Make1DIntArray(Dsize);
int* result=Make1DIntArray(N);
int* vecX=Make1DIntArray(N);
int** scval=Make2DIntArray(N,N); //sell c value
int** sccol=Make2DIntArray(N,N); //sell c col
int* rowwidth=Make1DIntArray(N); //number of elements in each row
int* temp=Make1DIntArray(N);
int *dev_vec, *dev_scval, *dev_result, *dev_sccol, *dev_cols, *dev_rowptr;
int* rows=Make1DIntArray(N);  // permutation: sorted position -> original row
int* resultsordered=Make1DIntArray(N);
//int val[10],col[10],row[10];
// NOTE(review): fopen results are never checked; missing mat.txt/vec.txt
// will crash in fscanf.
arr=fopen("mat.txt","r");
int k=0;
// struct timeval start, end;
// gettimeofday(&start, NULL);
//Reading the vector
vec=fopen("vec.txt","r");
for (i=0;i<N;i++)
{
fscanf(vec,"%d",&vecX[i]);
rows[i]=i;
}
printf("\n Vector is:\n");
for (i=0;i<N;i++)
{
printf("%d\n",vecX[i]);
}
//Reading the matrix
for(i=0;i<N;i++)
{
printf("\n");
for(j=0;j<N;j++)
{
fscanf(arr,"%d",&a[i][j]);
printf("%d ",a[i][j]);
}
}
printf("\n");
//row[i]=k;
//printf("\n k = %d\n ", k);
//sleep(10);
//gettimeofday(&end, NULL);
//double delta = ((end.tv_sec - start.tv_sec) * 1000000u +
// end.tv_usec - start.tv_usec) / 1.e6;
// printf("\nTime spent=%f\n", delta);
// Compress each row: keep nonzeros (scval) and their column indices
// (sccol); rowwidth[i] counts nonzeros in row i, maxrowwidth the maximum.
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(a[i][j]!=0)
{
scval[i][k]=a[i][j];
//printf("\n scval[%d][%d]=%d",i,k,scval[i][k]);
//sleep(1);
sccol[i][k]=j;
//printf("\n sccol[%d][%d]=%d",i,k,sccol[i][k]);
rowwidth[i]=k+1;
if(rowwidth[i]>maxrowwidth)
{ //printf("\nrow[%d] width=%d\n",i,maxrowwidth);
maxrowwidth=rowwidth[i];
}k++;
}
}
//printf("\nRow width %d = %d", i, rowwidth[i]);
k=0;
}
// Bubble-sort rows by descending width (values, columns, widths and the
// rows[] permutation are swapped together).
for(i=0;i<N-1;i++)
{
for(j=0;j<N-1;j++)
{
if(rowwidth[j]<rowwidth[j+1])
{ /*printf("\nrow %d width=%d",j,rowwidth[j]);
printf("\nscval[%d]=",j);
for(k=0;k<rowwidth[j];k++)
{
printf("%d ", scval[j][k]);
}
printf("\nscval[%d]=",j+1);
for(k=0;k<rowwidth[j+1];k++)
{
printf("%d ", scval[j+1][k]);
}
*/
temp=scval[j];
scval[j]=scval[j+1];
scval[j+1]=temp;
temp=sccol[j];
sccol[j]=sccol[j+1];
sccol[j+1]=temp;
tint=rowwidth[j];
rowwidth[j]=rowwidth[j+1];
rowwidth[j+1]=tint;
tint=rows[j];
rows[j]=rows[j+1];
rows[j+1]=tint;
}
}
}
// Drop trailing all-zero rows; round the remaining row count up to an
// even number so slices of height C=2 tile exactly.
for(i=0;i<N;i++)
{
if(scval[i][0]==0)
{
break;
}
}
if(i%2==1)
N=i+1;
else
N=i;
printf("\nmaxrowwidth=%d\n",maxrowwidth);
printmat(scval,N,N);
printtofile(scval,N,"scval.txt");
printtofile(sccol,N,"sccol.txt");
printf("\n Vector is:\n");
for (i=0;i<N;i++)
{
printf("%d\n",vecX[i]);
}
//printmatscreen<<<1,1>>>(dev_b,N);
// NEED TO FIGURE OUT A WAY TO POPULATE cols SO AS TO HAVE varmat CREATED PROPERLY. SYSTEM CRASHES OTHERWISE
// Slice height C = 2; cols[slice] = width of the slice's first (widest)
// row, which pads the second row of each slice to the same width.
int c=2;
int* cols=Make1DIntArray(N/c);
j=0;
int colsum=0;
for(i=0;i<N;i=i+c)
{
cols[j]=rowwidth[i];
printf("\n cols[%d]=%d",j,cols[j]);
colsum+=cols[j];
j++;
}
int** varscval=Changeto2DVariableIntArray(scval,N,N/c,c,cols);
int** varsccol=Changeto2DVariableIntArray(sccol,N,N/c,c,cols);
for (i=0;i<N/c;i++)
{
for(j=0;j<c;j++)
{
printf("\n");
for (k=0;k<cols[i];k++)
{
printf("%d ",varscval[i*c+j][k]);
printf("%d \t",varsccol[i*c+j][k]);
}
}
}
int varsize=colsum*c;
//flattening scval and sccol
// rowptr[r] = offset of row r's data in the flat arrays (N+1 entries).
int counters=0;
int* scval_flat=Make1DIntArray(varsize);
int* sccol_flat=Make1DIntArray(varsize);
int* rowptr=Make1DIntArray(N+1);
rowptr[0]=0;
int countcols=0;
int z=0;
for (i=0;i<N/c;i++)
{
for(j=0;j<c;j++)
{
printf("\n");
countcols=0;
for (k=0;k<cols[i];k++)
{
scval_flat[counters]=varscval[i*c+j][k];
if (scval_flat[counters]!=0)
{
sccol_flat[counters]=varsccol[i*c+j][k];
}
counters=counters+1;
countcols=countcols+1;
}
rowptr[z+1]=rowptr[z]+countcols;
z=z+1;
}
}
printf("\n rowptrs:\n");
for(i=0;i<N;i++)
printf("%d ",rowptr[i]);
printf("\n");
// Device buffers.  NOTE(review): no cudaMalloc/cudaMemcpy return codes
// are checked; dev_rowptr holds only the first N of the N+1 rowptr
// entries, which is sufficient because the kernel reads rowptr[tid] for
// tid < N only.
cudaMalloc((void**)&dev_vec, sizeof(int)*N);
cudaMalloc((void**)&dev_scval, sizeof(int)*varsize);
cudaMalloc((void**)&dev_result, sizeof(int)*N);
cudaMalloc((void**)&dev_sccol, sizeof(int)*varsize);
cudaMalloc((void**)&dev_cols, sizeof(int)*(N/c));
cudaMalloc((void**)&dev_rowptr, sizeof(int)*N);
cudaMemcpy(dev_vec, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_scval, scval_flat, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_result, result, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_sccol, sccol_flat, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cols, cols, sizeof(int)*(N/c), cudaMemcpyHostToDevice);
cudaMemcpy(dev_rowptr, rowptr, sizeof(int)*N, cudaMemcpyHostToDevice);
printf("\nrowwidth:\n");
for (i=0;i<N;i++)
printf("%d ",cols[i]);
printf("\n");
printmatscreen<<<1,1>>>(dev_scval,varsize);
printmatscreen<<<1,1>>>(dev_sccol,varsize);
printmatscreen<<<1,1>>>(dev_cols,(N/c));
printmatscreen<<<1,1>>>(dev_rowptr,N);
//sleep(5);
// One block per slice, one thread per row within the slice (C=2 matches
// the tid/2 assumption inside multiply()).
multiply<<<N/c,c>>>(dev_scval, dev_sccol, dev_vec, dev_result, dev_cols, dev_rowptr);
//__syncthreads();
cudaMemcpy(result, dev_result, sizeof(int)*N, cudaMemcpyDeviceToHost);
// Undo the width sort: result[i] belongs to original row rows[i].
for (i=0;i<N;i++)
{
printf("\nrow[%d]=%d",i,rows[i]);
resultsordered[rows[i]]=result[i];
}
for (i=0;i<N;i++)
{
printf("\n%d",resultsordered[i]);
}
// NOTE(review): dev_rowptr and all host allocations are never freed;
// acceptable at process exit but a leak if this were a library routine.
cudaFree(dev_vec);
cudaFree(dev_scval);
cudaFree(dev_result);
cudaFree(dev_sccol);
cudaFree(dev_cols);
return 0;
}
|
10,818 | #include "includes.h"
/*******************************************************************************
*
*******************************************************************************/
/*************************************************************************
/*************************************************************************/
/*************************************************************************/
// Maps a scalar field (roughly -25..25) to an opaque grayscale RGBA image.
// Expects a 2D launch whose x-extent covers the image width exactly
// (offset assumes gridDim.x * blockDim.x == row stride).
// Fix: 50.0 / 0.5 were double literals, silently promoting the arithmetic
// to double inside a float kernel; float suffixes keep it in float.
__global__ void drawGray(unsigned char* optr, const float* outSrc) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float val = outSrc[offset];
val = (val / 50.0f) + 0.5f; // get {-25 to 25} range into {0 to 1} range
if (val < 0.0f) val = 0.0f;
if (val > 1.0f) val = 1.0f;
unsigned char gray = (unsigned char)(255 * val);
optr[offset * 4 + 0] = gray; // red
optr[offset * 4 + 1] = gray; // green
optr[offset * 4 + 2] = gray; // blue
optr[offset * 4 + 3] = 255;  // alpha (opacity)
}
10,819 | #include "includes.h"
// Expands an inclusive cumulative-offspring table into an ancestor table:
// every offspring slot belonging to particle idx is labelled idx.
// The idx < 0 check guards against int overflow of blockIdx.x * blockDim.x
// on very large launches.
__global__ void cumulativeOffspringToAncestorKernel(const int* cumulativeOffspring, int* ancestor, int numParticles) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < 0 || idx >= numParticles)
return;
const int first = (idx == 0) ? 0 : cumulativeOffspring[idx - 1];
const int last = cumulativeOffspring[idx];
for (int slot = first; slot < last; ++slot)
ancestor[slot] = idx;
}
10,820 | #include "includes.h"
// Adds forceFactor * force to each coordinate component; one thread per
// component (3 floats per cell), launched as a 2D grid of 1D blocks.
__global__ void UseForceKernel( float *force, float forceFactor, float *pointsCoordinates, int maxCells )
{
// flatten (blockIdx.y, blockIdx.x, threadIdx.x) to a global component id
const int tid = threadIdx.x
+ blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
if (tid < 3 * maxCells)
pointsCoordinates[tid] += forceFactor * force[tid];
}
10,821 | #include "includes.h"
// Monte-Carlo pricing kernel for a call option: each thread simulates one
// Euler-discretized GBM path and writes the discounted payoff into d_s.
// Fixes: (1) __syncthreads() sat inside a divergent branch (undefined
// behavior when N_PATHS is not a block multiple) and guarded no shared
// memory, so it is removed; (2) the do-while ran one step even when
// N_STEPS == 0; (3) payoff/exp used double math in a float kernel.
// NOTE(review): consecutive paths start one element apart in d_normals and
// so share N_STEPS-1 draws with their neighbour — confirm the host fills
// d_normals with that layout on purpose.
__global__ void mc_kernel_call(float * d_s, float T, float K, float S0, float sigma, float mu, float r, float dt, float * d_normals, unsigned N_STEPS, unsigned N_PATHS)
{
const unsigned tid = threadIdx.x; // thread id within the block
const unsigned bid = blockIdx.x;  // block id
const unsigned bsz = blockDim.x;  // block size
const unsigned s_idx = tid + bid * bsz;  // path index
if (s_idx < N_PATHS) {
float s_curr = S0;
unsigned n_idx = s_idx;  // index of the next normal draw
for (unsigned n = 0; n < N_STEPS; ++n) {
s_curr = s_curr + mu*s_curr*dt + sigma*s_curr*d_normals[n_idx];
n_idx++;
}
float payoff = (s_curr > K) ? (s_curr - K) : 0.0f;
d_s[s_idx] = expf(-r*T) * payoff;
}
}
10,822 | #include "includes.h"
// Marks pixels whose squared inter-frame difference exceeds a threshold
// in red (BGR layout in the uchar4).
// Fixes: added the missing bounds guard (threads beyond w x h previously
// read/wrote out of bounds whenever the grid overhangs the image) and
// removed the unused x/y locals.
__global__ void MotionVec(float *new_image_dev, float *old_image_dev, uchar4 *Image_dev, int w, int h )
{
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix >= w || iy >= h) return;  // bounds guard (was missing)
const int idx = w*iy + ix;
float diff = old_image_dev[idx] - new_image_dev[idx];
diff *= diff;
const float threshold = 5000;
if (diff > threshold)
{
Image_dev[idx].x = 0;   // B
Image_dev[idx].y = 0;   // G
Image_dev[idx].z = 255; // R
}
}
10,823 | #include "includes.h"
// Evaluates the polynomial with coefficients poly[0..degree] (lowest order
// first) at each array[i] and stores the result back in place.
__global__ void polynomial_expansion (float* poly, int degree, int n, float* array) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= n)
return;
const float x = array[tid];
float acc = 0.0f;
float xpow = 1.0f;  // x^d, built up term by term
for (int d = 0; d <= degree; ++d)
{
acc += xpow * poly[d];
xpow *= x;
}
array[tid] = acc;
}
10,824 | #include "includes.h"
// Writes 1.0f into every element of vec[0..size).
__global__ void FillOnes(float *vec, int size)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
vec[i] = 1.0f;
}
10,825 | #include "includes.h"
// Element-wise C = A + B over N ints (named for use with zero-copy mapped
// host memory, but works on any device-visible pointers).
__global__ void sumArraysZeroCopy(int *A, int *B, int *C, const int N)
{
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N)
C[idx] = A[idx] + B[idx];
}
10,826 | #include <stdio.h>
// Collatz trajectory length: a[idx] = number of steps for idx to reach 1
// (0 for idx == 0).
// Fix: the running value is now 64-bit.  With ~1M starting values the
// 3n+1 excursion exceeds INT_MAX for some starts (e.g. 704511 peaks above
// 5.6e10), which previously overflowed int — undefined behavior and a
// possible infinite loop.
__global__ void kernel(int* a) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
long long answer = idx;  // 64-bit: intermediate values can exceed INT_MAX
int i = 0;
if (answer != 0) {
while (answer != 1) {
if (answer % 2 == 0) { answer /= 2; }
else { answer = 3 * answer + 1; }
i++;
}
}
a[idx] = i;
}
// Driver: computes Collatz lengths for ~1M starting values on the GPU,
// times the kernel, and reports the maximum length.
// Fixes: the program previously kept running after a failed allocation
// (then dereferenced the null pointers), and uploaded an uninitialized
// host buffer that the kernel fully overwrites anyway (copy removed).
int main() {
int dimx = 3907*256;
int num_bytes = dimx * sizeof(int);
int *d_a = 0, *h_a = 0;
h_a = (int*)malloc(num_bytes);
cudaMalloc((void**)&d_a, num_bytes);
if (0==h_a || 0==d_a) {
printf("can't allocate memory");
return 1;  // fix: was falling through and using the null pointers
}
cudaMemset(d_a, 0, num_bytes);
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
cudaEventRecord(start, 0);
kernel<<<3907, 256>>>(d_a);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float et;
cudaEventElapsedTime(&et, start, stop);
cudaEventDestroy(start); cudaEventDestroy(stop);
printf("kernel execution time: %8.6fms\n", et);
cudaMemcpy(h_a, d_a, num_bytes, cudaMemcpyDeviceToHost);
int max = 0;
for(int i=0; i<dimx; i++) {
// printf("%d ", h_a[i]);
if (h_a[i] > max) max = h_a[i];
}
printf("max is %d\n", max);
free(h_a);
cudaFree(d_a);
return 0;
}
|
10,827 | /*
Erick Juarez
CPSC 479 Sec 1
HOMEWORK 6 - 4/20/20
tested using nvcc - CUDA compiler driver release 9.0, V9.0.176
*/
#include <stdio.h>
#include <cuda.h>
// device function: squares an n x n matrix (result += matrix * matrix),
// one thread per row.  result must be zeroed beforehand (init does this).
// Fix: added a row guard — main launches this kernel with MSIZE (1024)
// threads, so threads 32..1023 previously wrote far past the end of
// result (indices up to ~32K in a 1024-element buffer).
__global__ void square(int *matrix, int *result, int matrix_size){
// solves 1 row per thread
int row_id = threadIdx.x;
if (row_id >= matrix_size) return;  // extra threads do nothing
for (int col_ix = 0; col_ix < matrix_size; col_ix++){
for (int row_ix = 0; row_ix < matrix_size; row_ix++){
result[row_id * matrix_size + col_ix] += matrix[row_id * matrix_size + row_ix] * matrix[row_ix * matrix_size + col_ix];
}
}
}
// Initializes both device matrices in parallel, one element per thread:
// result to 0, matrix to threadIdx.x + 1.  Launch with MSIZE threads.
__global__ void init(int * matrix, int * result){
const int t = threadIdx.x;
result[t] = 0;
matrix[t] = t + 1;
}
// Main program: builds a 32x32 matrix on the GPU (element i = i+1),
// squares it, copies both back and prints them.
// Fix: square was launched with MSIZE (1024) threads even though it uses
// one thread per ROW; combined with the kernel's missing guard this wrote
// far out of bounds.  It is now launched with RLEN threads.
int main(int argc, char* argv[]){
const int RLEN = 32; // matrix of size RLEN x RLEN (matrix has to be a square)
const int MSIZE = RLEN * RLEN; // total number of elements in the matrix
// Allocate memory on host for input and output
int *h_matrix, *h_result;
// allocate memory on device for copy of input and output
int *d_matrix, *d_result;
int byte_size = MSIZE * sizeof(int);
dim3 b_dim(MSIZE);
cudaMalloc((void**) &d_matrix, byte_size);
cudaMalloc((void**) &d_result, byte_size);
// init needs one thread per ELEMENT; square needs one thread per ROW.
init<<<1, b_dim>>>(d_matrix, d_result);
square<<<1, RLEN>>>(d_matrix, d_result, RLEN);  // was <<<1, b_dim>>> (OOB)
// copy device output to host and cleanup
h_matrix = (int *) malloc(byte_size);
h_result = (int *) malloc(byte_size);
cudaMemcpy(h_matrix, d_matrix, byte_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_result, d_result, byte_size, cudaMemcpyDeviceToHost);
cudaFree(d_matrix);
cudaFree(d_result);
// Print results
printf("==========Original Matrix==========\n");
for (int i = 0; i < RLEN; i++){
for (int k = 0; k < RLEN; k++){
printf("[%d] ", h_matrix[RLEN * i + k]);
}
printf("\n");
}
printf("==========Squared Matrix==========\n");
for (int i = 0; i < RLEN; i++){
for (int k = 0; k < RLEN; k++){
printf("[%d] ", h_result[RLEN * i + k]);
}
printf("\n");
}
free(h_matrix);
free(h_result);
return 0;
}
10,828 | #include <iostream>
#include <fstream>
#include <math.h>
#include <string>
#include <ctime>
#include <cstdlib>
#include <cuda.h>
#include <stdio.h>
const int n_x = 784; //Rozmiar warstwy wejsciowej, jednoczesnie rozmiar badanych obrazow (28x28)
const int n_h = 7; //Ilosc neuronow w warstwie ukrytej
const int n_y = 1; //Ilosc neuronow na wyjsciu sieci
float learning_rate = 0.075; //Predkosc uczenia
const int train_samples = 209; //Ilosc obrazow przechodzacych przez siec w fazie treningu
const int test_samples = 50; //Ilosc obrazow przechodzacych przez siec w fazie testu
int num_samples = train_samples; //Aktualna ilosc obrazow przechodzacych przez siec
const int iter_num = 100; //Ilosc przejsc zestawu treningowego przez siec
const int print_freq = 20; //Czestotliwosc wyswietlania wartosci funkcji kosztu
//Struktura przechowujaca poszczegolne gradienty
struct grad_data {
float dW1[n_h][n_x];
float dW2[n_y][n_h];
float db1;
float db2;
float dA0[n_x][train_samples];
float dA1[n_h][train_samples];
float dA2[n_y][train_samples];
float dZ1[n_h][train_samples];
float dZ2[n_y][train_samples];
};
//Struktura przechowujaca parametry oraz wyjscia poszczegolnych warstw
struct param_data {
float train_x[n_x][train_samples];
float test_x[n_x][test_samples];
float train_y[n_y][train_samples];
float test_y[n_y][test_samples];
float W1[n_h][n_x];
float W2[n_y][n_h];
float b1;
float b2;
float A1[n_h][train_samples];
float A2[n_y][train_samples];
float Z1[n_h][train_samples];
float Z2[n_y][train_samples];
//Tablice pomocnicze
float AT0[train_samples][n_x];
float AT1[train_samples][n_h];
float WT1[n_x][n_h];
float WT2[n_h][n_y];
};
//Funkcje obslugujace struktury
void load_data(param_data&, grad_data&);
void delete_data(param_data&, grad_data&);
//Function computing Z1 (forward) or dW1 (backward) on the GPU.
// One single-thread block per output element: blockIdx.x = output column,
// blockIdx.y = output row (launched as <<<dim3(cols, rows), 1>>>).
// Computes arr_out[y][x] = sum_i arr1[y*size_1 + i] * arr2[i*size_2 + x];
// *size_1 is the inner (shared) dimension, *size_2 the output row stride.
// When *size_1 == train_samples the sum is divided by train_samples (dW1
// case); otherwise the bias *b seeds the accumulator (Z1 case).
// NOTE(review): this dispatch breaks if a forward pass ever has an inner
// dimension equal to train_samples.
__global__ void kernel(float *arr1,float *arr2, float *arr_out, float *b, int *size_1, int *size_2)
{
int x = blockIdx.x;
int y = blockIdx.y;
if((*size_1) == train_samples) { //then dW1 is being computed
arr_out[x + (*size_2)*y] = 0;
for(int i = 0; i < (*size_1) ; i++)
arr_out[x +(*size_2)*y] += (arr1[i + (*size_1)*y] * arr2[x + (*size_2)*i]) / train_samples;
} else { //then Z1 is being computed
arr_out[x + (*size_2)*y] = *b;
for(int i = 0; i < (*size_1) ; i++)
arr_out[x +(*size_2)*y] += (arr1[i + (*size_1)*y] * arr2[x + (*size_2)*i]);
}
}
//Funkcja aktywacji warstwy wyjsciowej (Sigmoid)
void sigmoid(param_data ¶meters) {
int rows = 1;
int cols = num_samples;
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
parameters.A2[i][j] = 1 / (1 + exp(-parameters.Z2[i][j]));
}
}
}
//Funkcja aktywacji warstwy ukrytej (ReLu)
void relu(param_data ¶meters) {
int rows = n_h;
int cols = num_samples;
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
if (parameters.Z1[i][j] <= 0) parameters.A1[i][j] = 0;
else parameters.A1[i][j] = parameters.Z1[i][j];
}
}
}
//Funkcja wyznaczajaca dZ1 za pomoca pochodnej z nieliniowej funkcji aktywacji (ReLu) oraz dA1
void relu_backward(param_data ¶meters, grad_data &grads) {
int rows = n_h;
int cols = num_samples;
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
if (parameters.Z1[i][j] <= 0) grads.dZ1[i][j] = 0;
else grads.dZ1[i][j] = grads.dA1[i][j];
}
}
}
//Funkcja wyznaczajaca dZ2 za pomocą pochodnej z nieliniowej funkcji aktywacji (Sigmoid) oraz dA2
void sigmoid_backward(param_data ¶meters, grad_data &grads) {
int rows = 1;
int cols = num_samples;
float s;
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
s = 1 / (1 + exp(-parameters.Z2[i][j]));
grads.dZ2[i][j] = grads.dA2[i][j] * s * (1 - s);
}
}
}
//Z1 = W1*X + b1 <-- computed on the GPU
// Uploads W1, the input batch and b1, launches one single-thread block per
// output element of the n_h x num_samples result, then copies Z1 back.
void linear_forward_relu(param_data &parameters) {
int rows_w = n_h;
int cols_w = n_x;
int rows_z = n_h;
int cols_z = num_samples;
float *arr1,*arr2,*arr_out, *b;
int *size_1, *size_2;
cudaMalloc((void **)&arr1, rows_w*cols_w*sizeof(float));
cudaMalloc((void **)&arr2, cols_w*cols_z*sizeof(float));
cudaMalloc((void **)&arr_out, rows_z*cols_z*sizeof(float));
cudaMalloc((void **)&b, sizeof(float));
cudaMalloc((void **)&size_1, sizeof(int));
cudaMalloc((void **)&size_2, sizeof(int));
cudaMemcpy(arr1,parameters.W1, rows_w*cols_w*sizeof(float), cudaMemcpyHostToDevice);
// NOTE(review): always uploads train_x, even after num_samples has been
// switched to test_samples for the test pass -- confirm this is intended.
cudaMemcpy(arr2,parameters.train_x, n_x*cols_z*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(b,&parameters.b1, sizeof(float), cudaMemcpyHostToDevice);
// size_1 = n_x (784) != train_samples selects the "Z1" branch in kernel()
cudaMemcpy(size_1,&n_x, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(size_2,&num_samples, sizeof(int), cudaMemcpyHostToDevice);
dim3 grid(cols_z,rows_z);
kernel<<<grid,1>>>(arr1,arr2,arr_out,b,size_1,size_2);
//cudaDeviceSynchronize();
// the blocking cudaMemcpy below also synchronizes with the kernel
cudaMemcpy(parameters.Z1,arr_out,rows_z*cols_z*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(arr1);
cudaFree(arr2);
cudaFree(arr_out);
cudaFree(b);
cudaFree(size_1);
cudaFree(size_2);
// Alternative CPU code:
// for (int i_z = 0; i_z < rows_z; i_z++) {
// for (int j_z = 0; j_z < cols_z; j_z++) {
// for (int j_w = 0; j_w < cols_w; j_w++) {
// parameters.Z1[i_z][j_z] += parameters.W1[i_z][j_w] * parameters.train_x[j_w][j_z];
// }
// parameters.Z1[i_z][j_z] += parameters.b1;
// }
// }
}
//Z2 = W2*A1 + b2
void linear_forward_sigm(param_data ¶meters) {
int rows_w = n_y;
int cols_w = n_h;
int rows_z = n_y;
int cols_z = num_samples;
for (int i_z = 0; i_z < rows_z; i_z++) {
for (int j_z = 0; j_z < cols_z; j_z++) {
for (int j_w = 0; j_w < cols_w; j_w++) {
parameters.Z2[i_z][j_z] += parameters.W2[i_z][j_w] * parameters.A1[j_w][j_z];
}
parameters.Z2[i_z][j_z] += parameters.b2;
}
}
}
//Funkcja wybierajaca tryb aktywacji
void linear_activation_forward(param_data ¶meters, std::string activation) {
if (activation.compare("sigmoid") == 0) {
linear_forward_sigm(parameters);
sigmoid(parameters);
}
else {
linear_forward_relu(parameters);
relu(parameters);
}
}
//Funkcja obliczajaca wartosc kosztu po pojedynczym przejsciu zestawu treningowego przez siec
float compute_cost(param_data ¶meters) {
float cost = 0;
float m = train_samples;
for (int i = 0; i < m; i++) {
cost += (-1 / m) * ( parameters.train_y[0][i] * log(parameters.A2[0][i]) + (1 - parameters.train_y[0][i]) * log(1 - parameters.A2[0][i]));
}
return cost;
}
//dW1 = (dZ1 * X.T) / train_samples <-- computed on the GPU
//dA0 = (W1).T * dZ1 <-- does not need to be computed
//db1 = sum(dZ1) / train_samples <-- computed on the CPU
void linear_backward_relu(param_data &parameters, grad_data &grads) {
int rows_dw1 = n_h;
int cols_dw1 = n_x;
int rows_dz1 = n_h;
int cols_dz1 = train_samples;
int rows_da0 = n_x;
int cols_da0 = train_samples;
int cols_wt1 = n_h;
// transpose the input batch (AT0 = X.T) for the dW1 product
for (int i = 0; i < rows_da0; i++) {
for (int j = 0; j < cols_da0; j++) {
parameters.AT0[j][i] = parameters.train_x[i][j];
}
}
// WT1 = W1.T (only needed by the commented-out dA0 computation below)
for (int i = 0; i < rows_dw1; i++) {
for (int j = 0; j < cols_dw1; j++) {
parameters.WT1[j][i] = parameters.W1[i][j];
}
}
// db1 accumulates across ALL hidden rows; assumes db1 was zeroed by
// delete_data before this call
for (int i = 0; i < rows_dz1; i++) {
for (int j = 0; j < cols_dz1; j++) {
grads.db1 += grads.dZ1[i][j] / train_samples;
}
}
float *arr1,*arr2,*arr_out, *b;
int *size_1, *size_2;
cudaMalloc((void **)&arr1, rows_dz1*cols_dz1*sizeof(float));
cudaMalloc((void **)&arr2, cols_da0*rows_da0*sizeof(float));
cudaMalloc((void **)&arr_out, rows_dw1*cols_dw1*sizeof(float));
cudaMalloc((void **)&b, sizeof(float));
cudaMalloc((void **)&size_1, sizeof(int));
cudaMalloc((void **)&size_2, sizeof(int));
cudaMemcpy(arr1, grads.dZ1, rows_dz1*cols_dz1*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(arr2, parameters.AT0, cols_da0*rows_da0*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(b, &parameters.b1, sizeof(float), cudaMemcpyHostToDevice);
// size_1 == train_samples selects the dW1 (scaled) branch in kernel()
cudaMemcpy(size_1, &train_samples, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(size_2, &n_x, sizeof(int), cudaMemcpyHostToDevice);
dim3 grid(cols_dw1,rows_dw1);
kernel<<<grid,1>>>(arr1,arr2,arr_out,b,size_1,size_2);
// cudaDeviceSynchronize();
// the blocking cudaMemcpy below also synchronizes with the kernel
cudaMemcpy(grads.dW1,arr_out,rows_dw1*cols_dw1*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(arr1);
cudaFree(arr2);
cudaFree(arr_out);
cudaFree(b);
cudaFree(size_1);
cudaFree(size_2);
//Alternative CPU version:
// for (int i_dw1 = 0; i_dw1 < rows_dw1; i_dw1++) {
// for (int j_dw1 = 0; j_dw1 < cols_dw1; j_dw1++) {
// for (int j_dz1 = 0; j_dz1 < cols_dz1; j_dz1++) {
// grads.dW1[i_dw1][j_dw1] += (grads.dZ1[i_dw1][j_dz1] * parameters.AT0[j_dz1][j_dw1]) / float(train_samples);
// }
// }
// }
// for (int i_da0 = 0; i_da0 < rows_da0; i_da0++) {
// for (int j_da0 = 0; j_da0 < cols_da0; j_da0++) {
// for (int j_wt1 = 0; j_wt1 < rows_dz1; j_wt1++) {
// grads.dA0[i_da0][j_da0] += parameters.WT1[i_da0][j_wt1] * grads.dZ1[j_wt1][j_da0];
// }
// }
// }
}
//dW2 = dZ2 * (A1).T / train_samples
//dA1 = (W2).T * dZ2 <-- all computed on the CPU
//db2 = sum(dZ2) / train_samples
// Note: dW2 and dA1 are accumulated into, so grads must have been zeroed
// by delete_data before this call.
void linear_backward_sigm(param_data &parameters, grad_data &grads) {
int rows_dw2 = n_y;
int cols_dw2 = n_h;
int rows_dz2 = n_y;
int cols_dz2 = train_samples;
int rows_da1 = n_h;
int cols_da1 = train_samples;
int cols_wt2 = n_y;
// AT1 = A1.T for the dW2 product
for (int i = 0; i < rows_da1; i++) {
for (int j = 0; j < cols_da1; j++) {
parameters.AT1[j][i] = parameters.A1[i][j];
}
}
// WT2 = W2.T for the dA1 product
for (int i = 0; i < rows_dw2; i++) {
for (int j = 0; j < cols_dw2; j++) {
parameters.WT2[j][i] = parameters.W2[i][j];
}
}
// db2 = mean of dZ2 over the batch
for (int i = 0; i < rows_dz2; i++) {
for (int j = 0; j < cols_dz2; j++) {
grads.db2 += grads.dZ2[i][j] / train_samples;
}
}
for (int i_dw2 = 0; i_dw2 < rows_dw2; i_dw2++) {
for (int j_dw2 = 0; j_dw2 < cols_dw2; j_dw2++) {
for (int j_dz2 = 0; j_dz2 < cols_dz2; j_dz2++) {
grads.dW2[i_dw2][j_dw2] += (grads.dZ2[i_dw2][j_dz2] * parameters.AT1[j_dz2][j_dw2]) / train_samples;
}
}
}
for (int i_da1 = 0; i_da1 < rows_da1; i_da1++) {
for (int j_da1 = 0; j_da1 < cols_da1; j_da1++) {
for (int j_wt2 = 0; j_wt2 < cols_wt2; j_wt2++) {
grads.dA1[i_da1][j_da1] += parameters.WT2[i_da1][j_wt2] * grads.dZ2[j_wt2][j_da1];
}
}
}
}
//Funkcja wybierajaca tryb obliczania gradientow
void linear_activation_backward(param_data ¶meters, grad_data &grads, std::string activation) {
if (activation.compare("relu") == 0) {
relu_backward(parameters, grads);
linear_backward_relu(parameters, grads);
}
else {
sigmoid_backward(parameters, grads);
linear_backward_sigm(parameters, grads);
}
}
//Aktualizowanie parametrów sieci po jednej iteracji
void update_parameters(param_data ¶meters, grad_data &grads) {
int rows_W1 = n_h;
int cols_W1 = n_x;
int rows_W2 = n_y;
int cols_W2 = n_h;
for (int i = 0; i < rows_W1; i++) {
for (int j = 0; j < cols_W1; j++) {
parameters.W1[i][j] -= learning_rate * grads.dW1[i][j];
}
}
for (int i = 0; i < rows_W2; i++) {
for (int j = 0; j < cols_W2; j++) {
parameters.W2[i][j] -= learning_rate * grads.dW2[i][j];
}
}
parameters.b1 -= learning_rate * grads.db1;
parameters.b2 -= learning_rate * grads.db2;
}
//Main training loop: runs iter_num+1 epochs of forward pass, cost,
//backward pass and parameter update over the full training batch.
void two_layer_model(param_data &parameters, grad_data &grads) {
float cost = 0;
for (int i = 0; i < iter_num + 1; i++) {
// zero Z/A and all gradients -- the passes accumulate into them
delete_data(parameters, grads);
linear_activation_forward(parameters, "relu");
linear_activation_forward(parameters, "sigmoid");
cost = compute_cost(parameters);
// seed the backward pass: dA2 = -(y/a - (1-y)/(1-a))
for (int j = 0; j < train_samples; j++) {
grads.dA2[0][j] = -((parameters.train_y[0][j] / parameters.A2[0][j]) - ((1. - parameters.train_y[0][j]) / (1. - parameters.A2[0][j])));
}
linear_activation_backward(parameters, grads, "sigmoid");
linear_activation_backward(parameters, grads, "relu");
update_parameters(parameters, grads);
if (i % print_freq == 0) {
std::cout << "Koszt po iteracji " << i << ": " << cost << "\n\n";
}
}
}
//Network accuracy check: runs a forward pass and counts predictions
//(threshold 0.5) that match the labels, first on the training set, then
//with num_samples switched to test_samples.
// NOTE(review): linear_forward_relu always uploads train_x, so the second
// pass appears to score the first test_samples columns of the TRAINING
// inputs against test_y -- confirm whether test_x was meant to be used.
void accuracy_check_train(param_data &parameters){
float accuracy = 0;
linear_activation_forward(parameters, "relu");
linear_activation_forward(parameters, "sigmoid");
for (int j = 0; j < train_samples; j++) {
if(parameters.A2[0][j] >= 0.5 && parameters.train_y[0][j] == 1) accuracy += 1;
else if(parameters.A2[0][j] < 0.5 && parameters.train_y[0][j] == 0) accuracy += 1;
}
std::cout << "Accuracy (training): " << accuracy / train_samples << "\n";
num_samples = test_samples;
accuracy = 0;
// NOTE(review): Z1/Z2 are not re-zeroed here; linear_forward_relu
// overwrites Z1 from the GPU result, but linear_forward_sigm accumulates
// into the stale Z2 -- verify the test-set numbers.
linear_activation_forward(parameters, "relu");
linear_activation_forward(parameters, "sigmoid");
for (int j = 0; j < test_samples; j++) {
if(parameters.A2[0][j] >= 0.5 && parameters.test_y[0][j] == 1) accuracy += 1;
else if(parameters.A2[0][j] < 0.5 && parameters.test_y[0][j] == 0) accuracy += 1;
}
std::cout << "Accuracy (test): " << accuracy / test_samples << "\n";
}
// Entry point: load the data and initial weights, train the two-layer
// network, then report train/test accuracy.
int main() {
param_data parameters;
grad_data grads;
load_data(parameters, grads);
two_layer_model(parameters, grads);
accuracy_check_train(parameters);
return 0;
}
// Loads the train/test inputs and labels from text files, randomly
// initializes the weights in [-0.005, 0.005), and zeroes the biases and
// pre-activations.
// NOTE(review): the ifstreams are never checked -- missing files leave the
// arrays uninitialized without any error.
void load_data(param_data &parameters, grad_data &grads) {
srand(time(NULL));
std::cout << "Ladowanie zestawu treningowego i testowego.\n";
std::string path = "train_x.txt";
std::ifstream input(path.c_str());
for (int i = 0; i < n_x; i++)
for (int j = 0; j < train_samples; j++) input >> parameters.train_x[i][j];
path = "test_x.txt";
std::ifstream input2(path.c_str());
for (int i = 0; i < n_x; i++)
for (int j = 0; j < test_samples; j++) input2 >> parameters.test_x[i][j];
std::cout << "Wczytano zestaw treningowy i testowy.\n";
path = "train_y.txt";
std::ifstream input3(path.c_str());
for (int j = 0; j < train_samples; j++) input3 >> parameters.train_y[0][j];
path = "test_y.txt";
std::ifstream input4(path.c_str());
for (int j = 0; j < test_samples; j++) input4 >> parameters.test_y[0][j];
std::cout << "Wczytano zestaw klas.\n";
// small random weights: (rand()%10000 - 5000) * 1e-6 in [-0.005, 0.005)
for (int i = 0; i < n_h; i++)
for (int j = 0; j < n_x; j++) parameters.W1[i][j] = (rand()%10000 - 5000) * 0.000001;
for (int i = 0; i < n_y; i++)
for (int j = 0; j < n_h; j++) parameters.W2[i][j] = (rand()%10000 - 5000) * 0.000001;
parameters.b1 = 0;
parameters.b2 = 0;
grads.db1 = 0;
grads.db2 = 0;
for (int i = 0; i < n_h; i++)
for (int j = 0; j < train_samples; j++) parameters.Z1[i][j] = 0;
for (int i = 0; i < n_y; i++)
for (int j = 0; j < train_samples; j++) parameters.Z2[i][j] = 0;
}
// Resets every per-iteration buffer (pre-activations, activations and all
// gradients) to zero, since the forward/backward loops accumulate into
// them.  Same-shaped arrays are cleared in merged loops.
void delete_data(param_data& parameters, grad_data& grads) {
// hidden-layer shaped buffers: n_h x train_samples
for (int i = 0; i < n_h; i++) {
for (int j = 0; j < train_samples; j++) {
parameters.Z1[i][j] = 0;
parameters.A1[i][j] = 0;
grads.dZ1[i][j] = 0;
grads.dA1[i][j] = 0;
}
}
// output-layer shaped buffers: n_y x train_samples
for (int i = 0; i < n_y; i++) {
for (int j = 0; j < train_samples; j++) {
parameters.Z2[i][j] = 0;
parameters.A2[i][j] = 0;
grads.dZ2[i][j] = 0;
grads.dA2[i][j] = 0;
}
}
// weight gradients
for (int i = 0; i < n_h; i++)
for (int j = 0; j < n_x; j++)
grads.dW1[i][j] = 0;
for (int i = 0; i < n_y; i++)
for (int j = 0; j < n_h; j++)
grads.dW2[i][j] = 0;
// input-layer gradient
for (int i = 0; i < n_x; i++)
for (int j = 0; j < train_samples; j++)
grads.dA0[i][j] = 0;
grads.db1 = 0;
grads.db2 = 0;
}
|
10,829 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define D 1000
#define re -0.5
#define im 0.45
#define scale 1.5
// Iterates z -> z^2 + c (c = re + im*i) 200 times, starting from the pixel
// (x, y) mapped into the square [-scale, scale]^2 centered on the image.
// Returns 1 if the final |z|^2 < 4 (pixel treated as inside the set).
__device__ int julia( float x, float y){
float zr = scale * (float)(D/2 - x)/(D/2);
float zi = scale * (float)(D/2 - y)/(D/2);
for( int it=0; it<200; ++it){
float a = zr;
float b = zi;
zr = a*a-b*b+re;
zi = 2*a*b+im;
}
return ( zr*zr + zi*zi < 4) ? 1 : 0;
}
// One pixel per thread: blockIdx.x is the row, threadIdx.x the column.
// Writes 1 where the pixel belongs to the Julia set, 0 elsewhere.
__global__ void generuj( int * picture ){
const int row = blockIdx.x;
const int col = threadIdx.x;
picture[row * D + col] = julia(row, col) ? 1 : 0;
}
// Generates a D x D Julia-set image on the GPU and writes it as an ASCII
// PBM (P1) file.
// Fixes: the device buffer (dev_obraz) and the host image rows were never
// freed.
int main()
{
FILE *fp;
if ((fp=fopen("obraz.pbm", "w"))==NULL) {
printf ("Nie mogê otworzyæ pliku test.txt do zapisu!\n");
exit(1);
}
fprintf( fp, "P1\n%d %d\n", D, D);
// image buffer on the GPU
int * dev_obraz;
cudaMalloc( &dev_obraz, sizeof(int) * D *D );
printf("udalo sie zaalokowac na karcie graficznej\n\n\n");
// generate the image: one block per row, one thread per column
generuj <<< D, D >>> ( dev_obraz );
printf("funkcja zakonczyla dzialanie\n\n\n");
// copy the image off the GPU row by row
int ** obraz;
obraz = (int **) malloc( sizeof(int*)*D );
for( int i=0; i<D; ++i){
obraz[i] = (int *) malloc( sizeof(int)*D );
cudaMemcpy( obraz[i], dev_obraz+i*D, sizeof(int)*D, cudaMemcpyDeviceToHost);
}
cudaFree(dev_obraz);  // fix: device buffer was leaked
printf("skopiowano z karty graficznej\n\n\n");
// write the image in PBM (P1) form
for(int i=0; i<D; ++i){
for(int j=0; j<D; ++j)
fprintf(fp, "%d", obraz[i][j]);
fprintf(fp, "\n");
}
fclose(fp);
for (int i = 0; i < D; ++i)  // fix: host image was leaked
free(obraz[i]);
free(obraz);
return 0;
}
|
10,830 | #include "includes.h"
// Converts an interleaved 3-channel image to grayscale using the Rec.601
// luma weights (0.299 R + 0.587 G + 0.114 B).  BLUE/GREEN/RED are the
// channel offsets supplied by includes.h.
__global__ void rgb2gray(unsigned char* d_Pin, unsigned char* d_Pout, int width, int height) {
const int row = blockIdx.y*blockDim.y + threadIdx.y;
const int col = blockIdx.x*blockDim.x + threadIdx.x;
if (row >= height || col >= width)
return;
const int pix = row*width + col;
d_Pout[pix] = d_Pin[pix*3+BLUE]*0.114 + d_Pin[pix*3+GREEN]*0.587 + d_Pin[pix*3+RED]*0.299;
}
10,831 | #include "includes.h"
#define N 1000 // size of vectors
#define T 10000// number of threads per block
// Element-wise C[i] = A[i]*10 + B[i], one element per thread.
// NOTE(review): there is no bounds guard; with T (10000) threads per block
// and N (1000) elements, any launch covering all threads writes past the
// arrays unless the caller sizes them to the full thread count -- confirm
// the launch configuration.
__global__ void vecAdd(int *A, int *B, int *C) {
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
C[idx] = 10*A[idx] + B[idx];
}
10,832 | // fermi
// Avoid mangling of function names
extern "C" {
__global__ void kmeans(int npoints, int nclusters, int nfeatures, const float* points, const float* clusters, int* pointsCluster);
}
// Assignment step of k-means: each thread labels one point with the index
// of its nearest centroid (squared Euclidean distance; no sqrt needed for
// argmin).
// The index 1024*bpid + 32*wtpid + ttpid implies a 32x32 thread block,
// i.e. 1024 points per block.  points is feature-major
// (points[feature*npoints + point], coalesced across threads); clusters is
// point-major (clusters[cluster*nfeatures + feature]).
// NOTE(review): there is no guard against npoints -- the launch must cover
// exactly npoints points (a multiple of 1024) or threads read/write out of
// bounds.
__global__ void kmeans(int npoints, int nclusters, int nfeatures,
const float* points, const float* clusters, int* pointsCluster) {
const int ttpid = threadIdx.x;
const int wtpid = threadIdx.y;
const int bpid = blockIdx.x;
int ind = 0;
float min_dist = 3.0E+38;  // ~FLT_MAX sentinel
for (int cluster = 0; cluster < nclusters; cluster++) {
float dist = 0;
for (int feature = 0; feature < nfeatures; feature++) {
float diff = points[1024 * bpid + (32 * wtpid + ttpid) + feature * npoints] - clusters[feature + cluster * nfeatures];
dist = dist + diff * diff;
}
if (dist < min_dist) {
min_dist = dist;
ind = cluster;
}
}
pointsCluster[1024 * bpid + (32 * wtpid + ttpid)] = ind;
}
|
10,833 | // incrementArray.cu
#include <stdio.h>
#include <time.h>
#include <sys/types.h>
#include <unistd.h>
// CPU reference: add 1.0f to every element of a[0..N-1].
void incrementArrayOnHost(float *a, int N)
{
    for (int k = 0; k < N; ++k)
        a[k] += 1.f;
}
// GPU counterpart of incrementArrayOnHost: one thread per element,
// guarded against the grid tail.
__global__ void incrementArrayOnDevice(float *a, int N)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < N)
        a[gid] += 1.f;
}
// Print the first n floats of a, space-separated, terminated by a newline.
void printarray(float *a, int n)
{
    for (int k = 0; k < n; ++k)
        printf("%f ", a[k]);
    printf("\n");
}
// Robert Jenkins' 96-bit integer mix (see
// http://www.concentric.net/~Ttwang/tech/inthash.htm); used below to
// derive a rand() seed from clock/time/pid so runs within the same
// second still differ. The operation sequence is exactly the original's.
unsigned long mix(unsigned long x, unsigned long y, unsigned long z)
{
    x -= y; x -= z; x ^= (z >> 13);
    y -= z; y -= x; y ^= (x << 8);
    z -= x; z -= y; z ^= (y >> 13);
    x -= y; x -= z; x ^= (z >> 12);
    y -= z; y -= x; y ^= (x << 16);
    z -= x; z -= y; z ^= (y >> 5);
    x -= y; x -= z; x ^= (z >> 3);
    y -= z; y -= x; y ^= (x << 10);
    z -= x; z -= y; z ^= (y >> 15);
    return z;
}
// Randomized stress test: repeatedly increments a random-length array on
// host and device and compares the results, printing a success rate.
int main(int argc, char** argv)
{
  // Both max_size AND repetitions are required. The original only checked
  // argc < 2 and then read argv[2] anyway — undefined behaviour when the
  // second argument was omitted.
  if (argc < 3) {
    printf("usage: incrementArrayRandom [max_size] [repetitions]\n");
    return EXIT_SUCCESS;
  }
  int max_size = atoi(argv[1]);
  int repetitions = atoi(argv[2]);
  // randomize within same run
  srand(mix(clock(), time(NULL), getpid()));
  float *a_h, *b_h; // pointers to host memory
  float *a_d;       // pointer to device memory
  int i, epoch = 0;
  int N = 0;
  int total_success = 0;
  for (epoch = 0; epoch < repetitions; epoch++) {
    N = rand() % max_size;
    size_t size = N*sizeof(float);
    // allocate arrays on host
    a_h = (float *)malloc(size);
    b_h = (float *)malloc(size);
    // allocate array on device
    cudaMalloc((void **) &a_d, size);
    // initialization of host data
    for (i=0; i<N; i++) a_h[i] = (float)i;
    // copy data from host to device
    cudaMemcpy(a_d, a_h, sizeof(float)*N, cudaMemcpyHostToDevice);
    // do calculation on host
    incrementArrayOnHost(a_h, N);
    // do calculation on device:
    // Part 1 of 2. Compute execution configuration (ceil-div)
    int blockSize = 4;
    int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
    // Part 2 of 2. Call incrementArrayOnDevice kernel
    incrementArrayOnDevice <<< nBlocks, blockSize >>> (a_d, N);
    // Retrieve result from device and store in b_h
    cudaMemcpy(b_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
    // check results
    int success = 1;
    for (i=0; i<N; i++) {
      if (a_h[i] != b_h[i]) {
        success = 0;
        break;
      }
    }
    printf("epoch %d a[%d] = %s\n", epoch, N, (success == 1) ? "true" : "false");
    if (success == 1) total_success += 1;
    // The original allocated fresh buffers each epoch but freed only the
    // last set after the loop, leaking (repetitions-1) host/device
    // allocations. Release per iteration instead.
    free(a_h); free(b_h); cudaFree(a_d);
  }
  printf("\nsuccess rate: %f%%\n", total_success / ((float)repetitions) * 100.0);
  return EXIT_SUCCESS;
}
|
10,834 | #include <iostream>
#include <cmath>
#include <chrono>
// Sequential element-wise accumulation: y[i] += x[i] for i in [0, n).
// A single thread walks the whole array — the caller launches <<<1,1>>>.
__global__ void add(int n, float *x, float *y)
{
    for (int k = 0; k != n; ++k)
        y[k] += x[k];
}
// Unified-memory demo: adds two 1M-element vectors with a deliberately
// serial <<<1,1>>> launch, times the kernel, and reports the max error
// against the expected value 3.0f.
int main()
{
int N = 1 << 20;  // 1M elements
float *x, *y;
// managed allocations: accessible from both host and device
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
for (int i = 0; i < N; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
auto start = std::chrono::high_resolution_clock::now();
// one block, one thread: baseline timing for the serial kernel
add<<<1,1>>>(N, x, y);
// wait for the kernel before reading y on the host (and for timing)
cudaDeviceSynchronize();
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start);
float max_err = 0.0f;
// every y[i] should now be 1.0f + 2.0f == 3.0f
for (int i = 0; i < N; i++)
{
max_err = std::fmax(max_err, std::fabs(y[i]-3.0f));
}
std::cout << duration.count() << " microseconds" << std::endl;
std::cout << "Max error: " << max_err << std::endl;
cudaFree(x);
cudaFree(y);
}
// Apply tanh element-wise to Y, which holds batch_size * num floats.
// launch_tanh rounds the grid up to a multiple of 1024, so the guard
// below is required: the original read and wrote past the end of Y for
// every tail thread.
__global__ void tanh_kernel(float* Y,
                            int batch_size,
                            int num){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < batch_size * num) {
        // load to register and then do tanh (double tanh, as originally)
        float tp = Y[idx];
        Y[idx] = tanh(tp);
    }
}
// Host launcher: one thread per element, 1024 threads per block, grid
// rounded up to cover all batch_size * num elements of Y.
void launch_tanh(float* Y,
                 int batch_size,
                 int num){
    const int total = batch_size * num;
    const int threads = 1024;
    dim3 blockSize(threads);
    dim3 gridSize((total + threads - 1) / threads);  // ceil-div
    tanh_kernel<<<gridSize, blockSize>>>(Y, batch_size, num);
}
10,836 | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
/*
Globale Variablen stehen in allen Funktionen zur Verfuegung.
Achtung: Das gilt *nicht* fuer Kernel-Funktionen!
*/
int Nx, Ny, N, npts;
int *active;
/*
* Dieses Beispiel demonstriert die Addition zweier Arrays.
* addArrayGPU soll die Arbeit ueber CUDA Threads auf der GPU verteilen.
* addArrayHost iteriert sequentiell durch die Vektorelemente auf dem Host.
*/
// Macro zur Fehlerauswertung
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Wall-clock time in seconds with microsecond resolution (gettimeofday).
double seconds()
{
    struct timeval tp;
    struct timezone tzp;
    gettimeofday(&tp, &tzp);
    return (double)tp.tv_sec + (double)tp.tv_usec * 1.e-6;
}
/*
 * Map 2D lattice coordinates i in [0, Nx+1], j in [0, Ny+1] to the flat
 * row-major index used by all vectors (row length Nx+2, global Nx).
 */
int coord2index(int i, int j)
{
    return i + j * (Nx + 2);
}
/*
 * Build the global flag array `active` (npts entries): 1 for interior
 * points, 0 for boundary points of the (Nx+2) x (Ny+2) grid.
 * Allocates `active` with malloc; reads globals Nx, Ny, npts.
 */
void active_pts()
{
int idx,i,j;
active=(int*)malloc(npts*sizeof(int));
idx=0; // running flat index
for (j=0; j<Ny+2; j++)
{
for (i=0; i<Nx+2; i++)
{
if ((i==0)||(j==0)||(i==Nx+1)||(j==Ny+1))
active[idx]=0; // boundary point
else
active[idx]=1; // interior point
idx+=1;
}
}
}
/*
 * Fill interior points of p with pseudo-random values in [0, 25.5];
 * boundary entries (active[idx] == 0) are left untouched.
 * Reads globals npts and active; active_pts() must have run first.
 */
void random_vector(float *p)
{
int idx;
for(idx = 0; idx < npts; idx++)
{
if (active[idx])
p[idx] = (float)(rand() & 0xFF ) / 10.0;
}
}
/*
 * Print the active/interior-point flag array as a 2D grid of 0/1.
 * Reads globals Nx, Ny, active.
 */
void print_active()
{
int i,j,idx;
printf("active points:\n");
idx=0;
for (j=0; j<Ny+2; j++)
{
printf(" ");
for (i=0; i<Nx+2; i++)
{
printf("%d ",active[idx]);
idx+=1;
}
printf("\n");
}
}
/*
 * Squared Euclidean norm of v over all npts entries (global npts).
 */
float norm_sqr(float *v)
{
    float acc = 0.0;
    for (int k = 0; k < npts; ++k)
        acc += v[k] * v[k];
    return acc;
}
/*
 * Print vector p as a 2D grid, truncated to the first 16 rows/columns.
 * Interior/active points are always shown; boundary points only when
 * flag > 0. Also prints the Euclidean norm of p.
 * Reads globals Nx, Ny, active (and npts via norm_sqr).
 */
void print_vector(char *name, float *p, int flag)
{
int i,j,idx;
float nrm;
printf("%s = \n",name);
idx=0;
for (j=0; j<Ny+2; j++)
{
if (j>16)
{
printf(" ...\n");
break;
}
printf(" ");
for (i=0; i<Nx+2; i++)
{
if ((i<16)&&((flag>0)||(active[idx])))
printf("%.2f ",p[idx]);
if (i==16)
printf("...");
idx+=1;
}
printf("\n");
}
nrm=norm_sqr(p);
printf("||%s|| = %.8f\n",name,sqrt(nrm));
}
// 5-point Laplacian stencil on an N x N grid (N includes the boundary
// ring): w = A*v at every interior point, boundary entries of w are left
// untouched. One thread per grid point; the 2D launch must cover N*N
// threads (no upper bounds guard beyond the interior test below).
__global__
void laplace2d_GPU(float *w, float *v, const int N) {
/* calculate the flat thread id from the 2D grid/block */
int blockOffset = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x * blockDim.y;
int threadId = threadIdx.x + threadIdx.y * blockDim.x;
int idx = blockOffset + threadId;
int first_coord = (int) (idx % N);   // column
int second_coord = (int) (idx / N);  // row
if (
(first_coord > 0) && (first_coord < (N-1)) &&
(second_coord > 0) && (second_coord < (N-1))
) {
// -4*center + left + right + up + down (row stride N)
w[idx] = -4. * v[idx] + v[idx-1] + v[idx+1] + v[idx-N] + v[idx+N];
/* printf("%d, %f, %f\n", idx, w[idx], v[idx]); */
}
}
/*
 * laplace2d_CPU(float *w, float *v)
 *
 * CPU reference for the 2D 5-point Laplacian on the square
 * (Nx+2) x (Nx+2) grid (global Nx; like the original this ignores Ny).
 * The original walked the full dense npts x npts matrix in O(npts^2);
 * for an interior point its off-diagonal conditions always select
 * exactly the four direct neighbours, so the walk reduces to the O(npts)
 * stencil below. The accumulation order per point (up, left, centre,
 * right, down — i.e. ascending j) and the `+=` semantics are preserved,
 * so results are bit-identical. Boundary entries of w stay untouched.
 */
void laplace2d_CPU(float *w, float *v) {
    int n = Nx + 2;
    int total = n * n;
    for (int i = 0; i < total; i++) {
        int col = i % n;
        int row = i / n;
        /* skip the boundary ring */
        if (col == 0 || col == n - 1 || row == 0 || row == n - 1)
            continue;
        /* same order as the original's ascending-j matrix walk */
        w[i] += v[i - n];      /* upper neighbour  (j = i-n) */
        w[i] += v[i - 1];      /* left neighbour   (j = i-1) */
        w[i] += -4 * v[i];     /* diagonal         (j = i)   */
        w[i] += v[i + 1];      /* right neighbour  (j = i+1) */
        w[i] += v[i + n];      /* lower neighbour  (j = i+n) */
    }
}
/*
 * vector_addition_CPU(float *u, float *v, float *w)
 *
 * Adds two vectors u and v and writes the result into w:
 * w[i] = u[i] + v[i] for all npts entries (global npts).
 * The original used `w[i] += ...`, which contradicted both its own
 * comment ("writes the result into w") and the GPU counterpart
 * vector_addition_GPU, which assigns; fixed to assign.
 */
void vector_addition_CPU(float *u, float *v, float *w) {
    int i;
    for (i = 0; i < npts; i++) {
        w[i] = u[i] + v[i];
    }
}
// GPU element-wise addition: w = u + v, one thread per element.
// NOTE(review): no bounds guard — the 2D launch must cover exactly the
// vector length.
__global__
void vector_addition_GPU(float *u, float *v, float *w) {
    /* flat global thread id over the 2D grid and 2D block */
    const int gid = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y)
                  + threadIdx.y * blockDim.x + threadIdx.x;
    w[gid] = u[gid] + v[gid];
}
/*
 * scale_vector_CPU(float prefactor, float *v, float *w)
 *
 * Scales v by prefactor into w: w[i] = prefactor * v[i] over all npts
 * entries (global npts).
 */
void scale_vector_CPU(float prefactor, float *v, float *w) {
    for (int k = 0; k < npts; ++k)
        w[k] = prefactor * v[k];
}
// GPU scaling: w = prefactor * v, one thread per element.
// NOTE(review): no bounds guard — launch must cover exactly the length.
__global__
void scale_vector_GPU(float prefactor, float *v, float *w) {
    /* flat global thread id over the 2D grid and 2D block */
    const int gid = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y)
                  + threadIdx.y * blockDim.x + threadIdx.x;
    w[gid] = prefactor * v[gid];
}
// Time the 5-point Laplacian on GPU vs CPU for an N x N interior grid and
// append "t_GPU, t_CPU, speedup" to laplace_speedup_file.
// gridX/gridY are the launch grid dimensions, threadX/threadY the threads
// per block. Sets the globals Nx, Ny, npts and (re)builds `active`.
void laplaceOneLoop(FILE* laplace_speedup_file, const int N, int gridX, int gridY, int threadX, int threadY) {
    int nBytes;
    float *h_w, *h_v, *h_u;
    // set globals: interior points per direction
    Nx = N;
    Ny = N;
    npts = (Nx+2)*(Ny+2);            // total lattice points incl. boundary
    active_pts();                    // interior-point flag array
    nBytes = npts*sizeof(float);     // bytes per vector
    // host vectors, zero-initialised
    h_w = (float *) malloc(npts * sizeof(float));
    h_v = (float *) malloc(npts * sizeof(float));
    h_u = (float *) malloc(npts * sizeof(float));
    memset(h_w, 0, nBytes);
    memset(h_v, 0, nBytes);
    memset(h_u, 0, nBytes);
    random_vector(h_v);              // random interior values
    // device buffers
    float *d_v, *d_w;
    CHECK(cudaMalloc((float**)&d_v, nBytes));
    CHECK(cudaMalloc((float**)&d_w, nBytes));
    CHECK(cudaMemcpy(d_v, h_v, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_w, h_w, nBytes, cudaMemcpyHostToDevice));
    // The original named these swapped ("block(gridX,gridY)") yet passed
    // them in the correct launch positions; name them honestly — the
    // actual launch configuration is unchanged.
    dim3 grid(gridX, gridY);
    dim3 block(threadX, threadY);
    double t_GPU_start = seconds();
    laplace2d_GPU <<<grid, block>>> (d_w, d_v, N+2);
    cudaDeviceSynchronize();         // kernel launch is async: wait for timing
    double t_GPU = seconds() - t_GPU_start;
    CHECK(cudaMemcpy(h_w, d_w, nBytes, cudaMemcpyDeviceToHost));
    CHECK(cudaFree(d_v));
    CHECK(cudaFree(d_w));
    double t_CPU_start = seconds();
    laplace2d_CPU(h_u, h_v);
    double t_CPU = seconds() - t_CPU_start;
    fprintf(
        laplace_speedup_file,
        "%lf, %lf, %lf,\n",
        t_GPU, t_CPU, t_CPU/t_GPU
    );
    free(active);
    free(h_w);
    free(h_v);
    free(h_u);   // was leaked by the original
}
// Time vector scaling (w = prefactor * v) on GPU vs CPU for an N x N grid
// and append "t_GPU, t_CPU, speedup" to vector_scale_speedup_file.
// BUG FIX: the original was copy-pasted from laplaceOneLoop and still
// timed laplace2d_GPU/laplace2d_CPU, so the "vector scale" results never
// measured the scaling kernels at all. It also leaked h_u.
void vectorScaleOneLoop(FILE* vector_scale_speedup_file, const int N, int gridX, int gridY, int threadX, int threadY) {
    int nBytes;
    float *h_w, *h_v, *h_u;
    // set the globals used by the helpers
    Nx = N;
    Ny = N;
    npts = (Nx+2)*(Ny+2);            // total lattice points incl. boundary
    active_pts();                    // interior-point flag array
    nBytes = npts*sizeof(float);     // bytes per vector
    h_w = (float *) malloc(npts * sizeof(float));
    h_v = (float *) malloc(npts * sizeof(float));
    h_u = (float *) malloc(npts * sizeof(float));
    memset(h_w, 0, nBytes);
    memset(h_v, 0, nBytes);
    memset(h_u, 0, nBytes);
    random_vector(h_v);              // random interior values
    // device buffers
    float *d_v, *d_w;
    CHECK(cudaMalloc((float**)&d_v, nBytes));
    CHECK(cudaMalloc((float**)&d_w, nBytes));
    CHECK(cudaMemcpy(d_v, h_v, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_w, h_w, nBytes, cudaMemcpyHostToDevice));
    // launch configuration (names were swapped in the original)
    dim3 grid(gridX, gridY);
    dim3 block(threadX, threadY);
    const float prefactor = 2.0f;    // arbitrary nonzero factor to time
    double t_GPU_start = seconds();
    scale_vector_GPU <<<grid, block>>> (prefactor, d_v, d_w);
    cudaDeviceSynchronize();
    double t_GPU = seconds() - t_GPU_start;
    CHECK(cudaMemcpy(h_w, d_w, nBytes, cudaMemcpyDeviceToHost));
    CHECK(cudaFree(d_v));
    CHECK(cudaFree(d_w));
    double t_CPU_start = seconds();
    scale_vector_CPU(prefactor, h_v, h_u);
    double t_CPU = seconds() - t_CPU_start;
    fprintf(
        vector_scale_speedup_file,
        "%lf, %lf, %lf,\n",
        t_GPU, t_CPU, t_CPU/t_GPU
    );
    free(active);
    free(h_w);
    free(h_v);
    free(h_u);
}
// Time vector addition on GPU vs CPU for an N x N grid and append
// "t_GPU, t_CPU, speedup" to vector_add_speedup_file.
// BUG FIX: the original was copy-pasted from laplaceOneLoop and still
// timed laplace2d_GPU/laplace2d_CPU, so the "vector add" results never
// measured the addition kernels. Only one random vector is built here,
// so we time w = v + v. The original also leaked h_u.
void vectorAddOneLoop(FILE* vector_add_speedup_file, const int N, int gridX, int gridY, int threadX, int threadY) {
    int nBytes;
    float *h_w, *h_v, *h_u;
    // set the globals used by the helpers
    Nx = N;
    Ny = N;
    npts = (Nx+2)*(Ny+2);            // total lattice points incl. boundary
    active_pts();                    // interior-point flag array
    nBytes = npts*sizeof(float);     // bytes per vector
    h_w = (float *) malloc(npts * sizeof(float));
    h_v = (float *) malloc(npts * sizeof(float));
    h_u = (float *) malloc(npts * sizeof(float));
    memset(h_w, 0, nBytes);
    memset(h_v, 0, nBytes);
    memset(h_u, 0, nBytes);
    random_vector(h_v);              // random interior values
    // device buffers
    float *d_v, *d_w;
    CHECK(cudaMalloc((float**)&d_v, nBytes));
    CHECK(cudaMalloc((float**)&d_w, nBytes));
    CHECK(cudaMemcpy(d_v, h_v, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_w, h_w, nBytes, cudaMemcpyHostToDevice));
    // launch configuration (names were swapped in the original)
    dim3 grid(gridX, gridY);
    dim3 block(threadX, threadY);
    double t_GPU_start = seconds();
    vector_addition_GPU <<<grid, block>>> (d_v, d_v, d_w);
    cudaDeviceSynchronize();
    double t_GPU = seconds() - t_GPU_start;
    CHECK(cudaMemcpy(h_w, d_w, nBytes, cudaMemcpyDeviceToHost));
    CHECK(cudaFree(d_v));
    CHECK(cudaFree(d_w));
    double t_CPU_start = seconds();
    vector_addition_CPU(h_v, h_v, h_u);
    double t_CPU = seconds() - t_CPU_start;
    fprintf(
        vector_add_speedup_file,
        "%lf, %lf, %lf,\n",
        t_GPU, t_CPU, t_CPU/t_GPU
    );
    free(active);
    free(h_w);
    free(h_v);
    free(h_u);
}
// Benchmark driver: reads up to 64 (N, grid, block) configurations from
// ../scripts/factorizations, runs the laplace/scale/add timing loops for
// each, and writes CSV speedup results under ../scripts/.
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// one launch configuration: problem size plus grid/block factorisation
typedef struct grid_parameters {
int N;
int gridX;
int gridY;
int threadX;
int threadY;
} grid_params_t;
int data_points = 64;
grid_params_t grid_data[data_points];
/* read in grid parameters */
// NOTE(review): fopen results are not checked anywhere in this function;
// a missing input/output file crashes on the first fscanf/fprintf.
FILE *f = fopen("../scripts/factorizations", "r");
int i;
for (i = 0;
i != data_points &&
fscanf(f, "%d, %d, %d, %d, %d,\n", &grid_data[i].N, &grid_data[i].gridX, &grid_data[i].gridY, &grid_data[i].threadX, &grid_data[i].threadY) != EOF;
i++
);
fclose(f);
/* overwrite results */
FILE *laplace_speedup = fopen("../scripts/laplace_speedup_results", "w");
FILE *vector_scale_speedup = fopen("../scripts/vector_scale_speedup_results", "w");
FILE *vector_add_speedup = fopen("../scripts/vector_add_speedup_results", "w");
// CSV headers
fprintf(
laplace_speedup,
"t_GPU, t_CPU, t_CPU/t_GPU,\n"
);
fprintf(
vector_scale_speedup,
"t_GPU, t_CPU, t_CPU/t_GPU,\n"
);
fprintf(
vector_add_speedup,
"t_GPU, t_CPU, t_CPU/t_GPU,\n"
);
int gridX, gridY, threadX, threadY;
for (i = 0; i < data_points; i++) {
// N is the global problem size consumed by the OneLoop helpers
N = grid_data[i].N;
gridX = grid_data[i].gridX;
gridY = grid_data[i].gridY;
threadX = grid_data[i].threadX;
threadY = grid_data[i].threadY;
laplaceOneLoop(
laplace_speedup,
N,
gridX, gridY,
threadX, threadY
);
vectorScaleOneLoop(
vector_scale_speedup,
N,
gridX, gridY,
threadX, threadY
);
vectorAddOneLoop(
vector_add_speedup,
N,
gridX, gridY,
threadX, threadY
);
// progress indicator on one line
printf("%d/%d\r", i+1, data_points);
fflush(stdout);
}
printf("\n");
fclose(laplace_speedup);
fclose(vector_scale_speedup);
fclose(vector_add_speedup);
return (0);
}
|
10,837 | #include "includes.h"
// Per-pixel update of f from the dual fields (z1, z2) and data term g
// (SoA layout, nx x ny image). Computes a backward divergence of (z1,z2)
// with boundary clamping, then applies the stabilised update
//   f = (f + tf*div + tf*lambda*g) / (1 + tf*lambda)
// — appears to be a primal step of a TV-style denoising scheme (confirm
// against the surrounding solver).
__global__ void updF_SoA(float *f, float *z1, float *z2, float *g, float tf, float lambda, int nx, int ny)
{
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= nx || py >= ny)
        return;  // guard the grid tail
    const int idx = px + py * nx;
    // backward divergence, dropping terms that fall off the image
    float div = 0;
    if (px < nx - 1) div += z1[idx];
    if (px > 0)      div -= z1[idx - 1];
    if (py < ny - 1) div += z2[idx];
    if (py > 0)      div -= z2[idx - nx];
    // update f (same stabilised form as the original)
    f[idx] = (f[idx] + tf * div + tf * lambda * g[idx]) / (1 + tf * lambda);
}
// Write each element's column index into result (row-major rows x cols).
// The matrix argument is unused; kept only for signature compatibility.
template<typename T>
__device__ void columnsIndices(const T* matrix, int* result,
                               const int rows, const int cols) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < rows && col < cols)
        result[row * cols + col] = col;
}
// Write each element's row index into result (row-major rows x cols).
// The matrix argument is unused; kept only for signature compatibility.
template<typename T>
__device__ void rowsIndices(const T* matrix, int* result,
                            const int rows, const int cols) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < rows && col < cols)
        result[row * cols + col] = row;
}
10,839 | #include <stdio.h>
#include <cuda_runtime.h>
#define N 12
// tugas 1: alokasi memori dan transfer dari device ke host
// Write each thread's global linear index into A (one thread per slot).
// NOTE(review): no bounds guard — the launch must cover exactly N slots.
__global__ void
kern(int *A)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    A[gid] = gid;
}
/**
 * Host main routine: fills a 12-element device array with its indices
 * via the kern kernel and prints the result.
 */
int main(void)
{
    int *A = (int *) malloc(N*sizeof(int)); // host buffer for the result
    int *d_A;
    cudaMalloc(&d_A,N*sizeof(int));         // device buffer
    // (The original copied the *uninitialized* host buffer to the device
    // here; the kernel overwrites every element, so the copy was useless
    // and has been removed.)
    dim3 grid,block;
    block.x = 4;
    grid.x = N/block.x;   // N == 12 divides evenly by block.x == 4
    kern<<<grid,block>>>(d_A);
    cudaMemcpy(A,d_A,N*sizeof(int),cudaMemcpyDeviceToHost);
    // print result
    for(int i = 0;i < N;i++)
        printf("A[%d] = %d\n",i,A[i]);
    free(A);
    cudaFree(d_A);
    return 0;
}
|
10,840 | // assumes square matrices (M = K = N)
// Note: A and B are source matrices
// A is M rows by K columns
// B is K rows by N columns
// C is destination
// C is M rows by N columns
extern "C" __global__ void sgemm(
const float* A,
const float* B,
float* C,
int widthA,
int widthB)
{
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int row = threadIdx.y + blockIdx.y * blockDim.y;
if (row < widthB && col < widthA)
{
float sum = 0.0f;
for (int i = 0; i < widthA; i++)
{
sum +=
A[i + row * widthA] *
B[col + i * widthB];
}
C[col + row * widthB] = sum;
}
}
extern "C" __global__ void dgemm(
const double* A,
const double* B,
double* C,
int widthA,
int widthB)
{
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int row = threadIdx.y + blockIdx.y * blockDim.y;
if (row < widthB && col < widthA)
{
float sum = 0.0f;
for (int i = 0; i < widthA; i++)
{
sum +=
A[i + row * widthA] *
B[col + i * widthB];
}
C[col + row * widthB] = sum;
}
} |
10,841 | #include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <cufft.h>
#include <math.h>
cudaEvent_t t_start, t_stop;
cufftHandle plan;
// Polyphase FIR front end of the PFB: each thread owns one channel
// (threadIdx.x) at one time step (blockIdx.x), accumulating P taps
// spaced N samples apart. taps is a flattened [P, N] array read as
// taps[chan], taps[chan+N], ... for this thread's channel.
// input_buffer holds exactly gridDim.x * blockDim.x samples.
__global__ void filter_block(float *input_buffer, float *taps, float *output_buffer, int N, int P)
{
    const int base = threadIdx.x + blockIdx.x * blockDim.x;
    const int total = gridDim.x * blockDim.x;  // samples in input_buffer
    float acc = 0;
    for (int x = 0; x < P; x++) {
        const int in_idx = base + x * N;
        // Tail guard: for the last P-1 blocks the original read up to
        // (P-1)*N floats past the end of input_buffer; treat the missing
        // future samples as zeros instead of reading out of bounds.
        if (in_idx < total)
            acc += taps[threadIdx.x + x * N] * input_buffer[in_idx];
    }
    output_buffer[base] = acc;
}
// Driver: reads 1 MB of int8 samples and a [P,N] tap file, runs the
// polyphase FIR kernel, then a batched real-to-complex FFT, writing the
// intermediate filtered floats and the FFT output to stdout, and prints
// timing to stderr.
int main(int argc, char **argv) {
int write_block = 1024*1024;
// 1 MB worth of data at a time...
int N = 512;
// # of frequency channels in the PFB
int P = 8;
// length of pre filtering
int write_size = sizeof(float) * write_block;
int tap_size = sizeof(float) * P * N;
int fft_output_size = (write_block / N) * sizeof(cufftComplex) * (N/2 + 1);
// hold BATCH (write_block / N) worth of complex FFT outputs
int fh;
unsigned int i=0;
char *data_file;
char *fir_taps_file;
char *base_buffer;
float *float_buffer;
float et;
// counter for elapsed time of cuda ops
float *fir_taps;
float *device_input_buffer;
float *device_output_buffer;
cufftComplex *fft_output_buffer;
cufftComplex *fft_buffer;
float *device_fir_taps;
if (argc > 2) {
data_file = argv[1];
fir_taps_file = argv[2];
} else { printf("Please supply both data and fir_taps filenames...\n"); return -1;}
base_buffer = (char*)malloc(write_block);
float_buffer = (float*)malloc(write_size);
fir_taps = (float*)malloc(tap_size);
fft_buffer = (cufftComplex*)malloc(fft_output_size);
// NOTE(review): open()/read() return values are unchecked throughout;
// a missing file silently processes garbage.
fh = open(fir_taps_file, O_RDONLY);
read(fh, fir_taps, tap_size);
// source of taps vector should be flattened [P,N] array
close(fh);
//for (i=0; i < P*N; i++) { fprintf(stderr,"%f ",(float)*(fir_taps+i));}
fh = open(data_file, O_LARGEFILE);
read(fh, base_buffer, write_block);
// read in a write block worth of int8
cudaEventCreate(&t_start);
cudaEventCreate(&t_stop);
cudaMalloc((void**)&device_input_buffer, write_size);
cudaMalloc((void**)&device_output_buffer, write_size);
cudaMalloc((void**)&fft_output_buffer, fft_output_size);
cudaMalloc((void**)&device_fir_taps, tap_size);
// allocate the device storage
cudaMemcpy(device_fir_taps, fir_taps, tap_size, cudaMemcpyHostToDevice);
// copy the filter taps to the device
int threadsPerBlock = N;
int blocksPerGrid = write_block / N;
fprintf(stderr,"Blocks per grid: %i, Threads per block: %i\n",blocksPerGrid, threadsPerBlock);
// batched real-to-complex plan: write_block/N transforms of length N
cufftPlan1d(&plan, N, CUFFT_R2C, int(write_block/N));
fprintf(stderr,"FFT Plan has length %i with batch size %i\n",N, int(write_block/N));
// widen int8 samples to float for the device
for (i = 0; i < write_block; ++i) {
*(float_buffer+i) = (float)*(base_buffer+i);
}
//write(1, float_buffer, write_size);
// output the raw data for comparison..
//for (i=0; i < write_block; i++) { fprintf(stderr,"Base: %i, Float: %f\n",(int)*(base_buffer+i),(float)*(float_buffer+i)); }
cudaEventRecord(t_start, 0);
cudaMemcpy(device_input_buffer, float_buffer, write_size, cudaMemcpyHostToDevice);
// copy the floats to the device
filter_block<<<blocksPerGrid, threadsPerBlock>>>(device_input_buffer, device_fir_taps, device_output_buffer, N, P);
// kernel applies pre filtering to entire block leaving it in device_output_buffer ready for FFT
cudaMemcpy(float_buffer, device_output_buffer, write_size, cudaMemcpyDeviceToHost);
// get the intermediate results...
write(1, float_buffer, write_size);
// output the intermediate results...
cufftExecR2C(plan, (cufftReal*)device_output_buffer, (cufftComplex*)fft_output_buffer);
// Do FFT's over the entire block, one column at a time
cudaMemcpy(fft_buffer, fft_output_buffer, fft_output_size, cudaMemcpyDeviceToHost);
// get the final block
//for (i=0; i < 100 * (N/2 + 1); i++) { fprintf(stderr,"Complex value %i has x=%f, y=%f\n", i, fft_buffer[i].x, fft_buffer[i].y); }
cudaEventRecord(t_stop, 0);
cudaEventSynchronize(t_stop);
cudaEventElapsedTime(&et, t_start, t_stop);
fprintf(stderr,"Done. CUDA time is %f ms\n", et);
// NOTE(review): writes write_size bytes of the fft_buffer, which holds
// fft_output_size bytes — write_size is smaller here, so this truncates
// the FFT output rather than overrunning; confirm this is intended.
write(1, fft_buffer, write_size);
// emit to stdout (which has hopefully been redirected...)
return 0;
}
|
10,842 | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
// Pairwise interaction energy of n points at positions r with signs sig
// in a doubly periodic box of side l. The inner k-sum (truncated at
// |k| <= 10) appears to be the Weiss/McWilliams image-sum form of the
// periodic Green's function; the final constant shifts to the
// (?)Montgomery & Joyce normalisation, exactly as in the original.
double wmcw_energy(int n, double2* r, int* sig, double l){
    const double pi = 4.0*atan(1.0);
    const double invlx = 2.0*pi/l;
    const double invly = 2.0*pi/l;
    double energy = 0.0;
    for (int i = 0; i <= n-2; i++){
        for (int j = i+1; j <= n-1; j++){
            // j > i always holds here; the original's `i != j` test was dead
            double x = fabs(r[i].x - r[j].x) * invlx;
            double y = fabs(r[i].y - r[j].y) * invly;
            double h = 0.0;
            for (int k = -10; k <= 10; k++){
                h = h + log((cosh(x-2.0*pi*(double)k)-cos(y))/cosh(2.0*pi*(double)k));
            }
            h = h - x*x/(2.0*pi);
            energy = energy - h * ((double)sig[i]) * ((double)sig[j]);
        }
    }
    // Difference between Weiss & McWilliams and (?)Montgomery & Joyce
    return energy/(double)n - 0.177021697969890;
}
|
10,843 | #include "includes.h"
// Inverted dropout on n floats: zero x[i] where xmask[i] < dropout,
// otherwise rescale by `scale`. Grid-stride loop, so any launch size
// covers the whole array.
__global__ void _drop32(int n, float *x, float *xmask, float dropout, float scale) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += stride) {
        if (xmask[i] < dropout)
            x[i] = 0;
        else
            x[i] *= scale;
    }
}
10,844 | #include <curand_kernel.h>
extern "C" __global__ void addVector(int* a, int aLen0, int* b, int bLen0, int* c, int cLen0)
{
int x = blockIdx.x;
c[x] = a[x] + b[x];
}
extern "C" __global__ void subVector(int* a, int aLen0, int* b, int bLen0, int* c, int cLen0)
{
int x = blockIdx.x;
c[x] = a[x] - b[x];
}
extern "C" __global__ void mulVector(int* a, int aLen0, int* b, int bLen0, int n)
{
int x = blockIdx.x;
b[x] = a[x] * n;
}
extern "C" __global__ void divVector(int* a, int aLen0, int* b, int bLen0, int n)
{
int x = blockIdx.x;
b[x] = a[x] / n;
}
|
10,845 | #include "includes.h"
// Transpose an m-row x n-column matrix a into the n-row x m-column
// matrix b, staging each element through a shared tile.
// Fixes over the original: (1) bounds guards — it read a[] and wrote b[]
// unconditionally, going out of bounds whenever n or m is not a multiple
// of the tile width (the guard existed only as commented-out code);
// (2) b was indexed with row width n instead of m, which is only correct
// for square matrices; (3) the tile is padded by one column, a standard
// shared-memory bank-conflict mitigation.
__global__ void mat_transpose(const float *a, float *b, int n, int m){
    const int TILE_WIDTH = 8;
    __shared__ float temp[TILE_WIDTH][TILE_WIDTH + 1];
    const int tx = threadIdx.x, ty = threadIdx.y;
    const int i = TILE_WIDTH * blockIdx.x + tx;  // column of a
    const int j = TILE_WIDTH * blockIdx.y + ty;  // row of a
    const bool inside = (i < n) && (j < m);
    if (inside)
        temp[ty][tx] = a[j * n + i];
    // barrier kept outside the divergent guard so all threads reach it
    __syncthreads();
    if (inside)
        b[i * m + j] = temp[ty][tx];
}
10,846 | /*******************************************************************************
*
******************************************************************************/
|
10,847 | #include <chrono>
#include <iostream>
#include "thrust/device_vector.h"
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include "cuComplex.h"
#include "cufft.h"
#include "cuda.h"
#define NACC 256
#define NFPGAS 48
#define NCHAN_COARSE 336
#define NCHAN_FINE_IN 32
#define NCHAN_FINE_OUT 27
#define NACCUMULATE 128
#define NPOL 2
#define NSAMPS 4
#define NCHAN_SUM 16
#define NSAMP_PER_PACKET 128
#define NCHAN_PER_PACKET 7
#define HEADLEN 64
#define NSAMPS_SUMMED 2
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
using std::cerr;
using std::cout;
using std::endl;
#define XSIZE 7
#define YSIZE 128
#define ZSIZE 48
// Report a failed CUDA call with file/line context; optionally exit with
// the error code. Used via the gpuErrchk() macro above.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
/*void UnpackCpu(void) {
#pragma unroll
for (int chan = 0; chan < 7; chan++) {
for (int sample = 0; sample < 128; sample++) {
idx = (sample * 7 + chan) * BYTES_PER_WORD; // get the start of the word in the received data array
idx2 = chan * 128 + sample + startidx; // get the position in the buffer
h_pol[idx2].x = static_cast<float>(static_cast<short>(data[HEADLEN + idx + 7] | (data[HEADLEN + idx + 6] << 8)));
h_pol[idx2].y = static_cast<float>(static_cast<short>(data[HEADLEN + idx + 5] | (data[HEADLEN + idx + 4] << 8)));
h_pol[idx2 + d_in_size / 2].x = static_cast<float>(static_cast<short>(data[HEADLEN + idx + 3] | (data[HEADLEN + idx + 2] << 8)));
h_pol[idx2 + d_in_size / 2].y = static_cast<float>(static_cast<short>(data[HEADLEN + idx + 1] | (data[HEADLEN + idx + 0] << 8)));
}
}
} */
// Unpack packed voltage words from a 2D texture into cufftComplex
// samples for two polarisations. Each int2 word holds both pols; the
// mask/shift pairs swap the two bytes of each 16-bit field (network ->
// host order) before the short cast sign-extends and the float cast
// widens. The second polarisation lands YSIZE elements after the first.
// Assumes gridDim.y blocks of 7 channels and 128 samples per accumulation
// (the hard-coded 336/128/48 constants) — confirm against the caller.
__global__ void unpack_original_tex(cudaTextureObject_t texObj, cufftComplex * __restrict__ out, unsigned int acc)
{
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * 128;
int chanidx = threadIdx.x + blockIdx.y * 7;
int skip;
int2 word;
for (int ac = 0; ac < acc; ac++) {
skip = 336 * 128 * 2 * ac;  // stride per accumulation (both pols)
for (int sample = 0; sample < YSIZE; sample++) {
word = tex2D<int2>(texObj, xidx, yidx + ac * 48 * 128 + sample);
out[skip + chanidx * YSIZE * 2 + sample].x = static_cast<float>(static_cast<short>(((word.y & 0xff000000) >> 24) | ((word.y & 0xff0000) >> 8)));
out[skip + chanidx * YSIZE * 2 + sample].y = static_cast<float>(static_cast<short>(((word.y & 0xff00) >> 8) | ((word.y & 0xff) << 8)));
out[skip + chanidx * YSIZE * 2 + YSIZE + sample].x = static_cast<float>(static_cast<short>(((word.x & 0xff000000) >> 24) | ((word.x & 0xff0000) >> 8)));
out[skip + chanidx * YSIZE * 2 + YSIZE + sample].y = static_cast<float>(static_cast<short>(((word.x & 0xff00) >> 8) | ((word.x & 0xff) << 8)));
}
}
}
// Shared-memory variant of the unpacker: stages one packet's worth of
// packed words (both pols) into accblock, then byte-swaps each 16-bit
// field (mask/shift pairs below) and writes sign-extended floats to out.
// One block per FPGA/packet lane; the second polarisation is offset by
// NCHAN_COARSE * NSAMP_PER_PACKET * NACCUMULATE in out.
// NOTE(review): there is a __syncthreads() between the staging writes
// and the reads, but none between the reads and the NEXT iteration's
// staging writes — fast warps can overwrite accblock while slow warps
// still read the previous accumulation. Needs a second barrier at the
// loop bottom; confirm before relying on this kernel.
__global__ void unpack_new(const unsigned int *__restrict__ in, cufftComplex * __restrict__ out)
{
int skip = 0;
__shared__ unsigned int accblock[1792];
int chan = 0;
int time = 0;
int line = 0;
cufftComplex cpol;
int polint;
int2 tmp;
int outskip = 0;
for (int iacc = 0; iacc < NACCUMULATE; ++iacc) {
// NOTE: This is skipping whole words as in will be cast to int2
skip = iacc * NCHAN_COARSE * NSAMP_PER_PACKET + blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET;
// stage: de-interleave channel/time into shared memory, one pol per half
for (int ichunk = 0; ichunk < 7; ++ichunk) {
line = ichunk * blockDim.x + threadIdx.x;
chan = line % 7;
time = line / 7;
tmp = ((int2*)in)[skip + line];
accblock[chan * NSAMP_PER_PACKET + time] = tmp.y;
accblock[NSAMP_PER_PACKET * NCHAN_PER_PACKET + chan * NSAMP_PER_PACKET + time] = tmp.x;
}
__syncthreads();
skip = NCHAN_COARSE * NSAMP_PER_PACKET * NACCUMULATE;  // pol-B offset in out
outskip = blockIdx.x * 7 * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET;
for (chan = 0; chan < NCHAN_PER_PACKET; ++chan) {
/*polaint = accblock[ichan * NSAMP_PER_PACKET + threadIdx.x];
polbint = accblock[NSAMP_PER_PACKET * NCHAN_PER_PACKET + ichan * NSAMP_PER_PACKET + threadIdx.x];
pola.x = static_cast<float>(static_cast<short>( ((polaint & 0xff000000) >> 24) | ((polaint & 0xff0000) >> 8) ));
pola.y = static_cast<float>(static_cast<short>( ((polaint & 0xff00) >> 8) | ((polaint & 0xff) << 8) ));
polb.x = static_cast<float>(static_cast<short>( ((polbint & 0xff000000) >> 24) | ((polbint & 0xff0000) >> 8) ));
polb.y = static_cast<float>(static_cast<short>( ((polbint & 0xff00) >> 8) | ((polbint & 0xff) << 8) ));
*/
// byte-swap each 16-bit component, sign-extend, widen to float
polint = accblock[chan * NSAMP_PER_PACKET + threadIdx.x];
cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) ));
cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) ));
out[outskip + threadIdx.x] = cpol;
polint = accblock[NSAMP_PER_PACKET * NCHAN_PER_PACKET + chan * NSAMP_PER_PACKET + threadIdx.x];
cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) ));
cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) ));
out[skip + outskip + threadIdx.x] = cpol;
outskip += NSAMP_PER_PACKET * NACCUMULATE;
}
}
}
// int2 variant of unpack_new: stages whole int2 words (both pols per
// word) into shared memory, then byte-swaps each 16-bit field and writes
// both polarisations as floats. Same output layout as unpack_new.
// NOTE(review): same hazard as unpack_new — no __syncthreads() between
// the read phase and the next iteration's staging writes into accblock.
__global__ void unpack_new_int2(int2 *__restrict__ in, cufftComplex *__restrict__ out) {
int skip = 0;
__shared__ int2 accblock[896];
int chan = 0;
int time = 0;
int line = 0;
cufftComplex cpol;
int polint;
int outskip = 0;
for (int iacc = 0; iacc < NACCUMULATE; ++iacc) {
// NOTE: This is skipping whole words as in will be cast to int2
// skip = iacc * NCHAN_COARSE * NSAMP_PER_PACKET + blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET;
skip = blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NCHAN_PER_PACKET * NSAMP_PER_PACKET;
// stage: de-interleave channel/time into shared memory
for (int ichunk = 0; ichunk < 7; ++ichunk) {
line = ichunk * blockDim.x + threadIdx.x;
chan = line % 7;
time = line / 7;
accblock[chan * NSAMP_PER_PACKET + time] = in[skip + line];
}
__syncthreads();
skip = NCHAN_COARSE * NSAMP_PER_PACKET * NACCUMULATE;  // pol-B offset in out
outskip = blockIdx.x * 7 * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET;
for (chan = 0; chan < NCHAN_PER_PACKET; ++chan) {
// byte-swap each 16-bit component, sign-extend, widen to float
polint = accblock[chan * NSAMP_PER_PACKET + threadIdx.x].y;
cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) ));
cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) ));
out[outskip + threadIdx.x] = cpol;
polint = accblock[chan * NSAMP_PER_PACKET + threadIdx.x].x;
cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) ));
cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) ));
out[skip + outskip + threadIdx.x] = cpol;
outskip += NSAMP_PER_PACKET * NACCUMULATE;
}
}
}
// Alternative unpacker experiment: stages interleaved pol words into
// shared memory in 2044-line chunks, then byte-swaps and writes both
// polarisations out.
// NOTE(review): this kernel looks unsafe as written and should be
// audited before use:
//   * threads with threadIdx.x 1022/1023 return early, yet
//     __syncthreads() below is executed by the remaining threads —
//     a barrier not reached by all threads of the block is undefined;
//   * the __syncthreads() itself sits inside `if (line < ...)`, another
//     divergent path around a barrier;
//   * the guard tests threadIdx.x while all indexing uses threadIdx.y —
//     TODO confirm the intended launch shape.
__global__ void unpack_alt(const unsigned int *__restrict__ in, cufftComplex * __restrict__ out) {
if (threadIdx.x == 1022 || threadIdx.x == 1023)
return;
__shared__ unsigned int accblock[2044];
int inskip = blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NPOL * NACCUMULATE;
int outskip = blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE;
int time = 0;
int chan = 0;
int line = 0;
cufftComplex pola, polb;
int polaint;
int polbint;
// NOTE: That will leave last 224 lines unprocessed
// This can fit in 7 full warps of 32
for (int iacc = 0; iacc < 113; ++iacc) {
line = iacc * blockDim.y + threadIdx.y;
if (line < NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE) {
chan = threadIdx.y % 7;
time = threadIdx.y / 7;
accblock[chan * 146 + time] = in[inskip + threadIdx.y * NPOL];
accblock[NCHAN_PER_PACKET * 146 + chan * 146 + time] = in[inskip + threadIdx.y * NPOL + 1];
inskip += 2044;
__syncthreads();
// byte-swap each 16-bit component, sign-extend, widen to float
polbint = accblock[threadIdx.y];
polaint = accblock[NCHAN_PER_PACKET * 146 + threadIdx.y];
pola.x = static_cast<float>(static_cast<short>( ((polaint & 0xff000000) >> 24) | ((polaint & 0xff0000) >> 8) ));
pola.y = static_cast<float>(static_cast<short>( ((polaint & 0xff00) >> 8) | ((polaint & 0xff) << 8) ));
polb.x = static_cast<float>(static_cast<short>( ((polbint & 0xff000000) >> 24) | ((polbint & 0xff0000) >> 8) ));
polb.y = static_cast<float>(static_cast<short>( ((polbint & 0xff00) >> 8) | ((polbint & 0xff) << 8) ));
chan = threadIdx.y / 146;
time = threadIdx.y % 146;
out[outskip + chan * NSAMP_PER_PACKET * NACCUMULATE + time] = pola;
out[NCHAN_COARSE * NSAMP_PER_PACKET * NACCUMULATE + outskip + chan * NSAMP_PER_PACKET * NACCUMULATE + time] = polb;
outskip += 146;
}
}
}
// Computes per-fine-channel total power (summed over polarisations and
// samples) into shared memory, then scrunches channel groups into the output.
// FIX (the original had several defects):
//   * the scrunch loop advanced with `start_chan *= blockDim.x`, so thread 0
//     (start_chan == 0) never advanced -- infinite loop;
//   * `out[out_offset + start_chan/NCHAN_SUM];` was a no-op that discarded the
//     sum -- the assignment was missing;
//   * the early `return` for lane_idx >= NCHAN_FINE_OUT plus __syncthreads()
//     calls inside divergent loops was undefined behaviour, and the scrunch
//     phase ran inside the fill loop before the buffer was fully populated.
// The fill phase is now guarded by a predicate, followed by one block-wide
// barrier, followed by the scrunch phase.
__global__ void powertimefreq_new_hardcoded(
cuComplex* __restrict__ in,
float* __restrict__ out)
{
    __shared__ float freq_sum_buffer[NCHAN_FINE_OUT*NCHAN_COARSE];
    int warp_idx = threadIdx.x >> 0x5;
    int lane_idx = threadIdx.x & 0x1f;
    int offset = blockIdx.x * NCHAN_COARSE * NPOL * NSAMPS * NCHAN_FINE_IN;
    int out_offset = blockIdx.x * NCHAN_COARSE * NCHAN_FINE_OUT / NCHAN_SUM;
    if (lane_idx < NCHAN_FINE_OUT) {
        // NOTE(review): striding by warpSize only covers every coarse channel
        // if the block has warpSize warps (1024 threads); a stride of
        // blockDim.x/warpSize may have been intended -- confirm.
        for (int coarse_chan_idx = warp_idx; coarse_chan_idx < NCHAN_COARSE; coarse_chan_idx += warpSize)
        {
            float real = 0.0f;
            float imag = 0.0f;
            int coarse_chan_offset = offset + coarse_chan_idx * NPOL * NSAMPS * NCHAN_FINE_IN;
            for (int pol = 0; pol < NPOL; ++pol)
            {
                int pol_offset = coarse_chan_offset + pol * NSAMPS * NCHAN_FINE_IN;
                for (int samp = 0; samp < NSAMPS; ++samp)
                {
                    int samp_offset = pol_offset + samp * NCHAN_FINE_IN;
                    cuComplex val = in[samp_offset + lane_idx];
                    real += val.x * val.x;
                    imag += val.y * val.y;
                }
            }
            int output_idx = coarse_chan_idx * NCHAN_FINE_OUT + lane_idx;
            freq_sum_buffer[output_idx] = real + imag; // scaling goes here
        }
    }
    __syncthreads();
    // Scrunch phase: each thread handles start_chan, start_chan+blockDim.x, ...
    for (int start_chan = threadIdx.x; start_chan + NCHAN_SUM <= NCHAN_FINE_OUT*NCHAN_COARSE; start_chan += blockDim.x)
    {
        float sum = freq_sum_buffer[start_chan];
        // NOTE(review): this sums entries at offsets 1, 2, 4, 8 (5 channels in
        // total); if NCHAN_SUM consecutive channels were intended (as in
        // DetectScrunchKernel below) this needs a 0..NCHAN_SUM-1 loop instead.
        // Kept as originally written -- confirm the intent.
        for (int ii = 0; ii < 4; ++ii)
        {
            sum += freq_sum_buffer[start_chan + (1<<ii)];
        }
        out[out_offset + start_chan / NCHAN_SUM] = sum;
    }
    return;
}
// Detects (|pol0|^2 + |pol1|^2), sums NSAMPS_SUMMED time samples per fine
// channel, then scrunches NCHAN_SUM consecutive fine channels into each of the
// `nchans` output channels for this block's time slice.
__global__ void DetectScrunchKernel(cuComplex* __restrict__ in, float* __restrict__ out, short nchans)
{
/**
* This block is going to do 2 timesamples for all coarse channels.
* The fine channels are dealt with by the lanes, but on the fine
* channel read we perform an fft shift and exclude the band edges.
*/
// gridDim.x should be Nacc * 128 / (32 * nsamps_to_add) == 256
__shared__ float freq_sum_buffer[NCHAN_FINE_OUT*NCHAN_COARSE]; // 9072 elements
int warp_idx = threadIdx.x >> 0x5;
int lane_idx = threadIdx.x & 0x1f;
int pol_offset = NCHAN_COARSE * NSAMPS * NCHAN_FINE_IN * NACCUMULATE;
// NOTE(review): "offet" is a typo for "offset" (kept to avoid code changes).
int coarse_chan_offet = NACCUMULATE * NCHAN_FINE_IN * NSAMPS;
int block_offset = NCHAN_FINE_IN * NSAMPS_SUMMED * blockIdx.x;
int nwarps_per_block = blockDim.x/warpSize;
//Here we calculate indexes for FFT shift.
int offset_lane_idx = (lane_idx + 19)%32;
//Here only first 27 lanes are active as we drop
//5 channels due to the 32/27 oversampling ratio
if (lane_idx < 27)
{
// This warp
// first sample in inner dimension = (32 * 2 * blockIdx.x)
// This warp will loop over coarse channels in steps of NWARPS per block coarse_chan_idx (0,335)
for (int coarse_chan_idx = warp_idx; coarse_chan_idx < NCHAN_COARSE; coarse_chan_idx += nwarps_per_block)
{
float real = 0.0f;
float imag = 0.0f;
int base_offset = coarse_chan_offet * coarse_chan_idx + block_offset + offset_lane_idx;
for (int pol_idx=0; pol_idx<NPOL; ++pol_idx)
{
int offset = base_offset + pol_offset * pol_idx;
for (int sample_idx=0; sample_idx<NSAMPS_SUMMED; ++sample_idx)
{
//Get first channel
// IDX = NCHAN_COARSE * NSAMPS * NCHAN_FINE_IN * NACCUMULATE * pol_idx
// + NACCUMULATE * NCHAN_FINE_IN * NSAMPS * coarse_chan_idx
// + blockIdx.x * NCHAN_FINE_IN * NSAMPS_SUMMED
// + NCHAN_FINE_IN * sample_idx
// + lane_idx;
cuComplex val = in[offset + (NCHAN_FINE_IN * sample_idx)]; // load frequencies in right order
real += val.x * val.x;
imag += val.y * val.y;
}
// 3 is the leading dead lane count
// sketchy
// NOTE(review): this store sits inside the pol loop -- the final (last-pol)
// write holds the full sum, so earlier writes are redundant but harmless.
freq_sum_buffer[coarse_chan_idx*NCHAN_FINE_OUT + lane_idx] = real + imag;
}
}
}
// Block-wide barrier: reached by every thread (the lane_idx guard above is a
// plain if, not an early return), so the fill phase is complete here.
__syncthreads();
int saveoff = blockIdx.x * nchans;
// Scrunch: one output channel per thread, summing NCHAN_SUM consecutive
// fine channels from the shared buffer.
if (threadIdx.x < (NCHAN_FINE_OUT * NCHAN_COARSE / NCHAN_SUM)) {
float sum = 0.0;
for (int chan_idx = threadIdx.x * NCHAN_SUM; chan_idx < (threadIdx.x+1) * NCHAN_SUM; ++chan_idx) {
sum += freq_sum_buffer[chan_idx];
}
out[saveoff + threadIdx.x] = sum;
}
return;
}
// Same detect-and-scrunch as DetectScrunchKernel, but additionally rescales
// each output channel with per-channel mean/scale and clamps to 8 bits.
__global__ void DetectScrunchScaleKernel(cuComplex* __restrict__ in, float* __restrict__ out, short nchans, float *means, float *scales)
{
/**
* This block is going to do 2 timesamples for all coarse channels.
* The fine channels are dealt with by the lanes, but on the fine
* channel read we perform an fft shift and exclude the band edges.
*/
// gridDim.x should be Nacc * 128 / (32 * nsamps_to_add) == 256
__shared__ float freq_sum_buffer[NCHAN_FINE_OUT*NCHAN_COARSE]; // 9072 elements
int warp_idx = threadIdx.x >> 0x5;
int lane_idx = threadIdx.x & 0x1f;
int pol_offset = NCHAN_COARSE * NSAMPS * NCHAN_FINE_IN * NACCUMULATE;
int coarse_chan_offet = NACCUMULATE * NCHAN_FINE_IN * NSAMPS;
int block_offset = NCHAN_FINE_IN * NSAMPS_SUMMED * blockIdx.x;
int nwarps_per_block = blockDim.x/warpSize;
//Here we calculate indexes for FFT shift.
int offset_lane_idx = (lane_idx + 19)%32;
//Here only first 27 lanes are active as we drop
//5 channels due to the 32/27 oversampling ratio
if (lane_idx < 27)
{
// This warp
// first sample in inner dimension = (32 * 2 * blockIdx.x)
// This warp will loop over coarse channels in steps of NWARPS per block coarse_chan_idx (0,335)
for (int coarse_chan_idx = warp_idx; coarse_chan_idx < NCHAN_COARSE; coarse_chan_idx += nwarps_per_block)
{
float real = 0.0f;
float imag = 0.0f;
int base_offset = coarse_chan_offet * coarse_chan_idx + block_offset + offset_lane_idx;
for (int pol_idx=0; pol_idx<NPOL; ++pol_idx)
{
int offset = base_offset + pol_offset * pol_idx;
for (int sample_idx=0; sample_idx<NSAMPS_SUMMED; ++sample_idx)
{
//Get first channel
// IDX = NCHAN_COARSE * NSAMPS * NCHAN_FINE_IN * NACCUMULATE * pol_idx
// + NACCUMULATE * NCHAN_FINE_IN * NSAMPS * coarse_chan_idx
// + blockIdx.x * NCHAN_FINE_IN * NSAMPS_SUMMED
// + NCHAN_FINE_IN * sample_idx
// + lane_idx;
cuComplex val = in[offset + (NCHAN_FINE_IN * sample_idx)]; // load frequencies in right order
real += val.x * val.x;
imag += val.y * val.y;
}
// 3 is the leading dead lane count
// sketchy
freq_sum_buffer[coarse_chan_idx*NCHAN_FINE_OUT + lane_idx] = real + imag;
}
}
}
__syncthreads();
int saveoff = blockIdx.x * nchans;
if (threadIdx.x < (NCHAN_FINE_OUT * NCHAN_COARSE / NCHAN_SUM)) {
float sum = 0.0;
int scaled = 0;
for (int chan_idx = threadIdx.x * NCHAN_SUM; chan_idx < (threadIdx.x+1) * NCHAN_SUM; ++chan_idx) {
sum += freq_sum_buffer[chan_idx];
}
// Scale to an 8-bit range centred on 64.5, then clamp to [0, 255].
scaled = __float2int_ru((sum - means[threadIdx.x]) * scales[threadIdx.x] + 64.5f);
if (scaled > 255) {
scaled = 255;
} else if (scaled < 0) {
scaled = 0;
}
//out[saveoff + threadIdx.x] = (unsigned char)scaled;
// NOTE: That puts the highest frequency first (OUTCHANS - 1 - threadIdx.x)
// NOTE(review): out is float*, so the 8-bit quantized value is stored as a
// float; presumably downstream expects 0-255-valued floats -- confirm.
out[saveoff + threadIdx.x] = (unsigned char)scaled;
}
return;
}
// Same as DetectScrunchScaleKernel, but writes the output channels in
// reversed order (index 566 - threadIdx.x), putting the highest frequency
// first, and indexes means/scales the same reversed way.
__global__ void DetectScrunchScaleRevKernel(cuComplex* __restrict__ in, float* __restrict__ out, short nchans, float *means, float *scales)
{
/**
* This block is going to do 2 timesamples for all coarse channels.
* The fine channels are dealt with by the lanes, but on the fine
* channel read we perform an fft shift and exclude the band edges.
*/
// gridDim.x should be Nacc * 128 / (32 * nsamps_to_add) == 256
__shared__ float freq_sum_buffer[NCHAN_FINE_OUT*NCHAN_COARSE]; // 9072 elements
int warp_idx = threadIdx.x >> 0x5;
int lane_idx = threadIdx.x & 0x1f;
int pol_offset = NCHAN_COARSE * NSAMPS * NCHAN_FINE_IN * NACCUMULATE;
int coarse_chan_offet = NACCUMULATE * NCHAN_FINE_IN * NSAMPS;
int block_offset = NCHAN_FINE_IN * NSAMPS_SUMMED * blockIdx.x;
int nwarps_per_block = blockDim.x/warpSize;
//Here we calculate indexes for FFT shift.
int offset_lane_idx = (lane_idx + 19)%32;
//Here only first 27 lanes are active as we drop
//5 channels due to the 32/27 oversampling ratio
if (lane_idx < 27)
{
// This warp
// first sample in inner dimension = (32 * 2 * blockIdx.x)
// This warp will loop over coarse channels in steps of NWARPS per block coarse_chan_idx (0,335)
for (int coarse_chan_idx = warp_idx; coarse_chan_idx < NCHAN_COARSE; coarse_chan_idx += nwarps_per_block)
{
float real = 0.0f;
float imag = 0.0f;
int base_offset = coarse_chan_offet * coarse_chan_idx + block_offset + offset_lane_idx;
for (int pol_idx=0; pol_idx<NPOL; ++pol_idx)
{
int offset = base_offset + pol_offset * pol_idx;
for (int sample_idx=0; sample_idx<NSAMPS_SUMMED; ++sample_idx)
{
//Get first channel
// IDX = NCHAN_COARSE * NSAMPS * NCHAN_FINE_IN * NACCUMULATE * pol_idx
// + NACCUMULATE * NCHAN_FINE_IN * NSAMPS * coarse_chan_idx
// + blockIdx.x * NCHAN_FINE_IN * NSAMPS_SUMMED
// + NCHAN_FINE_IN * sample_idx
// + lane_idx;
cuComplex val = in[offset + (NCHAN_FINE_IN * sample_idx)]; // load frequencies in right order
real += val.x * val.x;
imag += val.y * val.y;
}
// 3 is the leading dead lane count
// sketchy
freq_sum_buffer[coarse_chan_idx*NCHAN_FINE_OUT + lane_idx] = real + imag;
}
}
}
__syncthreads();
int saveoff = blockIdx.x * nchans;
if (threadIdx.x < (NCHAN_FINE_OUT * NCHAN_COARSE / NCHAN_SUM)) {
float sum = 0.0;
int scaled = 0;
for (int chan_idx = threadIdx.x * NCHAN_SUM; chan_idx < (threadIdx.x+1) * NCHAN_SUM; ++chan_idx) {
sum += freq_sum_buffer[chan_idx];
}
// NOTE(review): 566 hard-codes nchans - 1 for the 567-channel configuration
// used in main(); means/scales are read in the same reversed order as out.
scaled = __float2int_ru((sum - means[566 - threadIdx.x]) * scales[566 - threadIdx.x] + 64.5f);
if (scaled > 255) {
scaled = 255;
} else if (scaled < 0) {
scaled = 0;
}
//out[saveoff + threadIdx.x] = (unsigned char)scaled;
// NOTE: That puts the highest frequency first (OUTCHANS - 1 - threadIdx.x)
out[saveoff + 566 - threadIdx.x] = (unsigned char)scaled;
}
return;
}
// Same as DetectScrunchScaleRevKernel, but truncated: skips the bottom
// 28 scrunched channels (28 * NCHAN_SUM fine channels) and emits only 512
// reversed output channels.
__global__ void DetectScrunchScaleTruncKernel(cuComplex* __restrict__ in, float* __restrict__ out, short nchans, float *means, float *scales)
{
/**
* This block is going to do 2 timesamples for all coarse channels.
* The fine channels are dealt with by the lanes, but on the fine
* channel read we perform an fft shift and exclude the band edges.
*/
// gridDim.x should be Nacc * 128 / (32 * nsamps_to_add) == 256
__shared__ float freq_sum_buffer[NCHAN_FINE_OUT*NCHAN_COARSE]; // 9072 elements
int warp_idx = threadIdx.x >> 0x5;
int lane_idx = threadIdx.x & 0x1f;
int pol_offset = NCHAN_COARSE * NSAMPS * NCHAN_FINE_IN * NACCUMULATE;
int coarse_chan_offet = NACCUMULATE * NCHAN_FINE_IN * NSAMPS;
int block_offset = NCHAN_FINE_IN * NSAMPS_SUMMED * blockIdx.x;
int nwarps_per_block = blockDim.x/warpSize;
//Here we calculate indexes for FFT shift.
int offset_lane_idx = (lane_idx + 19)%32;
//Here only first 27 lanes are active as we drop
//5 channels due to the 32/27 oversampling ratio
if (lane_idx < 27)
{
// This warp
// first sample in inner dimension = (32 * 2 * blockIdx.x)
// This warp will loop over coarse channels in steps of NWARPS per block coarse_chan_idx (0,335)
for (int coarse_chan_idx = warp_idx; coarse_chan_idx < NCHAN_COARSE; coarse_chan_idx += nwarps_per_block)
{
float real = 0.0f;
float imag = 0.0f;
int base_offset = coarse_chan_offet * coarse_chan_idx + block_offset + offset_lane_idx;
for (int pol_idx=0; pol_idx<NPOL; ++pol_idx)
{
int offset = base_offset + pol_offset * pol_idx;
for (int sample_idx=0; sample_idx<NSAMPS_SUMMED; ++sample_idx)
{
//Get first channel
// IDX = NCHAN_COARSE * NSAMPS * NCHAN_FINE_IN * NACCUMULATE * pol_idx
// + NACCUMULATE * NCHAN_FINE_IN * NSAMPS * coarse_chan_idx
// + blockIdx.x * NCHAN_FINE_IN * NSAMPS_SUMMED
// + NCHAN_FINE_IN * sample_idx
// + lane_idx;
cuComplex val = in[offset + (NCHAN_FINE_IN * sample_idx)]; // load frequencies in right order
real += val.x * val.x;
imag += val.y * val.y;
}
// 3 is the leading dead lane count
// sketchy
freq_sum_buffer[coarse_chan_idx*NCHAN_FINE_OUT + lane_idx] = real + imag;
}
}
}
__syncthreads();
int saveoff = blockIdx.x * nchans;
// Drop the bottom 28 output channels' worth of fine channels.
int skipbottom = 28 * NCHAN_SUM;
if (threadIdx.x < 512) {
float sum = 0.0;
int scaled = 0;
for (int chan_idx = threadIdx.x * NCHAN_SUM; chan_idx < (threadIdx.x+1) * NCHAN_SUM; ++chan_idx) {
sum += freq_sum_buffer[skipbottom + chan_idx];
}
// 511 == 512 - 1: reversed indexing for the truncated 512-channel output.
scaled = __float2int_ru((sum - means[511 - threadIdx.x]) * scales[511 - threadIdx.x] + 64.5f);
if (scaled > 255) {
scaled = 255;
} else if (scaled < 0) {
scaled = 0;
}
//out[saveoff + threadIdx.x] = (unsigned char)scaled;
// NOTE: That puts the highest frequency first (OUTCHANS - 1 - threadIdx.x)
out[saveoff + 511 - threadIdx.x] = (unsigned char)scaled;
}
return;
}
// Computes the four polarisation power products per fine channel, averaging
// `factort` time samples. The four products land at offsets 0, jump, 2*jump
// and 3*jump in `out`:
//   out[i]          = |A|^2 + |B|^2
//   out[i + jump]   = |A|^2 - |B|^2
//   out[i + 2*jump] = 2 * Re(A * conj(B))
//   out[i + 3*jump] = 2 * Im(A * conj(B))
// where A is at idx1+idx2 and B 128 elements later.
// Launched (from main below) as <<<48, 27>>>; hard-coded strides assume
// 336 coarse x 128 fine channels in, 336 x 27 out.
// NOTE(review): the accumulation does read-modify-write directly on global
// memory every iteration; a register accumulator per product would avoid the
// repeated global traffic -- kept as written since this is the "unoptimised"
// baseline in the benchmark.
__global__ void GetPowerAddTimeKernel(cufftComplex* __restrict__ in, float* __restrict__ out, unsigned int jump, unsigned int factort, unsigned int acc) {
int idx1, idx2;
int outidx;
int skip1, skip2;
float power1, power2;
// NOTE(review): `iac < acc` compares signed int against unsigned int -- fine
// for the values used here, but worth tidying.
for (int iac = 0; iac < acc; iac++) {
skip1 = iac * 336 * 128 * 2;
skip2 = iac * 336 * 27;
for (int ichan = 0; ichan < 7; ichan++) {
outidx = skip2 + 7 * 27 * blockIdx.x + ichan * 27 + threadIdx.x;
// Zero the four accumulators for this output channel.
out[outidx] = (float)0.0;
out[outidx + jump] = (float)0.0;
out[outidx + 2 * jump] = (float)0.0;
out[outidx + 3 * jump] = (float)0.0;
idx1 = skip1 + 256 * (blockIdx.x * 7 + ichan);
for (int itime = 0; itime < factort; itime++) {
idx2 = threadIdx.x + itime * 32;
power1 = (in[idx1 + idx2].x * in[idx1 + idx2].x + in[idx1 + idx2].y * in[idx1 + idx2].y);
power2 = (in[idx1 + 128 + idx2].x * in[idx1 + 128 + idx2].x + in[idx1 + 128 + idx2].y * in[idx1 + 128 + idx2].y);
out[outidx] += (power1 + power2);
out[outidx + jump] += (power1 - power2);
out[outidx + 2 * jump] += (2 * (in[idx1 + idx2].x * in[idx1 + 128 + idx2].x + in[idx1 + idx2].y * in[idx1 + 128 + idx2].y));
out[outidx + 3 * jump] += (2 * (in[idx1 + idx2].x * in[idx1 + 128 + idx2].y - in[idx1 + idx2].y * in[idx1 + 128 + idx2].x));
}
}
}
}
// Incrementally updates per-channel running mean (`base`) and standard
// deviation (`stdev`) with a new block of 2*NACCUMULATE spectra, using a
// streaming (Welford-style) pass over the new block followed by a two-set
// merge with the running statistics. `factors[i]` holds precomputed 1/i (set
// up on the host with FactorFunctor) so the inner loop avoids a division.
// One thread per channel; `indata` is sample-major: indata[isamp*nchans+chan].
__global__ void GetScaleFactorsKernel(float *indata, float *base, float *stdev, float *factors, int nchans, int processed) {
/*
// NOTE: Filterbank file format coming in
//float mean = indata[threadIdx.x];
float mean = 0.0f;
// NOTE: Depending whether I save STD or VAR at the end of every run
// float estd = stdev[threadIdx.x];
float estd = stdev[threadIdx.x] * stdev[threadIdx.x] * (processed - 1.0f);
float oldmean = base[threadIdx.x];
//float estd = 0.0f;
//float oldmean = 0.0;
float val = 0.0f;
float diff = 0.0;
for (int isamp = 0; isamp < 2 * NACCUMULATE; ++isamp) {
val = indata[isamp * nchans + threadIdx.x];
diff = val - oldmean;
mean = oldmean + diff * factors[processed + isamp + 1];
estd += diff * (val - mean);
oldmean = mean;
}
base[threadIdx.x] = mean;
stdev[threadIdx.x] = sqrtf(estd / (float)(processed + 2 * NACCUMULATE - 1.0f));
// stdev[threadIdx.x] = estd;
*/
// Pass over the new block: streaming mean and sum of squared deviations.
float chmean = 0.0f;
float chestd = 0.0f;
float val = 0.0;
float diff = 0.0;
for (int isamp = 0; isamp < 2 * NACCUMULATE; ++isamp) {
val = indata[isamp * nchans + threadIdx.x];
diff = val - chmean;
chmean += diff * factors[isamp + 1];
chestd += diff * (val - chmean);
}
// Merge the new block's statistics with the running (already `processed`
// samples) statistics; stdev stores std, so square it back to recover the
// accumulated sum of squared deviations.
float oldmean = base[threadIdx.x];
float oldestd = stdev[threadIdx.x] * stdev[threadIdx.x] * (processed - 1.0f);
float newestd = 0.0f;
diff = chmean - oldmean;
base[threadIdx.x] = oldmean + diff * (float)(2.0f * NACCUMULATE) / (float)(processed + 2.0 * NACCUMULATE);
newestd = oldestd + chestd + diff * diff * (float)(2.0f * NACCUMULATE) * (float)processed / (float)(processed + 2.0 * NACCUMULATE);
stdev[threadIdx.x] = sqrt(newestd / (float)(processed + 2 * NACCUMULATE - 1.0f));
}
// Same running mean/std update as GetScaleFactorsKernel, but divides by
// (isamp + 1) directly instead of multiplying by a precomputed reciprocal --
// this is the "with division" variant used for benchmark comparison.
// NOTE(review): the commented-out block below references `factors`, which is
// not a parameter of this kernel; it is leftover from the sibling kernel.
__global__ void GetScaleFactorsDivKernel(float *indata, float *base, float *stdev, int nchans, int processed) {
/*
// NOTE: Filterbank file format coming in
//float mean = indata[threadIdx.x];
float mean = 0.0f;
// NOTE: Depending whether I save STD or VAR at the end of every run
// float estd = stdev[threadIdx.x];
float estd = stdev[threadIdx.x] * stdev[threadIdx.x] * (processed - 1.0f);
float oldmean = base[threadIdx.x];
//float estd = 0.0f;
//float oldmean = 0.0;
float val = 0.0f;
float diff = 0.0;
for (int isamp = 0; isamp < 2 * NACCUMULATE; ++isamp) {
val = indata[isamp * nchans + threadIdx.x];
diff = val - oldmean;
mean = oldmean + diff * factors[processed + isamp + 1];
estd += diff * (val - mean);
oldmean = mean;
}
base[threadIdx.x] = mean;
stdev[threadIdx.x] = sqrtf(estd / (float)(processed + 2 * NACCUMULATE - 1.0f));
// stdev[threadIdx.x] = estd;
*/
// Streaming pass over the new block of 2*NACCUMULATE spectra.
float chmean = 0.0f;
float chestd = 0.0f;
float val = 0.0;
float diff = 0.0;
for (int isamp = 0; isamp < 2 * NACCUMULATE; ++isamp) {
val = indata[isamp * nchans + threadIdx.x];
diff = val - chmean;
chmean += diff / (isamp + 1);
chestd += diff * (val - chmean);
}
// Merge with the running statistics accumulated over `processed` samples.
float oldmean = base[threadIdx.x];
float oldestd = stdev[threadIdx.x] * stdev[threadIdx.x] * (processed - 1.0f);
float newestd = 0.0f;
diff = chmean - oldmean;
base[threadIdx.x] = oldmean + diff * (float)(2.0f * NACCUMULATE) / (float)(processed + 2.0 * NACCUMULATE);
newestd = oldestd + chestd + diff * diff * (float)(2.0f * NACCUMULATE) * (float)processed / (float)(processed + 2.0 * NACCUMULATE);
stdev[threadIdx.x] = sqrt(newestd / (float)(processed + 2 * NACCUMULATE - 1.0f));
}
// Two-pass (hence "Double") per-channel mean and standard deviation over a
// block of 2*NACCUMULATE spectra. One thread per channel; `indata` is
// sample-major: indata[isamp * nchans + chan]. Unlike the incremental kernels
// above, this one recomputes from scratch rather than merging with running
// statistics.
// FIX: the standard deviation divided the sum of squares by (NACCUMULATE - 1)
// although 2*NACCUMULATE samples are accumulated; the Bessel-corrected divisor
// is (2*NACCUMULATE - 1), matching the incremental kernels' use of
// (processed + 2*NACCUMULATE - 1). Also switched sqrt -> sqrtf to stay in
// single precision.
__global__ void GetScaleFactorsDoubleKernel(float *indata, float *base, float *stdev, int nchans) {
    // Pass 1: mean of this channel across all 2*NACCUMULATE samples.
    float sum = indata[threadIdx.x];
    for (int isamp = 1; isamp < 2 * NACCUMULATE; ++isamp) {
        sum += indata[isamp * nchans + threadIdx.x];
    }
    float mean = sum / (float)(2.0 * NACCUMULATE);
    base[threadIdx.x] = mean;
    // Pass 2: sum of squared deviations from that mean.
    float sumsq = 0.0f;
    float diff = 0.0f;
    for (int isamp = 0; isamp < 2 * NACCUMULATE; ++isamp) {
        diff = indata[isamp * nchans + threadIdx.x] - mean;
        sumsq += diff * diff;
    }
    stdev[threadIdx.x] = sqrtf(sumsq / (float)(2 * NACCUMULATE - 1.0f));
}
// Maps a value to its reciprocal, passing zero through unchanged -- used to
// turn the 1..N sequence into 1/i running-mean weights without a div-by-zero
// at index 0.
struct FactorFunctor {
    __host__ __device__ float operator()(float val) {
        if (val == 0.0f) {
            return val;
        }
        return 1.0f / val;
    }
};
// Benchmark driver: times each unpacker, power/detect and scale-factor kernel
// variant over 32 iterations (cudaDeviceSynchronize between launches) and
// prints the average wall-clock time per launch.
int main(int argc, char* argv[]) {
// NOTE(review): sized with NACC here but NACCUMULATE below -- confirm the two
// macros agree. rawbuffer is also never delete[]d (leak; harmless in a
// one-shot benchmark, but worth fixing).
unsigned char *rawbuffer = new unsigned char[7168 * NFPGAS * NACC];
thrust::device_vector<unsigned char> rawdata(7168 * NFPGAS * NACCUMULATE);
// NOTE: 336 coarse channels * 32 fine channels * 4 time samples * 2 polarisations
thrust::device_vector<cuComplex> input(336*32*4*2*NACCUMULATE);
// NOTE: 336 coarse channels * 27 fine channels
thrust::device_vector<float> output(336*27*NACCUMULATE);
thrust::device_vector<float> means(567);
thrust::device_vector<float> scales(567);
// factors[i] = 1/i (0 stays 0): reciprocal weights for the running-mean kernel.
thrust::device_vector<float> factors(NACCUMULATE * 2);
thrust::sequence(factors.begin(), factors.end());
thrust::transform(factors.begin(), factors.end(), factors.begin(), FactorFunctor());
// NOTE: Benchmarking the unpacker kernel
cudaArray *rawarray;
cudaChannelFormatDesc cdesc;
cdesc = cudaCreateChannelDesc<int2>();
cudaMallocArray(&rawarray, &cdesc, 7, 48 * 128 * NACCUMULATE);
cudaResourceDesc rdesc;
memset(&rdesc, 0, sizeof(cudaResourceDesc));
rdesc.resType = cudaResourceTypeArray;
rdesc.res.array.array = rawarray;
cudaTextureDesc tdesc;
memset(&tdesc, 0, sizeof(cudaTextureDesc));
tdesc.addressMode[0] = cudaAddressModeClamp;
tdesc.filterMode = cudaFilterModePoint;
tdesc.readMode = cudaReadModeElementType;
cudaTextureObject_t texObj = 0;
cudaCreateTextureObject(&texObj, &rdesc, &tdesc, NULL);
// NOTE(review): cudaMemcpyToArray is deprecated in newer CUDA toolkits
// (prefer cudaMemcpy2DToArray); no error checking on any CUDA call here.
cudaMemcpyToArray(rawarray, 0, 0, rawbuffer, 7168 * 48 * NACCUMULATE * sizeof(unsigned char), cudaMemcpyHostToDevice);
dim3 rearrange_b(1,48,1);
dim3 rearrange_t(7,1,1);
dim3 unpackt(2, 128, 1);
dim3 unpacka(1, 1024, 1);
dim3 unpackb(48, 2, 1);
// ##################################
// ### UNPACK KERNEL BENCHMARKING ###
// ##################################
std::chrono::time_point<std::chrono::system_clock> unpackstart, unpackend;
std::chrono::duration<double> unpackelapsed;
unpackstart = std::chrono::system_clock::now();
// NOTE: Unpacking with texture memory
for (int ii = 0; ii < 32; ++ii) {
unpack_original_tex<<<rearrange_b, rearrange_t, 0>>>(texObj, thrust::raw_pointer_cast(input.data()), NACCUMULATE);
gpuErrchk(cudaDeviceSynchronize());
}
unpackend = std::chrono::system_clock::now();
unpackelapsed = unpackend - unpackstart;
cout << "Unpacking with texture memory: " << unpackelapsed.count() / 32.0 << "s" << endl;
// NOTE: Unpacking with shared memory
unpackstart = std::chrono::system_clock::now();
for (int ii = 0; ii < 32; ++ii) {
unpack_new<<<48, 128, 0>>>(reinterpret_cast<unsigned int*>(thrust::raw_pointer_cast(rawdata.data())), thrust::raw_pointer_cast(input.data()));
gpuErrchk(cudaDeviceSynchronize());
}
unpackend = std::chrono::system_clock::now();
unpackelapsed = unpackend - unpackstart;
cout << "Unpacking with shared memory: " << unpackelapsed.count() / 32.0 << "s" << endl;
// NOTE: Unpacking with shared memory with cast to int2 in the function call
unpackstart = std::chrono::system_clock::now();
for (int ii = 0; ii < 32; ++ii) {
unpack_new_int2<<<48, 128, 0>>>(reinterpret_cast<int2*>(thrust::raw_pointer_cast(rawdata.data())), thrust::raw_pointer_cast(input.data()));
gpuErrchk(cudaDeviceSynchronize());
}
unpackend = std::chrono::system_clock::now();
unpackelapsed = unpackend - unpackstart;
cout << "Unpacking with shared memory with cast to int2 in the function call: " << unpackelapsed.count() / 32.0 << "s" << endl;
// NOTE: Unpacking with shared memory with the alternative incoming buffer arrangement
unpackstart = std::chrono::system_clock::now();
for (int ii = 0; ii < 32; ++ii) {
unpack_alt<<<48, unpacka, 0>>>(reinterpret_cast<unsigned int*>(thrust::raw_pointer_cast(rawdata.data())), thrust::raw_pointer_cast(input.data()));
gpuErrchk(cudaDeviceSynchronize());
}
unpackend = std::chrono::system_clock::now();
unpackelapsed = unpackend - unpackstart;
cout << "Unpacking with shared memory with the alternative incoming buffer arrangement: " << unpackelapsed.count() / 32.0 << "s" << endl;
std::chrono::time_point<std::chrono::system_clock> powerstart, powerend;
std::chrono::duration<double> powerelapsed;
// #################################
// ### POWER KERNEL BENCHMARKING ###
// #################################
// NOTE: Unoptimised power kernel with time averaging only
powerstart = std::chrono::system_clock::now();
for (int ii = 0; ii < 32; ++ii) {
GetPowerAddTimeKernel<<<48, 27, 0>>>(thrust::raw_pointer_cast(input.data()), thrust::raw_pointer_cast(output.data()), 0, NSAMPS, NACCUMULATE);
gpuErrchk(cudaDeviceSynchronize());
}
powerend = std::chrono::system_clock::now();
powerelapsed = powerend - powerstart;
cout << "Unoptimised power kernel: " << powerelapsed.count() / 32.0 << "s" << endl;
// NOTE: Optimised power kernel with shared memory with time and frequency averaging
powerstart = std::chrono::system_clock::now();
for (int ii = 0; ii < 32; ++ii) {
DetectScrunchKernel<<<NACCUMULATE,1024,0>>>(thrust::raw_pointer_cast(input.data()),thrust::raw_pointer_cast(output.data()), 567);
gpuErrchk(cudaDeviceSynchronize());
}
powerend = std::chrono::system_clock::now();
powerelapsed = powerend - powerstart;
cout << "Optimised power kernel: " << powerelapsed.count() / 32.0 << "s" << endl;
// NOTE: Optimised power kernel with scaling
powerstart = std::chrono::system_clock::now();
for (int ii = 0; ii < 32; ++ii) {
DetectScrunchScaleKernel<<<NACCUMULATE,1024,0>>>(thrust::raw_pointer_cast(input.data()),thrust::raw_pointer_cast(output.data()), 567,
thrust::raw_pointer_cast(means.data()), thrust::raw_pointer_cast(scales.data()));
gpuErrchk(cudaDeviceSynchronize());
}
powerend = std::chrono::system_clock::now();
powerelapsed = powerend - powerstart;
cout << "Optimised power kernel with scaling: " << powerelapsed.count() / 32.0 << "s" << endl;
// NOTE: Optimised power kernel with scaling and reversed frequency
powerstart = std::chrono::system_clock::now();
for (int ii = 0; ii < 32; ++ii) {
DetectScrunchScaleRevKernel<<<NACCUMULATE,1024,0>>>(thrust::raw_pointer_cast(input.data()),thrust::raw_pointer_cast(output.data()), 567,
thrust::raw_pointer_cast(means.data()), thrust::raw_pointer_cast(scales.data()));
gpuErrchk(cudaDeviceSynchronize());
}
powerend = std::chrono::system_clock::now();
powerelapsed = powerend - powerstart;
cout << "Optimised power kernel with scaling and reversed frequency: " << powerelapsed.count() / 32.0 << "s" << endl;
// NOTE: Optimised power kernel with scaling truncated and reversed frequency
powerstart = std::chrono::system_clock::now();
for (int ii = 0; ii < 32; ++ii) {
DetectScrunchScaleTruncKernel<<<NACCUMULATE,1024,0>>>(thrust::raw_pointer_cast(input.data()),thrust::raw_pointer_cast(output.data()), 512,
thrust::raw_pointer_cast(means.data()), thrust::raw_pointer_cast(scales.data()));
gpuErrchk(cudaDeviceSynchronize());
}
powerend = std::chrono::system_clock::now();
powerelapsed = powerend - powerstart;
cout << "Optimised power kernel with scaling, truncated and reversed frequency: " << powerelapsed.count() / 32.0 << "s" << endl;
// #################################
// ### SCALE KERNEL BENCHMARKING ###
// #################################
std::chrono::time_point<std::chrono::system_clock> scalestart, scaleend;
std::chrono::duration<double> scaleelapsed;
// NOTE: Double-pass scaling factors kernel
scalestart = std::chrono::system_clock::now();
for (int ii = 0; ii < 32; ++ii) {
GetScaleFactorsDoubleKernel<<<1, 512, 0>>>(thrust::raw_pointer_cast(output.data()), thrust::raw_pointer_cast(means.data()), thrust::raw_pointer_cast(scales.data()), 512);
gpuErrchk(cudaDeviceSynchronize());
}
scaleend = std::chrono::system_clock::now();
scaleelapsed = scaleend - scalestart;
cout << "Double pass scaling factors kernel with division: " << scaleelapsed.count() / 32.0 << "s" << endl;
// NOTE: Unoptimised scaling factors kernel with division
scalestart = std::chrono::system_clock::now();
for (int ii = 0; ii < 32; ++ii) {
GetScaleFactorsDivKernel<<<1, 512, 0>>>(thrust::raw_pointer_cast(output.data()), thrust::raw_pointer_cast(means.data()), thrust::raw_pointer_cast(scales.data()),
512, 0);
gpuErrchk(cudaDeviceSynchronize());
}
scaleend = std::chrono::system_clock::now();
scaleelapsed = scaleend - scalestart;
cout << "Unoptimised scaling factors kernel with division: " << scaleelapsed.count() / 32.0 << "s" << endl;
// NOTE: Optimised scaling factors kernel
scalestart = std::chrono::system_clock::now();
for (int ii = 0; ii < 32; ++ii) {
GetScaleFactorsKernel<<<1, 512, 0>>>(thrust::raw_pointer_cast(output.data()), thrust::raw_pointer_cast(means.data()), thrust::raw_pointer_cast(scales.data()),
thrust::raw_pointer_cast(factors.data()), 512, 0);
gpuErrchk(cudaDeviceSynchronize());
}
scaleend = std::chrono::system_clock::now();
scaleelapsed = scaleend - scalestart;
cout << "Optimised scaling factors kernel: " << scaleelapsed.count() / 32.0 << "s" << endl;
// NOTE(review): texObj and rawarray are never destroyed/freed
// (cudaDestroyTextureObject / cudaFreeArray) and rawbuffer is leaked.
}
|
10,848 | #include <bitset>
#include <iomanip>
#include <ios>
#include <iostream>
#include <sstream>
#include <string>
// Formats a CUDA device UUID as the canonical "GPU-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
// string (the form nvidia-smi reports).
// FIX: the original printed each byte with bare std::hex, so bytes below 0x10
// lost their leading zero (0x0a -> "a"), producing malformed, variable-length
// UUID strings. Each byte is now masked to 8 bits and zero-padded to width 2.
std::string get_UUID_as_String(const cudaUUID_t& uuid){
    std::stringstream result;
    result << "GPU-" << std::hex << std::setfill('0');
    size_t cnt = 0;
    for (auto& c : uuid.bytes) {
        // Dashes after bytes 4, 6, 8 and 10 (8-4-4-4-12 grouping).
        if (cnt == 4 || cnt == 6 || cnt == 8 || cnt == 10) result << "-";
        // Mask to one byte so sign extension of a negative char cannot leak
        // extra "ff" digits into the output.
        result << std::setw(2) << (static_cast<unsigned int>(c) & 0xffu);
        cnt++;
    }
    return result.str();
}
// Queries cudaGetDeviceProperties for `deviceId` and prints a human-readable
// summary (general info, compute capability/limits, memory sizes).
// FIX: sharedMemPerMultiprocessor was printed in raw bytes under a "(kb)"
// label -- now divided by 1024 like the other kb fields; also corrected the
// user-facing typos "Computer capability" and "ration".
// NOTE(review): the cudaGetDeviceProperties return code is still unchecked --
// an invalid deviceId would print a zeroed struct.
void print_device_information(const int deviceId) {
    cudaDeviceProp deviceProp{};
    cudaGetDeviceProperties(&deviceProp, deviceId);
    std::cout << "================ DeviceId: " << deviceId << " ================ \n";
    std::cout << "--> General Information: \n"
              << "\tDevice name: " << deviceProp.name << "\n"
              << "\tUUID: " << get_UUID_as_String(deviceProp.uuid) << "\n"
              << "\tIntegrated: " << deviceProp.integrated << "\n"
              << "\tClock rate (kHz): " << deviceProp.clockRate << "\n";
    std::cout << "\n--> Computation: \n"
              << "\tCompute capability: " << deviceProp.major << "." << deviceProp.minor << "\n"
              << "\t# of SMs: " << deviceProp.multiProcessorCount << "\n"
              << "\tWarp size: " << deviceProp.warpSize << "\n"
              << "\tmax block dim: (" << deviceProp.maxThreadsDim[0] << ", " << deviceProp.maxThreadsDim[1] << ", "
              << deviceProp.maxThreadsDim[2] << ")\n"
              << "\tmax threads/block: " << deviceProp.maxThreadsPerBlock << "\n"
              << "\tmax threads/SM: " << deviceProp.maxThreadsPerMultiProcessor << "\n"
              << "\tSingle/Double precision ratio: " << deviceProp.singleToDoublePrecisionPerfRatio << "\n"
              << "\n";
    std::cout << "--> Memory: \n"
              << "\tUnified addressing: " << deviceProp.unifiedAddressing << "\n"
              << "\tSupports managed memory: " << deviceProp.managedMemory << "\n"
              << "\tTotal global memory (Gb): " << std::setprecision(3) << std::fixed
              << static_cast<float>(deviceProp.totalGlobalMem) / (1024. * 1024. * 1024.) << "\n"
              << "\tTotal constant memory (kb): " << deviceProp.totalConstMem / 1024 << "\n"
              << "\tsMem/block (kb): " << deviceProp.sharedMemPerBlock / 1024 << "\n"
              << "\tsMem/SM (kb): " << deviceProp.sharedMemPerMultiprocessor / 1024 << "\n"
              << "\n";
}
// Enumerates all visible CUDA devices and dumps their properties.
int main() {
    // FIX: initialize to 0 -- if cudaGetDeviceCount fails it does not write
    // the output parameter, and the original then printed an indeterminate
    // value and looped over garbage.
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    std::cout << "Detected " << deviceCount << " GPU devices.\n";
    for (int device = 0; device < deviceCount; ++device) {
        print_device_information(device);
    }
    return 0;
}
|
10,849 | #include "cuda.h"
// N-body gravitational acceleration kernel (the name `saxpy` is historical --
// this is not a SAXPY): for each body i, sums the acceleration contributed by
// every other body j using a softened inverse-cube law, then writes the
// totals to accX/accY/accZ. The velocity arrays are accepted but unused.
// One thread per body; O(n) work per thread.
// FIX: `1.0` literals were double, forcing a float->double promotion and
// double comparison on every pair; all constants are now single precision.
__global__ void kernel_saxpy( int n, float * Masse, float * PosX, float * PosY, float * PosZ, float * VelX, float * VelY, float * VelZ, float * accX, float * accY, float * accZ ) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if ( i < n ) {
        float aX = 0.0f;
        float aY = 0.0f;
        float aZ = 0.0f;
        int j;
        for (j = 0; j < n; j++) {
            if (!(i == j)) {
                float deltaX = (PosX[j] - PosX[i]);
                float deltaY = (PosY[j] - PosY[i]);
                float deltaZ = (PosZ[j] - PosZ[i]);
                float Dij = sqrtf((deltaX*deltaX) + (deltaY*deltaY) + (deltaZ*deltaZ));
                // Softening: clamp the distance so close encounters don't blow up.
                if ( Dij < 1.0f ) Dij = 1.0f;
                // coef = G * m_j / Dij^3, with the constant G*1 hard-coded as 10.
                float coef = 10.0f * 1.0f * (1.0f/(Dij*Dij*Dij)) * Masse[j];
                aX += deltaX * coef;
                aY += deltaY * coef;
                aZ += deltaZ * coef;
            }
        }
        accX[i] = aX;
        accY[i] = aY;
        accZ[i] = aZ;
    }
}
void saxpy( int nblocks, int nthreads, int n, float * deviceSrc1, float * deviceSrc2, float * deviceSrc3, float * deviceSrc4, float * deviceSrc5, float * deviceSrc6, float * deviceSrc7, float * deviceSrc8, float * deviceSrc9, float * deviceSrc10 ) {
kernel_saxpy<<<nblocks, nthreads>>>( n, deviceSrc1, deviceSrc2, deviceSrc3, deviceSrc4, deviceSrc5, deviceSrc6, deviceSrc7, deviceSrc8, deviceSrc9, deviceSrc10 );
}
|
10,850 | #include <stdlib.h>
#include <stdio.h>
#define N 64
#define TPB 32
// Map index i of an n-point grid linearly onto [0.0, 1.0].
float scale(int i, int n) {
  const float span = (float)(n - 1);
  return (float)i / span;
}
// Absolute difference |x2 - x1| between two scalar coordinates.
// Fix vs. original: sqrt((x2-x1)*(x2-x1)) promoted to the double-precision
// sqrt to compute what is simply an absolute value; fabsf is exact,
// branch-free, and stays in single precision on the device.
__device__ float distance(float x1, float x2) {
  return fabsf(x2 - x1);
}
// One thread per element: d_out[i] = |d_in[i] - ref|.
// NOTE(review): there is no `i < N` guard; the launch in main() uses
// (N+TPB-1)/TPB blocks, which stays in bounds only because TPB divides N
// exactly in this program -- confirm before changing N or TPB.
__global__ void distanceKernel(float* d_out, float* d_in, float ref) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const float x = d_in[i];
    d_out[i] = distance(x, ref);
    // debugging aid only: device printf serializes and is very slow
    printf("i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i]);
}
int main() {
const float ref = 0.5f;
float* in = NULL;
float* out = NULL;
cudaMallocManaged(&in, N * sizeof(float));
cudaMallocManaged(&out, N * sizeof(float));
for (int i = 0; i < N; ++i) {
in[i] = scale(i, N);
}
int banks = (N+TPB-1)/TPB;
distanceKernel<<<banks, TPB>>>(out, in, ref);
cudaDeviceSynchronize();
cudaFree(in);
cudaFree(out);
return 0;
}
|
10,851 | /*
* Copyright (C) 2002-2021 the Network-Based Computing Laboratory
* (NBCL), The Ohio State University.
*
* Contact: Dr. D. K. Panda (panda@cse.ohio-state.edu)
*
* For detailed copyright and licensing information, please refer to the
* copyright file COPYRIGHT in the top level OMB directory.
*/
// Busy-work kernel used by the OSU micro-benchmarks to keep the GPU
// occupied for a tunable amount of time: each thread i repeats the saxpy
// update y[i] = a*x[i] + y[i] N/8 times, so runtime scales with N.
__global__ void compute_kernel(float a, float *x, float *y, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int count = 0;
    if (i < N) {
        // the repeated accumulation is intentional (workload generator),
        // not a numerical algorithm
        for (count = 0; count < (N / 8); count++) {
            y[i] = a * x[i] + y[i];
        }
    }
}
// Touch (increment) every byte of a managed buffer from the device, forcing
// its pages to migrate to GPU memory ahead of a benchmark measurement.
// NOTE(review): `i` is an int compared against a size_t `len`; buffers of
// more than INT_MAX bytes would overflow the index -- confirm callers stay
// below that.
__global__ void touch_managed_kernel(char *buf, size_t len)
{
    int i;
    i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < len) {
        buf[i] = buf[i] + 1;
    }
}
__global__ void empty_kernel(char *buf, size_t len) {}
extern "C" void call_kernel(float a, float *d_x, float *d_y, int N,
cudaStream_t *stream)
{
compute_kernel<<<(N + 255) / 256, 256, 0, *stream>>>(a, d_x, d_y, N);
}
extern "C" void call_touch_managed_kernel(char *buf, size_t length,
cudaStream_t *stream)
{
touch_managed_kernel<<<(length + 255) / 256, 256, 0, *stream>>>(buf,
length);
}
extern "C" void call_empty_kernel(char *buf, size_t length,
cudaStream_t *stream)
{
empty_kernel<<<(length + 255) / 256, 256, 0, *stream>>>(buf, length);
}
|
10,852 | #include <stdio.h>
#include <cstdlib>
#include <cmath>
#include <iostream>
#include <sys/time.h>
#define TIME_RESOLUTION 1000000 // time measuring resolution (us)
#define NUM_BLOCKS 128 // this is wrong
#define NUM_THREADS_PER_BLOCK 256 // this is wrong
#define SIZE 2048
#define TILE_SIZE 16
using namespace std;
float m1[SIZE][SIZE], m2[SIZE][SIZE], result[SIZE][SIZE], _result[SIZE][SIZE];
long long unsigned cpu_time;
timeval t;
void startTime (void) {
gettimeofday(&t, NULL);
cpu_time = t.tv_sec * TIME_RESOLUTION + t.tv_usec;
}
void stopTime (void) {
gettimeofday(&t, NULL);
long long unsigned final_time = t.tv_sec * TIME_RESOLUTION + t.tv_usec;
final_time -= cpu_time;
cout << final_time << " us have elapsed for the CPU execution" << endl;
}
void fillMatrices (void) {
for (unsigned i = 0; i < SIZE; ++i) {
for (unsigned j = 0; j < SIZE; ++j) {
result[i][j] = 0.0;
_result[i][j] = 0.0;
m1[i][j] = ((float) rand()) / ((float) RAND_MAX);
m2[i][j] = ((float) rand()) / ((float) RAND_MAX);
}
}
}
// Abort the program with `msg` if any previously issued CUDA call failed.
void checkCUDAError (const char *msg) {
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess) {
        return;  // nothing pending
    }
    cerr << "Cuda error: " << msg << ", " << cudaGetErrorString( status) << endl;
    exit(-1);
}
// You need to optimize AND parallelize this first
// Naive O(SIZE^3) single-threaded matrix multiply: result = m1 * m2.
// Serves as the CPU baseline for this exercise. The i-j-k loop order walks
// m2 down a column in the inner loop, which is cache-unfriendly; the
// assignment comment above asks for this routine to be optimized and
// parallelized.
void regularMatrixMult (void) {
    for (unsigned i = 0; i < SIZE; ++i) {
        for (unsigned j = 0; j < SIZE; ++j) {
            result[i][j] = 0;
            for (unsigned k = 0; k < SIZE; ++k) {
                result[i][j] += m1[i][k] * m2[k][j];
            }
        }
    }
}
void tiledMatrixMult (void) {
for (unsigned m = 0; m < SIZE; m += TILE_SIZE) {
for (unsigned n = 0; n < SIZE; n += TILE_SIZE) {
//...
}
}
}
// Fill the input parameters and kernel qualifier
__global__
void matrixMultKernel (double *dev_m1, double *dev_m2, double *dev_res) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
}
// Fill with the code required for the GPU stencil (mem allocation, transfers, kernel launch of stencilKernel)
double* matrixMultGPU (void) {
// you can either:
// 1 - use 2D matrices, as in CPU
// 2 - use 1D matrices, but YOU have to convert them here
return NULL;
}
int main (int argc, char** argv) {
fillMatrices ();
// GPU stuff
matrixMultGPU ();
// CPU stuff
startTime();
regularMatrixMult ();
stopTime();
return 0;
}
|
10,853 | #include <iostream>
#include <cstdlib>
#include <stdio.h>
#include <ctime>
#include <assert.h>
// Exclusive prefix sum: A_gpu[id] = arr[0] + ... + arr[id-1], one thread
// per output element (O(N) work per thread; O(N^2) total -- fine for a
// homework-scale N).
// Fixes vs. original: the output buffer comes straight from cudaMalloc and
// is uninitialized, so `A_gpu[id] += ...` folded garbage into every
// element; we now accumulate in a register and store exactly once. Thread 0
// also writes its defined result (0) instead of returning early and leaving
// A_gpu[0] undefined.
__global__ void allPrefixSums (long int* A_gpu, long int* arr, int N) {
    int id = threadIdx.x + (blockIdx.x * blockDim.x);
    if (id > N-1) return;  // tail guard
    long int sum = 0;
    for (int i = 0; i < id; i++) {
        sum += arr[i];
    }
    A_gpu[id] = sum;  // single store; never reads uninitialized memory
}
// Host driver: builds a random array of N values, computes an exclusive
// prefix sum on the CPU and on the GPU, and asserts the two agree.
int main(int argc, char* argv[]) {
    if (argc != 2) {
        std::cerr << "Incorrect input style, please do ./homework4 N" << std::endl;
        return 2;
    }
    int N = atoi(argv[1]);
    long int* arr = new long int[N];
    for (int i = 0; i < N; i++) {
        arr[i] = rand() % 1000 + 1;
    }
    long int* A_cpu = new long int[N];
    // Sequential reference scan.
    // Fix vs. original: `A_cpu[i] += ...` accumulated into uninitialized
    // heap memory from `new long int[N]`; plain assignment computes the
    // intended exclusive prefix sum.
    A_cpu[0] = 0;
    for (int i = 1; i < N; i++) {
        A_cpu[i] = arr[i-1] + A_cpu[i-1];
    }
    long int* deviceA;
    cudaMalloc(&deviceA, N * sizeof(long int));
    // Fix vs. original: cudaMalloc does not initialize memory, and the
    // kernel accumulates into this buffer, so it must start zeroed.
    cudaMemset(deviceA, 0, N * sizeof(long int));
    long int* deviceArr;
    cudaMalloc(&deviceArr, N*sizeof(long int));
    cudaMemcpy(deviceArr, arr, N*sizeof(long int), cudaMemcpyHostToDevice);
    dim3 threadsPerBlock(1024, 1, 1);
    dim3 numBlocks(N / 1024 + 1, 1, 1);  // ceil-style coverage of N threads
    // Make the parallel call
    allPrefixSums<<<numBlocks, threadsPerBlock>>>(deviceA, deviceArr, N);
    long int* A_gpu = new long int[N];
    // blocking copy: also synchronizes with the kernel launch above
    cudaMemcpy(A_gpu, deviceA, N*sizeof(long int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        assert(A_gpu[i] == A_cpu[i]);
    }
    printf("GPU Output Matches CPU Output\n");
    // release device and host memory (leaked in the original)
    cudaFree(deviceA);
    cudaFree(deviceArr);
    delete[] arr;
    delete[] A_cpu;
    delete[] A_gpu;
    return 0;
}
|
10,854 | /*
* ABCTE.cpp
*
* Created on: 01 февр. 2016 г.
* Author: aleksandr
*/
#include "ABCTE.h"
#include <iostream>
ABCTE::ABCTE(GridTE* _grid) : EyLeft((_grid->sizeY-1)*6, 0),
EyRight((_grid->sizeY-1)*6, 0),
ExTop((_grid->sizeX-1)*6, 0),
ExBottom((_grid->sizeX-1)*6, 0),
coeff0(0),
coeff1(0),
coeff2(0),
grid(_grid),
coeffDevice(3, 0)
{
float temp1 = grid->S;
float temp2 = 1.0 / temp1 + 2.0 + temp1;
coeff0 = -(1.0 / temp1 - 2.0 + temp1) / temp2;
coeff1 = -2.0 * (temp1 - 1.0 / temp1) / temp2;
coeff2 = 4.0 * (temp1 + 1.0 / temp1) / temp2;
int sizeX = grid->sizeX;
int sizeY = grid->sizeY;
std::vector<float> coeffHost(3, 0);
coeffHost[0] = coeff0;
coeffHost[1] = coeff1;
coeffHost[2] = coeff2;
coeffDevice = coeffHost;
leftUpdater.setParams(grid->Ey.getDevicePtr(),
EyLeft.getDevicePtr(),
coeffDevice.data(),
sizeX, sizeY);
rightUpdater.setParams(grid->Ey.getDevicePtr(),
EyRight.getDevicePtr(),
coeffDevice.data(),
sizeX, sizeY);
topUpdater.setParams(grid->Ex.getDevicePtr(),
ExTop.getDevicePtr(),
coeffDevice.data(),
sizeX, sizeY);
bottomUpdater.setParams(grid->Ex.getDevicePtr(),
ExBottom.getDevicePtr(),
coeffDevice.data(),
sizeX, sizeY);
std::cout << "Absorption boundary conditions initialized \n";
}
|
10,855 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 8
#define TAMBLOCK 2
#define LIMIT 10
// One interior grid point per thread: B[ind] becomes the sum of A[ind] and
// its four von-Neumann neighbours. The +1 offsets shift the thread grid
// into the interior of the N x N array, so no bounds check is needed as
// long as the launch covers exactly (N-2) x (N-2) threads, as main() does.
// NOTE(review): a textbook Jacobi step would average the neighbours
// (divide by 4 or 5) rather than sum them, so values here grow each
// iteration -- confirm this is intended.
__global__ void jacobi2d(float *A, float *B){
    int ind = (((blockIdx.y*blockDim.y+threadIdx.y)+1)*N)+((blockIdx.x*blockDim.x+threadIdx.x)+1);
    *(B+ind)=*(A+ind)+*(A+ind+1)+*(A+ind-1)+*(A+ind+N)+*(A+ind-N);
}
// Host driver: initializes an N x N grid (corner cells fixed at 150, the
// rest random in [0,10)), runs LIMIT Jacobi-style sweeps on the GPU with
// ping-pong buffers, and prints the resulting grid.
int main(){
    int memsize=N*N*sizeof(float);
    float *A = (float *)malloc(memsize);  // host grid
    float *B = (float *)malloc(memsize);  // kept for parity with original code (unused)
    float *d_a, *d_b;
    cudaMalloc(&d_a, memsize);
    cudaMalloc(&d_b, memsize);
    // Initialize the grid.
    // Fix vs. original: the test used `i==N`/`j==N`, which can never be true
    // for indices in [0, N-1]; the last row/column index is N-1, so the
    // special corner value was only ever applied at (0,0).
    for(int i=0; i<N; ++i)
        for(int j=0;j<N;++j){
            if((i==0||i==N-1)&&(j==0||j==N-1))  // the four corners
                *(A+(i*N)+j)=150.0f;
            else
                *(A+(i*N)+j)=(float) (rand()%10);
        }
    cudaMemcpy(d_a, A, memsize, cudaMemcpyHostToDevice);
    // Fix vs. original: d_b was filled from the uninitialized host buffer B,
    // so the boundary cells (which the kernel never writes) carried garbage
    // into every odd iteration. Seeding d_b with A keeps the boundary
    // well defined in both ping-pong buffers.
    cudaMemcpy(d_b, A, memsize, cudaMemcpyHostToDevice);
    dim3 block((N-2)/TAMBLOCK, (N-2)/TAMBLOCK);  // interior only
    dim3 thread(TAMBLOCK, TAMBLOCK);
    // Ping-pong: each sweep reads d_a, writes d_b, then the roles swap.
    for(int k=0; k<LIMIT;++k){
        jacobi2d <<<block, thread>>> (d_a, d_b);
        float *aux=d_a;
        d_a=d_b;
        d_b=aux;
    }
    cudaMemcpy(A, d_a, memsize, cudaMemcpyDeviceToHost);
    printf("\nValor del array A: \n");
    for(int i=0;i<N*N;++i){
        printf("%f ,",*(A+i));
        if(i!=0 && (i+1)%8==0)
            printf("\n");
    }
    // release resources (leaked in the original)
    cudaFree(d_a);
    cudaFree(d_b);
    free(A);
    free(B);
}
|
10,856 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <string.h>
#include <iostream>
#define BLOCOS 1
//#define THREAD
#define CHECK_ERROR(call) do { \
if( cudaSuccess != call) { \
std::cerr << std::endl << "CUDA ERRO: " << \
cudaGetErrorString(call) << " in file: " << __FILE__ \
<< " in line: " << __LINE__ << std::endl; \
exit(0); \
} } while (0)
using namespace std;
typedef struct automato {
char letra;
automato *prox;
automato *ant;
automato *inf;
int final;
} Automato;
__global__ void pfac(Automato* at, int *matches, char *frase){
int x = blockDim.x * blockIdx.x + threadIdx.x;
}
// Allocate a fresh automaton (trie) node linked back to parent `ant`.
// Fix vs. original: `letra` and `final` were left uninitialized. `final` is
// read elsewhere as the end-of-word flag (main() only sets it to 1 at word
// boundaries), so it must start at 0; `letra` is zeroed defensively even
// though every caller assigns it immediately.
Automato* newAutomato(Automato* ant) {
    Automato *nv = (Automato*) malloc(sizeof(Automato));
    nv->letra = '\0';
    nv->prox = NULL;
    nv->inf = NULL;
    nv->ant = ant;
    nv->final = 0;
    return nv;
}
Automato* addAlgarismo(Automato *at, char algm, int first) {
if (at != NULL && at->letra == algm && first == 1) {
return at;
}
// Caso algarismo novo seja diferente do algarismo da raiz
else if (at != NULL && at->letra != algm && first == 1) {
Automato *pt = at->inf;
Automato *ant = pt;
while (pt != NULL) {
if (pt->letra == algm) {
return pt;
}
else {
if (pt != NULL) {
ant = pt;
pt = pt->inf;
}
}
}
Automato *nv = newAutomato(at);
nv->letra = algm;
if (ant != NULL) {
ant->inf = nv;
return ant->inf;
}
else {
at->inf = nv;
return at->inf;
}
}
else if(at != NULL && first == 0)
{
Automato *pt = at->prox;
Automato *ant = NULL;
while (pt != NULL) {
if (pt->letra == algm) {
return pt;
}
else
{
ant = pt;
pt = pt->inf;
}
}
Automato *nv = newAutomato(at);
nv->letra = algm;
if (ant != NULL) {
ant->inf = nv;
}
else {
at->prox = nv;
}
return nv;
}
else
{
Automato *nv = newAutomato(NULL);
nv->letra = algm;
return nv;
}
}
void imprimir(Automato *at)
{
Automato *temp = at;
while (temp != NULL) {
printf("%c ", temp->letra);
imprimir(temp->prox);
temp = temp->inf;
printf("\n");
}
}
/*Automato* mallocGPU(Automato *at)
{
Automato *temp = at;
while (temp != NULL) {
imprimir(temp->prox);
temp = temp->inf;
}
}*/
int main (int argc, char **argv)
{
int GPU = 0;
Automato *at = newAutomato(NULL);
at->letra = 'a';
at->prox = NULL;
char frase[255] = "ab abg bede ef"; //"abc acd abb agd acc";
int THREADS = strlen(frase);
Automato *temp = at;
int i = 0;
int first = 1;
while(frase[i] != '\0')
{
if(frase[i] != ' ')
{
temp = addAlgarismo(temp, frase[i], first);
first = 0;
//printf("Letra: %c\n", temp->letra);
}
else
{
temp->final = 1;
temp = at;
first = 1;
}
i++;
}
imprimir(at);
// CPU
char h_fita[255] = "ab abg bede ef";
int *h_matches = (int*) malloc(sizeof(int));
// GPU
Automato *d_at = NULL;
char *d_fita = NULL;
int *d_matches = NULL;
CHECK_ERROR(cudaSetDevice(GPU));
*h_matches = 0;
//Reset na GPU selecionada
CHECK_ERROR(cudaDeviceReset());
CHECK_ERROR(cudaMalloc((void**) &d_at, sizeof(Automato*)));
CHECK_ERROR(cudaMalloc((void**) &d_fita, 255*sizeof(char)));
CHECK_ERROR(cudaMalloc((void**) &d_matches, sizeof(int)));
//Copiando CPU --> GPU
CHECK_ERROR(cudaMemcpy(d_at, at, sizeof(Automato*), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpy(d_fita, h_fita, 255*sizeof(char), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpy(d_matches, h_matches, sizeof(int), cudaMemcpyHostToDevice));
pfac <<<BLOCOS, THREADS>>> (d_at, d_matches, d_fita);
//Copiando GPU --> CPU
CHECK_ERROR(cudaMemcpy(at, d_at, sizeof(Automato*), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(h_fita, d_fita, 255*sizeof(char), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(h_matches, d_matches, sizeof(int), cudaMemcpyDeviceToHost));
// Liberando memória na GPU
CHECK_ERROR(cudaFree(d_at));
CHECK_ERROR(cudaFree(d_fita));
CHECK_ERROR(cudaFree(d_matches));
// Liberando memória na CPU
free(at);
free(h_matches);
free(h_fita);
return EXIT_SUCCESS;
}
|
10,857 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define SIZE 128*1024*1024
#define BLOCK_SIZE 1024
// Bandwidth probe: copy idata[tid+offset] -> odata[tid+offset].
// A nonzero offset shifts every warp's accesses relative to the aligned
// memory segments, exposing the cost of misaligned/uncoalesced access.
// No bounds check: main() allocates 2x SIZE elements, so tid+offset stays
// in bounds for the offsets it sweeps (0..1023).
__global__ void offsetCopy(float *odata, const float *idata, int offset)
{
    int xid = blockIdx.x * blockDim.x + threadIdx.x + offset;
    odata[xid] = idata[xid];
}
__global__ void strideCopy(float *odata, const float *idata, int stride)
{
int xid = (blockIdx.x * blockDim.x + threadIdx.x) * stride;
odata[xid] = idata[xid];
}
// Sweep offsets 0, 1, 3, 7, ..., 1023 through offsetCopy and report the
// elapsed time of each launch via CUDA events.
int main()
{
    srand((unsigned)time(NULL));
    int i;
    float* cpu_A = (float*)malloc(sizeof(float)*SIZE);
    float* cpu_B = (float*)malloc(sizeof(float)*SIZE);
    memset(cpu_B, 0, sizeof(float)*SIZE);
    for (i = 0; i < SIZE; ++i){
        // Fix vs. original: `rand() / RAND_MAX` is INTEGER division and is
        // almost always 0; cast before dividing to get a value in [0, 1].
        cpu_A[i] = (float)rand() / (float)RAND_MAX;
    }
    float *dev_a;
    float *dev_b;
    cudaSetDevice(0);
    // 2x SIZE so offsetCopy may legally access up to 1023 elements past SIZE
    cudaMalloc((void**)&dev_a, SIZE * sizeof(float) * 2);
    cudaMalloc((void**)&dev_b, SIZE * sizeof(float) * 2);
    cudaMemcpy(dev_a, cpu_A, SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    for (i = 1; i <= 1024; i *= 2)
    {
        cudaEventRecord(start, 0);
        int blocks = SIZE / BLOCK_SIZE;
        int threads = BLOCK_SIZE;
        offsetCopy<<<blocks, threads>>>(dev_b, dev_a, i-1);
        //strideCopy << <SIZE / BLOCK_SIZE, BLOCK_SIZE >> >(dev_b, dev_a, 0);
        // Fix vs. original: cudaThreadSynchronize() is deprecated; the
        // modern equivalent is cudaDeviceSynchronize().
        cudaDeviceSynchronize();
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTime, start, stop);
        printf("GPU use time: %f (ms), Step: %d \n", elapsedTime, i-1);
    }
    // release resources (leaked in the original)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_a);
    cudaFree(dev_b);
    free(cpu_A);
    free(cpu_B);
}
|
10,858 | /* File: vec_add.cu
* Purpose: Implement vector addition on a gpu using cuda
*
* Compile: nvcc [-g] [-G] -o vec_add vec_add.cu
* Run: ./vec_add
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
/* Element-wise vector add: z[i] = x[i] + y[i] for i < n.
 * Fix vs. original: the index used only threadIdx.x, so any launch with
 * more than one block would make every block redo elements
 * 0..blockDim.x-1 and leave the rest untouched. The standard global index
 * is behavior-identical for the existing <<<1,1000>>> launch (blockIdx.x
 * is always 0 there) and generalizes correctly to multi-block grids. */
__global__ void Vec_add(double x[], double y[], double z[], int n) {
   int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
   if (thread_id < n){  /* tail guard */
      z[thread_id] = x[thread_id] + y[thread_id];
   }
}
/* Host wrapper: copies h_x and h_y to the device, launches Vec_add, and
 * copies the element-wise sum back into h_z.
 * NOTE(review): the launch is hard-coded to <<<1,1000>>>, so only the first
 * min(n, 1000) elements are ever computed -- confirm callers keep
 * n <= 1000, or switch to a (n+255)/256-block launch. */
extern "C" void axpb_cpp_cuda(double h_x[], double h_y[], double h_z[], int n) {
   double *d_x, *d_y, *d_z;
   size_t size;
   /* Define vector length */
   size = n*sizeof(double);
   /* Allocate vectors in device memory */
   cudaMalloc(&d_x, size);
   cudaMalloc(&d_y, size);
   cudaMalloc(&d_z, size);
   /* Copy vectors from host memory to device memory */
   cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
   cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
   /* Kernel Call */
   Vec_add<<<1,1000>>>(d_x, d_y, d_z, n);
   /* NOTE(review): cudaThreadSynchronize is deprecated in favour of
    * cudaDeviceSynchronize; kept byte-identical here. */
   cudaThreadSynchronize();
   cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost);
   /* Free device memory */
   cudaFree(d_x);
   cudaFree(d_y);
   cudaFree(d_z);
}  /* axpb_cpp_cuda */
|
10,859 | // #CSCS CUDA Training
//
// #Example 3.1 - transpose matrix
//
// #Author: Ugo Varetto
//
// #Goal: compute the transpose of a matrix and time operation using
// GPU's on-board performance counters through streams; print the result in ms (10^-3 s)
//
// #Rationale: shows how to time GPU computation
//
// #Solution: straightworwad, simply compute the thread id associated with the element
// and copy the transposed data into the output matrix; wrap kernel calls with event
// recording and print time information
//
// #Code: typical flow + timing:
// 1) compute launch grid configuration
// 2) allocate data on host(cpu) and device(gpu)
// 3) initialize data directly on the GPU
// 4) create events
// 5) record start time
// 6) launch kernel
// 7) synchronize events to guarantee that kernel execution is finished
// 8) record stop time
// 9) read data back
// 10) print timing information as stop - start time
// 11) delete events
// 12) free memory
// The code uses the default stream 0; streams are used to sychronize operations
// to guarantee that all operations in the same stream are executed sequentially.
//
// #Compilation: nvcc -arch=sm_13 3_1_transpose-timing.cu -o transpose-timing
//
// #Execution: ./transpose-timing
//
// #Note: kernel invocations ( foo<<<...>>>(...) ) are *always* asynchronous and a call to
// cudaThreadSynchronize() is required to wait for the end of kernel execution from
// a host thread; in case of synchronous copy operations like cudaMemcpy(...,cudaDeviceToHost)
// kernel execution is guaranteed to be terminated before data are copied
//
// #Note: the code is C++ also because the default compilation mode for CUDA is C++, all functions
// are named with C++ convention and the syntax is checked by default against C++ grammar rules
//
// #Note: -arch=sm_13 allows the code to run on every card with hw architecture GT200 (gtx 2xx) or better
//
// #Note: -arch=sm_13 is the lowest architecture version that supports double precision
//
// #Note: the example can be extended to read configuration data and matrix size from the command line
//#include <cuda_runtime.h> // automatically added by nvcc
#include <vector>
#include <iostream>
typedef float real_t;
// Naive out-of-place transpose: out[col][row] = in[row][col], one element
// per thread on a 2D grid. Reads are contiguous along a row (coalesced);
// writes stride by num_rows between adjacent threads (uncoalesced) -- a
// shared-memory tile would fix that.
// No bounds check: the launch grid must tile the matrix exactly, as the
// configuration in main() does.
__global__ void transpose( const real_t* in, real_t *out, int num_rows, int num_columns ) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int input_index = row * num_columns + col;
    const int output_index = col * num_rows + row;  // swapped coordinates
    out[ output_index ] = in[ input_index ];
}
__global__ void init_matrix( real_t* in ) {
const int c = threadIdx.x + blockDim.x * blockIdx.x;
const int r = threadIdx.y + blockDim.y * blockIdx.y;
const int idx = c + gridDim.x * blockDim.x * r;
in[ idx ] = (real_t) idx;
}
void print_matrix( const real_t* m, int r, int c, int stride ) {
for( int i = 0; i != r; ++i ) {
for( int j = 0; j != c; ++j ) std::cout << m[ i * stride + j ] << ' ';
std::cout << '\n';
}
std::cout << std::endl;
}
//------------------------------------------------------------------------------
int main(int argc, char** argv ) {
const dim3 BLOCKS( 512, 512 );
const dim3 THREADS_PER_BLOCK( 16, 16 );
const int ROWS = 512 * 16; // 8192
const int COLUMNS = 512 * 16; // 8192
const size_t SIZE = ROWS * COLUMNS * sizeof( real_t );
// device storage
real_t* dev_in = 0;
real_t* dev_out = 0;
cudaMalloc( &dev_in, SIZE );
cudaMalloc( &dev_out, SIZE );
// host storage
std::vector< real_t > outmatrix( ROWS * COLUMNS );
// initialize matrix with kernel; much faster than using
// for loops on the cpu
init_matrix<<<dim3( COLUMNS, ROWS ), 1>>>( dev_in );
cudaMemcpy( &outmatrix[ 0 ], dev_in, SIZE, cudaMemcpyDeviceToHost );
// print upper 4x4 left corner of input matrix
std::cout << "INPUT MATRIX - " << ROWS << " rows, " << COLUMNS << " columns" << std::endl;
print_matrix( &outmatrix[ 0 ], 4, 4, COLUMNS );
// create events for timing execution
cudaEvent_t start = cudaEvent_t();
cudaEvent_t stop = cudaEvent_t();
cudaEventCreate( &start );
cudaEventCreate( &stop );
// record time into start event
cudaEventRecord( start, 0 ); // 0 is the default stream id
// execute kernel
transpose<<<BLOCKS, THREADS_PER_BLOCK>>>( dev_in, dev_out, ROWS, COLUMNS );
// issue request to record time into stop event
cudaEventRecord( stop, 0 );
// synchronize stop event to wait for end of kernel execution on stream 0
cudaEventSynchronize( stop );
// compute elapsed time (done by CUDA run-time)
float elapsed = 0.f;
cudaEventElapsedTime( &elapsed, start, stop );
std::cout << "Elapsed time (ms): " << elapsed << std::endl;
// copy output data from device(gpu) to host(cpu)
cudaMemcpy( &outmatrix[ 0 ], dev_out, SIZE, cudaMemcpyDeviceToHost );
// print upper 4x4 corner of transposed matrix
std::cout << "\nOUTPUT MATRIX - " << COLUMNS << " rows, " << ROWS << " columns" << std::endl;
print_matrix( &outmatrix[ 0 ], 4, 4, ROWS );
// free memory
cudaFree( dev_in );
cudaFree( dev_out );
// release events
cudaEventDestroy( start );
cudaEventDestroy( stop );
return 0;
}
|
10,860 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define R 4
#define C 40
/*
 * Device-side strlen(): returns the number of characters in the
 * NUL-terminated string s, not counting the terminator. Mirrors the
 * host strlen() for use inside kernels.
 */
__device__ int gpu_strlen(char * s)
{
    int len = 0;
    while (s[len] != '\0')
    {
        ++len;
    }
    return len;
}
/*
 * Returns 1 if input character ch is an ASCII letter (a-z or A-Z),
 * otherwise returns 0. Device-side stand-in for isalpha().
 * Fix vs. original: the 52-term `||` chain is replaced by two range
 * comparisons, which is equivalent under the ASCII execution character
 * set (contiguous 'a'..'z' and 'A'..'Z') and far easier to read.
 */
__device__ int gpu_isAlpha(char ch)
{
    if ((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z'))
        return 1;
    else
        return 0;
}
/* Cuda kernel to count number of words in each line of text pointed by a.
* The output is stored back in 'out' array.
* numLine specifies the num of lines in a, maxLineLen specifies the maximal
* num of characters in one line of text.
*/
__global__ void wordCount( char **a, int **out, int numLine, int maxLineLen )
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
int currLen = gpu_strlen(a[iy]);
//each thread process one character within a line
if( iy < numLine && ix < currLen && gpu_isAlpha(a[iy][ix]) != 1 )
{
out[iy][ix] += 1;
}
__syncthreads();
if(out[iy][ix] == 1 && ix < currLen)
out[iy][ix + 1] = 0;
/*
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if( row < maxLineLen || col < numLine)
return;
out[iy][ix] = 1;
int rowLen = gpu_strlen(a[row]);
if(col < rowLen)
return;
if(gpu_isAlpha(a[row][col]) == 1) {
out[row][col] = 1;
} else {
out[row][col] = 1;
}
__syncthreads();
if(col != 1 && out[row][col - 1] == 1) {
out[row][col] = 0;
}
*/
}
/* Print out the all lines of text in a on stdout
*/
void printArr( char **a, int lines )
{
int i;
for(i=0; i<lines; i++)
{
printf("%s\n", a[i]);
}
}
int main()
{
int i;
char **d_in, **h_in, **h_out;
int h_count_in[R][C], **h_count_out, **d_count_in;
//allocate
h_in = (char **)malloc(R * sizeof(char *));
h_out = (char **)malloc(R * sizeof(char *));
h_count_out = (int **)malloc(R * sizeof(int *));
cudaMalloc((void ***)&d_in, sizeof(char *) * R);
cudaMalloc((void ***)&d_count_in, sizeof(int *) * R);
//alocate for string data
for(i = 0; i < R; ++i)
{
cudaMalloc((void **) &h_out[i],C * sizeof(char));
h_in[i]=(char *)calloc(C, sizeof(char));//allocate or connect the input data to it
//!!!!!!!!!!!!!!!!!
strcpy(h_in[i], "for you:: he ");
cudaMemcpy(h_out[i], h_in[i], strlen(h_in[i]) + 1, cudaMemcpyHostToDevice);
}
cudaMemcpy(d_in, h_out, sizeof(char *) * R,cudaMemcpyHostToDevice);
//alocate for output occurrence
for(i = 0; i < R; ++i)
{
cudaMalloc((void **) &h_count_out[i], C * sizeof(int));
cudaMemset(h_count_out[i], 0, C * sizeof(int));
}
cudaMemcpy(d_count_in, h_count_out, sizeof(int *) * R,cudaMemcpyHostToDevice);
printArr(h_in, R);
printf("\n\n");
//set up kernel configuartion variables
dim3 grid, block;
block.x = 2;
block.y = 2;
grid.x = ceil((float)C / block.x);
grid.y = ceil((float)R / block.y); //careful must be type cast into float, otherwise, integer division used
//printf("grid.x = %d, grid.y=%d\n", grid.x, grid.y );
//launch kernel
wordCount<<<grid, block>>>( d_in, d_count_in, R, C);
//copy data back from device to host
for(i = 0; i < R; ++i) {
cudaMemcpy(h_count_in[i], h_count_out[i], sizeof(int) * C,cudaMemcpyDeviceToHost);
}
printf("Occurrence array obtained from device:\n");
for(i = 0; i < R; i ++) {
for(int j = 0; j < C; j ++)
printf("%4d", h_count_in[i][j]);
printf("\n");
}
return 0;
}
|
10,861 | #include "includes.h"
__global__ void vecAdd (int *a, int *b, int *c)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < N){
c[index] = a[index] + b[index];
}
} |
10,862 | /*
* Compute beta parameters.
*
* The so-called beta parameters were developed in SNO as a measure of event
* isotropy:
*
* The lth beta parameter, beta_l, is defined as the average value of the
* Legendre polynomial, P_l, of the cosine of the angle between each pair
* PMT hits in the event.
*
* beta_l = <P_l(cos(theta_ik)> where i != k
*
* Again, the angle is taken with respect to the fitted vertex position.
* The combination beta_14 = beta_1 + 4 * beta_4 was selected by the SNO
* collaboration for use in signal extraction due to the good separability
* it provides and the ease of parameterisation of the Gaussian-like
* distribution.
*
* - Measurement of the 8B Solar Neutrino Energy Spectrum at the Sudbury
* Neutrino Observatory, Jeanne R. Wilson, p. 179 (Ph.D. Thesis)
*/
#include <map>
#include <string>
#include <cuda.h>
#include <stdio.h>
// For each PMT-hit pair, compute cos(theta) of the angle subtended at the
// fitted vertex (law of cosines), writing Legendre P1 = cos(theta) into
// p1[] and P4(cos(theta)) into p4[]. One thread per pair.
__global__ void get_coeffs(int2* pairs, float3* hit_pos, float3* fit_pos, float* p1, float* p4, int n) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) {
        int2 pair = pairs[i];
        float3 A = hit_pos[pair.x];  // first hit of the pair
        float3 B = hit_pos[pair.y];  // second hit of the pair
        float3 C = *fit_pos;         // fitted vertex (same for all threads)
        // triangle side lengths: c = |AB|, a = |CB|, b = |CA|
        float c = sqrtf((A.x-B.x)*(A.x-B.x) + (A.y-B.y)*(A.y-B.y) + (A.z-B.z)*(A.z-B.z));
        float a = sqrtf((C.x-B.x)*(C.x-B.x) + (C.y-B.y)*(C.y-B.y) + (C.z-B.z)*(C.z-B.z));
        float b = sqrtf((C.x-A.x)*(C.x-A.x) + (C.y-A.y)*(C.y-A.y) + (C.z-A.z)*(C.z-A.z));
        // dust off that trig! law of cosines: cos(C) = (a^2 + b^2 - c^2)/(2ab)
        p1[i] = (float)(-0.5) * (c*c - a*a - b*b) / (a*b);
        // Legendre P4(x) = (1/8) (35x**4 - 30x**2 + 3)
        // Fix vs. original: the leading coefficient was written as 25, but
        // the fourth Legendre polynomial has coefficient 35; with 25 the
        // computed beta_4 (and hence beta14) is wrong.
        p4[i] = (float)(0.125) * (35*p1[i]*p1[i]*p1[i]*p1[i] - 30*p1[i]*p1[i] + 3);
    }
}
// Compute the SNO beta14 isotropy parameter, beta_1 + 4*beta_4, where
// beta_l is the mean of Legendre P_l(cos(theta)) over all PMT-hit pairs,
// angles measured at the fitted vertex (see file header comment).
// NOTE(review): the pair loops mix `unsigned i` with `int nhits`; for
// nhits < 2, `nhits-1` wraps / npairs is 0 and the final averages divide
// by zero -- confirm callers guarantee nhits >= 2.
// NOTE(review): no CUDA error checking; the blocking cudaMemcpy calls are
// the only synchronization with the kernel launch.
float get_beta14(float3 &fit_pos, float3* hit_pos, int nhits) {
    // compute list of pmt pairs (this isn't really necessary)
    const int npairs = nhits * (nhits - 1) / 2;  // all unordered pairs i<j
    int2* pairs = (int2*) malloc(npairs * sizeof(int2));
    unsigned pidx = 0;
    for (unsigned i=0; i<nhits-1; i++) {
        for (unsigned j=i+1; j<nhits; j++) {
            pairs[pidx] = make_int2(i, j);
            pidx++;
        }
    }
    // allocate device memory and copy over
    int2* pairs_device;
    float* p1_device;
    float* p4_device;
    float3* hit_pos_device;
    float3* fit_pos_device;
    cudaMalloc(&pairs_device, npairs * sizeof(int2));
    cudaMalloc(&p1_device, npairs * sizeof(float));
    cudaMalloc(&p4_device, npairs * sizeof(float));
    cudaMalloc(&hit_pos_device, nhits * sizeof(float3));
    cudaMalloc(&fit_pos_device, sizeof(float3));
    cudaMemcpy(pairs_device, pairs, npairs * sizeof(int2), cudaMemcpyHostToDevice);
    cudaMemcpy(hit_pos_device, hit_pos, nhits * sizeof(float3), cudaMemcpyHostToDevice);
    cudaMemcpy(fit_pos_device, &fit_pos, sizeof(float3), cudaMemcpyHostToDevice);
    // execute kernel and retreive results
    int blocksize = 512;
    int nblocks = npairs / blocksize + 1;  // ceil-style coverage of npairs
    get_coeffs<<<nblocks, blocksize>>>(pairs_device, hit_pos_device, fit_pos_device, p1_device, p4_device, npairs);
    float* p1 = (float*) malloc(npairs * sizeof(float));
    float* p4 = (float*) malloc(npairs * sizeof(float));
    // blocking copies: also synchronize with the kernel above
    cudaMemcpy(p1, p1_device, npairs * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(p4, p4_device, npairs * sizeof(float), cudaMemcpyDeviceToHost);
    // compute average of P1 and P4 over all pairs: beta_1 and beta_4
    float p1ave = 0.0;
    float p4ave = 0.0;
    for (unsigned i=0; i<npairs; i++) {
        p1ave += p1[i];
        p4ave += p4[i];
    }
    p1ave /= npairs;
    p4ave /= npairs;
    float beta14 = p1ave + 4.0 * p4ave;  // SNO's chosen combination
    free(pairs);
    free(p1);
    free(p4);
    cudaFree(pairs_device);
    cudaFree(hit_pos_device);
    cudaFree(fit_pos_device);
    cudaFree(p1_device);
    cudaFree(p4_device);
    return beta14;
}
|
10,863 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
// Element-wise sum c[gid] = a[gid] + b[gid] launched on a 2D grid of 2D
// blocks. gid linearizes (blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x):
// full grid-rows of blocks first, then blocks within the row, then thread
// rows, then threads.
__global__ void sum_array_gpu(int *a,int *b,int *c,int size)
{
    int gid = blockDim.x * blockDim.y * gridDim.x * blockIdx.y + blockDim.x * blockDim.y * blockIdx.x + blockDim.x * threadIdx.y + threadIdx.x;
    if (gid < size)  // tail guard: the grid may span more threads than `size`
    {
        c[gid] = a[gid] + b[gid];
    }
    //printf("gid : %d, a[gid] : %d, b[gid] : %d, c[gid] : %d\n", gid, a[gid], b[gid], c[gid]);
}
// CPU reference implementation: element-wise c[i] = a[i] + b[i].
void sum_array_cpu(int *a, int *b, int *c, int size)
{
    int idx = 0;
    while (idx < size) {
        c[idx] = a[idx] + b[idx];
        ++idx;
    }
}
// Compare two int arrays of length `size`; on the first mismatch, print
// the offending index and values and return false. Returns true when all
// elements agree.
bool checkResult(int *a, int *b, int size)
{
    for (int idx = 0; idx < size; ++idx) {
        if (a[idx] == b[idx])
            continue;
        printf("the %d th current value of a[i] and b[i] is: %d, %d\n", idx, a[idx], b[idx]);
        return false;
    }
    return true;
}
int main(int argc, char *argv[])
{
int size = 1000;
int dim_x = 2;
int dim_y = 2;
int block_x = 16;
int block_y = 16;
int byte_size = size * sizeof(int);
int *a_input,*b_input,*c_output,*gpu_output;
a_input = (int*)malloc(byte_size);
b_input = (int*)malloc(byte_size);
c_output = (int*)malloc(byte_size);
gpu_output = (int*)malloc(byte_size);
for(int i=0;i<size;i++)
{
a_input[i] = i;
b_input[i] = i*2;
}
//cpu matrix sum calculation
sum_array_cpu(a_input,b_input,c_output,size);
int * a_gpu_input, * b_gpu_input, *c_gpu_output;
cudaMalloc((void**)&a_gpu_input, byte_size);
cudaMalloc((void**)&b_gpu_input, byte_size);
cudaMalloc((void**)&c_gpu_output, byte_size);
cudaMemcpy(a_gpu_input,a_input,byte_size,cudaMemcpyHostToDevice);
cudaMemcpy(b_gpu_input,b_input,byte_size,cudaMemcpyHostToDevice);
dim3 block(block_x,block_y);
dim3 grid(dim_x,dim_y);
printf("dimension of each block is: %d, %d\n", block.x, block.y);
printf("dimension of grid is: %d, %d\n", grid.x, grid.y);
sum_array_gpu<<<grid,block>>>(a_gpu_input,b_gpu_input,c_gpu_output,size);
cudaDeviceSynchronize();
//memory transfer back to host
cudaMemcpy(gpu_output,c_gpu_output,byte_size,cudaMemcpyDeviceToHost);
bool test = checkResult(c_output,gpu_output,size);
if(test==true){
printf("the result is true\n");
}else{
printf("the result is false\n");
}
cudaFree(a_gpu_input);
cudaFree(b_gpu_input);
cudaFree(c_gpu_output);
free(a_input);
free(b_input);
free(c_output);
cudaDeviceReset();
return 0;
}
|
10,864 | #include "includes.h"
// Stochastic activation: 1.0f when the uniform draw falls below the
// activation probability, otherwise 0.0f.
__device__ float activateRandomly(float probability, float random)
{
    return (random < probability) ? 1.0f : 0.0f;
}
// Binarize RBM unit activations in place: on entry outputPtr[i] holds an
// activation probability, on exit a 0/1 sample drawn against the
// pre-generated uniform randoms in randomPtr. Launched on a 2D grid of
// 1D blocks; `i` is the flat global thread index.
__global__ void RBMRandomActivationKernel( float *outputPtr, float *randomPtr, int size )
{
    int i = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
            + blockDim.x * blockIdx.x //blocks preceeding current block
            + threadIdx.x;
    if (i < size)  // tail guard
    {
        outputPtr[i] = activateRandomly(outputPtr[i], randomPtr[i]);
    }
}
10,865 | #include <stdbool.h>
#define BLOCK_DIM 32
#define N_BLOCKS (48 * 2)
// bug?? #define N_BLOCKS (48 * 4)
#define N_WORKERS (N_BLOCKS * BLOCK_DIM)
#define N_INIT_DISTRIBUTION (N_WORKERS * 4)
#define N_INPUTS (N_WORKERS * 8)
#define PLAN_LEN_MAX 255
#define BLACKLIST_BITS 10
#define STATE_WIDTH 4
#define STATE_N (STATE_WIDTH * STATE_WIDTH)
typedef unsigned char uchar;
typedef signed char Direction;
#define dir_reverse(dir) ((Direction)(3 - (dir)))
#define DIR_N 4
#define DIR_FIRST 0
#define DIR_UP 0
#define DIR_RIGHT 1
#define DIR_LEFT 2
#define DIR_DOWN 3
/* stack implementation */
__device__ __shared__ static struct dir_stack_tag
{
uchar i, j;
int init_depth;
uchar buf[PLAN_LEN_MAX];
} stack[BLOCK_DIM];
#define STACK (stack[threadIdx.x])
typedef struct search_stat_tag
{
bool solved;
int len;
long long nodes_expanded;
int nodes_pruned;
} search_stat;
typedef struct input_tag
{
uchar tiles[STATE_N];
int init_depth;
Direction parent_dir;
} Input;
__host__ static inline long long
korf_hash(uchar tiles[])
{
long long h = tiles[0];
for (int i = 1; i < STATE_N; ++i)
h += h*3 + tiles[i];
return h;
}
__device__ static inline long long
korf_hash_dev(uchar tiles[])
{
long long h = tiles[0];
for (long long i = 1; i < STATE_N; ++i)
h += h*3 + tiles[i];
return h;
}
typedef struct blacklist_entry_tag
{
uchar tiles[STATE_N];
long long hash;
int g_value;
} BlackListEntry;
__shared__ BlackListEntry blacklist_dev[1 << BLACKLIST_BITS];
__device__ static inline void
stack_init(Input input)
{
    /* Reset this thread's plan stack and remember the depth already
     * accumulated by the host-side distributor. */
    STACK.init_depth = input.init_depth;
    STACK.j = 0;
    STACK.i = 0;
}
__device__ static inline void
stack_put(Direction dir)
{
    /* Push one move onto this thread's plan stack. */
    STACK.buf[STACK.i++] = dir;
}
__device__ static inline bool
stack_is_empty(void)
{
    /* Empty when the cursor has returned to its base mark. */
    return STACK.j == STACK.i;
}
__device__ static inline Direction
stack_pop(void)
{
    /* Pop and return the most recently pushed move. */
    return STACK.buf[--STACK.i];
}
/* Return the top-of-stack move without popping it ("peak" is presumably a
 * typo for "peek"; name kept for interface stability).
 * NOTE(review): reading buf[STACK.i - 1] with an empty stack is out of
 * bounds — confirm every caller guards with stack_is_empty() first. */
__device__ static inline Direction
stack_peak(void)
{
    return STACK.buf[STACK.i - 1];
}
/* state implementation */
static char assert_state_width_is_four[STATE_WIDTH == 4 ? 1 : -1];
#define POS_X(pos) ((pos) &3)
#define POS_Y(pos) ((pos) >> 2)
/*
* goal: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
*/
__device__ __shared__ static struct state_tag
{
uchar tile[STATE_N];
uchar empty;
uchar h_value; /* ub of h_value is 6*16 */
} state[BLOCK_DIM];
#define STATE_TILE(i) (state[threadIdx.x].tile[(i)])
#define STATE_EMPTY (state[threadIdx.x].empty)
#define STATE_HVALUE (state[threadIdx.x].h_value)
#define distance(i, j) ((i) > (j) ? (i) - (j) : (j) - (i))
#define H_DIFF(opponent, empty, empty_dir) \
h_diff_table_shared[opponent][empty][empty_dir]
__device__ __shared__ static signed char h_diff_table_shared[STATE_N][STATE_N]
[DIR_N];
/* Recompute the Manhattan-distance heuristic for this thread's shared state
 * from scratch; subsequent moves update it incrementally via H_DIFF. */
__device__ static void
state_init_hvalue(void)
{
    uchar from_x[STATE_N], from_y[STATE_N];
    STATE_HVALUE = 0;
    /* Reverse lookup: the cell each tile value currently occupies. */
    for (int i = 0; i < STATE_N; ++i)
    {
        from_x[STATE_TILE(i)] = POS_X(i);
        from_y[STATE_TILE(i)] = POS_Y(i);
    }
    /* Tile 0 (the blank) is excluded from the heuristic, hence i = 1. */
    for (int i = 1; i < STATE_N; ++i)
    {
        STATE_HVALUE += distance(from_x[i], POS_X(i));
        STATE_HVALUE += distance(from_y[i], POS_Y(i));
    }
}
__device__ static void
state_tile_fill(Input input)
{
    /* Load the tile array from the input descriptor into shared state,
     * recording where the blank (tile 0) sits. */
    for (int pos = 0; pos < STATE_N; ++pos)
    {
        uchar t = input.tiles[pos];
        STATE_TILE(pos) = t;
        if (t == 0)
            STATE_EMPTY = pos;
    }
}
__device__ static inline bool
state_is_goal(void)
{
    /* Manhattan distance of zero means every tile is in place. */
    return 0 == STATE_HVALUE;
}
__device__ static char assert_direction2
[DIR_UP == 0 && DIR_RIGHT == 1 && DIR_LEFT == 2 && DIR_DOWN == 3 ? 1 : -1];
__device__ __shared__ static bool movable_table_shared[STATE_N][DIR_N];
__device__ static inline bool
state_movable(Direction dir)
{
    /* Table lookup: can the blank at STATE_EMPTY move toward dir? */
    bool legal = movable_table_shared[STATE_EMPTY][dir];
    return legal;
}
__device__ static char assert_direction
[DIR_UP == 0 && DIR_RIGHT == 1 && DIR_LEFT == 2 && DIR_DOWN == 3 ? 1 : -1];
__device__ __constant__ const static int pos_diff_table[DIR_N] = {
-STATE_WIDTH, 1, -1, +STATE_WIDTH};
/* Try to slide the blank toward `dir`, but only when the resulting
 * f = g + h stays within the IDA* bound; returns whether the move was
 * applied. Caller must have checked state_movable(dir). */
__device__ static inline bool
state_move_with_limit(Direction dir, unsigned int f_limit)
{
    int new_empty = STATE_EMPTY + pos_diff_table[dir];
    int opponent = STATE_TILE(new_empty);
    /* Incremental heuristic update from the precomputed table. */
    int new_h_value = STATE_HVALUE + H_DIFF(opponent, new_empty, dir);
    /* g = moves on the stack plus the depth inherited from the host. */
    if (STACK.i + STACK.init_depth + 1 + new_h_value > f_limit)
        return false;
    STATE_HVALUE = new_h_value;
    STATE_TILE(STATE_EMPTY) = opponent;
    STATE_EMPTY = new_empty;
    return true;
}
/* Unconditionally slide the blank toward `dir`, updating the cached
 * heuristic incrementally (used when unwinding the search stack). */
__device__ static inline void
state_move(Direction dir)
{
    int new_empty = STATE_EMPTY + pos_diff_table[dir];
    int opponent = STATE_TILE(new_empty);
    STATE_HVALUE += H_DIFF(opponent, new_empty, dir);
    STATE_TILE(STATE_EMPTY) = opponent;
    STATE_EMPTY = new_empty;
}
/* Look up the current state in the shared-memory blacklist. Returns true
 * on a full match (hash AND tiles) and reports the g value at which the
 * state was blacklisted through *g_value.
 * BUG FIX: the original never wrote *g_value, so the caller in
 * idas_internal compared `blacklist_g` while it was still uninitialized. */
__device__ static inline bool
blacklist_query(int *g_value)
{
    long long hash = korf_hash_dev(state[threadIdx.x].tile);
    BlackListEntry e = blacklist_dev[hash & ((1u<<BLACKLIST_BITS) - 1)];
    if (e.hash != hash)
        return false;
    /* Hash matched; verify the full board to rule out collisions. */
    for (int i = 0; i < STATE_N; ++i)
        if (e.tiles[i] != STATE_TILE(i))
            return false;
    *g_value = e.g_value;
    return true;
}
/*
* solver implementation
*/
/* Depth-first IDA* over this worker's slice of the input array.
 * input_ends[] holds exclusive boundaries: worker `id` processes inputs
 * [input_ends[id-1], input_ends[id]) (worker 0 starts at 0).
 * NOTE(review): on reaching a goal the code executes asm("trap;") instead
 * of recording the plan, so stat[i].solved / stat[i].len are never written
 * here — confirm this is the intended (debug) behavior. */
__device__ static void
idas_internal(int f_limit, Input *input, int *input_ends, search_stat *stat)
{
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int id = tid + bid * blockDim.x;
    for (int i = id == 0 ? 0 : input_ends[id - 1];
         i < input_ends[id]; ++i)
    {
        long long nodes_expanded = 0;
        int nodes_pruned = 0;
        uchar dir = 0;
        int blacklist_g;
        Input this_input = input[i];
        stack_init(this_input);
        state_tile_fill(this_input);
        state_init_hvalue();
        for (;;)
        {
            if (state_is_goal())
                asm("trap;"); /* solution found */
            /* if continue search until solution found, just return true */
            /* Expand `dir` unless it would undo the previous move.
             * NOTE(review): when the stack is empty but dir_reverse(dir)
             * equals parent_dir, the || short-circuit falls through to
             * stack_peak() on an empty stack — looks out of bounds; verify. */
            if (((stack_is_empty() && dir_reverse(dir) != this_input.parent_dir) ||
                 stack_peak() != dir_reverse(dir)) &&
                state_movable(dir))
            {
                ++nodes_expanded;
                /* sometimes check idle here */
                /* and load balance if needed */
                if (state_move_with_limit(dir, f_limit))
                {
                    /* Prune states blacklisted at an equal-or-shallower g. */
                    if (!(blacklist_query(&blacklist_g) &&
                          blacklist_g <= this_input.init_depth + STACK.i))
                    {
                        stack_put(dir);
                        dir = 0;
                        continue;
                    }
                    else
                        ++nodes_pruned;
                }
            }
            /* All four directions tried at this node: backtrack until a
             * sibling direction remains. */
            while (++dir == DIR_N)
            {
                if (stack_is_empty())
                    goto END_THIS_NODE;
                dir = stack_pop();
                state_move(dir_reverse(dir));
            }
        }
    END_THIS_NODE:
        stat[i].nodes_expanded = nodes_expanded;
        stat[i].nodes_pruned = nodes_pruned;
        /* if my own works have done, get other's work here */
        /* if every work finished, then return*/
    }
}
/* Entry kernel: cooperatively stage the read-only lookup tables and the
 * blacklist into shared memory, barrier, then run the per-thread search.
 * `plan` is currently unused on the device side (output path TODO). */
__global__ void
idas_kernel(Input *input, int *input_ends, signed char *plan,
            BlackListEntry *blacklist, search_stat *stat,
            int f_limit, signed char *h_diff_table, bool *movable_table)
{
    int tid = threadIdx.x;
    /* Threads stripe over table entries (i += blockDim.x). */
    for (int dir = 0; dir < DIR_N; ++dir)
        for (int i = tid; i < STATE_N; i += blockDim.x)
            if (i < STATE_N)
                movable_table_shared[i][dir] = movable_table[i * DIR_N + dir];
    for (int i = 0; i < STATE_N * DIR_N; ++i)
        for (int j = tid; j < STATE_N; j += blockDim.x)
            if (j < STATE_N)
                h_diff_table_shared[j][i / DIR_N][i % DIR_N] =
                    h_diff_table[j * STATE_N * DIR_N + i];
    for (int i = tid; i < 1 << BLACKLIST_BITS; i += blockDim.x)
        blacklist_dev[i] = blacklist[i];
    /* Barrier: shared tables must be complete before any thread searches. */
    __syncthreads();
    idas_internal(f_limit, input, input_ends, stat);
}
/* host library implementation */
#include <errno.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef UNABLE_LOG
#define elog(...) fprintf(stderr, __VA_ARGS__)
#else
#define elog(...) ;
#endif
/* malloc wrapper: logs on failure but still returns NULL.
 * NOTE(review): callers in this file dereference the result without a NULL
 * check — confirm whether failure should abort instead. */
void *
palloc(size_t size)
{
    void *ptr = malloc(size);
    if (!ptr)
        elog("malloc failed\n");
    return ptr;
}
/* realloc wrapper: logs on failure and returns NULL, in which case the
 * original allocation is still live (standard realloc semantics).
 * NOTE(review): callers assign the result over the old pointer, leaking it
 * on failure — confirm intended. */
void *
repalloc(void *old_ptr, size_t new_size)
{
    void *ptr = realloc(old_ptr, new_size);
    if (!ptr)
        elog("realloc failed\n");
    return ptr;
}
void
pfree(void *ptr)
{
    /* free() wrapper; warns on a NULL argument (free(NULL) is a no-op). */
    if (ptr == NULL)
        elog("empty ptr\n");
    free(ptr);
}
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
typedef unsigned char idx_t;
/*
* [0,0] [1,0] [2,0] [3,0]
* [0,1] [1,1] [2,1] [3,1]
* [0,2] [1,2] [2,2] [3,2]
* [0,3] [1,3] [2,3] [3,3]
*/
/*
* goal state is
* [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
*/
typedef struct state_tag_cpu
{
int depth; /* XXX: needed? */
uchar pos[STATE_WIDTH][STATE_WIDTH];
idx_t i, j; /* pos of empty */
Direction parent_dir;
int h_value;
} * State;
#define v(state, i, j) ((state)->pos[i][j])
#define ev(state) (v(state, state->i, state->j))
#define lv(state) (v(state, state->i - 1, state->j))
#define dv(state) (v(state, state->i, state->j + 1))
#define rv(state) (v(state, state->i + 1, state->j))
#define uv(state) (v(state, state->i, state->j - 1))
static uchar from_x[STATE_WIDTH * STATE_WIDTH],
from_y[STATE_WIDTH * STATE_WIDTH];
static inline void
fill_from_xy(State from)
{
    /* Build the reverse lookup: for each tile value, the (x, y) cell it
     * currently occupies in `from`. */
    for (idx_t y = 0; y < STATE_WIDTH; ++y)
        for (idx_t x = 0; x < STATE_WIDTH; ++x)
        {
            uchar tile = v(from, x, y);
            from_x[tile] = x;
            from_y[tile] = y;
        }
}
static char assert_state_width_is_four2[STATE_WIDTH == 4 ? 1 : -1];
/* Sum of Manhattan distances of tiles 1..15 from their goal cells
 * (goal cell of tile t is x = t & 3, y = t >> 2); the blank is excluded. */
static inline int
heuristic_manhattan_distance(State from)
{
    int h_value = 0;
    fill_from_xy(from);
    for (idx_t i = 1; i < STATE_N; ++i)
    {
        h_value += distance(from_x[i], i & 3);
        h_value += distance(from_y[i], i >> 2);
    }
    return h_value;
}
bool
state_is_goal(State state)
{
    /* Goal iff the cached Manhattan heuristic has dropped to zero. */
    return 0 == state->h_value;
}
static inline State
state_alloc(void)
{
    /* Allocate an uninitialized state record. */
    State s = (State) palloc(sizeof(struct state_tag_cpu));
    return s;
}
static inline void
state_free(State state)
{
    /* Release a record obtained from state_alloc(). */
    pfree((void *) state);
}
/* Build a heap-allocated state from a row-major tile list: records the
 * blank's (i, j), seeds the depth, marks "no parent" (-1), and computes
 * the Manhattan heuristic. */
State
state_init(uchar v_list[STATE_WIDTH * STATE_WIDTH], int init_depth)
{
    State state = state_alloc();
    int cnt = 0;
    state->depth = init_depth;
    state->parent_dir = (Direction) -1;
    for (idx_t j = 0; j < STATE_WIDTH; ++j)
        for (idx_t i = 0; i < STATE_WIDTH; ++i)
        {
            if (v_list[cnt] == 0)
            {
                state->i = i;
                state->j = j;
            }
            v(state, i, j) = v_list[cnt++];
        }
    state->h_value = heuristic_manhattan_distance(state);
    return state;
}
/* Public destructor: forwards to the allocator wrapper. */
void
state_fini(State state)
{
    state_free(state);
}
State
state_copy(State src)
{
    /* The struct is flat (no owned pointers), so a struct assignment
     * duplicates it completely. */
    State clone = state_alloc();
    *clone = *src;
    return clone;
}
static inline bool
state_left_movable(State state)
{
    /* Blank can move left unless it already sits in column 0. */
    return state->i > 0;
}
static inline bool
state_down_movable(State state)
{
    /* Blank can move down unless it is on the bottom row. */
    return state->j < STATE_WIDTH - 1;
}
static inline bool
state_right_movable(State state)
{
    /* Blank can move right unless it is in the last column. */
    return state->i < STATE_WIDTH - 1;
}
static inline bool
state_up_movable(State state)
{
    /* Blank can move up unless it is on the top row. */
    return state->j > 0;
}
bool
state_movable(State state, Direction dir)
{
    /* A move is legal unless it would push the blank off the board edge
     * corresponding to dir. */
    switch (dir)
    {
    case DIR_LEFT:
        return state_left_movable(state);
    case DIR_DOWN:
        return state_down_movable(state);
    case DIR_RIGHT:
        return state_right_movable(state);
    case DIR_UP:
        return state_up_movable(state);
    default:
        return true; /* matches original: unknown dirs pass every clause */
    }
}
#define h_diff(who, from_i, from_j, dir) \
(h_diff_table[((who) << 6) + ((from_j) << 4) + ((from_i) << 2) + (dir)])
static int h_diff_table[STATE_N * STATE_N * DIR_N] = {
1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1,
1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1,
-1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1,
1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1,
1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1,
1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1,
1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1,
-1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1,
-1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1,
1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1,
-1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1,
-1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1,
-1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1,
1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1,
-1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1,
1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
-1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1,
-1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1,
1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1,
1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1,
-1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1,
-1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1,
1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1,
-1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1,
1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1,
1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1,
1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1,
1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1,
1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1,
1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1,
-1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1,
1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1,
1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1,
1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1,
1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1,
1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1,
1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1,
1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1,
-1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1,
-1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1,
-1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1,
-1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1,
1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1,
1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1,
-1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1,
-1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1,
1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1,
1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1,
-1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
-1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1,
1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1,
-1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1};
/* Slide the blank toward `dir` (must be legal): the displaced tile `who`
 * moves into the old blank cell, the heuristic is updated incrementally,
 * and the move is remembered as parent_dir. */
void
state_move(State state, Direction dir)
{
    idx_t who;
    assert(state_movable(state, dir));
    switch (dir)
    {
    case DIR_LEFT:
        who = ev(state) = lv(state);
        state->i--;
        break;
    case DIR_DOWN:
        who = ev(state) = dv(state);
        state->j++;
        break;
    case DIR_RIGHT:
        who = ev(state) = rv(state);
        state->i++;
        break;
    case DIR_UP:
        who = ev(state) = uv(state);
        state->j--;
        break;
    default:
        elog("unexpected direction");
        assert(false);
    }
    /* The table is indexed by the TILE's direction of travel, which is the
     * reverse of the blank's. */
    state->h_value =
        state->h_value + h_diff(who, state->i, state->j, dir_reverse(dir));
    state->parent_dir = dir;
}
bool
state_pos_equal(State s1, State s2)
{
    /* Boards are equal iff every cell holds the same tile. */
    for (idx_t col = 0; col < STATE_WIDTH; ++col)
        for (idx_t row = 0; row < STATE_WIDTH; ++row)
            if (v(s1, col, row) != v(s2, col, row))
                return false;
    return true;
}
/* XOR-fold each cell's tile value into a size_t at a cell-specific bit
 * offset (4 bits per cell, offsets up to 48).
 * BUG FIX: the tile value was shifted as an int; a shift of 48 on a 32-bit
 * int is undefined behavior. Cast to size_t before shifting. */
size_t
state_hash(State state)
{
    /* FIXME: for A* */
    size_t hash_value = 0;
    for (idx_t i = 0; i < STATE_WIDTH; ++i)
        for (idx_t j = 0; j < STATE_WIDTH; ++j)
            hash_value ^= ((size_t) v(state, i, j)) << ((i * 3 + j) << 2);
    return hash_value;
}
/* Accessor: cached Manhattan-distance heuristic of the state. */
int
state_get_hvalue(State state)
{
    return state->h_value;
}
/* Accessor: g value (moves from the root) of the state. */
int
state_get_depth(State state)
{
    return state->depth;
}
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
typedef enum {
HT_SUCCESS = 0,
HT_FAILED_FOUND,
HT_FAILED_NOT_FOUND,
} HTStatus;
/* XXX: hash function for State should be surveyed */
inline static size_t
hashfunc(State key)
{
    /* Hash-table key function: delegates to the board hash. */
    size_t h = state_hash(key);
    return h;
}
typedef struct ht_entry_tag *HTEntry;
struct ht_entry_tag
{
HTEntry next;
State key;
int value;
};
/* Allocate a chain node owning a private COPY of `key`.
 * NOTE(review): `value` is left uninitialized here; ht_insert hands the
 * caller a pointer to it and callers are expected to write before reading. */
static HTEntry
ht_entry_init(State key)
{
    HTEntry entry = (HTEntry) palloc(sizeof(*entry));
    entry->key = state_copy(key);
    entry->next = NULL;
    return entry;
}
static void
ht_entry_fini(HTEntry entry)
{
    /* Free the node itself; the owned key is released by the caller. */
    pfree((void *) entry);
}
typedef struct ht_tag
{
size_t n_bins;
size_t n_elems;
HTEntry *bin;
} * HT;
static bool
ht_rehash_required(HT ht)
{
    /* Grow once the load factor reaches 1. */
    return ht->n_elems >= ht->n_bins; /* TODO: local policy is also needed */
}
static size_t
calc_n_bins(size_t required)
{
    /* Round `required` up to the next power of two; n_bins doubles as a
     * bit mask, so it must stay a power of two. */
    size_t size;
    assert(required > 0);
    for (size = 1; size < required; size <<= 1)
        ;
    return size;
}
/* Create a chained hash table with at least `init_size_hint` bins, rounded
 * up to a power of two so the bin index can be taken with a mask. */
HT
ht_init(size_t init_size_hint)
{
    size_t n_bins = calc_n_bins(init_size_hint);
    HT ht = (HT) palloc(sizeof(*ht));
    ht->n_bins = n_bins;
    ht->n_elems = 0;
    /* Guard the multiplication below against size_t overflow. */
    assert(sizeof(*ht->bin) <= SIZE_MAX / n_bins);
    ht->bin = (HTEntry *) palloc(sizeof(*ht->bin) * n_bins);
    memset(ht->bin, 0, sizeof(*ht->bin) * n_bins);
    return ht;
}
/* Double the bin count and redistribute every entry; nodes are relinked in
 * place, so no per-entry allocation happens. */
static void
ht_rehash(HT ht)
{
    HTEntry *new_bin;
    size_t new_size = ht->n_bins << 1;
    /* Doubling must not overflow (odd spacing, parses as n_bins < SIZE_MAX >> 1). */
    assert(ht->n_bins<SIZE_MAX>> 1);
    new_bin = (HTEntry *) palloc(sizeof(*new_bin) * new_size);
    memset(new_bin, 0, sizeof(*new_bin) * new_size);
    for (size_t i = 0; i < ht->n_bins; ++i)
    {
        HTEntry entry = ht->bin[i];
        while (entry)
        {
            HTEntry next = entry->next;
            /* Recompute the slot under the widened mask. */
            size_t idx = hashfunc(entry->key) & (new_size - 1);
            entry->next = new_bin[idx];
            new_bin[idx] = entry;
            entry = next;
        }
    }
    pfree(ht->bin);
    ht->n_bins = new_size;
    ht->bin = new_bin;
}
void
ht_fini(HT ht)
{
    /* Destroy the table: free every chained node and its owned key, then
     * the bin array and the table header itself. */
    for (size_t b = 0; b < ht->n_bins; ++b)
    {
        HTEntry cur = ht->bin[b];
        while (cur != NULL)
        {
            HTEntry nxt = cur->next;
            state_fini(cur->key);
            ht_entry_fini(cur);
            cur = nxt;
        }
    }
    pfree(ht->bin);
    pfree(ht);
}
/* Insert `key` (copied) or find the existing entry. On return, *value
 * points at the entry's int payload; for a fresh entry that payload is
 * uninitialized and the caller is expected to assign it.
 * Returns HT_FAILED_FOUND when the key already existed, else HT_SUCCESS. */
HTStatus
ht_insert(HT ht, State key, int **value)
{
    size_t i;
    HTEntry entry, new_entry;
    if (ht_rehash_required(ht))
        ht_rehash(ht);
    i = hashfunc(key) & (ht->n_bins - 1);
    entry = ht->bin[i];
    while (entry)
    {
        if (state_pos_equal(key, entry->key))
        {
            *value = &entry->value;
            return HT_FAILED_FOUND;
        }
        entry = entry->next;
    }
    /* Not found: prepend a new node to this bin's chain. */
    new_entry = ht_entry_init(key);
    new_entry->next = ht->bin[i];
    ht->bin[i] = new_entry;
    *value = &new_entry->value;
    assert(ht->n_elems < SIZE_MAX);
    ht->n_elems++;
    return HT_SUCCESS;
}
/*
* Priority Queue implementation
*/
#include <assert.h>
#include <stdint.h>
typedef struct pq_entry_tag
{
State state;
int f, g;
} PQEntryData;
typedef PQEntryData *PQEntry;
/* tiebreaking is done comparing g value */
static inline bool
pq_entry_higher_priority(PQEntry e1, PQEntry e2)
{
    /* Min-heap on f; ties broken in favor of the larger (or equal) g. */
    if (e1->f != e2->f)
        return e1->f < e2->f;
    return e1->g >= e2->g;
}
/*
* NOTE:
* This priority queue is implemented doubly reallocated array.
* It will only extend and will not shrink, for now.
* It may be improved by using array of layers of iteratively widened array
*/
typedef struct pq_tag
{
size_t n_elems;
size_t capa;
PQEntryData *array;
} * PQ;
static inline size_t
calc_init_capa(size_t capa_hint)
{
    /* Round the hint up to a power of two, then subtract one so the heap
     * capacity is always of the form 2^k - 1 (it grows as capa*2 + 1). */
    size_t rounded = 1;
    assert(capa_hint > 0);
    while (rounded < capa_hint)
        rounded <<= 1;
    return rounded - 1;
}
/* Create an empty binary-heap priority queue; capacity is 2^k - 1 derived
 * from the hint (the backing array later doubles-plus-one on growth). */
PQ
pq_init(size_t init_capa_hint)
{
    PQ pq = (PQ) palloc(sizeof(*pq));
    pq->n_elems = 0;
    pq->capa = calc_init_capa(init_capa_hint);
    /* Guard the allocation size computation against overflow. */
    assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData));
    pq->array = (PQEntryData *) palloc(sizeof(PQEntryData) * pq->capa);
    return pq;
}
void
pq_fini(PQ pq)
{
    /* Free every queued state copy, then the backing array and header. */
    size_t idx = 0;
    while (idx < pq->n_elems)
    {
        state_fini(pq->array[idx].state);
        ++idx;
    }
    pfree(pq->array);
    pfree(pq);
}
static inline bool
pq_is_full(PQ pq)
{
    /* Full when every allocated slot is occupied. */
    assert(pq->n_elems <= pq->capa);
    return pq->capa == pq->n_elems;
}
static inline void
pq_extend(PQ pq)
{
    /* Grow 2^k - 1 -> 2^(k+1) - 1 and reallocate the backing array. */
    size_t new_capa = (pq->capa << 1) + 1;
    pq->capa = new_capa;
    assert(new_capa <= SIZE_MAX / sizeof(PQEntryData));
    pq->array =
        (PQEntryData *) repalloc(pq->array, sizeof(PQEntryData) * new_capa);
}
static inline void
pq_swap_entry(PQ pq, size_t i, size_t j)
{
    /* Exchange two heap slots. */
    PQEntryData saved = pq->array[j];
    pq->array[j] = pq->array[i];
    pq->array[i] = saved;
}
static inline size_t
pq_up(size_t i)
{
    /* Parent index in a 0-origin binary heap (equivalent to (i-1) >> 1). */
    return (i - 1) / 2;
}
static inline size_t
pq_left(size_t i)
{
    /* Left-child index in a 0-origin binary heap. */
    return 2 * i + 1;
}
/* Sift the just-appended element toward the root until the heap property
 * holds. NOTE: the new element sits in slot n_elems — the caller (pq_put)
 * increments the counter only AFTER calling this. */
static void
heapify_up(PQ pq)
{
    for (size_t i = pq->n_elems; i > 0;)
    {
        size_t ui = pq_up(i);
        assert(i > 0);
        if (!pq_entry_higher_priority(&pq->array[i], &pq->array[ui]))
            break;
        pq_swap_entry(pq, i, ui);
        i = ui;
    }
}
/* Push a COPY of `state` with priority (f, g); grows the array when full.
 * heapify_up runs before n_elems is bumped — it treats slot n_elems as the
 * new leaf. */
void
pq_put(PQ pq, State state, int f, int g)
{
    if (pq_is_full(pq))
        pq_extend(pq);
    pq->array[pq->n_elems].state = state_copy(state);
    pq->array[pq->n_elems].f = f; /* this may be abundant */
    pq->array[pq->n_elems].g = g;
    heapify_up(pq);
    ++pq->n_elems;
}
/* Restore the heap property from the root after a pop: repeatedly swap the
 * current node with its higher-priority child until neither child wins. */
static void
heapify_down(PQ pq)
{
    size_t sentinel = pq->n_elems;
    for (size_t i = 0;;)
    {
        size_t ri, li = pq_left(i);
        if (li >= sentinel)
            break; /* no children: done */
        ri = li + 1;
        if (ri >= sentinel)
        {
            /* Only the left child exists. */
            if (pq_entry_higher_priority(&pq->array[li], &pq->array[i]))
                pq_swap_entry(pq, i, li);
            /* Reached the bottom */
            break;
        }
        /* NOTE: If p(ri) == p(li), it may be good to go right
         * since the filling order is left-first */
        if (pq_entry_higher_priority(&pq->array[li], &pq->array[ri]))
        {
            if (!pq_entry_higher_priority(&pq->array[li], &pq->array[i]))
                break;
            pq_swap_entry(pq, i, li);
            i = li;
        }
        else
        {
            if (!pq_entry_higher_priority(&pq->array[ri], &pq->array[i]))
                break;
            pq_swap_entry(pq, i, ri);
            i = ri;
        }
    }
}
/* Remove and return the highest-priority state (ownership passes to the
 * caller); NULL when the queue is empty. */
State
pq_pop(PQ pq)
{
    State ret_state;
    if (pq->n_elems == 0)
        return NULL;
    ret_state = pq->array[0].state;
    /* Move the last leaf into the root slot and sift it down. */
    --pq->n_elems;
    pq->array[0] = pq->array[pq->n_elems];
    heapify_down(pq);
    return ret_state;
}
void
pq_dump(PQ pq)
{
    /* Debug print: heap contents as "f,g " pairs, one heap level per line. */
    elog("%s: n_elems=%zu, capa=%zu\n", __func__, pq->n_elems, pq->capa);
    size_t next_break = 1;
    for (size_t idx = 0; idx < pq->n_elems; ++idx)
    {
        if (idx == next_break)
        {
            elog("\n");
            next_break = next_break * 2 + 1;
        }
        elog("%d,", pq->array[idx].f);
        elog("%d ", pq->array[idx].g);
    }
    elog("\n");
}
static HT closed, BL1, BL2;
bool
distribute_astar(State init_state, Input input[], int input_ends[], int distr_n,
int *cnt_inputs, int *min_fvalue)
{
int cnt = 0;
State state;
PQ q = pq_init(distr_n + 10);
HTStatus ht_status;
int * ht_value;
bool solved = false;
closed = ht_init(10000);
BL1 = ht_init(1000);
BL2 = ht_init(1000);
ht_status = ht_insert(closed, init_state, &ht_value);
*ht_value = 0;
pq_put(q, state_copy(init_state), state_get_hvalue(init_state), 0);
++cnt;
while ((state = pq_pop(q)))
{
--cnt;
if (state_is_goal(state))
{
solved = true;
break;
}
ht_status = ht_insert(closed, state, &ht_value);
if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state))
{
state_fini(state);
continue;
}
else
*ht_value = state_get_depth(state);
for (int dir = 0; dir < DIR_N; ++dir)
{
if (state->parent_dir != dir_reverse(dir) &&
state_movable(state, (Direction) dir))
{
State next_state = state_copy(state);
state_move(next_state, (Direction) dir);
next_state->depth++;
ht_status = ht_insert(closed, next_state, &ht_value);
if (ht_status == HT_FAILED_FOUND &&
*ht_value <= state_get_depth(next_state))
state_fini(next_state);
else
{
++cnt;
*ht_value = state_get_depth(next_state);
pq_put(q, next_state,
*ht_value + state_get_hvalue(next_state), *ht_value);
}
}
}
state_fini(state);
if (cnt >= distr_n)
break;
}
*cnt_inputs = cnt;
if (!solved)
{
int minf = INT_MAX;
for (int id = 0; id < cnt; ++id)
{
State state = pq_pop(q);
assert(state);
for (int i = 0; i < STATE_N; ++i)
input[id].tiles[i] = state->pos[i%STATE_WIDTH][i/STATE_WIDTH];
input[id].tiles[state->i + (state->j * STATE_WIDTH)] = 0;
input[id].init_depth = state_get_depth(state);
input[id].parent_dir = state->parent_dir;
if (minf > state_get_depth(state) + state_get_hvalue(state))
minf = state_get_depth(state) + state_get_hvalue(state);
}
*min_fvalue = minf;
printf("distr_n=%d, n_worers=%d, cnt=%d\n", distr_n, N_WORKERS, cnt);
for (int id = 0; id < N_WORKERS; ++id)
input_ends[id] = (distr_n / N_WORKERS) * (id + 1) - 1;
input_ends[N_WORKERS - 1] = cnt;
}
pq_fini(q);
return solved;
}
static int
input_devide(Input input[], search_stat stat[], int i, int devide_n, int tail)
{
int cnt = 0;
int * ht_value;
State state = state_init(input[i].tiles, input[i].init_depth);
state->parent_dir = input[i].parent_dir;
PQ pq = pq_init(32);
HTStatus ht_status = ht_insert(BL1, state, &ht_value);
if (ht_status == HT_FAILED_FOUND && *ht_value > state_get_depth(state))
*ht_value = state_get_depth(state);
pq_put(pq, state, state_get_hvalue(state), 0);
++cnt;
while ((state = pq_pop(pq)))
{
--cnt;
if (state_is_goal(state))
{
/* It may not be optimal goal */
pq_put(pq, state, state_get_depth(state) + state_get_hvalue(state),
state_get_depth(state));
++cnt;
break;
}
ht_status = ht_insert(closed, state, &ht_value);
if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state))
{
state_fini(state);
continue;
}
else
*ht_value = state_get_depth(state);
for (int dir = 0; dir < DIR_N; ++dir)
{
if (state->parent_dir != dir_reverse(dir) &&
state_movable(state, (Direction) dir))
{
State next_state = state_copy(state);
state_move(next_state, (Direction) dir);
next_state->depth++;
ht_status = ht_insert(closed, next_state, &ht_value);
if (ht_status == HT_FAILED_FOUND &&
*ht_value < state_get_depth(next_state))
state_fini(next_state);
else
{
++cnt;
*ht_value = state_get_depth(next_state);
pq_put(pq, next_state,
*ht_value + state_get_hvalue(next_state), *ht_value);
}
}
}
state_fini(state);
if (cnt >= devide_n)
break;
}
for (int id = 0; id < cnt; ++id)
{
int estimation_after_devision = stat[i].nodes_expanded / cnt;
int ofs = id == 0 ? i : tail - 1 + id;
State state = pq_pop(pq);
assert(state);
ht_status = ht_insert(BL2, state, &ht_value);
if (ht_status == HT_FAILED_FOUND && *ht_value > state_get_depth(state))
*ht_value = state_get_depth(state);
for (int j = 0; j < STATE_N; ++j)
input[ofs].tiles[j] = state->pos[j%STATE_WIDTH][j/STATE_WIDTH];
input[ofs].tiles[state->i + (state->j * STATE_WIDTH)] = 0;
input[ofs].init_depth = state_get_depth(state);
input[ofs].parent_dir = state->parent_dir;
stat[ofs].nodes_expanded = estimation_after_devision;
}
pq_fini(pq);
return cnt == 0 ? 0 : cnt - 1;
}
/* Copy every state in host table `bl` into the flat device-bound blacklist
 * array (later collisions overwrite earlier slots).
 * BUG FIX 1: the slot index used `hash & (1u << BLACKLIST_BITS)`, which
 * evaluates to only 0 or 1024 — and 1024 is one past the end of the table.
 * The mask must be (1u << BLACKLIST_BITS) - 1.
 * BUG FIX 2: the hash was stored through an `int`, truncating the 64-bit
 * korf hash; the device compares full `long long` hashes, so truncated
 * entries could (almost) never match. Keep it as long long. */
static void
fill_blacklist_internal(BlackListEntry blacklist[], HT bl)
{
    for (unsigned int i = 0; i < bl->n_bins; ++i)
    {
        HTEntry e = bl->bin[i];
        while (e)
        {
            HTEntry next = e->next;
            State s = e->key;
            uchar til[STATE_N];
            /* Device layout is row-major; State stores pos[x][y]. */
            for (int j = 0; j < STATE_N; ++j)
                til[j] = s->pos[j%STATE_WIDTH][j/STATE_WIDTH];
            long long hash = korf_hash(til);
            BlackListEntry *ble = &blacklist[hash & ((1u << BLACKLIST_BITS) - 1)];
            ble->hash = hash;
            for (int p = 0; p < STATE_N; ++p)
                ble->tiles[p] = til[p];
            ble->g_value = state_get_depth(s);
            e = next;
        }
    }
}
static void
fill_blacklist(BlackListEntry blacklist[])
{
    /* Populate the device-bound blacklist from both host tables; BL2 first
     * so that BL1 entries win when both map to the same slot. */
    HT sources[] = {BL2, BL1};
    for (int k = 0; k < 2; ++k)
        fill_blacklist_internal(blacklist, sources[k]);
}
/* main */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#define exit_failure(...) \
do \
{ \
printf(__VA_ARGS__); \
exit(EXIT_FAILURE); \
} while (0)
/* Parse the next integer from `str` (any base, per strtol), advancing
 * *end_ptr past it; exits the process on conversion or range errors.
 * BUG FIX: errno was cleared AFTER strtol, so a prior errno value was
 * discarded and ERANGE from strtol itself could never be observed. It must
 * be cleared immediately before the call. */
static int
pop_int_from_str(const char *str, char **end_ptr)
{
    long int rv;
    errno = 0; /* clear BEFORE strtol so ERANGE is detectable */
    rv = strtol(str, end_ptr, 0);
    if (errno != 0)
        exit_failure("%s: %s cannot be converted into long\n", __func__, str);
    else if (end_ptr && str == *end_ptr)
        exit_failure("%s: reach end of string", __func__);
    if (rv > INT_MAX || rv < INT_MIN)
        exit_failure("%s: too big number, %ld\n", __func__, rv);
    return (int) rv;
}
#define MAX_LINE_LEN 100
/* Read 16 whitespace-separated tile numbers from the first line of `fname`
 * into s; exits the process on any I/O or parse failure. */
static void
load_state_from_file(const char *fname, uchar *s)
{
    FILE *fp;
    char str[MAX_LINE_LEN];
    char *str_ptr = str, *end_ptr;
    fp = fopen(fname, "r");
    if (!fp)
        exit_failure("%s: %s cannot be opened\n", __func__, fname);
    if (!fgets(str, MAX_LINE_LEN, fp))
        exit_failure("%s: fgets failed\n", __func__);
    for (int i = 0; i < STATE_N; ++i)
    {
        s[i] = pop_int_from_str(str_ptr, &end_ptr);
        str_ptr = end_ptr; /* resume parsing after the last number read */
    }
    fclose(fp);
}
#undef MAX_LINE_LEN
#define CUDA_CHECK(call) \
do \
{ \
const cudaError_t e = call; \
if (e != cudaSuccess) \
exit_failure("Error: %s:%d code:%d, reason: %s\n", __FILE__, \
__LINE__, e, cudaGetErrorString(e)); \
} while (0)
#define h_d_t(op, i, dir) \
(h_diff_table[(op) *STATE_N * DIR_N + (i) *DIR_N + (dir)])
/* Precompute, for every (tile, cell, blank-move direction), how the
 * Manhattan heuristic changes (-1 or +1) when that tile swaps with the
 * blank. The tile travels opposite to the blank: e.g. the blank moving
 * LEFT shifts the tile rightward, so h drops iff the tile's goal column
 * lies to its right (goal_x > from_x), matching the DIR_LEFT case below. */
__host__ static void
init_mdist(signed char h_diff_table[])
{
    for (int opponent = 0; opponent < STATE_N; ++opponent)
    {
        int goal_x = POS_X(opponent), goal_y = POS_Y(opponent);
        for (int i = 0; i < STATE_N; ++i)
        {
            int from_x = POS_X(i), from_y = POS_Y(i);
            for (uchar dir = 0; dir < DIR_N; ++dir)
            {
                if (dir == DIR_LEFT)
                    h_d_t(opponent, i, dir) = goal_x > from_x ? -1 : 1;
                if (dir == DIR_RIGHT)
                    h_d_t(opponent, i, dir) = goal_x < from_x ? -1 : 1;
                if (dir == DIR_UP)
                    h_d_t(opponent, i, dir) = goal_y > from_y ? -1 : 1;
                if (dir == DIR_DOWN)
                    h_d_t(opponent, i, dir) = goal_y < from_y ? -1 : 1;
            }
        }
    }
}
#undef h_d_t
#define m_t(i, d) (movable_table[(i) *DIR_N + (d)])
/* Precompute, for each blank position and each direction, whether the move
 * keeps the blank on the 4x4 board. */
__host__ static void
init_movable_table(bool movable_table[])
{
    for (int i = 0; i < STATE_N; ++i)
        for (unsigned int d = 0; d < DIR_N; ++d)
        {
            if (d == DIR_RIGHT)
                m_t(i, d) = (POS_X(i) < STATE_WIDTH - 1);
            else if (d == DIR_LEFT)
                m_t(i, d) = (POS_X(i) > 0);
            else if (d == DIR_DOWN)
                m_t(i, d) = (POS_Y(i) < STATE_WIDTH - 1);
            else if (d == DIR_UP)
                m_t(i, d) = (POS_Y(i) > 0);
        }
}
#undef m_t
static void
avoid_unused_static_assertions(void)
{
    /* Touch each compile-time assertion array so unused-variable warnings
     * stay quiet. */
    (void) assert_state_width_is_four[0];
    (void) assert_state_width_is_four2[0];
    (void) assert_direction[0];
    (void) assert_direction2[0];
}
static char dir_char[] = {'U', 'R', 'L', 'D'};
int
main(int argc, char *argv[])
{
int cnt_inputs;
int input_size = sizeof(Input) * N_INPUTS;
Input input[N_INPUTS];
Input *d_input;
int input_ends_size = sizeof(int) * N_WORKERS;
int input_ends[N_WORKERS];
int *d_input_ends;
int plan_size = sizeof(signed char) * PLAN_LEN_MAX * N_INPUTS;
signed char plan[PLAN_LEN_MAX * N_INPUTS];
signed char *d_plan;
int stat_size = sizeof(search_stat) * N_INPUTS;
search_stat stat[N_INPUTS];
search_stat *d_stat;
int blacklist_size = sizeof(BlackListEntry) * (1 << BLACKLIST_BITS);
BlackListEntry h_blacklist[N_INPUTS];
BlackListEntry *d_blacklist;
bool movable_table[STATE_N * DIR_N];
bool * d_movable_table;
int movable_table_size = sizeof(bool) * STATE_N * DIR_N;
signed char h_diff_table[STATE_N * STATE_N * DIR_N];
signed char *d_h_diff_table;
int h_diff_table_size = sizeof(signed char) * STATE_N * STATE_N * DIR_N;
int min_fvalue = 0;
if (argc < 2)
{
printf("usage: bin/cumain <ifname>\n");
exit(EXIT_FAILURE);
}
load_state_from_file(argv[1], input[0].tiles);
{
State init_state = state_init(input[0].tiles, 0);
if (distribute_astar(init_state, input, input_ends, N_INIT_DISTRIBUTION,
&cnt_inputs, &min_fvalue))
{
puts("solution is found by distributor");
return 0;
}
}
init_mdist(h_diff_table);
init_movable_table(movable_table);
CUDA_CHECK(cudaMalloc((void **) &d_input, input_size));
CUDA_CHECK(cudaMalloc((void **) &d_input_ends, input_ends_size));
CUDA_CHECK(cudaMalloc((void **) &d_plan, plan_size));
CUDA_CHECK(cudaMalloc((void **) &d_stat, stat_size));
CUDA_CHECK(cudaMalloc((void **) &d_blacklist, blacklist_size));
CUDA_CHECK(cudaMalloc((void **) &d_movable_table, movable_table_size));
CUDA_CHECK(cudaMalloc((void **) &d_h_diff_table, h_diff_table_size));
CUDA_CHECK(cudaMemcpy(d_movable_table, movable_table, movable_table_size,
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_h_diff_table, h_diff_table, h_diff_table_size,
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemset(d_input, 0, input_size));
CUDA_CHECK(cudaMemset(d_plan, 0, plan_size));
CUDA_CHECK(cudaMemset(d_stat, 0, stat_size));
CUDA_CHECK(cudaMemset(d_blacklist, 0, blacklist_size));
for (uchar f_limit = min_fvalue;; f_limit += 2)
{
elog("f=%d\n", (int) f_limit);
CUDA_CHECK(
cudaMemcpy(d_input, input, input_size, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_input_ends, input_ends, input_ends_size,
cudaMemcpyHostToDevice));
fill_blacklist(h_blacklist);
CUDA_CHECK(cudaMemcpy(d_blacklist, h_blacklist, blacklist_size,
cudaMemcpyHostToDevice));
elog("BL1: %zu, BL2: %zu\n", BL1->n_elems, BL2->n_elems);
elog("kernel(block=%d, thread=%d)\n", N_BLOCKS, BLOCK_DIM);
idas_kernel<<<N_BLOCKS, BLOCK_DIM>>>(d_input, d_input_ends, d_plan,
d_blacklist, d_stat, f_limit,
d_h_diff_table, d_movable_table);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaMemcpy(plan, d_plan, plan_size, cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(stat, d_stat, stat_size, cudaMemcpyDeviceToHost));
for (int i = 0; i < cnt_inputs; ++i)
if (stat[i].solved)
{
elog("core id = %d\n", i);
printf("cpu len=%d: \n", input[i].init_depth);
/* CPU side output */
// FIXME: Not implemented, for now. It is easy to search path
// from init state to this root.
/* GPU side output */
printf("gpu len=%d: ", stat[i].len);
for (int j = 0; j < stat[i].len; ++j)
printf("%c ", dir_char[(int) plan[i * PLAN_LEN_MAX + j]]);
putchar('\n');
goto solution_found;
}
long long int sum_of_expansion = 0;
for (int i = 0; i < cnt_inputs; ++i)
sum_of_expansion += stat[i].nodes_expanded;
long long int sum_of_pruned = 0;
for (int i = 0; i < cnt_inputs; ++i)
sum_of_pruned += stat[i].nodes_pruned;
long long int increased = 0;
long long int avarage_expected_load = sum_of_expansion / N_WORKERS;
int stat_cnt[10] = {0, 0, 0, 0, 0, 0, 0};
for (int i = 0; i < cnt_inputs; ++i)
{
if (stat[i].nodes_expanded < avarage_expected_load)
stat_cnt[0]++;
else if (stat[i].nodes_expanded < 2 * avarage_expected_load)
stat_cnt[1]++;
else if (stat[i].nodes_expanded < 4 * avarage_expected_load)
stat_cnt[2]++;
else if (stat[i].nodes_expanded < 8 * avarage_expected_load)
stat_cnt[3]++;
else if (stat[i].nodes_expanded < 16 * avarage_expected_load)
stat_cnt[4]++;
else if (stat[i].nodes_expanded < 32 * avarage_expected_load)
stat_cnt[5]++;
else
stat_cnt[6]++;
int policy =
stat[i].nodes_expanded / (avarage_expected_load + 1) + 1;
if (policy > 1 && stat[i].nodes_expanded > 20)
increased += input_devide(input, stat, i, policy,
cnt_inputs + increased);
}
elog("STAT: sum of expanded nodes: %lld\n", sum_of_expansion);
elog("STAT: avarage expanded nodes: %lld\n", avarage_expected_load);
elog("STAT: av=%d, 2av=%d, 4av=%d, 8av=%d, 16av=%d, 32av=%d, more=%d\n",
stat_cnt[0], stat_cnt[1], stat_cnt[2], stat_cnt[3], stat_cnt[4],
stat_cnt[5], stat_cnt[6]);
elog("STAT: sum of pruned nodes: %lld\n", sum_of_pruned);
if (cnt_inputs + increased > N_INPUTS)
{
elog("cnt_inputs too large");
abort();
}
cnt_inputs += increased;
elog("input count: %d\n", cnt_inputs);
/* NOTE: optionally sort here by expected cost or g/h-value */
int id = 0;
for (int i = 0, load = 0; i < cnt_inputs; ++i)
{
load += stat[i].nodes_expanded;
if (load >= avarage_expected_load)
{
load = 0;
input_ends[id++] = i;
}
}
while (id < N_WORKERS)
input_ends[id++] = cnt_inputs;
}
solution_found:
CUDA_CHECK(cudaFree(d_input));
CUDA_CHECK(cudaFree(d_input_ends));
CUDA_CHECK(cudaFree(d_plan));
CUDA_CHECK(cudaFree(d_stat));
CUDA_CHECK(cudaFree(d_movable_table));
CUDA_CHECK(cudaFree(d_h_diff_table));
CUDA_CHECK(cudaDeviceReset());
avoid_unused_static_assertions();
return 0;
}
|
10,866 | #define N 96
#include <stdio.h>
// Kernel: every launched thread prints its (block, thread) coordinates.
// Output order is unspecified; the printf buffer is flushed at the next
// synchronizing call on the host (cudaDeviceReset() in main below).
__global__ void helloFromGPU(){
printf("HelloFrom GPU! %d %d\n", blockIdx.x, threadIdx.x);
}
int main()
{
printf("Hello World from CPU\n");
// Launch 3 blocks x 32 threads (96 greetings). The launch is asynchronous.
helloFromGPU<<<3, 32>>>();
// cudaDeviceReset() synchronizes with the device and flushes the device-side
// printf buffer before the process exits.
cudaDeviceReset();
return 0;
}
|
10,867 | #include <stdio.h>
/*
The "__global__" tag tells nvcc that the function will execute on the device
but will be called from the host. Notice that we must use pointers!
*/
// Device kernel: stores *a + *b into *c and reports which block executed.
// When launched with several blocks, every block stores the identical value,
// so the concurrent writes to *c all agree.
__global__
void add_int( int *a, int *b, int *c){
    const int lhs = *a;
    const int rhs = *b;
    *c = lhs + rhs;
    printf("blockIdx: %d\n",blockIdx.x);
}
// Main program
// Minimal host/device round trip: add two ints on the GPU and print the sum.
// NOTE(review): none of the CUDA API return codes are checked here.
int main(void){
//host memory != device memory, must allocate differently
//device pointers point to GPU Memory
//host pointers point to CPU memory
int a, b, c; //host copies
int *dev_a, *dev_b, *dev_c; //device copies
int size = sizeof( int ); //size of an integer
//allocate space on device
cudaMalloc( (void**)&dev_a, size );
cudaMalloc( (void**)&dev_b, size );
cudaMalloc( (void**)&dev_c, size );
a = 2; //storing values in host
b = 7;
// now we need the values to be copied to the device
cudaMemcpy( dev_a, &a, size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, &b, size, cudaMemcpyHostToDevice );
// launch the add_int kernel on the GPU
// (3 blocks of 1 thread; all three blocks store the same sum into dev_c)
add_int<<<3,1>>>(dev_a, dev_b, dev_c);
//now we want the values back on the CPU
// (the blocking cudaMemcpy also synchronizes with the kernel above)
cudaMemcpy( &c, dev_c, size, cudaMemcpyDeviceToHost );
printf("C: %d\n",c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// your basic hello world program
printf("Hello, World!\n");
return 0;
}
|
10,868 | // kernel.cu
// Alex Getz
#include <cstddef>
#include <stdexcept>
#include <memory>
#include <cuda_profiler_api.h>
#include <fstream>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <algorithm>
#include <iostream>
#include <sstream>
#include <cstring>
#include <string>
#include <random>
#include <functional>
#include <math.h>
#include <algorithm>
#include <array>
#define MAX_ENTRIES 11897026
#define B_SIZE 2000
#define TPB 128
#define SHMEM_SIZE 1024 * sizeof(int)
///////////////////////////////////////////////////////////////////////////////////////
// Pass-through CUDA status check: in debug builds (DEBUG/_DEBUG defined),
// report and assert on any failure; in release builds this compiles down to
// simply returning `result` so call sites stay cheap.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
    const bool failed = (result != cudaSuccess);
    if (failed) {
        fprintf(stderr,"CUDA Runtime Error: %s\n",
                cudaGetErrorString(result));
        assert(result==cudaSuccess);
    }
#endif
    return result;
}
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} /*while(0)*/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__);}
// Report a failed CUDA call together with its source location; terminates the
// process with the error code unless `abort` is false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
////////////////////////////////////////////////////////////////////////////////////////
/* /// DEVICE FUNCTIONS /// */
// Uniform draw from this thread's RNG stream (curand_uniform yields (0, 1]).
__device__ float getnextrand(curandState *state){
    return static_cast<float>(curand_uniform(state));
}
/*
 * Draw a uniformly distributed index in [0, scale-1] from this thread's RNG.
 *
 * Fix: curand_uniform() returns a value in the half-open range (0, 1], so the
 * original `scale * getnextrand(state)` could evaluate to exactly `scale`,
 * which callers (the bootstrap kernel) use to subscript a `scale`-element
 * array -- an out-of-bounds read. Clamp the draw to scale-1.
 */
__device__ int getnextrandscaled(curandState *state, unsigned long int scale){
    unsigned long int drawn = (unsigned long int)(scale * getnextrand(state));
    if (drawn >= scale)                 // only when curand_uniform returned 1.0
        drawn = scale - 1;
    return (int) drawn;                 // scale values in this file fit in int
}
/* /// DEVICE KERNELS /// */
// One-time RNG setup: give every thread its own curandState, seeding each
// state with a per-thread offset so the streams differ.
__global__ void initCurand(curandState *state, unsigned long seed){
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(tid + seed, 0, 0, &state[tid]);
}
// One bootstrap resample per block: each thread draws random indices into
// d_sample, averages its draws, and the block reduces the per-thread means.
// output_mean[blockIdx.x] receives the mean of the block's per-thread means.
// NOTE(review): SHMEM_SIZE is 1024*sizeof(int) = 4096, so this shared array
// holds 4096 ints (16 KiB) while at most blockDim.x (<=1024) entries are used.
// NOTE(review): the tree reduction assumes blockDim.x is a power of two.
__global__ void bootstrap(double *output_mean, int *d_sample, curandState *state)
{
__shared__ int partial_Sums[SHMEM_SIZE];
unsigned int idx = threadIdx.x + (blockIdx.x*blockDim.x);
unsigned int tNum = threadIdx.x;
int bNum = blockIdx.x;
int bSize = blockDim.x;
unsigned long int ts = 0;
long long int tSum = 0;
int count = 0;
// Each thread samples MAX_ENTRIES/bSize random elements (with replacement).
for(unsigned int i=tNum; i<MAX_ENTRIES; i+=bSize){
ts = getnextrandscaled(&state[idx], MAX_ENTRIES);
tSum += d_sample[ts];
count++;
}
// Integer division: each per-thread mean is truncated before the reduction.
partial_Sums[tNum] = tSum / count;
__syncthreads();
// Perform Sum reduction across all the threads of the block
for(int s=(bSize/2); s>0; s >>= 1){
// Each thread does work unless the index goes off the block
if(tNum<s){
partial_Sums[tNum] += partial_Sums[tNum+s];
}
__syncthreads();
}
// Use first thread of each block to write results back to main mem
if(tNum==0){
output_mean[bNum] = (double)partial_Sums[0] / (double)bSize;
}
}
/* /// HOST GLOBAL FUNCTIONS /// */
// Convert a CUDA runtime status into a C++ exception (no-op on success).
void throw_error(cudaError_t err){
    if (err == cudaSuccess)
        return;
    throw std::runtime_error(cudaGetErrorString(err));
}
/* /// HOST GLOBAL STRUCTS & VARIABLES /// */
// unique_ptr deleter that releases device memory. cudaFree(nullptr) is
// documented as a no-op, matching `delete` semantics for empty pointers.
struct cuda_free_deleter_t{
    void operator()(void* ptr) const
    {
        cudaFree(ptr);
    }
};
// RAII device allocation: reserves `size` elements of T via cudaMalloc and
// wraps the pointer in a unique_ptr that cudaFree()s it. Throws on failure.
template <typename T>
auto cudaAllocBuffer(std::size_t size){
    void *raw = nullptr;
    throw_error(cudaMalloc(&raw, size * sizeof(T)));
    return std::unique_ptr<T, cuda_free_deleter_t>{ static_cast<T*>(raw) };
}
/*
 * Bootstrap a confidence interval for the mean of a numeric field parsed out
 * of ../data/allCountries.txt, with the resampling done on the GPU.
 *
 * Fixes:
 *  - the file-read loop could overrun BaseSample (MAX_ENTRIES entries);
 *  - BootError used `(n-1)/(n*n)` in INTEGER arithmetic, which is 0 for
 *    n=2048, so the printed standard error was always 0;
 *  - the percentile deltas were truncated to int before use.
 */
int main(){
    int *BaseSample, *d_Base;
    double *d_mean, *h_mean;
    // Pinned host buffer for the raw sample (fast H2D transfer).
    checkCuda( cudaMallocHost((void**)&BaseSample,MAX_ENTRIES*sizeof(int)));
    // --- Parse one 4-character tab-separated field per input line ---------
    std::string line;
    uintmax_t m_numLines = 0;
    std::ifstream fs("../data/allCountries.txt");
    if(!fs){
        std::cout<<"ERROR\n";
    }else{
        // FIX: stop before overrunning the MAX_ENTRIES-element buffer.
        while (m_numLines < MAX_ENTRIES && std::getline(fs, line))
        {
            int counter=0;
            std::stringstream ss;
            std::string temp;
            ss << line;
            std::getline(ss,temp,'\t');          // skip the first field
            while(std::getline(ss,temp,'\t')){
                if(temp.length() == 4){          // first 4-char field is the value
                    BaseSample[m_numLines] = std::atoi(temp.c_str());
                    break;
                } else{ ++counter; }
            }
            // NOTE(review): lines with no 4-char field leave their slot
            // uninitialized, as in the original code.
            m_numLines++;
        }
    }
    fs.close();
    ///--- Calculating the Mean ---///
    std::cout<<"Sample has been generated.\nCalculating the mean...\n";
    long long int BaseSum = 0;
    for(int i=0;i<MAX_ENTRIES;i++){
        BaseSum += BaseSample[i];
    }
    double BaseMean = (double)BaseSum / (double)MAX_ENTRIES;
    std::cout<<"Mean has been calculated! Moving on...\n\n";
    ///--- KERNEL OPERATIONS ---///
    checkCuda( cudaMalloc((void**)&d_Base,MAX_ENTRIES*sizeof(int)));
    checkCuda( cudaMemcpy(d_Base,BaseSample,MAX_ENTRIES*sizeof(int),cudaMemcpyHostToDevice));
    checkCuda( cudaMalloc((void**)&d_mean,2048*sizeof(double)));
    checkCuda( cudaMallocHost((void**)&h_mean,2048*sizeof(double)));
    std::cout<<"Launching initCurand Kernel now\n\n";
    try{
        constexpr int block_size = 512;
        constexpr int num_blocks = 4096;
        // RAII device buffer: one curandState per (num_blocks*block_size) thread,
        // enough for the 2048x1024 bootstrap launch below.
        auto devStates = cudaAllocBuffer<curandState>(num_blocks * block_size);
        initCurand<<<num_blocks, block_size>>>(devStates.get(),1234);
        throw_error(cudaPeekAtLastError());      // launch-configuration errors
        throw_error(cudaDeviceSynchronize());    // asynchronous execution errors
        std::cout<<"Curand Kernel Launch Try block SUCCESSFUL!\n";
        std::cout<<"Launching Bootstrap Kernel now\n\n";
        bootstrap<<<2048,1024>>>(d_mean,d_Base,devStates.get());
        throw_error(cudaPeekAtLastError());
        throw_error(cudaDeviceSynchronize());
        std::cout<<"Bootstrap Kernel Launch Try Block SUCCESSFUL!\n";
    }
    catch (const std::exception& e)
    {
        std::cerr << "Error: " << e.what() << '\n';
        return -1;
    }
    catch (...)
    {
        std::cerr << "Unknown Exception";
        return -1;
    }
    std::cout<<"Kernels appear complete, attempting to copy data back to Host\n";
    checkCuda( cudaMemcpy(h_mean,d_mean,2048*sizeof(double),cudaMemcpyDeviceToHost) );
    // --- Standard error of the 2048 per-block bootstrap means --------------
    int n = 2048;
    double SumOfMeans=0;
    for(int i=0;i<n;i++){
        SumOfMeans+=h_mean[i];
    }
    double MeanOfMeans = SumOfMeans / (double)n;
    std::cout<<"MeanofMeans: "<<MeanOfMeans<<"\n";
    double SqrDiff=0;
    for(int i=0;i<n;i++){
        SqrDiff += (h_mean[i]-MeanOfMeans) * (h_mean[i]-MeanOfMeans);
    }
    double SqrdVariance = SqrDiff / (n-1);
    // FIX: the original computed (n-1)/(n*n) with ints, which is 0 for n=2048.
    // NOTE(review): the formula itself looks unusual for a bootstrap standard
    // error -- confirm against the intended statistics.
    double BootError = ((double)(n-1)/((double)n*(double)n)) * SqrdVariance;
    std::cout<<"SqrDiff, SqrdVariance, & BootError: "<<SqrDiff<<", "<<SqrdVariance<<", "<<BootError<<"\n";
    std::cout<<"\nStandard Error is: "<<BootError<<"\n\n\n";
    // --- 80% (10th..90th percentile) interval on (bootstrap mean - BaseMean) --
    double C_Arr[2048] = {};
    for(int i=0;i<n;i++){
        C_Arr[i]=h_mean[i]-BaseMean;
    }
    std::sort(std::begin(C_Arr),std::end(C_Arr));
    double L = 2048.0 * 0.1;
    int Lower = (int)L;
    double H = 2048 * 0.9;
    int Higher = (int)H;
    // FIX: keep the percentile deltas in double; the original truncated them
    // to int before subtracting from BaseMean.
    double LowerBound = C_Arr[Lower];
    double UpperBound = C_Arr[Higher];
    double Left = BaseMean - LowerBound;
    double Right = BaseMean - UpperBound;
    std::cout<<"\n\n\n------------------------------------------------\n";
    std::cout<<"The Confidence Interval is: 80%\n";
    std::cout<<"The Standard Error is: "<<BootError<<"\n";
    std::cout<<"This is on the 10th & 90th percentiles: ["<<Left<<", "<<Right<<"]\n";
    checkCuda( cudaFree(d_Base) );
    checkCuda( cudaFree(d_mean) );
    checkCuda( cudaFreeHost(BaseSample) );
    checkCuda( cudaFreeHost(h_mean) );
    printf("\n\n\nDONE\n");
    return 0;
}
//////////////////// Calculate Statistics. Soon to be offloaded
// int finalMean[400]={0};
// // std::vector<int> *meanVector;
// int bnum = 0;
// int sum1;
// int sum2 = 0;
// // int temp1;
// for (int a=0;a<400;++a){
// sum1=0;
// for(int b=0;b<100;++b){
// sum1+=h_mean[b+(100*bnum)];
// }
// finalMean[a]=sum1/100;
// // temp1 = sum1/100;
// // meanVector[a].push_back( temp1 );
// sum2 += std::pow( (finalMean[a]-meanOriginal), 2 );
// bnum++;
// // std::cout<<"Final Mean "<<a<<" : "<<finalMean[a]<<std::endl;
// }
// printf("\n\n\n");
// std::sort(finalMean,finalMean+SAMPLE_SIZE);
// std::cout<<"sum2 is "<<sum2<<std::endl;
// int div = 400;
// std::cout<<"div is "<<div<<std::endl;
// float stdDeviation = sqrt( (sum2/div) );
// std::cout<<"Standard Deviation is "<<stdDeviation<<std::endl;
// float stdErrorFactor = ( 100.0 / (100.0-1.0) );
// std::cout<<"The Error Factor is "<<stdErrorFactor<<std::endl;
// float stdError = sqrt( stdErrorFactor ) * stdDeviation;
// std::cout<<"Standard Error is "<<stdError<<std::endl;
// int tempA; int tempB;
// float lowerCI = 400 * ( 0.05/2 );
// tempA = finalMean[(int)lowerCI];
// std::cout<<"Lower (5%) Confidence Interval is "<<tempA<<std::endl;
// float higherCI = 400 * ( 1 - (0.05/2) );
// tempB = finalMean[(int)higherCI];
// std::cout<<"Higher (95%) Confidence Interval is "<<tempB<<std::endl;
|
10,869 | /*
*To find sum of two large arrays
*/
#include <stdio.h>
const long long ARRAY_SIZE = 320000;
const long long ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//kernal
// Element-wise vector addition: one thread per element.
// NOTE(review): there is no `id < n` guard; the launch in main() covers
// ARRAY_SIZE exactly (320000 is a multiple of the 32-thread blocks), so no
// thread runs past the arrays -- re-check if the launch ever changes.
__global__ void sum(float *d_sum, float *d_a, float *d_b) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    d_sum[idx] = d_a[idx] + d_b[idx];
}
//to check the final result
// Verify h_sum[i] == h_a[i] + h_b[i] for all ARRAY_SIZE elements.
// Returns 1 on success, 0 at the first mismatch.
// NOTE(review): exact float equality assumes host and device round the single
// addition identically -- confirm if the build enables FMA contraction.
int checkSum(float *h_a, float *h_b, float *h_sum) {
    for (int i = 0; i < ARRAY_SIZE; i++) {
        if (h_sum[i] != h_a[i] + h_b[i]) {
            return 0;
        }
    }
    return 1;
}
// Fill two random vectors, add them on the GPU, and validate on the host.
// NOTE(review): the three stack arrays total ~3.75 MB; fine for typical 8 MB
// stacks, but move to heap allocation if ARRAY_SIZE grows.
int main() {
float h_a[ARRAY_SIZE], h_b[ARRAY_SIZE], h_sum[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++) {
h_a[i] = rand();
h_b[i] = rand();
}
float *d_a, *d_b, *d_sum;
cudaMalloc((void**) &d_a, ARRAY_BYTES);
cudaMalloc((void**) &d_b, ARRAY_BYTES);
cudaMalloc((void**) &d_sum, ARRAY_BYTES);
cudaMemcpy(d_a, h_a, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, ARRAY_BYTES, cudaMemcpyHostToDevice);
// Ceil-divide the grid; 320000/32 is exact here, so the unguarded kernel
// does not run past the arrays.
sum<<<(int)ceil(ARRAY_SIZE/32.0), 32>>>(d_sum, d_a, d_b);
// Blocking copy: also synchronizes with the kernel launch above.
cudaMemcpy(h_sum, d_sum, ARRAY_BYTES, cudaMemcpyDeviceToHost);
if(checkSum(h_a, h_b, h_sum)) {
printf("The result is computed correctly!");
}
else {
printf("The result is not computed correctly!");
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_sum);
return 0;
}
|
10,870 | #include "includes.h"
// Binarize probabilities against a device-resident threshold: writes 1.0f
// where input >= probability[0], else 0.0f. Index scheme supports a 2-D grid
// of 1-D blocks; `count` guards the grid tail.
__global__ void ApplyThreshold( float* probabilitiesInputs, float* binaryOutput, float* probability, int count ) {
    const int id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    if (id >= count)
        return;
    const float threshold = probability[0];
    binaryOutput[id] = (probabilitiesInputs[id] < threshold) ? 0.0f : 1.0f;
}
10,871 | #include "includes.h"
// RBM convolutional weight-gradient kernel: correlates a 32x32 tile of the
// hidden map `h` with the (zero-padded) visible map `v` and atomically
// accumulates +/- the local products into the filter gradient `dw`
// (sign +1 when is_init, -1 otherwise). Block coordinates encode
// (image, filter, channel, tile) as decoded below; 32x32 threads per block.
// NOTE(review): assumes feature_map_size is a multiple of 32 and
// filter_size <= MAX_FILETER_SIZE -- confirm with the caller.
__global__ void compute_d_w_kernel(float *v, float *h, float *dw, bool is_init, int input_size, int lu_padding, int channel_num, int filter_num, int filter_size, int feature_map_size){
// Decode which image / filter / channel / 32-wide tile this block handles.
int imgIdx = blockIdx.y / (feature_map_size / 32);
int filterIdx = blockIdx.x / (channel_num * feature_map_size / 32);
int channelIdx = (blockIdx.x % (channel_num * feature_map_size / 32)) /
(feature_map_size / 32);
int tx = (blockIdx.x % (channel_num * feature_map_size / 32)) %
(feature_map_size / 32) *32 + threadIdx.x;
int ty = (blockIdx.y % (feature_map_size / 32)) * 32 + threadIdx.y;
// Shared tiles: visible patch needs a filter-sized halo on each side.
__shared__ float shV[32+MAX_FILETER_SIZE][32+MAX_FILETER_SIZE];
__shared__ float shH[32][32];
float sign;
if(is_init){
sign = 1.0f;
}else{
sign = -1.0f;
}
// Advance the base pointers to this (image, channel/filter) plane.
v = v + imgIdx * channel_num * input_size * input_size +
channelIdx * input_size * input_size;
h = h + imgIdx * filter_num * feature_map_size * feature_map_size +
filterIdx * feature_map_size * feature_map_size;
dw = dw + filterIdx * channel_num * filter_size * filter_size +
channelIdx * filter_size * filter_size;
float local_dw = 0.0f;
// Cooperative load of shH (32x32) and shV (32+halo square); positions that
// fall in the zero padding of the visible map are filled with 0.
for(int loadX = 0; loadX <= 32; loadX += filter_size){
for(int loadY = 0; loadY <= 32; loadY += filter_size){
if(loadX < 32 && loadY < 32){
//TODO:feature map overflow
shH[threadIdx.y+loadY][threadIdx.x+loadX] =
h[(ty+loadY)*feature_map_size + (tx+loadX)];
}
if((tx+loadX) < lu_padding ||
(ty+loadY) < lu_padding ||
(tx+loadX) >= (input_size+lu_padding) ||
(ty+loadY) >= (input_size+lu_padding)){
shV[threadIdx.y+loadY][threadIdx.x+loadX] = 0;
}else{
shV[threadIdx.y+loadY][threadIdx.x+loadX] =
v[(ty+loadY-lu_padding)*input_size + (tx+loadX-lu_padding)];
}
}
}
// All threads must finish loading before any reads the shared tiles.
__syncthreads();
// Each thread accumulates one weight position's 32x32 correlation.
for(int i = 0; i < 32; i++){
for(int j = 0; j < 32; j++){
local_dw += shV[threadIdx.y+i][threadIdx.x+j] *
shH[i][j];
}
}
// Many blocks contribute to the same filter gradient: accumulate atomically.
atomicAdd(dw + threadIdx.y*filter_size + threadIdx.x, sign * local_dw);
}
10,872 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define NUM_BLOCKS 32
#define BLOCK_WIDTH 1
// Kernel: each thread prints its block index; output is flushed by the
// cudaDeviceSynchronize() in main().
__global__ void hello()
{
printf("Hello world! I'm a thread in block %d\n", blockIdx.x);
}
// Launch NUM_BLOCKS one-thread blocks, wait for completion, then reset the
// device so profilers capture complete traces.
int main(int argc, char **argv)
{
// launch the kernel
hello <<<NUM_BLOCKS, BLOCK_WIDTH >>>();
// force the printf()s to flush
cudaDeviceSynchronize();
printf("That's all!\n");
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaDeviceReset();
// Keep the console window open (Windows-style pause).
getchar();
return 0;
}
10,873 | #include "includes.h"
// Tiled 1-D convolution with the mask in constant memory (Global_Mask).
// Each block stages its TILE_SIZE elements plus left/right halos of
// Mask_size/2 into shared memory, then every thread computes one output.
// NOTE(review): the final `Out[index] = Value;` store is NOT guarded by
// index < N_elements, so trailing threads of the last block write past the
// output array when N_elements is not a multiple of blockDim.x -- confirm
// the launch configuration or add a guard.
__global__ void convolution1d_tiles_constant_kernel(int *In, int *Out){
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; // Index 1d iterator.
__shared__ int Tile[TILE_SIZE + Mask_size - 1];
int n = Mask_size/2;
// Left halo: the last n threads fetch the tail of the previous block's tile.
int halo_left_index = (blockIdx.x - 1 ) * blockDim.x + threadIdx.x;
if (threadIdx.x >= blockDim.x - n ){
Tile[threadIdx.x - (blockDim.x - n )] = (halo_left_index < 0) ? 0 : In[halo_left_index];
}
// Center elements (zero-filled past the end of the input).
if(index<N_elements){Tile[n + threadIdx.x] = In[index];
}else{Tile[n + threadIdx.x] = 0;}
// Right halo: the first n threads fetch the head of the next block's tile.
int halo_right_index = (blockIdx.x + 1 ) * blockDim.x + threadIdx.x;
if (threadIdx.x < n) {
Tile[n + blockDim.x + threadIdx.x]= (halo_right_index >= N_elements) ? 0 : In[halo_right_index];
}
// Barrier: all halo/center loads must land before the dot products below.
__syncthreads();
int Value = 0;
for (unsigned int j = 0; j < Mask_size; j ++) {
Value += Tile[threadIdx.x + j] * Global_Mask[j];
}
Out[index] = Value;
}
10,874 | #pragma warning (disable : 4267)
#pragma warning (disable : 4244)
#include <thrust/random.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <thrust/random/normal_distribution.h>
#include <thrust/device_ptr.h>
#include <curand.h>
#include <cuda_runtime.h>
#include <iostream>
#include <iomanip>
#include <cmath>
#include "Example_MC_BS.cuh"
const unsigned int DEFAULT_RAND_N = 10000000;
const unsigned int DEFAULT_SEED = 1;
// Antithetic-variates Monte Carlo payoff for a European call under
// Black-Scholes dynamics (S0=20, sigma=0.28, r=4.5%, K=21, T=0.5y):
// evaluates the terminal price for both +W and -W and returns the average
// discounted payoff of the pair.
struct estimate_BS3 : public thrust::unary_function<float, float>
{
    __host__ __device__
    float operator()(float W)
    {
        const float S0 = 20.0f;
        const float sig = 0.28f;
        const float r = 0.045f;
        const float K = 21.0f;
        const float T = 0.5f;
        const float sqrtT = sqrtf(T);
        const float sig2 = sig*sig;
        const float drift = (r - 0.5f*sig2)*T;
        const float diffusion = sig*sqrtT*W;
        const float ST = S0 * expf(drift + diffusion);      // path driven by +W
        const float ST_at = S0 * expf(drift - diffusion);   // antithetic path (-W)
        const float payoff = (ST-K > 0.0f)? ST-K:0.0f;
        const float payoffAt = (ST_at-K > 0.0f)? ST_at-K:0.0f;
        return expf(-r*T)*(payoff + payoffAt)/2.0f;
    }
};
// Price the European call of estimate_BS3 by Monte Carlo: M batches of
// rand_n standard-normal draws are generated with cuRAND and reduced with
// thrust::transform_reduce, then averaged over all M*rand_n samples.
// NOTE(review): the curand* and cudaMalloc return codes are not checked.
void exmpl_thrust_MC_BS2()
{
unsigned int M = 200;
unsigned int rand_n = DEFAULT_RAND_N;
unsigned int seed = DEFAULT_SEED;
curandGenerator_t prngGPU;
curandCreateGenerator(&prngGPU, CURAND_RNG_PSEUDO_MTGP32);
curandSetPseudoRandomGeneratorSeed(prngGPU, seed);
float estimate = 0.0f;
float *d_rand;
cudaMalloc((void **)&d_rand, rand_n * sizeof(float));
// Wrap the raw device pointer so thrust can iterate over it.
thrust::device_ptr<float> d_rand_b = thrust::device_pointer_cast(d_rand);
thrust::device_ptr<float> d_rand_e = d_rand_b + rand_n;
for (unsigned int i = 0; i < M; ++i)
{
// Fresh batch of N(0,1) draws, reduced to a sum of discounted payoffs.
curandGenerateNormal(prngGPU, (float *) d_rand, rand_n, 0.0f, 1.0f);
estimate += thrust::transform_reduce( d_rand_b,
d_rand_e,
estimate_BS3(),
0.0f,
thrust::plus<float>());
}
// rand_n*M = 2e9 still fits in unsigned int before the float division.
estimate /= (rand_n*M);
std::cout << std::setprecision(10);
std::cout << "Option price is approximately " << estimate << std::endl;
curandDestroyGenerator(prngGPU);
cudaFree(d_rand);
cudaDeviceReset();
};
10,875 | #include <cuda.h>
#include <stdio.h>
/*
 * Print a rows x columns matrix stored in row-major order, one row per line.
 * Fix: element (i, j) was read as matrix[i * rows + j], which is only correct
 * when rows == columns; a row-major rows x columns layout strides by the
 * column count, i.e. matrix[i * columns + j].
 */
void printMatrix(float *matrix, int rows, int columns)
{
	for (int i = 0; i < rows; i++) {
		for (int j = 0; j < columns; j++)
			printf("%g ", matrix[i * columns + j]);
		printf("\n");
	}
	printf("\n");
}
#define CUDA_CHECK_RETURN(value)\
{\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n",\
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}\
}
// Fill matrix[g] = g for a flat array, one element per thread (1-D launch).
// NOTE(review): no bounds guard -- the grid must exactly cover the array.
__global__ void initMatrix_1D(float *matrix)
{
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    matrix[gid] = (float)gid;
}
// Teaching kernel: fills the matrix with threadIdx.x so the thread-to-element
// mapping of a 2-D launch can be visualized. The commented-out assignments
// below are alternative quantities to visualize (linear index, block ids,
// grid/block dimensions, ...).
__global__ void initMatrix_2D_I(float *matrix)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
// I is the total x-extent of the launch, used as the row stride.
int I = gridDim.x * blockDim.x;
// int J = gridDim.y * blockDim.y;
// matrix[j * I + i] = j * I + i;
// matrix[i + j * I] = i;
// matrix[i + j * I] = j;
// matrix[i + j * I] = I;
// matrix[i + j * I] = J;
matrix[i + j * I] = threadIdx.x;
// matrix[i + j * I] = threadIdx.y;
// matrix[i + j * I] = gridDim.x;
// matrix[i + j * I] = gridDim.y;
// matrix[i + j * I] = blockDim.x;
// matrix[i + j * I] = blockDim.y;
// matrix[i + j * I] = blockIdx.x;
// matrix[i + j * I] = blockIdx.y;
}
// Companion to initMatrix_2D_I: indexes with the y-extent J as the stride,
// storing each element's own linear index j + i*J.
__global__ void initMatrix_2D_J(float *matrix)
{
    const int i = threadIdx.x + blockDim.x * blockIdx.x;
    const int j = threadIdx.y + blockDim.y * blockIdx.y;
    const int J = gridDim.y * blockDim.y;   // total y-extent of the launch
    const int linear = j + i * J;
    matrix[linear] = (float)linear;
}
// Transpose matrix1 into matrix2 and record the source linear index in
// `check` for visual verification.
// NOTE(review): the x-extent I is used as the stride for BOTH matrices, so
// this is only correct for square launches/matrices (as used in main()).
__global__ void transp(float *matrix1, float *matrix2, float *check)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int I = gridDim.x * blockDim.x;
matrix2[i * I + j] = matrix1[j * I + i];
check[i * I + j] = j * I + i;
}
// Driver for the 2-D indexing experiments: initializes 8x8 matrices with the
// kernels above, transposes one, and prints every result for inspection.
// NOTE(review): dmatrix3/hmatrix3 are never freed (leak at exit), and no CUDA
// call here goes through CUDA_CHECK_RETURN.
int main(int argc, char *argv[])
{
int blocks = (argc > 1) ? atoi(argv[1]) : 8;
int threads = (argc > 2) ? atoi(argv[2]) : 4;
int rows = 8;
int columns = 8;
int size_matrix = rows * columns;
float *dmatrix1, *hmatrix1;
float *dmatrix2, *hmatrix2;
float *dcheck, *hcheck;
float *dmatrix3, *hmatrix3;
cudaMalloc((void**) &dmatrix1, size_matrix * sizeof(float));
cudaMalloc((void**) &dmatrix2, size_matrix * sizeof(float));
cudaMalloc((void**) &dcheck, size_matrix * sizeof(float));
cudaMalloc((void**) &dmatrix3, size_matrix * sizeof(float));
hmatrix1 = (float*) calloc(size_matrix, sizeof(float));
hmatrix2 = (float*) calloc(size_matrix, sizeof(float));
hcheck = (float*) calloc(size_matrix, sizeof(float));
hmatrix3 = (float*) calloc(size_matrix, sizeof(float));
// Experiment 1: 2x2 grid of 8-thread (1-D) blocks writing threadIdx.x.
initMatrix_2D_I<<<dim3(2, 2), dim3(8)>>>(dmatrix3);
cudaDeviceSynchronize();
cudaMemcpy(hmatrix3, dmatrix3, size_matrix * sizeof(float), cudaMemcpyDeviceToHost);
printMatrix(hmatrix3, rows, columns);
// initMatrix_1D<<<dim3(blocks), dim3(threads)>>>(dmatrix1);
// Experiment 2: `blocks` blocks of 2x2 threads.
initMatrix_2D_I<<<dim3(blocks), dim3(2, 2)>>>(dmatrix1);
cudaDeviceSynchronize();
cudaMemcpy(hmatrix1, dmatrix1, size_matrix * sizeof(float), cudaMemcpyDeviceToHost);
printMatrix(hmatrix1, rows, columns);
#if 0
initMatrix_2D_J<<<dim3(blocks), dim3(2, 2)>>>(dmatrix2);
cudaDeviceSynchronize();
cudaMemcpy(hmatrix2, dmatrix2, size_matrix * sizeof(float), cudaMemcpyDeviceToHost);
printMatrix(hmatrix2, rows, columns);
#endif
// Transpose dmatrix1 and print both the result and the index map.
transp<<<dim3(blocks), dim3(2, 2)>>>(dmatrix1, dmatrix2, dcheck);
cudaDeviceSynchronize();
cudaMemcpy(hmatrix2, dmatrix2, size_matrix * sizeof(float), cudaMemcpyDeviceToHost);
printMatrix(hmatrix2, rows, columns);
cudaMemcpy(hcheck, dcheck, size_matrix * sizeof(float), cudaMemcpyDeviceToHost);
printMatrix(hcheck, rows, columns);
cudaFree(dmatrix1);
cudaFree(dmatrix2);
cudaFree(dcheck);
free(hmatrix1);
free(hmatrix2);
free(hcheck);
return 0;
}
|
10,876 | extern "C"
{
/*
 * Gradient accumulation for binary cross-entropy over sigmoid(y):
 *   z[i] += t[0] * (sigmoid(y[i]) - x[i]) / lengthX
 * Fix: the original used the double-precision `exp` and `1.0` literals inside
 * a float kernel, forcing a double round-trip per element; use float math
 * (expf, 1.0f) as is idiomatic for single-precision CUDA kernels.
 */
__global__ void DYbinaryentropyXsigmoidY_32(const int lengthX, const float *x, const float *y, const float *t, float *z)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i<lengthX)
    {
        z[i] += t[0]*(1.0f/(1.0f+expf(-y[i]))-x[i])/lengthX;
    }
}
} |
10,877 | #include <stdio.h>
// Kernel: each thread prints its global thread id (block * blockDim + lane).
__global__ void helloWorld() {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
printf("Hello World! My ThreadId is %2d\n", i);
}
// Launch a single 256-thread block and wait so the device printf output is
// flushed before the process exits.
int main() {
helloWorld<<<1, 256>>>();
cudaDeviceSynchronize();
return 0;
}
|
10,878 | #include "includes.h"
// Block-level sum reduction with 4x unrolling: each block first folds four
// blockDim.x-wide segments of g_idata into the first segment (in place), then
// tree-reduces that segment in global memory and writes one partial sum per
// block to g_odata. Host code sums the per-block results.
// NOTE(review): the unrolled combine only runs when all four segments fit
// (idx + 3*blockDim.x < n) -- this sample assumes n is a multiple of
// 4*blockDim.x; confirm for other sizes. The tree step assumes blockDim.x is
// a power of two.
__global__ void reduceUnrolling4 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 4;
// unrolling 4
if (idx + 3 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4;
// g_idata[idx] = g_idata[idx] + g_idata[idx + blockDim.x] + g_idata[idx + 2*blockDim.x] + g_idata[idx + 3*blockDim.x];
}
// Barrier: all unrolled writes must land before the tree reduction reads.
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
10,879 | #define d_vx(z,x) d_vx[(x)*(nz)+(z)]
#define d_vz(z,x) d_vz[(x)*(nz)+(z)]
#define d_sxx(z,x) d_sxx[(x)*(nz)+(z)]
#define d_szz(z,x) d_szz[(x)*(nz)+(z)]
#define d_sxz(z,x) d_sxz[(x)*(nz)+(z)]
#define d_mem_dszz_dz(z,x) d_mem_dszz_dz[(x)*(nz)+(z)]
#define d_mem_dsxz_dx(z,x) d_mem_dsxz_dx[(x)*(nz)+(z)]
#define d_mem_dsxz_dz(z,x) d_mem_dsxz_dz[(x)*(nz)+(z)]
#define d_mem_dsxx_dx(z,x) d_mem_dsxx_dx[(x)*(nz)+(z)]
#define d_mem_dvz_dz(z,x) d_mem_dvz_dz[(x)*(nz)+(z)]
#define d_mem_dvz_dx(z,x) d_mem_dvz_dx[(x)*(nz)+(z)]
#define d_mem_dvx_dz(z,x) d_mem_dvx_dz[(x)*(nz)+(z)]
#define d_mem_dvx_dx(z,x) d_mem_dvx_dx[(x)*(nz)+(z)]
#define d_Lambda(z,x) d_Lambda[(x)*(nz)+(z)]
#define d_Den(z,x) d_Den[(x)*(nz)+(z)]
#define d_Mu(z,x) d_Mu[(x)*(nz)+(z)]
#define d_ave_Mu(z,x) d_ave_Mu[(x)*(nz)+(z)]
#define d_ave_Byc_a(z,x) d_ave_Byc_a[(x)*(nz)+(z)]
#define d_ave_Byc_b(z,x) d_ave_Byc_b[(x)*(nz)+(z)]
#include<stdio.h>
// Adjoint elastic velocity update on a 2-D staggered grid with CPML
// absorbing boundaries. One thread per (z, x) point; gidz is the fast (z)
// index and gidx the slow (x) index (arrays are addressed through the
// d_*(z,x) macros above, column stride nz). c1/c2 are the standard 4th-order
// staggered finite-difference coefficients (9/8, 1/24). Only interior points
// 2 <= gidz <= nz-nPad-3, 2 <= gidx <= nx-3 are updated; the CPML memory
// variables (d_mem_*) are refreshed only inside the nPml-wide boundary strips.
__global__ void el_velocity_adj(
float *d_vz, float *d_vx, float *d_szz, float *d_sxx, float *d_sxz, \
float *d_mem_dszz_dz, float *d_mem_dsxz_dx, float *d_mem_dsxz_dz, float *d_mem_dsxx_dx, \
float *d_mem_dvz_dz, float *d_mem_dvz_dx, float *d_mem_dvx_dz, float *d_mem_dvx_dx, \
float *d_Lambda, float *d_Mu, float *d_ave_Mu, float *d_Den, float *d_ave_Byc_a, float *d_ave_Byc_b, \
float *d_K_z_half, float *d_a_z_half, float *d_b_z_half, \
float *d_K_x_half, float *d_a_x_half, float *d_b_x_half, \
float *d_K_z, float *d_a_z, float *d_b_z, \
float *d_K_x, float *d_a_x, float *d_b_x, \
int nz, int nx, float dt, float dz, float dx, int nPml, int nPad){
int gidz = blockIdx.x*blockDim.x + threadIdx.x;
int gidx = blockIdx.y*blockDim.y + threadIdx.y;
// Staggered 4th-order differences of the stress fields and of the CPML
// memory variables, evaluated at this point.
float dpsixx_dx = 0.0;
float dszz_dx = 0.0;
float dsxx_dx = 0.0;
float dpsixz_dz = 0.0;
float dsxz_dz = 0.0;
float dpsizz_dz = 0.0;
float dszz_dz = 0.0;
float dsxx_dz = 0.0;
float dpsizx_dx = 0.0;
float dsxz_dx = 0.0;
float c1 = 9.0/8.0;
float c2 = 1.0/24.0;
float lambda = d_Lambda(gidz,gidx);
float mu = d_Mu(gidz,gidx);
if(gidz>=2 && gidz<=nz-nPad-3 && gidx>=2 && gidx<=nx-3) {
// update vx
dpsixx_dx = (-c1*(d_mem_dvx_dx(gidz,gidx+1)-d_mem_dvx_dx(gidz,gidx)) \
+ c2*(d_mem_dvx_dx(gidz,gidx+2)-d_mem_dvx_dx(gidz,gidx-1)))/dx;
dszz_dx = (-c1*(d_szz(gidz,gidx+1)-d_szz(gidz,gidx)) + c2*(d_szz(gidz,gidx+2)-d_szz(gidz,gidx-1)))/dx;
dsxx_dx = (-c1*(d_sxx(gidz,gidx+1)-d_sxx(gidz,gidx)) + c2*(d_sxx(gidz,gidx+2)-d_sxx(gidz,gidx-1)))/dx;
dpsixz_dz = (-c1*(d_mem_dvx_dz(gidz,gidx)-d_mem_dvx_dz(gidz-1,gidx)) \
+ c2*(d_mem_dvx_dz(gidz+1,gidx)-d_mem_dvx_dz(gidz-2,gidx)))/dz;
dsxz_dz = (-c1*(d_sxz(gidz,gidx)-d_sxz(gidz-1,gidx)) + c2*(d_sxz(gidz+1,gidx)-d_sxz(gidz-2,gidx)))/dz;
d_vx(gidz, gidx) += (d_a_x[gidx]*dpsixx_dx + lambda*dszz_dx/d_K_x[gidx]*dt \
+ (lambda+2.0*mu)*dsxx_dx/d_K_x[gidx]*dt + d_a_z_half[gidz]*dpsixz_dz \
+ d_ave_Mu(gidz,gidx)/d_K_z_half[gidz]*dsxz_dz*dt);
//update phi_xx_x and phi_xz_z
// (memory variables only evolve inside the left/right PML strips ...)
if(gidx<nPml || gidx>nx-nPml-1){
d_mem_dsxx_dx(gidz, gidx) = d_b_x_half[gidx]*d_mem_dsxx_dx(gidz, gidx) + d_ave_Byc_b(gidz, gidx)*d_vx(gidz, gidx)*dt;
}
// (... and the top/bottom PML strips, excluding the padding rows)
if(gidz<nPml || (gidz>nz-nPml-nPad-1)){
d_mem_dsxz_dz(gidz, gidx) = d_b_z[gidz]*d_mem_dsxz_dz(gidz, gidx) + d_ave_Byc_b(gidz, gidx)*d_vx(gidz, gidx)*dt;
}
// update vz
dpsizz_dz = (-c1*(d_mem_dvz_dz(gidz+1,gidx)-d_mem_dvz_dz(gidz,gidx)) \
+ c2*(d_mem_dvz_dz(gidz+2,gidx)-d_mem_dvz_dz(gidz-1,gidx)))/dz;
dszz_dz = (-c1*(d_szz(gidz+1,gidx)-d_szz(gidz,gidx)) + c2*(d_szz(gidz+2,gidx)-d_szz(gidz-1,gidx)))/dz;
dsxx_dz = (-c1*(d_sxx(gidz+1,gidx)-d_sxx(gidz,gidx)) + c2*(d_sxx(gidz+2,gidx)-d_sxx(gidz-1,gidx)))/dz;
dpsizx_dx = (-c1*(d_mem_dvz_dx(gidz,gidx)-d_mem_dvz_dx(gidz,gidx-1)) \
+ c2*(d_mem_dvz_dx(gidz,gidx+1)-d_mem_dvz_dx(gidz,gidx-2)))/dx;
dsxz_dx = (-c1*(d_sxz(gidz,gidx)-d_sxz(gidz,gidx-1)) + c2*(d_sxz(gidz,gidx+1)-d_sxz(gidz,gidx-2)))/dx;
d_vz(gidz, gidx) += (d_a_z[gidz]*dpsizz_dz + (lambda+2.0*mu)*dszz_dz/d_K_z[gidz]*dt \
+ lambda*dsxx_dz/d_K_z[gidz]*dt + d_a_x_half[gidx]*dpsizx_dx \
+ d_ave_Mu(gidz,gidx)/d_K_x_half[gidx]*dsxz_dx*dt);
// update phi_xz_x and phi_zz_z
if(gidx<nPml || gidx>nx-nPml-1){
d_mem_dsxz_dx(gidz, gidx) = d_b_x[gidx]*d_mem_dsxz_dx(gidz, gidx) + d_ave_Byc_a(gidz, gidx)*d_vz(gidz, gidx)*dt;
}
if(gidz<nPml || (gidz>nz-nPml-nPad-1)){
d_mem_dszz_dz(gidz, gidx) = d_b_z_half[gidz]*d_mem_dszz_dz(gidz, gidx) + d_ave_Byc_a(gidz, gidx)*d_vz(gidz, gidx)*dt;
}
}
else {
return;
}
}
10,880 | #include <stdio.h>
#include "orbit_integrator_cuda.cu"
#define N 512 * 512 * 64
float x[N];
float *x_device;
cudaError_t err;
// Fill x[i] = i, then either run the `square` kernel from the included
// orbit_integrator_cuda.cu (any CLI argument) or a CPU fallback loop.
// NOTE(review): `M` is not defined in this file -- the CPU branch only
// compiles if orbit_integrator_cuda.cu defines it; and the fallback computes
// x[i] += M*M-ish accumulation, not a square, so the two paths disagree.
// NOTE(review): `dimBlock` is actually used as the GRID dimensions
// (512x64 blocks of 512 threads = 16.7M threads, matching N); x_device is
// never cudaFree'd.
int main(int argc, char** argv) {
for(int i = 0; i < N; i++) {
x[i] = i;
}
printf("x[%d] = %f\n", N-1, x[N-1]);
if( argc > 1) {
cudaMalloc((void**) &x_device, sizeof(float)*N);
err = cudaGetLastError ();
printf("malloc: %s\n", cudaGetErrorString(err));
cudaMemcpy(x_device, x, sizeof(float)*N, cudaMemcpyHostToDevice);
err = cudaGetLastError ();
printf("copy: %s\n", cudaGetErrorString(err));
dim3 dimBlock(512,64);
square<<<dimBlock, 512>>>(x_device);
err = cudaGetLastError ();
printf("call: %s\n", cudaGetErrorString(err));
cudaMemcpy(x, x_device, sizeof(float)*N, cudaMemcpyDeviceToHost);
err = cudaGetLastError ();
printf("cpy: %s\n", cudaGetErrorString(err));
} else {
for(int i = 0; i < N; i++) {
for(int j = 0; j < M; j++)
x[i] = x[i] + M;
}
}
// Print the first few elements plus the last for a quick sanity check.
for(int i = 0; i < 10; i++) {
printf("x[%d] = %f\n", i, x[i]);
}
printf("x[%d] = %f\n", N-1, x[N-1]);
return 0;
}
|
10,881 | #include "includes.h"
// Copy the n-element source array `ss` into `zz` starting at offset `indx`
// (zz[indx .. indx+n-1] = ss[0 .. n-1]), one element per thread.
__global__ void insertArray ( const int n, const int indx, const float *ss, float *zz ) {
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if ( tid >= n )
        return;
    zz[indx+tid] = ss[tid];
}
10,882 | #include "AntGPU.cuh"
#include <stdio.h>
__device__
// Construct an ant starting at `initialLocation` for a matrixDim-city tour.
// Allocates its candidate-city pool and scoring scratch on the device heap.
// NOTE(review): no destructor is visible here -- if ants are constructed
// repeatedly, these `new[]` buffers leak device-heap memory; confirm.
AntGPU::AntGPU(int initialLocation, int matrixDim, curandState_t randState) :
position(initialLocation),
possibleLocations(new int[matrixDim]),
goodnessNumerators(new double[matrixDim]),
possibleLocationsLastIndex(matrixDim - 1),
m_randomState(randState)
{
}
__device__
// Complete one tour: repeatedly pick the next city (pheromone/distance
// weighted) until the candidate pool is empty, then close the loop back to
// the start city. `route` must hold matrixDim+1 entries (closing hop stored
// at route[matrixDim]); `distance` accumulates the full cycle length.
void AntGPU::Venture(int* route, const double* distanceMatrix, const double* pheromoneMatrix, int matrixDim, double alpha, double beta)
{
while (possibleLocationsLastIndex >= 0)
{
int nextHop = SelectNextHop(distanceMatrix, pheromoneMatrix, matrixDim, alpha, beta);
GoTo(nextHop, route, distanceMatrix, matrixDim);
}
//printf("Distance Traveled: %f\n", distance + distanceMatrix[position]);
// Close the cycle: return to the starting city and add that final leg.
route[matrixDim] = route[0];
distance += distanceMatrix[route[matrixDim - 1] * matrixDim + route[0]];
}
__device__
// Roulette-wheel selection of the next city among the remaining candidates.
// Goodness of candidate c is pheromone^alpha * (1/distance)^beta from the
// current position; a uniform draw picks proportionally to goodness.
// Side effect: records the chosen pool slot in possibleLocationsNextIndex so
// GoTo() can remove it. Returns -1 only if floating-point rounding leaves the
// draw above the accumulated probability mass.
int AntGPU::SelectNextHop(const double* distance_matrix, const double* pheromoneMatrix, int matrixDim, double alpha, double beta)
{
double denominator = 0;
for (int i = 0; i <= possibleLocationsLastIndex; ++i)
{
int possiblePosition = possibleLocations[i];
double goodnessNumerator = pow(pheromoneMatrix[position * matrixDim + possiblePosition], alpha) * pow(1.0 / distance_matrix[position * matrixDim + possiblePosition], beta);
goodnessNumerators[possiblePosition] = goodnessNumerator;
denominator += goodnessNumerator;
}
double sum = 0;
double random = curand_uniform_double(&m_randomState);
for (int i = 0; i <= possibleLocationsLastIndex; ++i)
{
int possiblePosition = possibleLocations[i];
double numerator = goodnessNumerators[possiblePosition];
double probability = numerator / denominator;
if (random <= sum + probability)
{
possibleLocationsNextIndex = i;
return possiblePosition;
}
sum += probability;
}
return -1;
}
__device__
// Move to `next`: append it to the route, remove it from the candidate pool
// (swap-with-last removal at the slot SelectNextHop recorded), add the leg
// length, and make it the current position.
void AntGPU::GoTo(int next, int* route, const double* distanceMatrix, int matrixDim)
{
    route[routeIndex] = next;
    ++routeIndex;
    possibleLocations[possibleLocationsNextIndex] = possibleLocations[possibleLocationsLastIndex];
    --possibleLocationsLastIndex;
    distance += distanceMatrix[position * matrixDim + next];
    position = next;
}
__device__
// Return the ant to a fresh tour state: clear route/distance, repopulate the
// candidate pool with every city, then immediately consume the start city
// (record it as the first hop and swap it out of the pool).
void AntGPU::Reset(int* route, int initialLocation, int matrixDim)
{
    routeIndex = 0;
    distance = 0;
    position = initialLocation;
    possibleLocationsLastIndex = matrixDim - 1;
    for (int city = 0; city < matrixDim; ++city)
        possibleLocations[city] = city;
    route[routeIndex] = initialLocation;
    ++routeIndex;
    possibleLocations[initialLocation] = possibleLocations[possibleLocationsLastIndex];
    --possibleLocationsLastIndex;
}
|
__global__
// Experimental kernel (looks like scratch code): copies in[blockIdx.x] to
// out[blockIdx.x], then EVERY block zeroes element 0 of both `in` and `out`.
// NOTE(review): the zeroing races with (and can clobber) block 0's copy, and
// `out2` is never used -- confirm this kernel's intent before relying on it.
void add(float2 *in, float2 *out, float2 *out2)
{
int index = blockIdx.x;
float2 a = in[index];
out[index] = a;
//(float2)
// `in`/`out` used as pointers here address element 0 only.
in->x = 0.0;
in->y = 0.0;
out->x = 0.0;
out->y = 0.0;
//*(float2)a.x = index;
}
|
10,884 | #include "includes.h"
// Sanitize a float buffer in place: any NaN or +/-Inf entry is replaced by a
// deterministic, index-derived stand-in so downstream math stays finite.
__global__ void fix_nan_and_inf_kernel(float *input, size_t size)
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= size)
        return;
    const float v = input[i];
    const bool bad = isnan(v) || isinf(v);
    if (bad) {
        input[i] = 1.0f / (fabs((float)i) + 1); // pseudo random value
    }
}
10,885 | #include<stdio.h>
#include<cuda.h>
// Launched as <<<n, n>>>: block j, thread i computes d_b[j] * d_a[j*n + i].
// NOTE(review): despite the name, this does NOT accumulate a matrix-vector
// product -- all n blocks write d_x[i] for their own j, so the blocks race
// and the surviving value is whichever block stored last. Confirm the
// intended semantics (a sum over j would need accumulation/atomics).
__global__ void mtrxVecMult(float* d_a, float* d_b, float* d_x)
{
int i = threadIdx.x;
int j = blockIdx.x;
int n = blockDim.x;
float aij = d_a[j*n+i];
d_x[i] = d_b[j]*aij;
}
// Build an 8x8 matrix (column value 3*j) and a constant vector, run the
// mtrxVecMult kernel, and print the result vector.
// NOTE(review): h_x is copied to the device while still uninitialized
// (harmless only because the kernel overwrites every element), and the
// cudaGetLastError() result is discarded.
int main(int argc, char** argv)
{
const int n = 8;
const int A_BYTES = n * n * sizeof(float);
const int B_BYTES = n * sizeof(float);
float h_a[n*n];
float h_b[n];
float h_x[n];
for (int i=0; i < n; i++)
{
for (int j=0; j < n; j++) h_a[i*n+j] = 3*j;
h_b[i] = 2;
}
//declare GPU memory pointers
float *d_a;
float *d_b;
float *d_x;
//allocate memory on the device
cudaMalloc((void**)&d_a,A_BYTES);
cudaMalloc((void**)&d_b,B_BYTES);
cudaMalloc((void**)&d_x,B_BYTES);
//transfer the array to the GPU
//destination, source, size, method
cudaMemcpy(d_x,h_x,B_BYTES,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,h_b,B_BYTES,cudaMemcpyHostToDevice);
cudaMemcpy(d_a,h_a,A_BYTES,cudaMemcpyHostToDevice);
//launch the kernel
// (n blocks of n threads; see the race note on mtrxVecMult above)
mtrxVecMult<<<n,n>>>(d_a,d_b,d_x);
cudaDeviceSynchronize();
cudaGetLastError();
//copy the results back onto the device
//destination, source, size, method
cudaMemcpy(h_x,d_x,B_BYTES,cudaMemcpyDeviceToHost);
for (int i=0; i<n; i++) {
printf("%.2f",h_x[i]);
printf("\n");
}
//free memory previously allocated on the device
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_x);
}
|
10,886 | #include "includes.h"
// Copies energy0 into energy1, one element per thread.
__global__ void CuKnlSetField( double xCells, double yCells, double* energy0, double* energy1)
{
    const int gid = threadIdx.x+blockIdx.x*blockDim.x;
    // Bug fix: no bounds guard — launches round up to whole blocks, so gid
    // could run past the field. Assumes the energy arrays hold xCells*yCells
    // entries (implied by the parameter names) — confirm at the launch site.
    if (gid < (int)(xCells * yCells)) {
        energy1[gid] = energy0[gid];
    }
}
10,887 | #include "includes.h"
// Copies the sub-rectangle [xstart..xend] x [ystart..yend] of `src`
// (row pitch `colssrc`) into the dense n-element buffer `dst`,
// using a grid-stride loop so any launch configuration covers all n elements.
__global__ void cu_getRange(const float *src, float* dst, const int xstart, const int xend, const int ystart, const int yend, const int colssrc, const int n){
    const int step = blockDim.x * gridDim.x;
    const int dstCols = xend - xstart + 1;
    for (int k = threadIdx.x + blockIdx.x * blockDim.x; k < n; k += step) {
        const int dx = k % dstCols;            // column inside the destination
        const int dy = k / dstCols;            // row inside the destination
        dst[k] = src[(dy + ystart) * colssrc + (dx + xstart)];
    }
}
10,888 | #include "includes.h"
// y += A * tmp, one row per thread; A is stored column-major (A[i + j*NY]).
__global__ void atemp(double* A, double* y, double* tmp, int NX, int NY)
{
    int j;
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // Α(T)*temp
    // Bug fix: guard was `i <= NY`, which let thread i == NY write y[NY]
    // (one past the end) and read off A's valid row range. Rows are 0..NY-1.
    if (i < NY){
        for (j = 0; j < NX; j++) {
            y[i] = y[i] + A[i + j*NY] * tmp[j];
        }
    }
}
10,889 | /*
CUDA C++ Program to add 2 1-dimensional vectors of length N
1 blocks, N threads in block
*/
#include<iostream>
#include<cuda_runtime.h>
#define N 10
// Element-wise vector addition c = a + b; one thread per element,
// single-block launch (thread index == element index).
__global__ void vecadd(int* a, int* b, int* c)
{
    const int elem = threadIdx.x;
    c[elem] = a[elem] + b[elem];
}
// Driver: fills a[i]=2i, b[i]=3i+1, adds them on the GPU, prints the result.
int main()
{
    /* Set up variables on host*/
    int* a = new int[N];
    int* b = new int[N];
    int* c = new int[N];
    /* Input values on host*/
    unsigned int size = N*sizeof(int);
    for(int i=0; i<N; ++i)
    {
        a[i] = 2*i;
        b[i] = 3*i+1;
    }
    /* Setting up variables on device. i.e. GPU */
    int* d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    /* Copy data from host to device */
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    /*
    Kernel Launch
    Grid contains 1 block
    That block has N threads
    Hence index of vector is thread index
    */
    vecadd<<<1, N>>>(d_a, d_b, d_c);
    cudaDeviceSynchronize();
    /* Copy result from GPU device to host */
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    /* Print result */
    for(int i=0; i<N; ++i)
    {
        std::cout<<c[i]<<' ';
    }
    std::cout<<'\n';
    /* Cleanup device and host memory */
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Bug fix: buffers allocated with new[] must be released with delete[];
    // plain `delete` on a new[] pointer is undefined behaviour.
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
10,890 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cmath>
#include "matrix_operations.cuh"
#define OptNofThreads 128
#define OptNofBlocks 128
#define OptNofBlocksX 32
#define OptNofBlocksY 32
template <typename T>
// Dropout (save phase) for the first layer: each (block, thread) slot picks a
// pseudo-random column from its pre-filled index buffer, records the reduced
// index, stashes the corresponding weight, and zeroes it in the weight matrix.
__global__ void get_first_layer_dropout(T* first_layer_weights, T* dropped_first_layer, unsigned int* first_dropout_indices, int n_of_neurons, int n_for_first_dropout) {
    const int slot = blockIdx.x * n_for_first_dropout + threadIdx.x;
    const int col = first_dropout_indices[slot] % n_of_neurons;
    first_dropout_indices[slot] = col;                  // remember for restore
    const int w = blockIdx.x * (n_of_neurons + 1) + col;
    dropped_first_layer[slot] = first_layer_weights[w];
    first_layer_weights[w] = 0.0f;
}
template <typename T>
// Dropout (save phase) for the second layer; mirrors get_first_layer_dropout.
__global__ void get_second_layer_dropout(T* second_layer_weights, T* dropped_second_layer, unsigned int* second_dropout_indices, int n_of_second_layer_neurons, int n_for_second_dropout) {
    const int slot = blockIdx.x * n_for_second_dropout + threadIdx.x;
    const int col = second_dropout_indices[slot] % n_of_second_layer_neurons;
    second_dropout_indices[slot] = col;                 // remember for restore
    const int w = blockIdx.x * (n_of_second_layer_neurons + 1) + col;
    dropped_second_layer[slot] = second_layer_weights[w];
    second_layer_weights[w] = 0.0f;
}
template <typename T>
// Dropout (save phase) for the third layer; mirrors get_first_layer_dropout.
__global__ void get_third_layer_dropout(T* third_layer_weights, T* dropped_third_layer, unsigned int* third_dropout_indices, int n_of_third_layer_neurons, int n_for_third_dropout) {
    const int slot = blockIdx.x * n_for_third_dropout + threadIdx.x;
    const int col = third_dropout_indices[slot] % n_of_third_layer_neurons;
    third_dropout_indices[slot] = col;                  // remember for restore
    const int w = blockIdx.x * (n_of_third_layer_neurons + 1) + col;
    dropped_third_layer[slot] = third_layer_weights[w];
    third_layer_weights[w] = 0.0f;
}
template <typename T>
// Dropout (restore phase) for the first layer: writes the stashed weights
// back into the columns recorded by get_first_layer_dropout.
__global__ void take_first_layer_dropout(T* first_layer_weights, T* dropped_first_layer, unsigned int* first_dropout_indices, int n_of_neurons, int n_for_first_dropout) {
    const int slot = blockIdx.x * n_for_first_dropout + threadIdx.x;
    const unsigned int col = first_dropout_indices[slot];
    first_layer_weights[blockIdx.x * (n_of_neurons + 1) + col] = dropped_first_layer[slot];
}
template <typename T>
// Dropout (restore phase) for the second layer; mirrors take_first_layer_dropout.
__global__ void take_second_layer_dropout(T* second_layer_weights, T* dropped_second_layer, unsigned int* second_dropout_indices, int n_of_second_layer_neurons, int n_for_second_dropout) {
    const int slot = blockIdx.x * n_for_second_dropout + threadIdx.x;
    const unsigned int col = second_dropout_indices[slot];
    second_layer_weights[blockIdx.x * (n_of_second_layer_neurons + 1) + col] = dropped_second_layer[slot];
}
template <typename T>
// Dropout (restore phase) for the third layer; mirrors take_first_layer_dropout.
__global__ void take_third_layer_dropout(T* third_layer_weights, T* dropped_third_layer, unsigned int* third_dropout_indices, int n_of_third_layer_neurons, int n_for_third_dropout) {
    const int slot = blockIdx.x * n_for_third_dropout + threadIdx.x;
    const unsigned int col = third_dropout_indices[slot];
    third_layer_weights[blockIdx.x * (n_of_third_layer_neurons + 1) + col] = dropped_third_layer[slot];
}
template<typename T>
// Forward pass of layer 1: block = neuron, threads stride over images.
// Rows < n_of_second_layer_neurons get sigmoid(images . weights); the extra
// "bias row" (bdx >= n_of_second_layer_neurons) is filled with 1.0f.
// Also zeroes sigmoid_multiplier for the backward pass.
// NOTE(review): `idx` is never advanced in the loop, so sigmoid_multiplier is
// re-zeroed at column threadIdx.x on every pass instead of striding like tdx —
// looks like `tdx` was intended; confirm before relying on columns > blockDim.
__global__ void get_first_layer_output(T* images, T* first_layer_weights, T* first_layer_output, T* sigmoid_multiplier, int n_of_images, int n_of_second_layer_neurons, int n_of_neurons) {
int tdx = threadIdx.x;
int idx = threadIdx.x;
int bdx = blockIdx.x;
while (tdx < n_of_images) {
if (bdx < n_of_second_layer_neurons) {
first_layer_output[bdx * n_of_images + tdx] = 0.0f;
sigmoid_multiplier[bdx*n_of_images + idx] = 0.0f;
}
else {
// Bias row: constant 1 so the next layer's bias weight multiplies it.
first_layer_output[bdx * n_of_images + tdx] = 1.0f;
}
if (bdx < n_of_second_layer_neurons) {
// Dot product of one image with this neuron's weights, then sigmoid.
for (int i = 0; i < n_of_neurons; ++i) {
first_layer_output[bdx * n_of_images + tdx] += images[tdx*n_of_neurons + i] *
first_layer_weights[bdx * n_of_neurons + i];
}
first_layer_output[bdx * n_of_images + tdx] = (float)1.0f /
(1 + exp(-first_layer_output[bdx * n_of_images + tdx]));
}
tdx += OptNofThreads;
}
}
template <typename T>
// Forward pass of layer 2: blocks stride over output neurons (plus one bias
// row), threads stride over images. Computes sigmoid(weights . layer-1
// output) per (neuron, image); the bias row is set to 1. Also zeroes
// error_multiplier for the backward pass.
__global__ void get_second_layer_output(T* first_layer_output, T* second_layer_output, T* second_layer_weights, T* error_multiplier, int n_of_images, int n_of_second_layer_neurons, int n_of_third_layer_neurons) {
int tdx = threadIdx.x;
int idx = threadIdx.x;
int bdx = blockIdx.x;
// Per-thread scratch; each thread only touches its own slot, so no barrier
// is required between the write and the read-back below.
__shared__ T second_layer_output_shared[OptNofThreads];
while (bdx < n_of_third_layer_neurons + 1) {
while (idx < n_of_images) {
if (bdx < n_of_third_layer_neurons) {
second_layer_output_shared[tdx] = 0;
error_multiplier[bdx*n_of_images + idx] = 0.0f;
}
else {
// Bias row: constant 1.
second_layer_output_shared[tdx] = 1;
}
if (bdx < n_of_third_layer_neurons) {
for (int i = 0; i < n_of_second_layer_neurons + 1; ++i) {
second_layer_output_shared[tdx] += second_layer_weights[bdx*(n_of_second_layer_neurons + 1) + i] * first_layer_output[i*n_of_images + idx];
}
second_layer_output_shared[tdx] = 1.0f / (1 + exp(-second_layer_output_shared[tdx]));
}
second_layer_output[n_of_images*bdx + idx] = second_layer_output_shared[tdx];
idx += OptNofThreads;
}
// Reset the image cursor before striding to the next neuron row.
if (idx >= n_of_images) {
idx = threadIdx.x;
}
bdx += OptNofBlocks;
}
}
template <typename T>
// Forward pass of layer 3 (pre-softmax): block = output class, threads stride
// over images. Writes exp(weights . layer-2 output) per (class, image).
// Block 0 also zeroes third_layer_sums; the values are not accumulated here —
// presumably a later kernel (get_posibilities) consumes/normalises them.
__global__ void get_third_layer_output(T* third_layer_weights, T* second_layer_output, T* third_layer_output, T* third_layer_sums, int n_of_images, int n_of_third_layer_neurons) {
int tdx = threadIdx.x;
int idx = threadIdx.x;
int bdx = blockIdx.x;
// Per-thread scratch; each thread only touches its own slot.
__shared__ T output[OptNofThreads];
while (idx < n_of_images) {
output[tdx] = 0.0f;
for (int i = 0; i < n_of_third_layer_neurons + 1; ++i) {
output[tdx] += second_layer_output[i*n_of_images + idx] * third_layer_weights[bdx*(n_of_third_layer_neurons + 1) + i];
}
output[tdx] = exp(output[tdx]);
third_layer_output[bdx*n_of_images + idx] = output[tdx];
if (bdx == 0) {
third_layer_sums[idx] = 0.0f;
}
idx += OptNofThreads;
}
}
template <typename T>
// Softmax normalisation: for each image column (blocks stride over images),
// sums the per-class exp() outputs into shared memory and divides each class
// value by the total.
__global__ void get_posibilities(T* third_layer_output, T* third_layer_sums, T* posibilities, int n_of_images, int n_of_models) {
    int tdx = threadIdx.x;
    int bdx = blockIdx.x;
    __shared__ T sums_array[10];        // NOTE(review): assumes blockDim.x <= 10 — confirm launch config
    __shared__ T second_layer_sum[1];
    while (bdx < n_of_images) {
        sums_array[tdx] = third_layer_output[tdx*n_of_images + bdx];
        if (tdx == 0) {
            second_layer_sum[0] = 0.0f;
        }
        // Bug fix: barriers added. The original read second_layer_sum right
        // after its own atomicAdd, racing with the zeroing above and with the
        // other threads' contributions. The loop bound is uniform across the
        // block (bdx derives from blockIdx.x only), so __syncthreads is safe.
        __syncthreads();
        atomicAdd(&second_layer_sum[0], sums_array[tdx]);
        __syncthreads();
        posibilities[tdx*n_of_images + bdx] = (float)sums_array[tdx] / second_layer_sum[0];
        // Keep slow readers from racing with the next iteration's re-zeroing.
        __syncthreads();
        bdx += OptNofBlocks;
    }
}
template <typename T>
// Computes the output-layer error term per (class, image) — which simplifies
// to (posibility - label) for 0/1 labels — and accumulates the squared error.
// NOTE(review): each block zeroes square_error[bdx] but all blocks accumulate
// into square_error[0]; block 0's zeroing races with other blocks' atomicAdds,
// and entries [1..] are cleared but never used — confirm intended contract.
// NOTE(review): parameters n_of_models and i are unused.
__global__ void get_errors(T* posibilities, T* labels, T* errors, T* square_error, int n_of_images, int n_of_models, int i) {
int idx = threadIdx.x;
int tdx = threadIdx.x;
int bdx = blockIdx.x;
if (tdx == 0) {
square_error[bdx] = 0.0f;
}
__shared__ T error_sums[OptNofThreads];
while (tdx < n_of_images) {
float posibility = posibilities[bdx*n_of_images + tdx];
float label = labels[bdx*n_of_images + tdx];
errors[bdx*n_of_images + tdx] = (label*(posibility - 1) + (1 - label)*posibility);
error_sums[idx] = (label - posibility)*(label - posibility);
atomicAdd(&square_error[0], error_sums[idx]);
tdx += OptNofThreads;
}
}
template <typename T>
// Backward pass for layer 3: block = output class. Accumulates the gradient
// (layer-2 output x error) into third_layer_correction and propagates
// error * weight into error_multiplier for the layer-2 backward pass.
__global__ void get_third_layer_correction(T* errors, T* second_layer_output, T* third_layer_correction, T* third_layer_weights, T* error_multiplier, int n_of_images, int n_of_third_layer_neurons, int n_of_models) {
    int idx = threadIdx.x;
    int bdx = blockIdx.x;
    // Thread 0 zeroes this block's correction row before accumulation.
    if (idx == 0) {
        for (int i = 0; i < n_of_third_layer_neurons + 1; ++i) {
            third_layer_correction[bdx*(n_of_third_layer_neurons + 1) + i] = 0.0f;
        }
    }
    // Bug fix: barrier added. Without it, other threads in the block could
    // atomicAdd into the row while thread 0 is still zeroing it (data race).
    __syncthreads();
    while (idx < n_of_images) {
        float error = errors[bdx*n_of_images + idx];
        for (int i = 0; i < n_of_third_layer_neurons + 1; ++i) {
            atomicAdd(&third_layer_correction[bdx*(n_of_third_layer_neurons + 1) + i], second_layer_output[i*n_of_images + idx] * error);
            atomicAdd(&error_multiplier[i*n_of_images + idx], error * third_layer_weights[bdx*(n_of_third_layer_neurons + 1) + i]);
        }
        idx += OptNofThreads;
    }
}
template <typename T>
// Gradient step for layer 3: block = class row, threads stride over columns.
// The pre-update weights are snapshotted into previous_third_layer so a later
// kernel can roll the step back.
__global__ void third_layer_weights_update(T* third_layer_weights, T* third_layer_correction, T* previous_third_layer, int n_of_images, int n_of_third_layer_neurons, float alpha) {
    const int row = blockIdx.x * (n_of_third_layer_neurons + 1);
    for (int col = threadIdx.x; col < n_of_third_layer_neurons + 1; col += OptNofThreads) {
        previous_third_layer[row + col] = third_layer_weights[row + col];
        third_layer_weights[row + col] -= alpha * third_layer_correction[row + col];
    }
}
template <typename T>
// Rollback for layer 3: restores the snapshot taken by third_layer_weights_update.
__global__ void third_layer_weights_back(T* third_layer_weights, T* previous_third_layer, int n_of_third_layer_neurons) {
    const int row = blockIdx.x * (n_of_third_layer_neurons + 1);
    for (int col = threadIdx.x; col < n_of_third_layer_neurons + 1; col += OptNofThreads) {
        third_layer_weights[row + col] = previous_third_layer[row + col];
    }
}
template <typename T>
// Backward pass for layer 2. 2-D grid strides: bdy over layer-3 neurons,
// bdx over layer-2 columns (+bias), threads over images. Accumulates the
// layer-2 weight gradient and builds sigmoid_multiplier for the layer-1 pass.
// NOTE(review): thread 0 of each block zeroes a strided subset of
// second_layer_correction while other blocks may already be atomicAdd-ing into
// entries zeroed by a different block — there is no inter-block barrier, so
// this relies on launch timing; confirm (a grid sync or separate memset kernel
// would be the safe form).
// NOTE(review): the += on sigmoid_multiplier is a plain read-modify-write hit
// by every bdy block for the same (bdx, idx) — racy unless gridDim.y == 1.
__global__ void get_second_layer_correction(T* errors, T* first_layer_output, T* second_layer_output, T* second_layer_weights, T* third_layer_weights, T* second_layer_correction, T* error_multiplier, T* sigmoid_multiplier,
int n_of_images, int n_of_second_layer_neurons, int n_of_third_layer_neurons, int n_of_models) {
int tdx = threadIdx.x;
int idx = threadIdx.x;
int bdx = blockIdx.x;
int bdy = blockIdx.y;
// Per-thread scratch; each thread only touches its own slot.
__shared__ T additions[OptNofThreads];
if (tdx == 0) {
while (bdy < n_of_third_layer_neurons) {
while (bdx < n_of_second_layer_neurons + 1) {
second_layer_correction[bdy*(n_of_second_layer_neurons + 1) + bdx] = 0.0f;
bdx += OptNofBlocksX;
}
if (bdx >= n_of_second_layer_neurons + 1) {
bdx = blockIdx.x;
}
bdy += OptNofBlocksY;
}
}
// Reset the stride cursors for the accumulation sweep.
bdx = blockIdx.x;
bdy = blockIdx.y;
while (bdy < n_of_third_layer_neurons) {
while (bdx < n_of_second_layer_neurons + 1) {
while (idx < n_of_images) {
additions[tdx] = 0;
// error * sigmoid'(layer-2 output) * layer-1 output = weight gradient term.
additions[tdx] = error_multiplier[bdy*n_of_images + idx] * (second_layer_output[bdy*n_of_images + idx] * (1 - second_layer_output[bdy*n_of_images + idx])) * first_layer_output[bdx*n_of_images + idx];
if (bdx < n_of_second_layer_neurons) {
sigmoid_multiplier[bdx*n_of_images + idx] += error_multiplier[bdy*n_of_images + idx] *
second_layer_output[bdy*n_of_images + idx] * (1 - second_layer_output[bdy*n_of_images + idx])*second_layer_weights[bdy*(n_of_second_layer_neurons + 1) + bdx];
}
atomicAdd(&second_layer_correction[bdy*(n_of_second_layer_neurons + 1) + bdx], additions[tdx]);
idx += OptNofThreads;
}
if (idx >= n_of_images) {
idx = tdx;
}
bdx += OptNofBlocksX;
}
if (bdx >= n_of_second_layer_neurons + 1) {
bdx = blockIdx.x;
}
bdy += OptNofBlocksY;
}
}
template <typename T>
// Gradient step for layer 2: one thread per weight (block = neuron row,
// thread = column). The old value is snapshotted for possible rollback.
__global__ void second_layer_weights_update(T* second_layer_weights, T* correction, T* previous_second_layer, float alpha, int n_of_second_layer_neurons) {
    const int w = blockIdx.x * (n_of_second_layer_neurons + 1) + threadIdx.x;
    previous_second_layer[w] = second_layer_weights[w];
    second_layer_weights[w] -= alpha * correction[w];
}
template <typename T>
// Rollback for layer 2: restores the snapshot taken by second_layer_weights_update.
__global__ void second_layer_weights_back(T* second_layer_weights, T* previous_second_layer, int n_of_second_layer_neurons) {
    const int w = blockIdx.x * (n_of_second_layer_neurons + 1) + threadIdx.x;
    second_layer_weights[w] = previous_second_layer[w];
}
template <typename T>
// Backward pass for layer 1. 2-D grid strides: bdy over layer-2 neurons,
// bdx over input pixels, threads over images. Accumulates
// sigmoid'(layer-1 output) * sigmoid_multiplier * pixel into the gradient.
// NOTE(review): __threadfence() is a memory fence, not a barrier — it does not
// make other blocks wait for the zeroing loop above, so the zero-then-add
// ordering across blocks is unsynchronised; confirm (a separate memset kernel
// would be the safe form).
__global__ void get_first_layer_correction(T* errors, T* images, T* first_layer_output, T* second_layer_output, T* third_layer_weights, T* second_layer_weights, T* first_layer_correction, T* sigmoid_multiplier,
int n_of_images, int n_of_neurons, int n_of_second_layer_neurons, int n_of_third_layer_neurons, int n_of_models) {
int idx = threadIdx.x;
int tdx = threadIdx.x;
int bdx = blockIdx.x;
int bdy = blockIdx.y;
if (tdx == 0) {
while (bdy < n_of_second_layer_neurons) {
while (bdx < n_of_neurons) {
first_layer_correction[bdy * n_of_neurons + bdx] = 0.0f;
bdx += OptNofBlocksX;
}
if (bdx >= n_of_neurons) {
bdx = blockIdx.x;
}
bdy += OptNofBlocksY;
}
}
__threadfence();
// Reset the stride cursors for the accumulation sweep.
bdx = blockIdx.x;
bdy = blockIdx.y;
while (bdy < n_of_second_layer_neurons) {
while (bdx < n_of_neurons) {
while (idx < n_of_images) {
float main_multiplier;
// sigmoid'(o) = o * (1 - o), scaled by the back-propagated multiplier.
main_multiplier = first_layer_output[bdy*n_of_images + idx] * (1 - first_layer_output[bdy*n_of_images + idx])* sigmoid_multiplier[bdy*n_of_images + idx];
atomicAdd(&first_layer_correction[bdy*n_of_neurons + bdx], main_multiplier*images[idx*n_of_neurons + bdx]);
idx += OptNofThreads;
}
if (idx >= n_of_images) {
idx = threadIdx.x;
}
bdx += OptNofBlocksX;
}
if (bdx >= n_of_neurons) {
bdx = blockIdx.x;
}
bdy += OptNofBlocksY;
}
}
template <typename T>
// Gradient step for layer 1: blocks stride over neuron rows, threads over
// input columns; the pre-update weights are snapshotted for rollback.
__global__ void first_layer_weights_update(T* first_layer_weights, T* first_layer_correction, T* previous_second_layer, float alpha, int n_of_neurons, int n_of_second_layer_neurons) {
    for (int row = blockIdx.x; row < n_of_second_layer_neurons; row += OptNofBlocks) {
        for (int col = threadIdx.x; col < n_of_neurons; col += OptNofThreads) {
            const int w = row * n_of_neurons + col;
            previous_second_layer[w] = first_layer_weights[w];
            first_layer_weights[w] -= alpha * first_layer_correction[w];
        }
    }
}
template <typename T>
// Rollback for layer 1: restores the snapshot taken by first_layer_weights_update.
__global__ void first_layer_weights_back(T* first_layer_weights, T* previous_second_layer, int n_of_neurons, int n_of_second_layer_neurons) {
    for (int row = blockIdx.x; row < n_of_second_layer_neurons; row += OptNofBlocks) {
        for (int col = threadIdx.x; col < n_of_neurons; col += OptNofThreads) {
            first_layer_weights[row * n_of_neurons + col] = previous_second_layer[row * n_of_neurons + col];
        }
    }
}
10,891 | /******************************************************************
File : lcsRedistributeParticles.cu
Author : Mingcheng Chen
Last Update : January 29th, 2013
*******************************************************************/
#include <stdio.h>
#define BLOCK_SIZE 512
// Three-way sign with a tolerance band: -1 below -eps, 1 above eps, else 0.
__device__ inline int Sign(double a, double eps) {
    if (a < -eps) return -1;
    return (a > eps) ? 1 : 0;
}
// Looks up the block-local ID of a tetrahedron inside a given block.
// startOffsetsInLocalIDMap[tetID .. tetID+1] delimits tetID's run in the
// parallel arrays blocksOfTets / localIDsOfTets; returns the local ID if
// blockID appears in that run, -1 otherwise.
// endOffset is fetched lazily so a first-entry hit costs one load fewer.
__device__ inline int GetLocalTetID(int blockID, int tetID,
int *startOffsetsInLocalIDMap,
int *blocksOfTets,
int *localIDsOfTets) { // blockID and tetID are all global IDs.
int offset = startOffsetsInLocalIDMap[tetID];
int endOffset = -1;
while (1) {
if (blocksOfTets[offset] == blockID) return localIDsOfTets[offset];
if (endOffset == -1) endOffset = startOffsetsInLocalIDMap[tetID + 1];
offset++;
if (offset >= endOffset) return -1;
}
}
// Flattens (x, y, z) block coordinates into a linear block ID
// (x-major, then y, then z).
__device__ inline int GetBlockID(int x, int y, int z, int numOfBlocksInY, int numOfBlocksInZ) {
    int xy = x * numOfBlocksInY + y;
    return xy * numOfBlocksInZ + z;
}
// One thread per active particle. Maps the particle's position to a spatial
// block, resolves its exit tetrahedron's local ID inside that block (probing
// the epsilon-neighbouring blocks when the point sits on a block boundary),
// records local IDs / block locations, and registers each block touched this
// round exactly once in activeBlocks using a mark-and-CAS-like atomic scheme.
__global__ void CollectActiveBlocksKernel(int *activeParticles,
int *exitCells,
double *placesOfInterest,
int *localTetIDs,
int *blockLocations,
int *interestingBlockMap,
int *startOffsetsInLocalIDMap,
int *blocksOfTets,
int *localIDsOfTets,
int *interestingBlockMarks,
int *activeBlocks,
int *activeBlockIndices,
int *numOfActiveBlocks, // Initially 0
int mark,
int numOfActiveParticles, //int numOfStages,
int numOfBlocksInX, int numOfBlocksInY, int numOfBlocksInZ,
double globalMinX, double globalMinY, double globalMinZ,
double blockSize,
double epsilon) {
int globalID = blockDim.x * blockIdx.x + threadIdx.x;
if (globalID < numOfActiveParticles) {
int particleID = activeParticles[globalID];
double posX = placesOfInterest[particleID * 3];
double posY = placesOfInterest[particleID * 3 + 1];
double posZ = placesOfInterest[particleID * 3 + 2];
// Grid cell containing the particle (uniform blocks of edge blockSize).
int x = (int)((posX - globalMinX) / blockSize);
int y = (int)((posY - globalMinY) / blockSize);
int z = (int)((posZ - globalMinZ) / blockSize);
// Intuitive block ID
int blockID = GetBlockID(x, y, z, numOfBlocksInY, numOfBlocksInZ);
int tetID = exitCells[particleID];
int localTetID = GetLocalTetID(blockID, tetID, startOffsetsInLocalIDMap, blocksOfTets, localIDsOfTets);
/// DEBUG ///
/*
if (particleID == 303)
printf("x, y, z: %d %d %d\n", x, y, z);
*/
// Not found in the home block: the point may lie (within 10*epsilon) on a
// block face/edge/corner, so probe up to 2x2x2 neighbouring blocks.
if (localTetID == -1) {
int dx[3], dy[3], dz[3];
int lx = 1, ly = 1, lz = 1;
dx[0] = dy[0] = dz[0] = 0;
double xLower = globalMinX + x * blockSize;
double yLower = globalMinY + y * blockSize;
double zLower = globalMinZ + z * blockSize;
if (!Sign(xLower - posX, 10 * epsilon)) dx[lx++] = -1;
if (!Sign(yLower - posY, 10 * epsilon)) dy[ly++] = -1;
if (!Sign(zLower - posZ, 10 * epsilon)) dz[lz++] = -1;
if (!Sign(xLower + blockSize - posX, 10 * epsilon)) dx[lx++] = 1;
if (!Sign(yLower + blockSize - posY, 10 * epsilon)) dy[ly++] = 1;
if (!Sign(zLower + blockSize - posZ, 10 * epsilon)) dz[lz++] = 1;
// Check every necessary neightbor
for (int i = 0; localTetID == -1 && i < lx; i++)
for (int j = 0; localTetID == -1 && j < ly; j++)
for (int k = 0; k < lz; k++) {
if (i + j + k == 0) continue;
int _x = x + dx[i];
int _y = y + dy[j];
int _z = z + dz[k];
if (_x < 0 || _y < 0 || _z < 0 ||
_x >= numOfBlocksInX || _y >= numOfBlocksInY || _z >= numOfBlocksInZ)
continue;
blockID = GetBlockID(_x, _y, _z, numOfBlocksInY, numOfBlocksInZ);
/// DEBUG ///
// if (particleID == 303 && tetID == 6825504) printf("_x = %d, _y = %d, _z = %d, blockID = %d\n", _x, _y, _z, blockID);
localTetID = GetLocalTetID(blockID, tetID, startOffsetsInLocalIDMap,
blocksOfTets, localIDsOfTets);
if (localTetID != -1) break;
}
/// DEBUG ///
if (localTetID == -1) {
/*
if (particleID == 303) {
printf("%lf %lf %lf\n", posX, posY, posZ);
printf("tetID = %d\n", tetID);
printf("%lf %lf %lf\n", xLower, yLower, zLower);
for (int i = 0; i < lx; i++)
printf(" %d", dx[i]);
printf("\n");
for (int i = 0; i < ly; i++)
printf(" %d", dy[i]);
printf("\n");
for (int i = 0; i < lz; i++)
printf(" %d", dz[i]);
printf("\n");
}
return;
*/
// NOTE(review): deliberate infinite spin on lookup failure so the hang is
// visible under a debugger — confirm this is acceptable outside development.
while (1);
}
}
// localTetID must not be -1 at that point.
localTetIDs[particleID] = localTetID;
int interestingBlockID = interestingBlockMap[blockID];
blockLocations[particleID] = interestingBlockID;
// Mark-based once-per-round registration: only the thread that raises the
// block's mark up to `mark` appends it to activeBlocks; over-raisers undo.
int oldMark = atomicAdd(interestingBlockMarks + interestingBlockID, 0);
int index;
if (oldMark < mark) {
int delta = mark - oldMark;
int newMark = atomicAdd(interestingBlockMarks + interestingBlockID, delta);
if (newMark >= mark)
atomicAdd(interestingBlockMarks + interestingBlockID, -delta);
else {
index = atomicAdd(numOfActiveBlocks, 1);
activeBlocks[index] = interestingBlockID;
activeBlockIndices[interestingBlockID] = index;
}
}
// This one cannot be calculated in that kernel
//activeBlockOfParticles[particleID] = index;
}
}
// Counting pass: each active particle atomically bumps the counter of its
// (active block, stage) bucket and records its arrival order within that
// bucket for the later scatter pass.
__global__ void GetNumOfParticlesByStageInBlocksKernel(int *numOfParticlesByStageInBlocks,
int *particleOrders,
int *stages,
int *activeParticles,
int *blockLocations,
int *activeBlockIndices,
int numOfStages, int numOfActiveParticles) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= numOfActiveParticles) return;
    const int pid = activeParticles[gid];
    const int bucket = activeBlockIndices[blockLocations[pid]] * numOfStages + stages[pid];
    particleOrders[pid] = atomicAdd(&numOfParticlesByStageInBlocks[bucket], 1);
}
// Scatter pass: with numOfParticlesByStageInBlocks now holding the prefix
// sums of the counting pass, each particle lands at bucket base + its
// recorded arrival order.
__global__ void CollectParticlesToBlocksKernel(int *numOfParticlesByStageInBlocks, // Now it is a prefix sum array.
int *particleOrders,
int *stages,
int *activeParticles,
int *blockLocations,
int *activeBlockIndices,
int *blockedParticleList,
int numOfStages, int numOfActiveParticles
) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= numOfActiveParticles) return;
    const int pid = activeParticles[gid];
    const int activeBlock = activeBlockIndices[blockLocations[pid]];
    const int base = numOfParticlesByStageInBlocks[activeBlock * numOfStages + stages[pid]];
    blockedParticleList[base + particleOrders[pid]] = pid;
}
extern "C"
// Host wrapper: launches CollectActiveBlocksKernel over all active particles
// (one thread each) and synchronously checks the launch.
void CollectActiveBlocks(int *activeParticles,
int *exitCells,
double *placesOfInterest,
int *localTetIDs,
int *blockLocations,
int *interestingBlockMap,
int *startOffsetsInLocalIDMap,
int *blocksOfTets,
int *localIDsOfTets,
int *interestingBlockMarks,
int *activeBlocks,
int *activeBlockIndices,
int *numOfActiveBlocks, // Initially 0
int mark,
int numOfActiveParticles, //int numOfStages,
int numOfBlocksInX, int numOfBlocksInY, int numOfBlocksInZ,
double globalMinX, double globalMinY, double globalMinZ,
double blockSize,
double epsilon) {
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid((numOfActiveParticles - 1) / dimBlock.x + 1, 1, 1);
    CollectActiveBlocksKernel<<<dimGrid, dimBlock>>>(activeParticles, exitCells, placesOfInterest, localTetIDs, blockLocations, interestingBlockMap,
                                                     startOffsetsInLocalIDMap, blocksOfTets, localIDsOfTets, interestingBlockMarks, activeBlocks,
                                                     activeBlockIndices, numOfActiveBlocks, // Initially 0
                                                     mark, numOfActiveParticles, numOfBlocksInX, numOfBlocksInY, numOfBlocksInZ,
                                                     globalMinX, globalMinY, globalMinZ, blockSize, epsilon);
    cudaError_t err = cudaDeviceSynchronize();
    if (err) {
        // Bug fix: the error string was computed and discarded, so failures
        // aborted without any diagnostic; print it before exiting.
        fprintf(stderr, "CollectActiveBlocksKernel failed: %s\n", cudaGetErrorString(err));
        exit(0);
    }
}
extern "C"
// Host wrapper for the per-(block, stage) counting kernel.
void GetNumOfParticlesByStageInBlocks(int *numOfParticlesByStageInBlocks,
int *particleOrders,
int *stages,
int *activeParticles,
int *blockLocations,
int *activeBlockIndices,
int numOfStages, int numOfActiveParticles) {
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid((numOfActiveParticles - 1) / dimBlock.x + 1, 1, 1);
    GetNumOfParticlesByStageInBlocksKernel<<<dimGrid, dimBlock>>>(numOfParticlesByStageInBlocks, particleOrders, stages, activeParticles, blockLocations,
                                                                  activeBlockIndices, numOfStages, numOfActiveParticles);
    cudaError_t err = cudaDeviceSynchronize();
    if (err) {
        // Bug fix: the error string was computed and discarded; print it.
        fprintf(stderr, "GetNumOfParticlesByStageInBlocksKernel failed: %s\n", cudaGetErrorString(err));
        exit(0);
    }
}
extern "C"
// Host wrapper for the scatter kernel (expects the prefix-summed counts).
void CollectParticlesToBlocks(int *numOfParticlesByStageInBlocks, // Now it is a prefix sum array.
int *particleOrders,
int *stages,
int *activeParticles,
int *blockLocations,
int *activeBlockIndices,
int *blockedParticleList,
int numOfStages, int numOfActiveParticles) {
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid((numOfActiveParticles - 1 ) / dimBlock.x + 1, 1, 1);
    CollectParticlesToBlocksKernel<<<dimGrid, dimBlock>>>(numOfParticlesByStageInBlocks, // Now it is a prefix sum array.
                                                          particleOrders, stages, activeParticles, blockLocations, activeBlockIndices, blockedParticleList,
                                                          numOfStages, numOfActiveParticles);
    cudaError_t err = cudaDeviceSynchronize();
    if (err) {
        // Bug fix: the error string was computed and discarded; print it.
        fprintf(stderr, "CollectParticlesToBlocksKernel failed: %s\n", cudaGetErrorString(err));
        exit(0);
    }
}
|
10,892 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <time.h>
struct timespec start, end;
long long int time_taken;
// Compares the candidate against the four hard-coded passwords, printing a
// message for every full match. Returns 1 only when the FOURTH password
// ("FD1223") matches completely, 0 otherwise — exactly as the original's
// four unrolled comparison loops behaved.
__device__ int is_a_match(char *password_crack)
{
    const char targets[4][7] = { "AA9996", "AS1234", "LK9091", "FD1223" };
    for (int p = 0; p < 4; p++) {
        const char *guess  = password_crack;
        const char *target = targets[p];
        while (*guess == *target) {
            if (*guess == '\0') {                 // matched through the terminator
                printf("***match found*** %s\n", targets[p]);
                if (p == 3) return 1;             // only the 4th match returns success
                break;
            }
            guess++;
            target++;
        }
    }
    return 0;
}
// Brute force: each (block, thread) pair fixes the two leading letters of a
// 6-character password ('A'+blockIdx.x, 'A'+threadIdx.x) and enumerates all
// 10^4 digit suffixes, testing each candidate against the stored passwords.
__global__ void kernel()
{
    char password[7];
    password[6] = '\0';
    password[0] = (char)(blockIdx.x + 65);   // first letter
    password[1] = (char)(threadIdx.x + 65);  // second letter
    for (char d0 = '0'; d0 <= '9'; d0++)
        for (char d1 = '0'; d1 <= '9'; d1++)
            for (char d2 = '0'; d2 <= '9'; d2++)
                for (char d3 = '0'; d3 <= '9'; d3++) {
                    password[2] = d0;
                    password[3] = d1;
                    password[4] = d2;
                    password[5] = d3;
                    // Matches are reported via printf inside is_a_match;
                    // the return value is not needed here.
                    is_a_match(password);
                }
}
// Computes (end - start) in nanoseconds into *difference, normalising a
// negative nanosecond component by borrowing one second.
// Returns 0 when the difference is positive, 1 otherwise.
int t_difference(struct timespec *start, struct timespec *end, long long int *difference)
{
    long long int sec  = end->tv_sec  - start->tv_sec;
    long long int nsec = end->tv_nsec - start->tv_nsec;
    if (nsec < 0) {
        sec -= 1;               // borrow a second
        nsec += 1000000000;
    }
    *difference = sec * 1000000000 + nsec;
    return !(*difference > 0);
}
// Times the brute-force search: 26x26 launch covers every two-letter prefix
// (one block per first letter, one thread per second letter). Uses the
// file-scope timespec globals and t_difference for the elapsed nanoseconds.
int main() {
clock_gettime(CLOCK_MONOTONIC, &start);
kernel <<<26,26>>>();
// Wait for the kernel (and its device-side printf output) to finish
// before taking the end timestamp.
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
t_difference(&start, &end, &time_taken);
printf("Total time taken was %lldns or %0.9lfs\n", time_taken, (time_taken/1.0e9));
return 0;
}
|
10,893 | #include <stdio.h>
#include <assert.h>
void checkCUDAError(const char *msg);
// Rank sort: each thread counts, across the whole array (streamed block by
// block through shared memory), how many elements precede its own value —
// breaking ties by block/position so duplicates get distinct ranks — and then
// writes its value directly at that rank in d_b.
// Requires 2 * blockDim.x * sizeof(int) dynamic shared memory per block
// (matching the host-side sharedMemSize).
__global__ void ranksort(int *d_a, int *d_b)
{
// Bug fix: the shared buffer was declared float although it stores int
// values from d_a; ints above 2^24 are not exactly representable as float,
// so comparisons could mis-rank large inputs. Declare it int.
extern __shared__ int sdata[];
int tid=threadIdx.x;
int current=d_a[blockDim.x*blockIdx.x+tid], current_order=0;
// Blocks BEFORE ours: earlier duplicates rank ahead of us (<=).
for(int i=0; i<blockIdx.x; i++){
sdata[tid]=sdata[tid+blockDim.x]=d_a[tid+blockDim.x*i];
__syncthreads();
for(int j=0; j<blockDim.x; j++) /* note for duplicate number */
if(sdata[tid+j]<=current)current_order++;
__syncthreads();
}
// Blocks AFTER ours: later duplicates rank behind us (<).
for(int i=blockIdx.x+1; i<gridDim.x; i++){
sdata[tid]=sdata[tid+blockDim.x]=d_a[tid+blockDim.x*i];
__syncthreads();
for(int j=0; j<blockDim.x; j++) /* note for duplicate number */
if(sdata[tid+j]<current)current_order++;
__syncthreads();
}
// Our own block: ties broken by position via the doubled buffer.
sdata[tid]=sdata[tid+blockDim.x]=current;
__syncthreads();
for(int j=0; j<blockDim.x; j++) /* note for duplicate number */
if(sdata[tid+j]<current || (sdata[tid+j]==current && tid+j>=blockDim.x))
current_order++;
__syncthreads();
d_b[current_order]=current;
}
// Prints the first n ints of v, space-separated, followed by a newline.
void vector_print(int *v, int n)
{
    for (int idx = 0; idx < n; idx++) {
        printf("%d ", v[idx]);
    }
    printf("\n");
}
// Driver: fills h_a with a descending sequence, rank-sorts it on the GPU
// (256 blocks x 256 threads, 2*256 ints of shared memory), and asserts the
// result is non-decreasing.
int main( int argc, char** argv)
{
    int *h_a;
    int *d_a;
    int *d_b;
    int numBlocks = 256;
    int numThreadsPerBlock = 256;
    int numThreads = numBlocks*numThreadsPerBlock;
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    cudaMalloc((void**)&d_a, memSize);
    cudaMalloc((void**)&d_b, memSize);
    for(int i=0; i<numThreads; i++){
        h_a[i]=numThreads-i;    // worst case: strictly descending input
    }
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    size_t sharedMemSize=numThreadsPerBlock*2*sizeof(int);
    cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
    ranksort<<<dimGrid, dimBlock, sharedMemSize>>>(d_a, d_b);
    // Bug fix: cudaThreadSynchronize() has been deprecated for years;
    // cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
    checkCUDAError("kernel execution");
    cudaMemcpy(h_a, d_b, memSize, cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy");
    for(int i = 1; i < numBlocks*numThreadsPerBlock; i++){
        assert(h_a[i] >= h_a[i-1]);
    }
    cudaFree(d_a);
    cudaFree(d_b);
    free(h_a);
    return 0;
}
// Aborts with a labelled message if the CUDA runtime has a pending error.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess) return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
    exit(-1);
}
|
10,894 | #include "includes.h"
// Binary threshold: moddedimage[i] = 255 where image[i] > threshold, else 0.
// One thread per pixel with a grid-tail guard.
__global__ void threshKernel(unsigned char * image, unsigned char* moddedimage, int size, int threshold)
{
    const int pixel = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixel >= size) return;
    moddedimage[pixel] = (image[pixel] > threshold) ? 255 : 0;
}
10,895 | #include "includes.h"
// Permutes a cuDNN-ordered buffer (outFeature, inFeature, y, x — with the
// "many" scale folded into inFeatures) into PV layout
// (outFeature, y, x, inFeature). One thread per destination element.
__global__ void CudaPermuteCudnnToPV( float *dest, float *src, int outFeatures, int ny, int nx, int inFeatures, int manyScaleX, int manyScaleY) {
// parameter dimensions are in dest PV format
int srcNx = nx / manyScaleX;
int srcNy = ny / manyScaleY;
int srcInFeatures = inFeatures * manyScaleX * manyScaleY;
int kDest = (blockIdx.x * blockDim.x) + threadIdx.x;
if (kDest < outFeatures * ny * nx * inFeatures) {
// Decompose the flat dest index into (outFeature, y, x, inFeature).
int kOF = kDest / (ny * nx * inFeatures);
int kY = (kDest % (ny * nx * inFeatures)) / (nx * inFeatures);
int kX = (kDest % (nx * inFeatures)) / inFeatures;
int kIF = (kDest % inFeatures);
// Recalculate x, y, and f based on manyScale
kIF = kIF + inFeatures * (kX % manyScaleX + (kY % manyScaleY) * manyScaleX);
kX = kX / manyScaleX;
kY = kY / manyScaleY;
// Source strides for (outFeature, inFeature, y, x) ordering.
int sOF = srcInFeatures * srcNy * srcNx;
int sIF = srcNy * srcNx;
int sY = srcNx;
int kSrc = kOF * sOF + kIF * sIF + kY * sY + kX;
dest[kDest] = src[kSrc];
}
} |
10,896 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <utility>
using namespace std;
// One Jacobi sweep: row r computes
//   x_now[r] = (b[r] - sum_{c != r} A[r][c] * x_prev[c]) / A[r][r].
// A is row-major; one thread per row with a grid-tail guard.
__global__ void jacobiKernel(float* A, float* b, int matrix_size, float* x_prev, float* x_now)
{
    const int r = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= matrix_size) return;
    const float* row = A + matrix_size * r;
    float acc = b[r];
    for (int c = 0; c < matrix_size; ++c) {
        if (c != r) acc -= row[c] * x_prev[c];   // skip the diagonal term
    }
    x_now[r] = acc / row[r];
}
// Driver: solves A x = b by 20000 Jacobi iterations with ping-pong device
// buffers. Input file format: n, then n rows of n coefficients followed by
// the right-hand-side entry for that row.
int main(int argc, char *argv[])
{
    // Bug fix: argv[1]/fopen were used unchecked — a missing argument or
    // file crashed in fscanf instead of failing with a message.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_file>\n", argv[0]);
        return 1;
    }
    FILE* matrix_file = fopen(argv[1], "r");
    if (matrix_file == NULL) {
        fprintf(stderr, "cannot open %s\n", argv[1]);
        return 1;
    }
    int matrix_size = 0, iter_count = 20000;
    fscanf(matrix_file, "%d", &matrix_size);
    float* A = new float[matrix_size * matrix_size];
    float* b = new float[matrix_size];
    float* x = new float[matrix_size];
    float* A_cuda, *b_cuda, *x_prev_cuda, *x_now_cuda;
    assert( cudaSuccess == cudaMalloc((void **) &A_cuda, matrix_size * matrix_size * sizeof(float)));
    assert( cudaSuccess == cudaMalloc((void **) &b_cuda, matrix_size * sizeof(float)));
    assert( cudaSuccess == cudaMalloc((void **) &x_prev_cuda, matrix_size * sizeof(float)));
    assert( cudaSuccess == cudaMalloc((void **) &x_now_cuda, matrix_size * sizeof(float)));
    for (int i = 0; i < matrix_size; i++) {
        x[i] = 0;                                 // initial guess: zero vector
        for (int j = 0; j < matrix_size; j++) {
            fscanf(matrix_file, "%f", &A[i* matrix_size + j]);
        }
        fscanf(matrix_file, "%f", &b[i]);
    }
    fclose(matrix_file);                          // bug fix: handle was leaked
    cudaMemcpy(A_cuda, A, sizeof(float) * matrix_size * matrix_size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_cuda, b, sizeof(float) * matrix_size, cudaMemcpyHostToDevice);
    cudaMemcpy(x_prev_cuda, x, sizeof(float) * matrix_size, cudaMemcpyHostToDevice);
    cudaMemcpy(x_now_cuda, x, sizeof(float) * matrix_size, cudaMemcpyHostToDevice);
    int blocks_count = 32;
    // NOTE(review): threads per block exceeds the 1024 hardware limit once
    // matrix_size > 32736 — confirm expected problem sizes.
    int threads_count = matrix_size / blocks_count + 1;
    for (int i = 0; i < iter_count; i++) {
        // Ping-pong: read x_prev, write x_now, then swap the pointers.
        jacobiKernel<<< blocks_count, threads_count >>>(A_cuda, b_cuda, matrix_size, x_prev_cuda, x_now_cuda);
        swap(x_prev_cuda, x_now_cuda);
    }
    cudaMemcpy(x, x_prev_cuda, sizeof(float) * matrix_size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < matrix_size; i++) {
        printf("%f ", x[i]);
    }
    cudaFree(A_cuda);
    cudaFree(b_cuda);
    cudaFree(x_prev_cuda);
    cudaFree(x_now_cuda);
    delete[] A;
    delete[] b;
    delete[] x;
    return 0;
}
|
10,897 | #include <stdio.h>
__device__ __constant__ char d_coef[2];
// Overwrite each element of `a` with the first character stored in the
// device __constant__ array d_coef. The `b` argument is accepted for the
// launch-site signature but never used. One thread per element; assumes a
// single block (only threadIdx.x is consulted).
__global__
void add_arrays(char *a, int *b)
{
    int tid = threadIdx.x;
    a[tid] = d_coef[0];
}
/*
char g_coef[2]={'a','b'};
void pre()
{
cudaMemcpyToSymbol(d_coef,g_coef,sizeof(char)*2);
}
*/
void pre();
#define N 7
/*
 * Demo of __constant__ device memory: fills the string `a` with the first
 * byte of d_coef (loaded by pre()) and prints the result.
 */
int main()
{
    pre();  // copies g_coef into the device __constant__ array d_coef
    char a[N] = "Hello ";
    int b[N] = {15, 10, 6, 0, -11, 1, 0};
    char *ad;
    int *bd;
    const int csize = N * sizeof(char);
    const int isize = N * sizeof(int);
    // Allocate device buffers and transfer the host data; check the
    // allocations instead of passing a garbage pointer to cudaMemcpy.
    if (cudaSuccess != cudaMalloc((void**)&ad, csize) ||
        cudaSuccess != cudaMalloc((void**)&bd, isize)) {
        fprintf(stderr, "ERROR: cudaMalloc failed\n");
        return 1;
    }
    cudaMemcpy(ad, a, csize, cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, isize, cudaMemcpyHostToDevice);
    // One block of N threads; each thread rewrites one character of a.
    dim3 dimBlock(N);
    dim3 dimGrid(1);
    add_arrays<<<dimGrid, dimBlock>>>(ad, bd);
    // Blocking copy back implicitly waits for the kernel to finish.
    cudaMemcpy(a, ad, csize, cudaMemcpyDeviceToHost);
    cudaFree(ad);
    cudaFree(bd);  // was leaked in the original
    // Display the results
    printf("%s\n", a);
    // Return 0 directly: EXIT_SUCCESS lives in <stdlib.h>, which this
    // translation unit does not include (only <stdio.h>).
    return 0;
}
|
10,898 | #include <stdio.h>
#define BLOCKSIZE 16
// Tiled pairwise squared-distance kernel (a matrix-multiply variant):
//   devC[r][c] = sum_k (devA[r][k] - devB[k][c])^2
// devA is rows x K, devB is K x cols, devC is rows x cols, all row-major.
// Launch with BLOCKSIZE x BLOCKSIZE thread blocks covering devC; a
// grid of ceil(cols/BLOCKSIZE) x ceil(rows/BLOCKSIZE) blocks is assumed.
// NOTE(review): despite the comment below, no alpha/beta scaling is
// applied — the raw squared distance is stored.
__global__ void distKernel(float *devA, float *devB, float *devC, int rows, int cols, int K)
{
//Get the thread's x and y locations for its run
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
//Allocate shared memory to hold parts of A and B
__shared__ float tileA[BLOCKSIZE][BLOCKSIZE];
__shared__ float tileB[BLOCKSIZE][BLOCKSIZE];
//Use sum to get the result for a specific element
float sum = 0.0;
//Use iter to see if the loop should be run again (counts K-dimension tiles)
int iter = 0;
do{
//Check if the x thread falls within bounds of the matrices;
//out-of-range lanes load 0 so the padded terms cancel in the subtraction
if ((idy < rows) && (threadIdx.x + BLOCKSIZE*iter < K)){
tileA[threadIdx.y][threadIdx.x] = devA[threadIdx.x + idy*K + BLOCKSIZE*iter];
}
else {
tileA[threadIdx.y][threadIdx.x] = 0.0;
}
//Check if the y thread falls within bounds of the matrices
if ((threadIdx.y + BLOCKSIZE*iter < K) && (idx < cols)){
tileB[threadIdx.y][threadIdx.x] = devB[idx + (threadIdx.y + BLOCKSIZE*iter)*cols];
}
else {
tileB[threadIdx.y][threadIdx.x] = 0.0;
}
//Sync to ensure that all of the data has been grabbed for the tiles in this block
__syncthreads();
//Sum the squared distance between the terms of this tile
for (int i = 0; i < BLOCKSIZE; i++){
sum += (tileA[threadIdx.y][i] - tileB[i][threadIdx.x])*(tileA[threadIdx.y][i] - tileB[i][threadIdx.x]);
}
//Iterate the number done
iter++;
//Sync the threads again so no thread overwrites the tiles while a
//neighbor is still reading them in the loop above
__syncthreads();
//Check if the tiles have covered all of C
} while (BLOCKSIZE*iter < K);
//If the thread falls within the matrix C, fill in its element, scaled by alpha and beta
if ((idy < rows) && (idx < cols)){
devC[idx + idy*cols] = sum;
}
}
|
10,899 | #include "includes.h"
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// Rewritten as a grid-stride loop: the original made EVERY thread serially
// write the whole output, so a multi-thread launch did n * numThreads
// redundant (and concurrently racing, though same-valued) stores. The
// grid-stride form partitions the work across threads, is correct for any
// launch configuration, and with a single thread degenerates to the
// original serial loop — so results are unchanged for all callers.
__global__ void vector_addition (int *a, int *b, int *c, int n) {
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        c[i] = a[i] + b[i];
    }
}
10,900 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Fall 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
/*
 * Mark which edges survive filtering: keepEdges[e] is set to 1 when both
 * endpoints of edge e (src[e], dst[e]) are still unmatched (matches == -1),
 * and 0 otherwise. Grid-stride loop, so any launch configuration covers
 * all numEdges edges.
 */
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
    /** YOUR CODE GOES BELOW **/
    int stride = gridDim.x * blockDim.x;
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    for (int e = first; e < numEdges; e += stride) {
        bool bothFree = (matches[src[e]] == -1) && (matches[dst[e]] == -1);
        keepEdges[e] = bothFree ? 1 : 0;
    }
    /** YOUR CODE GOES ABOVE **/
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.