serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
12,201 | /*
sample to show the matrix multiply. it is for square.
need a little bit modification for rectangle.
*/
#include <iostream>
using namespace std;
// Print a readable diagnostic and abort when a CUDA runtime call fails.
// err:  status returned by a CUDA runtime call
// file/line: call site, supplied by the HANDLE_ERROR macro.
static void HandleError( cudaError_t err,const char *file, int line ) {
    if (err != cudaSuccess) {
        // Separate the message parts; the original ran them together
        // (e.g. "invalid argumentfoo.cu42").
        cout << cudaGetErrorString(err) << " in " << file << " at line " << line << endl;
        exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
const int DIM = 4;
const int block_size = 4;
// Tiled matrix multiply for square matrices: C = A * B (row-major),
// edge length DIM * block_size.
// Launch layout: grid (DIM, DIM), block (block_size, block_size);
// each block computes one block_size x block_size tile of C.
__global__ void multi(int * A, int * B, int * C)
{
    // Shared-memory staging tiles for one sub-matrix of A and of B.
    __shared__ int As[block_size][block_size];
    __shared__ int Bs[block_size][block_size];
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int len = DIM * block_size;           // full matrix edge length
    // First sub-matrix of A for this block: start of row by*block_size.
    // (The original used len * by, dropping the block_size factor; the bug
    // was masked because the host fills each matrix with one constant.)
    int aBegin = len * block_size * by;
    int aEnd = aBegin + len - 1;          // last sub-matrix of A for this block
    int aStep = block_size;               // step through A's sub-matrices
    int bBegin = block_size * bx;         // first sub-matrix of B for this block
    int bStep = block_size * len;         // step through B's sub-matrices
    // Accumulator for this thread's C element; int, since all operands are
    // int (the original used float, forcing needless int<->float conversion).
    int Csub = 0;
    // Walk matching sub-matrices of A (rightward) and B (downward).
    for (int a = aBegin, b = bBegin;
            a <= aEnd;
            a += aStep, b += bStep)
    {
        // Stage one tile each of A and B (one element per thread).
        As[ty][tx] = A[a + len * ty + tx];
        Bs[ty][tx] = B[b + len * ty + tx];
        __syncthreads();   // tiles fully loaded before use
        for (int k = 0; k < block_size; ++k)
            Csub += As[ty][k] * Bs[k][tx];
        __syncthreads();   // finish reading tiles before the next load
    }
    // Global index of this thread's output element.
    int c = len * (by * block_size + ty) + block_size * bx + tx;
    C[c] = Csub;
}
// Host driver: fills two (DIM*block_size)^2 int matrices with the constants
// 2 and 5, multiplies them on the GPU and prints the product (every element
// should equal 2*5*DIM*block_size).
int main()
{
    int * inGlobe;
    int * in2Globe;
    int * outGlobe;
    const int total = DIM * DIM * block_size * block_size;  // elements per matrix
    HANDLE_ERROR(cudaMalloc((void**)&inGlobe, total * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void**)&in2Globe, total * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void**)&outGlobe, total * sizeof(int)));
    int tmp[DIM * DIM * block_size * block_size];
    // Row stride is block_size*block_size.  The original indexed with
    // i*DIM*DIM, which only works because DIM == block_size in this sample.
    for (int i = 0; i < DIM * DIM; ++i)
        for (int j = 0; j < block_size * block_size; ++j)
            tmp[i * block_size * block_size + j] = 2;
    HANDLE_ERROR(cudaMemcpy(inGlobe, tmp, total * sizeof(int), cudaMemcpyHostToDevice));
    for (int i = 0; i < DIM * DIM; ++i)
        for (int j = 0; j < block_size * block_size; ++j)
            tmp[i * block_size * block_size + j] = 5;
    HANDLE_ERROR(cudaMemcpy(in2Globe, tmp, total * sizeof(int), cudaMemcpyHostToDevice));
    for (int i = 0; i < DIM * DIM; ++i)
        for (int j = 0; j < block_size * block_size; ++j)
            tmp[i * block_size * block_size + j] = 0;
    HANDLE_ERROR(cudaMemcpy(outGlobe, tmp, total * sizeof(int), cudaMemcpyHostToDevice));
    dim3 bdim(DIM, DIM);               // one block per output tile
    dim3 tdim(block_size, block_size); // one thread per tile element
    multi<<<bdim, tdim>>>(inGlobe, in2Globe, outGlobe);
    HANDLE_ERROR(cudaGetLastError());      // catch launch-configuration errors
    HANDLE_ERROR(cudaDeviceSynchronize()); // wait for the kernel BEFORE reading results
    HANDLE_ERROR(cudaMemcpy(tmp, outGlobe, total * sizeof(int), cudaMemcpyDeviceToHost));
    for (int i = 0; i < DIM * DIM; ++i)
    {
        for (int j = 0; j < block_size * block_size; ++j)
            cout << " " << tmp[i * block_size * block_size + j];
        cout << endl;
    }
    // Release device memory (the original leaked all three buffers).
    HANDLE_ERROR(cudaFree(inGlobe));
    HANDLE_ERROR(cudaFree(in2Globe));
    HANDLE_ERROR(cudaFree(outGlobe));
    int k;
    cin >> k;   // keep the console window open
    return 0;
}
|
12,202 | /*
Programa C CUDA
@author Juan Manuel Tortajada
@mail ai.robotics.inbox@gmail.com
*/
#include <iostream>
#include <sys/time.h>
// Element-wise vector operations in a single kernel pass:
//   C = A + B,   F = D - E,   G = K .* H
// n: vector length; one thread per element.
__global__
void operar_vectores_GPU( int n, float *A, float *B, float *C, float *D, float *E, float *F, float *G, float *H, float *K ){
int indice_hilo_unico = ( blockIdx.x * blockDim.x ) + threadIdx.x;
/*
Bounds guard: when the vector length is not a multiple of the threads per
block, this keeps the excess threads from touching memory beyond the end
of the vectors (buffer overrun).
*/
if(indice_hilo_unico < n){
C[indice_hilo_unico] = A[indice_hilo_unico] + B[indice_hilo_unico];
F[indice_hilo_unico] = D[indice_hilo_unico] - E[indice_hilo_unico];
G[indice_hilo_unico] = K[indice_hilo_unico] * H[indice_hilo_unico];
}
}
// Host driver: allocates nine N-element vectors, runs three element-wise
// operations on the GPU (C=A+B, F=D-E, G=K.*H), times the kernel with CUDA
// events and spot-checks the first 10 results of each output vector.
int main(void){
    float tiempo_transcurrido_ms;   // elapsed GPU time [ms]
    cudaEvent_t inicio, fin;
    cudaEventCreate(&inicio);
    cudaEventCreate(&fin);
    int N = 1<<20; // 1,048,576 elements (original comment said 1 048 574)
    float *A, *B, *C, *D, *E, *F, *G, *H, *K;                    // host vectors
    float *d_A, *d_B, *d_C, *d_D, *d_E, *d_F, *d_G, *d_H, *d_K; // device vectors
    // Host allocations
    A = (float *)malloc( N*sizeof(float) );
    B = (float *)malloc( N*sizeof(float) );
    C = (float *)malloc( N*sizeof(float) );
    D = (float *)malloc( N*sizeof(float) );
    E = (float *)malloc( N*sizeof(float) );
    F = (float *)malloc( N*sizeof(float) );
    G = (float *)malloc( N*sizeof(float) );
    H = (float *)malloc( N*sizeof(float) );
    K = (float *)malloc( N*sizeof(float) );
    // Device allocations
    cudaMalloc( &d_A, N*sizeof(float) );
    cudaMalloc( &d_B, N*sizeof(float) );
    cudaMalloc( &d_C, N*sizeof(float) );
    cudaMalloc( &d_D, N*sizeof(float) );
    cudaMalloc( &d_E, N*sizeof(float) );
    cudaMalloc( &d_F, N*sizeof(float) );
    cudaMalloc( &d_G, N*sizeof(float) );
    cudaMalloc( &d_H, N*sizeof(float) );
    cudaMalloc( &d_K, N*sizeof(float) );
    // Random init in [0, 1e6].  The original used rand()/RAND_MAX with
    // INTEGER division (almost always 0); cast before dividing.
    for(int i = 0; i < N; i++){
        A[i] = 1e6 * ( (float)rand() / (float)RAND_MAX );
        B[i] = 1e6 * ( (float)rand() / (float)RAND_MAX );
        C[i] = 1e6 * ( (float)rand() / (float)RAND_MAX );
        D[i] = 1e6 * ( (float)rand() / (float)RAND_MAX );
        E[i] = 1e6 * ( (float)rand() / (float)RAND_MAX );
        F[i] = 1e6 * ( (float)rand() / (float)RAND_MAX );
        G[i] = 1e6 * ( (float)rand() / (float)RAND_MAX );
        H[i] = 1e6 * ( (float)rand() / (float)RAND_MAX );
        K[i] = 1e6 * ( (float)rand() / (float)RAND_MAX );
    }
    // Host -> device copies
    cudaMemcpy( d_A, A, N*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( d_B, B, N*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( d_C, C, N*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( d_D, D, N*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( d_E, E, N*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( d_F, F, N*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( d_G, G, N*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( d_H, H, N*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( d_K, K, N*sizeof(float), cudaMemcpyHostToDevice );
    cudaEventRecord(inicio); // start GPU timer
    // The original launched <<<1, 256>>>, processing only the first 256 of
    // the N elements; size the grid to cover the whole vectors.
    int hilos_por_bloque = 256;
    int num_bloques = (N + hilos_por_bloque - 1) / hilos_por_bloque;
    operar_vectores_GPU<<<num_bloques, hilos_por_bloque>>>( N, d_A, d_B, d_C, d_D, d_E, d_F, d_G, d_H, d_K );
    cudaEventRecord(fin);       // stop GPU timer
    cudaEventSynchronize(fin);  // wait until the kernel has finished
    // Only the three result vectors change on the device; the inputs are
    // already in host memory (the original copied all nine back).
    cudaMemcpy( C, d_C, N*sizeof(float), cudaMemcpyDeviceToHost );
    cudaMemcpy( F, d_F, N*sizeof(float), cudaMemcpyDeviceToHost );
    cudaMemcpy( G, d_G, N*sizeof(float), cudaMemcpyDeviceToHost );
    cudaEventElapsedTime( &tiempo_transcurrido_ms, inicio, fin ); // elapsed [ms]
    cudaEventDestroy(inicio);
    cudaEventDestroy(fin);
    // Spot-check the first 10 elements of the three result vectors.
    for(int i = 0; i < 10; i++){
        bool test1 = ( C[i] == A[i] + B[i] );
        bool test2 = ( F[i] == D[i] - E[i] );
        bool test3 = ( G[i] == K[i] * H[i] );
        printf( "\nC[%i] = A[%i] + B[%i] :%s\n", i, i, i, test1 ? "correcto" : "erroneo");
        printf( "F[%i] = D[%i] - E[%i] :%s\n", i, i, i, test2 ? "correcto" : "erroneo");
        printf( "G[%i] = K[%i] * H[%i] :%s\n", i, i, i, test3 ? "correcto" : "erroneo");
    }
    printf("\nTiempo transcurrido (GPU : kernel operarVectores) : %f ms\n\n", tiempo_transcurrido_ms);
    // Free device memory
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    cudaFree(d_D); cudaFree(d_E); cudaFree(d_F);
    cudaFree(d_G); cudaFree(d_H); cudaFree(d_K);
    // Free host memory
    free(A); free(B); free(C); free(D); free(E);
    free(F); free(G); free(H); free(K);
    return 0;
}
|
12,203 | //CUDA implementation of mergesort |
12,204 | /*
============================================================================
Name : ergasia4_final.cu
Author : Christophoros Bekos (mpekchri@auth.gr)
Version :
Copyright : @
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#define threads_per_warp 32
#define num_of_threads 256
// In-place logistic sigmoid: z <- 1 / (1 + e^{-z}).
// Uses float literals and expf() so the computation stays in single
// precision (the original promoted to double via 1.0 and exp()).
__device__ void sigmoid(float& z) {
    z = 1.0f / (1.0f + expf(-z));
}
// One back-propagation step for a slice of rows, executed by one block.
// Layout of the dynamic shared buffer (sized by the launch configuration):
//   temp[rows_per_block] | m[col_length*rows_per_block] | v[...] | res[...]
// Each thread owns one output row: it stages one col_length-long row of
// `matrix`, multiplies it element-wise with `vector`, reduces the products,
// then scales by the sigmoid derivative.
// NOTE(review): block_id, res, last_block and size are declared/received but
// never used in this body -- the caller already passes block-offset pointers.
__device__ void backpropagate_some_cols(float* result, int rows_per_block,
int col_length, float* matrix, float* vector, int last_block, int size,
float* sigm_der) {
// README :
// each block uses rows threads
// each block modifies rows columns ( cols columns per block)
// each thread modifies one column , column's length is col_length
// cols : number of columns that this block will modify
// one last block has less job to do, this one takes parameter last_block == 1
// and size (after index exceeds size in last block, no computation must be made)
int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
int block_id = blockIdx.x;
extern __shared__ float shared[];
float* temp = shared;
float* m = &temp[rows_per_block];
float* v = &m[col_length * rows_per_block];
float* res = &v[col_length * rows_per_block];
// Stage this thread's row of the matrix in shared memory.
for (int i = thread_id * col_length;
i < thread_id * col_length + col_length; i++) {
m[i] = matrix[i];
}
// Stage the vector; entries at thread_id >= col_length are zeroed by the
// boolean predicate.
v[thread_id] = 0;
v[thread_id] = vector[thread_id] * (thread_id < col_length);
__syncthreads();
// Element-wise product of the staged row with the vector.
int cnt = 0;
for (int i = thread_id * col_length;
i < thread_id * col_length + col_length; i++) {
m[i] = m[i] * v[cnt];
cnt++;
}
__syncthreads();
// Per-row serial reduction of the products.
temp[thread_id] = 0;
for (int i = thread_id * col_length;
i < thread_id * col_length + col_length; i++) {
temp[thread_id] += m[i];
}
__syncthreads();
// Scale by the sigmoid derivative to obtain this row's new delta.
result[thread_id] = temp[thread_id] * sigm_der[thread_id];
}
// Grid-level wrapper for back-propagation: each block handles its own slice
// of rows; result/matrix/sigm_der are offset by block_id * rows_per_block so
// backpropagate_some_cols works on per-block pointers.
// (block_id == last_block) flags the final, partially-filled block.
__global__ void backpropagate(float* result, int rows_per_block, int col_length,
        float* matrix, float* vector, int last_block, int size,
        float* sigm_der) {
    int block_id = blockIdx.y * gridDim.x + blockIdx.x;
    // The unused thread_id local from the original has been removed.
    backpropagate_some_cols(&result[block_id * rows_per_block], rows_per_block,
            col_length, &matrix[block_id * rows_per_block], vector,
            (block_id == last_block), size,
            &sigm_der[block_id * rows_per_block]);
}
// Element-wise product of two shared-memory vectors:
//   sh_res[i] = sh_a[i] * sh_b[i], entries at i >= size forced to 0.
// Each thread handles `multiplier` consecutive elements.
__device__ void hadamard_product_small(float* sh_a, float* sh_b, float* sh_res,
int multiplier, int size) {
int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
// start the computations
for (int i = thread_id * multiplier;
i < thread_id * multiplier + multiplier; i++) {
sh_res[i] = sh_b[i] * sh_a[i] * ((int) (i < size));
}
// Result lands in sh_res.  (An earlier comment said sh_b, which was wrong,
// and ended with a stray backslash that spliced the next comment line.)
}
// Cooperative tree-style reduction of a shared-memory chunk into `result`
// (only thread 0 writes result at the end).
// `start` is the global offset of sha[0]; together with `size` it masks out
// elements beyond the true data length.
// NOTE(review): both switch statements diverge per group of i threads, but
// every thread still reaches the __syncthreads() after each switch, so the
// barriers are block-uniform as required.
__device__ void array_sum_small(float* sha, float& result, int size,
int start) {
int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
// start the computations
for (int i = threads_per_warp; i < num_of_threads; i = i * 2) {
// phase 1: even groups of i threads accumulate the next group's contents
switch ((int) floor(thread_id / (double) i) % 2) {
case 0:
// even group: add the matching element of the following group,
// masked to zero when start+thread_id+i lies beyond `size`
sha[thread_id] = sha[thread_id]
+ sha[i + thread_id]
* ((int) (start + thread_id + i < size));
break;
default:
// odd group: nothing this phase
break;
}
__syncthreads();
// phase 2: odd groups clear their (already consumed) content
switch ((int) floor(thread_id / (double) i) % 2) {
case 0:
// even group: nothing this phase
break;
default:
// odd group: clean up
sha[thread_id] = 0;
//__syncthreads();
break;
}
__syncthreads();
}
// Loop done: sha[0:threads_per_warp] holds the partial sums; thread 0
// folds them serially into `result` and zeroes the scratch.
if (thread_id == 0) {
for (int i = 0; i < threads_per_warp; i++) {
result = result + sha[i];
sha[i] = 0;
}
}
}
// Computes one output neuron per block:
//   result[block] = sigmoid( dot(matrix_row, vector) + bias )
// and stores the sigmoid derivative alongside in sigm_der[block].
// Shared layout: temp | m | v | res, each rows*multiplier floats.
// `multiplier` = elements handled per thread; indices >= size are masked
// to 0 via the boolean predicates.
__device__ void mull_feedforward_one_col(float* result, int rows, int cols,
float* matrix, float* vector, int multiplier, int size, float bias,
float* sigm_der) {
int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
int block_id = blockIdx.y * gridDim.x + blockIdx.x;
extern __shared__ float shared[];
float* temp = shared;
float* m = &temp[rows * multiplier];
float* v = &m[rows * multiplier];
float* res = &v[rows * multiplier];
// Stage this thread's slice of the weight row, masked past `size`.
for (int i = thread_id * multiplier;
i < thread_id * multiplier + multiplier; i++) {
m[i] = matrix[i] * ((i < size));
}
// Stage the input activations, same masking.
for (int i = thread_id * multiplier;
i < thread_id * multiplier + multiplier; i++) {
v[i] = vector[i] * ((i < size));
}
// Zero the scratch areas before use (shared memory is uninitialized).
for (int i = thread_id * multiplier;
i < thread_id * multiplier + multiplier; i++) {
res[i] = 0.0;
}
for (int i = thread_id * multiplier;
i < thread_id * multiplier + multiplier; i++) {
temp[i] = 0.0;
}
__syncthreads();
// temp = m .* v
hadamard_product_small(m, v, temp, multiplier, size);
__syncthreads();
// Reduce each num_of_threads-sized chunk of temp into res[0].
for (int i = multiplier - 1; i >= 0; i--) {
array_sum_small(&temp[i * num_of_threads], res[0], size,
(i * num_of_threads));
__syncthreads();
}
// Thread 0 applies bias + sigmoid and publishes the outputs.
if (thread_id == 0) {
float tmp = (res[thread_id] + bias);
sigmoid(tmp);
result[block_id] = tmp;
sigm_der[block_id] = tmp * (1 - tmp);
}
}
// Grid wrapper for the feed-forward pass: block b computes output neuron b
// from weight row b (&matrix[b * size]) and its bias biases[b].
__global__ void feedforward(float* result, int rows, int cols, float* matrix,
float* vector, int multiplier, int size, float* biases,
float* sigm_der) {
int block_id = blockIdx.y * gridDim.x + blockIdx.x;
mull_feedforward_one_col(result, rows, cols, &matrix[block_id * size],
vector, multiplier, size, biases[block_id], sigm_der);
}
// Output-layer error: d_L = (a - y) .* sigm_der, one thread per element.
// Launched with a single block covering the output layer; dynamic shared
// memory (one float per thread, sized by the launch config) is used as
// scratch exactly as before.
__global__ void compute_d_L(float* a, float* y, float* sigm_der, float* d_L) {
    extern __shared__ float shared[];
    const int tid = threadIdx.y * blockDim.x + threadIdx.x;
    shared[tid] = (a[tid] - y[tid]) * sigm_der[tid];
    d_L[tid] = shared[tid];
}
void cpu_feedforward(float* a_old, int rows, int cols, float** a_new, float* w,
float* b, float* sigm_der_result);
void train(int num_of_layers, int* s, float** w, float** b, float** alfa,
float** delta, float** sigm_derivative);
float getRandom(int min, int max);
float* transformOutput(int output, int size);
float* cost_derivative(float* a, float* y, int size);
float* mull_feedforward(int rows, int cols, float* matrix, float* vector);
float* hadamard_product(int size, float* a, float* b);
void cpu_backpropagate(float* d_L, int rows, int cols, float** d_new,
float* sigm_der, float* w);
void cuda_train(int num_of_layers, int* s, float** w, float** b, float** alfa,
float** delta, float** sigm_derivative,float* gpu_y,int* rows_for_backprop,cudaStream_t default_stream);
// Host driver: builds a 3-layer network (784-30-10), initializes weights and
// biases deterministically, copies them to the GPU, then runs one training
// pass on the CPU and one on the GPU and compares wall-clock times.
int main(void) {
    // SECTION 1: network size
    int num_of_layers = 3;
    int* s = new int[num_of_layers]; // size of layers
    int* rows_for_backprop = new int[num_of_layers - 1];
    rows_for_backprop[0] = 0; // always zero , it's not used
    rows_for_backprop[1] = 100;
    s[0] = 784;
    s[1] = 30;
    s[2] = 10;
    // SECTION 2: host and device structures
    float **w, **gpu_w;
    float **b, **gpu_b, **sigm_derivative, **gpu_sigm_derivative, **delta,
            **gpu_delta, **alfa, **gpu_alfa;
    float* gpu_y;
    cudaMalloc((void**) &gpu_y, sizeof(float) * (s[num_of_layers - 1]));
    w = new float*[num_of_layers];
    gpu_w = new float*[num_of_layers];
    b = new float*[num_of_layers];
    gpu_b = new float*[num_of_layers];
    delta = new float*[num_of_layers];
    sigm_derivative = new float*[num_of_layers];
    alfa = new float*[num_of_layers];
    gpu_delta = new float*[num_of_layers];
    gpu_sigm_derivative = new float*[num_of_layers];
    gpu_alfa = new float*[num_of_layers];
    alfa[0] = new float[s[0]];
    cudaMalloc((void**) &gpu_alfa[0], sizeof(float) * (s[0]));
    // Layer 0 has no weights/biases/deltas.
    w[0] = NULL;
    b[0] = NULL;
    gpu_w[0] = NULL;
    gpu_b[0] = NULL;
    sigm_derivative[0] = NULL;
    delta[0] = NULL;
    for (int i = 1; i < num_of_layers; i++) {
        w[i] = new float[s[i - 1] * s[i]];
        cudaMalloc((void**) &gpu_w[i], sizeof(float) * (s[i - 1] * s[i]));
        sigm_derivative[i] = new float[s[i]];
        cudaMalloc((void**) &gpu_sigm_derivative[i], sizeof(float) * (s[i]));
        b[i] = new float[s[i]];
        cudaMalloc((void**) &gpu_b[i], sizeof(float) * (s[i]));
        delta[i] = new float[s[i]];
        cudaMalloc((void**) &gpu_delta[i], sizeof(float) * (s[i]));
        alfa[i] = new float[s[i]];
        cudaMalloc((void**) &gpu_alfa[i], sizeof(float) * (s[i]));
    }
    // Deterministic initial parameters: biases 1, weights 0.5.
    for (int i = 1; i < num_of_layers; i++) {
        for (int j = 0; j < s[i]; j++) {
            b[i][j] = 1;
        }
    }
    for (int i = 1; i < num_of_layers; i++) {
        for (int j = 0; j < s[i - 1] * s[i]; j++) {
            w[i][j] = 0.5;
        }
    }
    // SECTION 3: initial host -> device transfer
    cudaStream_t default_stream;
    cudaStreamCreate(&default_stream);
    for (int i = 1; i < num_of_layers; i++) {
        cudaMemcpyAsync(gpu_w[i], w[i], sizeof(float) * (s[i - 1] * s[i]),
                cudaMemcpyHostToDevice, default_stream);
        cudaMemcpyAsync(gpu_b[i], b[i], sizeof(float) * (s[i]),
                cudaMemcpyHostToDevice, default_stream);
    }
    cudaStreamSynchronize(default_stream);
    // SECTION 4: time CPU vs GPU training.
    // Variables renamed to what they actually measure: the original stored
    // the CPU time in time_c but printed it in the "gpu time" slot.
    struct timeval t1, t2;
    double time_cpu, time_gpu;
    gettimeofday(&t1, 0);
    train(num_of_layers, s, w, b, alfa, delta, sigm_derivative);   // CPU pass
    gettimeofday(&t2, 0);
    time_cpu = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec)
            / 1000.0;
    gettimeofday(&t1, 0);
    cuda_train(num_of_layers, s, gpu_w, b, gpu_alfa, gpu_delta,
            gpu_sigm_derivative, gpu_y, rows_for_backprop, default_stream);  // GPU pass
    gettimeofday(&t2, 0);
    time_gpu = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec)
            / 1000.0;
    printf("gpu time %0.6f , cpu time %0.6f \n", time_gpu, time_cpu);
    // Speedup of GPU over CPU, in percent.  '%%' replaces the original bare
    // '%', which is undefined behavior in a printf format string.
    printf("Accelaration %0.6f %%\n", ((time_cpu / time_gpu) * 100));
    printf("success\n");
    return 0;
}
// GPU training loop mirroring train(): a feed-forward launch per layer, the
// output-layer delta, then back-propagation launches, all on the device.
// NOTE(review): `cache` (the dynamic shared-memory byte count) is a float
// implicitly converted in the <<< >>> configuration -- confirm 11000 floats
// suffices for every layer's staging buffers.
// NOTE(review): the backpropagate launch passes sigm_derivative[i - 1] while
// the CPU path in train() uses sigm_derivative[i] -- verify which is intended.
void cuda_train(int num_of_layers, int* s, float** w, float** b, float** alfa,
float** delta, float** sigm_derivative,float* gpu_y,int* rows_for_backprop,cudaStream_t default_stream) {
// float learning_rate = 0.5;
int epochs = 1;
int batch_size = 1;
int yd = 0;
// NOTE(review): cost and blocks are declared but never used in this body.
float* y, *cost;
int blocks = 0;
int numofthreads = 256;
int multiplier;
float cache = 11000 * sizeof(float);
float* a = new float[s[0]];
int num_of_blocks;
int rows_per_block;
int last_block;
int size_for_last_block;
for (int ep = 0; ep < epochs; ep += (batch_size)) {
// reset_sums(); --> NO CUDA VERSION OF IT
for (int batch = 0; batch < batch_size; batch++) {
// alfa[0] = read_tuple(ep + batch, &y_int); --> NO CUDA VERSION OF IT
// since we don't read alfa[0] from file (in order to proper simulate it)
// we will update alfa[0] with random values in each iteration
// in any case, time would be wasted ,in order alfa[0] to be transfered in gpu
for (int i = 0; i < s[0]; i++) {
a[i] = getRandom(-1, 1);
}
// same goes for yd (y desired) READING VERSION FOR .CU FILE ISN'T YET CREATED
yd = 0;
// feedforward(&alfa[0]);
cudaMemcpy(alfa[0], a, sizeof(float) * (s[0]),
cudaMemcpyHostToDevice);
for (int i = 1; i < num_of_layers; i++) {
// multiplier = number of previous-layer elements each thread stages
multiplier = floor(s[i - 1] / numofthreads) + 1;
if (s[i - 1] < numofthreads) {
multiplier = 1;
}
feedforward<<<s[i], numofthreads, cache>>>(alfa[i],
numofthreads, s[i], w[i], alfa[i - 1], multiplier,
s[i - 1], b[i], sigm_derivative[i]);
if (i == 1) {
// while gpu running , compute y and store it in cuda
y = transformOutput(yd, s[num_of_layers - 1]);
cudaMemcpyAsync(gpu_y, y,
sizeof(float) * (s[num_of_layers - 1]),
cudaMemcpyHostToDevice, default_stream);
}
cudaDeviceSynchronize();
// no need to copy data back -> all implementation in cuda
}
// wait for y copy - just to be sure - actually y copy must has been done way before you reach this statement
cudaStreamSynchronize (default_stream);
// feedforward completed, compute cost_derivative
compute_d_L<<<1, s[num_of_layers - 1],
s[num_of_layers - 1] * sizeof(float)>>>(
alfa[num_of_layers - 1], gpu_y,
sigm_derivative[num_of_layers - 1],
delta[num_of_layers - 1]);
cudaDeviceSynchronize();
// backpropagate the error
for (int i = num_of_layers - 2; i > 0; i--) {
// NOTE(review): main() only initializes rows_for_backprop[1];
// rows_per_block == 0 here would divide by zero below -- confirm.
rows_per_block = rows_for_backprop[i];
num_of_blocks = floor(s[i] / rows_per_block) + 1;
last_block = floor(s[i] / rows_per_block);
size_for_last_block = s[i] - floor(s[i] / rows_per_block) * rows_per_block;
backpropagate<<<num_of_blocks, rows_per_block, cache>>>(delta[i], rows_per_block, s[i+1], w[i + 1],delta[i + 1], last_block, size_for_last_block,sigm_derivative[i - 1]);
cudaDeviceSynchronize();
}
// update_sums(); --> NO CUDA VERSION OF IT
}
// gradient_descent(learning_rate, batch_size); --> NO CUDA VERSION OF IT
}
}
// CPU reference training loop (single epoch/batch in this demo):
// feed-forward through all layers, compute the output-layer delta, then
// back-propagate.  update_sums()/gradient_descent() are intentionally absent
// (no CUDA counterpart was written either).
void train(int num_of_layers, int* s, float** w, float** b, float** alfa,
        float** delta, float** sigm_derivative) {
    // float learning_rate = 0.5;
    int epochs = 1;
    int batch_size = 1;
    int yd = 0;
    float* y, *cost;
    for (int ep = 0; ep < epochs; ep += (batch_size)) {
        // reset_sums(); --> NO CUDA VERSION OF IT
        for (int batch = 0; batch < batch_size; batch++) {
            // Inputs are randomized instead of read from a file, to simulate
            // the I/O cost of the real pipeline.
            for (int i = 0; i < s[0]; i++) {
                alfa[0][i] = getRandom(-1, 1);
            }
            // desired label (file-reading version not written yet)
            yd = 0;
            y = transformOutput(yd, s[num_of_layers - 1]);
            // feed-forward through layers 1..num_of_layers-1
            for (int i = 1; i < num_of_layers; i++) {
                cpu_feedforward(alfa[i - 1], s[i - 1], s[i], &alfa[i], w[i],
                        b[i], sigm_derivative[i]);
            }
            cost = cost_derivative(alfa[num_of_layers - 1], y,
                    s[num_of_layers - 1]);
            delta[num_of_layers - 1] = hadamard_product(s[num_of_layers - 1],
                    cost, sigm_derivative[num_of_layers - 1]);
            // backpropagate(delta[num_of_layers-1]);
            for (int i = num_of_layers - 2; i > 0; i--) {
                cpu_backpropagate(delta[i + 1], s[i], s[i + 1], &delta[i],
                        sigm_derivative[i], w[i + 1]);
            }
            // Free per-batch temporaries (the original leaked both every
            // iteration).  NOTE(review): alfa[i]/delta[i] are also replaced
            // with fresh allocations each pass and the old buffers leak;
            // fixing that requires changes outside this function.
            delete[] y;
            delete[] cost;
            // update_sums(); --> NO CUDA VERSION OF IT
        }
        // gradient_descent(learning_rate, batch_size); --> NO CUDA VERSION OF IT
    }
}
// Uniform pseudo-random float in [min, max].
// The original multiplied by 100 and divided by 100 again -- a no-op that
// never truncated anything -- so the scaling has been dropped.
float getRandom(int min, int max) {
    return (max - min) * ((float) rand() / (float) RAND_MAX) + min;
}
// One-hot encode `output` into a freshly allocated float vector of length
// `size`: all zeros except result[output] == 1.  Caller owns the buffer.
float* transformOutput(int output, int size) {
    float* onehot = new float[size]();   // value-initialized to all zeros
    onehot[output] = 1;
    return onehot;
}
// Derivative of the quadratic cost w.r.t. the output activations:
// returns a newly allocated vector (a - y) of length `size`.
// Caller owns the returned buffer.
float* cost_derivative(float* a, float* y, int size) {
    float* diff = new float[size];
    int k = 0;
    while (k < size) {
        diff[k] = a[k] - y[k];
        ++k;
    }
    return diff;
}
// FOR FEEDFORWARD IN CPU
// Element-wise (Hadamard) product: returns a newly allocated vector a .* b
// of length `size` (MATLAB's a.*b).  Caller owns the returned buffer.
float* hadamard_product(int size, float* a, float* b) {
    float* prod = new float[size];
    int idx = 0;
    while (idx < size) {
        prod[idx] = a[idx] * b[idx];
        ++idx;
    }
    return prod;
}
// Feed-forward matrix-vector product.
// matrix is stored column-after-column with `rows` entries per column
// (matrix[j*rows + i]); returns a new "cols x 1" vector where
//   res[j] = sum_i matrix[j*rows + i] * vector[i].
// Caller owns the returned buffer.
// Accumulates directly instead of allocating a temporary Hadamard-product
// buffer per column (the original did one new[]/delete[] pair per column).
float* mull_feedforward(int rows, int cols, float* matrix, float* vector) {
    float* res = new float[cols];
    for (int j = 0; j < cols; j++) {
        float acc = 0.0f;
        const float* col = &matrix[j * rows];
        for (int i = 0; i < rows; i++)
            acc += col[i] * vector[i];
        res[j] = acc;
    }
    return res;
}
// In-place element-wise sum: a[i] += b[i] for every i in [0, size).
void vector_add(int size, float* a, float* b) {
    for (int idx = size; idx-- > 0; )
        a[idx] += b[idx];
}
// Scalar logistic sigmoid 1/(1+e^-z), computed in double precision and then
// narrowed to float -- the same implicit promotion the original performed
// through its 1.0 literals and exp().
float sigm(float z) {
    const double denom = 1.0 + exp(-z);
    return 1.0 / denom;
}
// In-place logistic sigmoid over a host vector of length `size`.
// The per-element formula is inlined from sigm(): 1/(1+e^-z) computed in
// double precision and narrowed to float, exactly as before.
void sigmoid(float** z, int size) {
    float* v = *z;
    for (int i = 0; i < size; i++) {
        v[i] = 1.0 / (1.0 + exp(-v[i]));
    }
}
// z = W*a + b for one layer: mull_feedforward produces the weighted sums
// (a freshly allocated cols-long vector owned by the caller), then the bias
// is added in place.
float* compute_z(float* a, float* w, float* b, int rows, int cols) {
float* result = mull_feedforward(rows, cols, w, a);
vector_add(cols, result, b);
return result;
}
// Sigmoid derivative expressed through its output:
// result[i] = a[i] * (1 - a[i]), where a holds sigmoid activations.
void compute_sigm_der(float* a, float* result, int size) {
    int i = 0;
    while (i < size) {
        result[i] = a[i] * (1 - a[i]);
        ++i;
    }
}
// One CPU feed-forward step: a_new = sigmoid(W*a_old + b), plus the sigmoid
// derivative needed later by back-propagation.
// NOTE(review): a_new[0] is overwritten with a freshly allocated vector from
// compute_z; the buffer it previously pointed to (allocated in main) leaks
// -- confirm ownership at the call sites.
void cpu_feedforward(float* a_old, int rows, int cols, float** a_new, float* w,
float* b, float* sigm_der_result) {
a_new[0] = compute_z(a_old, w, b, rows, cols);
sigmoid(&a_new[0], cols);
compute_sigm_der(a_new[0], sigm_der_result, cols);
}
// FOR BACKPROPAGATE
// Back-propagation matrix-vector product.
// matrix is stored row-after-row with `cols` entries per row
// (matrix[j*cols + i]); returns a new "rows x 1" vector where
//   res[j] = sum_i matrix[j*cols + i] * vector[i].
// Caller owns the returned buffer.
// Accumulates directly instead of building a temporary Hadamard product per
// row (the original allocated and freed a buffer on every row).
float* mull_backpropagate(int rows, int cols, float* matrix, float* vector) {
    float* res = new float[rows];
    for (int j = 0; j < rows; j++) {
        float acc = 0.0f;
        const float* row = &matrix[j * cols];
        for (int i = 0; i < cols; i++)
            acc += row[i] * vector[i];
        res[j] = acc;
    }
    return res;
}
// One CPU back-propagation step: d_new = (w * d_L) .* sigm_der, where the
// matrix-vector product comes from mull_backpropagate.  d_new[0] receives a
// freshly allocated rows-long vector; the intermediate w_d is freed here.
// NOTE(review): like cpu_feedforward, the buffer d_new[0] pointed to before
// the call leaks -- confirm ownership at the call sites.
void cpu_backpropagate(float* d_L, int rows, int cols, float** d_new,
float* sigm_der, float* w) {
float* w_d;
w_d = mull_backpropagate(rows, cols, w, d_L);
d_new[0] = hadamard_product(rows, w_d, sigm_der);
delete[] w_d;
}
|
12,205 | #include <cuda_runtime.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define BLOCK_SIZE 256
int UI(int argc, char* argv[], int* jkl);
void initData(int* data, int len);
void showData(int* data, int len);
void histCountCPU(int* data, int* hist, int len, int bin);
void checkCUDAError(cudaError_t e);
__global__
void histCountGPU(int* d_data, int* d_hist, int len, int bin, int part);
void resultCheck(int* result_CPU, int* result_GPU, int size);
// Host driver: parse the command line, build a random data array, compute a
// histogram on the CPU and on the GPU, and compare the two results.
int main(int argc, char* argv[]){
    // reset rand seed
    srand((unsigned)time(NULL));
    clock_t start, finish;
    int total_time;
    // UI validates the command line; only a valid command continues.
    int UIStatus;
    int param[2];
    UIStatus = UI(argc, argv, param);
    if (UIStatus != 0) {
        printf("\nApplication terminates.\n");
        return 0;
    }
    const int bin = param[0];
    const int len = param[1];
    int* data = (int*)malloc(len * sizeof(int));
    initData(data, len);
    showData(data, len);
    printf("Done initializing data array with length %d.\n", len);
    // CPU histogram, used to verify the kernel result later.
    // calloc zero-initializes; only `bin` counters are needed (the original
    // over-allocated `len` entries per histogram).
    int* hist_CPU = (int*)calloc(bin, sizeof(int));
    int* hist_GPU = (int*)calloc(bin, sizeof(int));
    start = clock();
    histCountCPU(data, hist_CPU, len, bin);
    finish = clock();
    total_time = (int)(finish - start);
    printf("\nhist_CPU:");
    showData(hist_CPU, bin);
    printf("Done histogrm calculation with CPU in %d miliseconds.\n", total_time);
    // Allocate device memory, copy data over, zero the device histogram.
    int *d_data, *d_hist;
    checkCUDAError(cudaMalloc((int**)&d_data, len * sizeof(int)));
    checkCUDAError(cudaMalloc((int**)&d_hist, bin * sizeof(int)));
    printf("\nDone allocating space in device.");
    checkCUDAError(cudaMemcpy(d_data, data, len * sizeof(int), cudaMemcpyHostToDevice));
    printf("\nDone copying memory from host to device.");
    // cudaMemset counts BYTES: the original passed `bin`, zeroing only bin
    // bytes instead of bin ints.
    checkCUDAError(cudaMemset(d_hist, 0, bin * sizeof(int)));
    printf("\nDone initializing device histogram with zeros.\n");
    // Launch configuration: fixed grid; the kernel iterates with a
    // grid-stride loop over len.
    dim3 threads(BLOCK_SIZE);
    // dim3 grid((int)ceil(1.0 * len / threads.x));
    dim3 grid(120);
    printf("\nDone initializing block dimention and grid dimention.");
    // Launch the kernel; shared memory holds one private histogram (bin ints).
    start = clock();
    histCountGPU<<< grid, threads, bin * sizeof(int) >>>(d_data, d_hist, len, bin, 1024 / bin);
    checkCUDAError(cudaGetLastError());        // catch launch-config errors
    checkCUDAError(cudaDeviceSynchronize());
    finish = clock();
    total_time = (int)(finish - start);
    // Fixed message: this is a histogram, not a matrix multiplication.
    printf("\nDone histogram calculation with GPU in %d miliseconds.\n", total_time);
    checkCUDAError(cudaMemcpy(hist_GPU, d_hist, bin * sizeof(int), cudaMemcpyDeviceToHost));
    printf("hist_GPU:");
    showData(hist_GPU, bin);
    checkCUDAError(cudaFree(d_hist));
    checkCUDAError(cudaFree(d_data));
    // Verify GPU result against the CPU reference.
    resultCheck(hist_CPU, hist_GPU, bin);
    // Release host memory (the original leaked all three buffers).
    free(data);
    free(hist_CPU);
    free(hist_GPU);
    return 0;
}
// UI for main function
// return 0 means everything's fine, just continue;
// return 1 means there's invalid input or '--help', terminate running.
// Command-line front end.
// Returns 0 when two valid parameters were parsed into param[0] (bin count,
// a power of two in [2^2, 2^8]) and param[1] (array length > 0);
// returns 1 for -h/--help or any invalid invocation.
int UI(int argc, char* argv[], int* param) {
    // -h / --help: print usage and stop.
    if (argc == 2 && (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0)) {
        printf("CUDA Programming Homework. Histogram Algorithm.\n");
        printf("\nUsage: hist [OPTION]...\n");
        printf("\nOptions:\n");
        printf("%5s, %-10s %-50s\n", "-h", "--help", "Show helping information.");
        printf("%5s, %-10s %-50s\n", "-i", "--input", "Followed by 2 integers as input parameters.");
        printf("\nExamples:\n");
        printf("hist -h\n");
        printf(" Shows the helping information.\n");
        printf("hist -i 8 200\n");
        printf(" 8 represents 8 bins in histogram, 200 means the length of the data\n");
        return 1;
    }
    // -i / --input with exactly two values.
    if (argc == 4 && (strcmp(argv[1], "-i") == 0 || strcmp(argv[1], "--input") == 0)) {
        const int bin = atoi(argv[2]);
        const int len = atoi(argv[3]);
        // bin must be an exact power of two between 2^2 and 2^8.
        // (bin & (bin - 1)) == 0 tests power-of-two for positive values; the
        // range check rules out 1, 2, non-positives and anything above 256 --
        // the same acceptance set as the original repeated-halving loop.
        if (bin < 4 || bin > 256 || (bin & (bin - 1)) != 0) {
            printf("Invalid bin numbers. The bin numbers should be exponent of 2, range from 2^2 to 2^8\n");
            return 1;
        }
        if (len <= 0) {
            printf("Invalid array length. The array length should be an integer greater than 0.\n");
            return 1;
        }
        printf("Bin numbers: %d\n", bin);
        printf("Array length: %d\n", len);
        param[0] = bin;
        param[1] = len;
        return 0;
    }
    // Anything else is an invalid command.
    printf("Invalid command. Please check how to make valid command by '-h' or '--help'.\n");
    return 1;
}
// initialize data with int type range [0, 1024)
// Fill `data` with `len` pseudo-random ints in [0, 1024), in index order so
// the rand() call sequence matches the original.
void initData(int* data, int len) {
    int i = 0;
    while (i < len) {
        data[i++] = rand() % 1024;
    }
}
// show the data in the command prompt.
// this function is used for configuration
// only show previous 10 elements when length of array is too large
// Print up to the first 10 elements of `data` for quick inspection;
// appends "..." when the array is longer than 10.
void showData(int* data, int len) {
    printf("data:\n[");
    const int shown = (len < 10) ? len : 10;
    for (int i = 0; i < shown; ++i) {
        if (i) printf(",");
        printf("%4d", data[i]);
    }
    if (len > 10) printf("...");
    printf("]\n");
}
// matrix multiplication with CPU in the most stupid algo
// Algo Complexity: O((2k-1)*j*l)
// CPU histogram: values in [0, 1024) are mapped into `bin` equal buckets of
// width 1024/bin.  `hist` must be zero-initialized by the caller.
void histCountCPU(int* data, int* hist, int len, int bin) {
    const int width = 1024 / bin;
    int i = 0;
    while (i < len) {
        hist[data[i] / width] += 1;
        ++i;
    }
}
// Check the result of the CUDA function.
// Abort with a diagnostic when a CUDA runtime call fails.
// Errors now go to stderr, and the process exits with a failure status --
// the original wrote to stdout and called exit(0), making scripted runs
// report success even after a CUDA error.
void checkCUDAError(cudaError_t e) {
    if (e == cudaSuccess) return;
    fprintf(stderr, "\nError: %s\n", cudaGetErrorName(e));
    fprintf(stderr, "%s\n", cudaGetErrorString(e));
    exit(EXIT_FAILURE);
}
// matrix multiplication with GPU device
// using tiling algorithm, take use of the shared memory to higher the compute-to-global-memory-access ratio
// GPU histogram with privatized per-block bins.
// Each block keeps its own copy of the histogram in dynamic shared memory
// (`bin` ints, sized by the launch's third parameter), accumulates into it
// with shared-memory atomics, then merges into the global histogram with one
// global atomicAdd per bin.  `part` is the bucket width (1024 / bin).
__global__
void histCountGPU(int* d_data, int* d_hist, int len, int bin, int part) {
// calculating thread id
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Privatized bins, zeroed cooperatively (strided so any blockDim.x works).
extern __shared__ int histo_s[];
for (unsigned int binIdx = threadIdx.x; binIdx < bin; binIdx += blockDim.x) {
histo_s[binIdx] = 0;
}
__syncthreads();
// Grid-stride loop over the data: each thread handles every
// (blockDim.x * gridDim.x)-th element, so any grid size covers all of len.
for (unsigned int i = tid; i < len; i += blockDim.x * gridDim.x) {
atomicAdd(&(histo_s[d_data[i] / part]), 1);
}
__syncthreads();
// Merge this block's private bins into the global histogram.
for (unsigned int binIdx = threadIdx.x; binIdx < bin; binIdx += blockDim.x) {
atomicAdd(&(d_hist[binIdx]), histo_s[binIdx]);
}
}
// check if two array is exactly the same
// Compare two int arrays element-by-element and report PASS/FAIL on stdout.
void resultCheck(int* result_CPU, int* result_GPU, int size) {
    int i = 0;
    while (i < size) {
        if (result_CPU[i] != result_GPU[i]) {
            printf("\nResult check: Error!!!! Didn't pass.");
            return;
        }
        ++i;
    }
    printf("\nResult check: ---PASS---.");
}
|
// Element-wise vector add on the GPU: d_C[i] = d_A[i] + d_B[i].
// NOTE(review): there is no bounds check, so the launch configuration must
// cover exactly the array length (blocks * threads == length) -- confirm at
// the call site.
__global__ void Sample1Kernel(float *d_A, float *d_B, float *d_C) {
// Step 1. Compute this thread's global CUDA thread ID.
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
// Step 2. Use the thread ID to load from global memory and compute.
d_C[thread_id] = d_A[thread_id] + d_B[thread_id];
}
// CPU reference for Sample1Kernel: element-wise sum of two float arrays.
__host__ void Sample1Host(float *h_A, float *h_B, float *h_C, int length) {
    int i = 0;
    while (i < length) {
        h_C[i] = h_A[i] + h_B[i];
        ++i;
    }
}
|
12,207 | # include <iostream>
# include <time.h>
# include <stdio.h>
# include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Implement the kernel using shared memeory
// Reverse an array block-wise through shared memory: each block loads its
// slice reversed into shared memory, then writes it (in forward order) to
// the mirror-image block position, reversing the array overall.
// Launch with dynamic shared memory of blockDim.x * sizeof(int).
__global__ void reverseArrayBlock_shared_memo(int * d_out, int *d_in){
extern __shared__ int s_data[];
// (1) The shared array size is not stated here -- it comes from the third
//     execution-configuration parameter supplied by the host.
// (2) This shared pointer is visible to all threads of the same block.
// Note: using shared memory requires computing the offsets manually.
int inOffset = blockDim.x * blockIdx.x;
int in = inOffset + threadIdx.x;
// Load one element per thread from device memory and store it in reversed order into temporary shared memory
s_data[blockDim.x - 1 - threadIdx.x] = d_in[in];
// block until all threads in the block have written their data to shared mem
__syncthreads();
// write the data from shared memory in forward order, but to the mirrored block offset
int outOffset = blockDim.x *(gridDim.x - 1 - blockIdx.x);
int out = outOffset + threadIdx.x;
d_out[out] = s_data[threadIdx.x];
}
// Driver: reverses a 256K-int array on the GPU using the shared-memory
// kernel, times it, and verifies the result on the host.
// Fixes: the elapsed-time conversion divided by 1e6 instead of
// multiplying (it printed "microseconds" but emitted seconds/1e6), and
// deprecated cudaThreadSynchronize() is replaced by cudaDeviceSynchronize().
int main(int argc, char ** argv){
    int *h_a;
    int dimA = 256*1024; // 256K elements (1MB with 4-byte ints)
    // device pointers: both arrays live entirely in global device memory
    int *d_a, *d_b;
    int numThreadsPerBlock = 256;
    // number of blocks needed to cover the whole array
    int numBlocks = dimA/numThreadsPerBlock;
    // dynamic shared memory: one int per thread in a block
    size_t sharedMemSize = numThreadsPerBlock*sizeof(int);
    size_t memSize = (size_t)dimA*sizeof(int);
    h_a = (int *)malloc(memSize);
    cudaMalloc((void **)&d_a, memSize);
    cudaMalloc((void **)&d_b, memSize);
    // Initialize input array on host
    for(int i=0; i< dimA; i++){
        h_a[i] = i;
    }
    // Copy host array to device array
    cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
    // launch kernel; third config parameter = shared memory bytes per block
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    clock_t device_start_shared_memo = clock();
    reverseArrayBlock_shared_memo<<< dimGrid, dimBlock, sharedMemSize>>>(d_b, d_a);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the replacement
    cudaDeviceSynchronize();
    clock_t device_time_shared_memo = clock() - device_start_shared_memo;
    // clock ticks -> seconds -> microseconds
    printf("Time elapsed on device using shared_memo: %f microseconds\n",
           (double)device_time_shared_memo/CLOCKS_PER_SEC*1000000.0);
    // check if kernel execution generated an error
    checkCUDAError("kernel invocation");
    // device to host copy
    cudaMemcpy(h_a, d_b, memSize, cudaMemcpyDeviceToHost);
    checkCUDAError("memcpy");
    // verify the data returned to the host is correct
    for(int i=0; i<dimA; i++){
        assert( h_a[i] == dimA - 1 - i);
    }
    // free device memory
    cudaFree(d_a);
    cudaFree(d_b);
    // free host memory
    free(h_a);
    printf("Correct! \n");
    return 0;
}
// Fetch (and clear) the last CUDA runtime error; if one occurred, report
// it with the caller-supplied context string and abort the program.
// Because launches are asynchronous, the error seen here may originate
// from an earlier operation rather than the most recent call.
void checkCUDAError( const char * msg){
    cudaError_t status = cudaGetLastError();
    if(status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s. \n ", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
|
12,208 | /* matrix transpose program */
#include <stdio.h>
const int P = 32;
/* naive CPU */
/* Naive CPU transpose reference.
 * src is an M-row by N-column matrix in row-major order; dst receives the
 * N-row by M-column transpose.
 * Fix: the original indexed dst with i*N+j, which is only correct when
 * M == N; i*M+j (dst row stride M) also handles rectangular matrices. */
void naiveCPU(float *src, float *dst, int M, int N)
{
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < M; ++j) {
            dst[i*M+j] = src[j*N+i];
        }
    }
}
/* naive GPU */
/* Naive GPU transpose: one thread per element; reads are coalesced,
 * writes are strided. _a is rows x cols row-major; _b receives the
 * cols x rows transpose.
 * Fix: added a bounds guard so the kernel is safe when the grid does not
 * divide the matrix exactly (previously out-of-range threads wrote past
 * the arrays). */
__global__ void matrixTranspose(float *_a, float *_b, int cols,int rows)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y; // row
    int j = blockIdx.x * blockDim.x + threadIdx.x; // col
    if (i < rows && j < cols) {
        _b[j * rows + i] = _a[i * cols + j];
    }
}
/* shared memory GPU */
/* Tiled transpose through shared memory: each block stages a P x P tile,
 * then writes it out transposed so both global reads and writes are
 * coalesced. The tile is written at [threadIdx.x][threadIdx.y] and read
 * back at [threadIdx.y][threadIdx.x]; with no padding, those column-wise
 * accesses cause shared-memory bank conflicts (fixed in the padded
 * variant below). Requires blockDim.x == blockDim.y == P. */
__global__ void matrixTransposeShared(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P];
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
// (i, j): source element; (ti, tj): destination element (tile mirrored)
int i = by + threadIdx.y; int j = bx + threadIdx.x;
int ti = bx + threadIdx.y; int tj = by + threadIdx.x;
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
// barrier: the tile must be fully populated before any thread reads it
__syncthreads();
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
/* loop-unrolled transpose: each thread handles P/8 rows of a padded tile */
/* Unrolled tiled transpose: launched with an 8-row block (P x 8), each
 * thread copies P/8 rows of the padded P x (P+1) tile, improving
 * instruction-level parallelism. The +1 padding avoids bank conflicts.
 * NOTE(review): the input is indexed with `rows` as its row stride and
 * the output with `cols` -- consistent only when rows == cols (square
 * matrices, as in main below). Confirm before using rectangles. */
__global__ void matrixTransposeUnrolled(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1];
int x = blockIdx.x * P + threadIdx.x;
int y = blockIdx.y * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) {
if (x < rows && y+k < cols)
mat[threadIdx.y+k][threadIdx.x] = _a[(y+k)*rows + x];
}
__syncthreads();
// swap block coordinates for the transposed write
x = blockIdx.y * P + threadIdx.x;
y = blockIdx.x * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) {
if (x < cols && y+k < rows)
_b[(y+k)*cols + x] = mat[threadIdx.x][threadIdx.y+k];
}
}
/* shared memory without bank conflicts: tile padded to P x (P+1) */
/* Same tiled transpose as matrixTransposeShared, but the shared tile is
 * padded to P x (P+1): the extra column shifts each row into a different
 * bank, so the column-wise tile accesses no longer conflict.
 * Requires blockDim.x == blockDim.y == P. */
__global__ void matrixTransposeSharedwBC(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1];
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
int i = by + threadIdx.y; int j = bx + threadIdx.x;
int ti = bx + threadIdx.y; int tj = by + threadIdx.x;
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
// tile must be complete before the transposed read-back
__syncthreads();
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
// Benchmark driver: transposes a 1024x1024 matrix with the CPU reference
// and four GPU kernels, timing each over 100 iterations with CUDA events.
// Results are copied back but not validated against the CPU output.
int main(int argc, char **argv)
{
/* N*M matrix, parallelism is P */
const int N = 1024;
const int M = 1024;
const int matSize = N * M * sizeof(float);
dim3 gridDim(N/P, M/P, 1);
dim3 blockDim(P , P, 1);
/* configuration of GPU */
printf("===================\n");
printf("Matrix: %d * %d\n", N, M);
printf("Grid: %d * %d * %d\n", gridDim.x, gridDim.y, gridDim.z);
printf("Block: %d * %d * %d\n", blockDim.x, blockDim.y, blockDim.z);
printf("===================\n");
/* allocate memory for matrix in host */
float *h_matrix = (float *) malloc(matSize);
float *h_transpose = (float *) malloc(matSize);
/* allocate memory for matrix in device */
float *d_matrix, *d_transpose;
cudaMalloc(&d_matrix, matSize);
cudaMalloc(&d_transpose, matSize);
/* randomly generate a matrix in host */
// NOTE: rand() is not seeded, so the matrix is the same on every run
// (deterministic benchmarking); i*N+j is valid here because N == M.
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
h_matrix[i*N+j] = (float)rand() / (float)(RAND_MAX) * 100.0;
}
}
/* utility for recording start and finish time */
cudaEvent_t tStart, tEnd;
float duration;
cudaEventCreate(&tStart);
cudaEventCreate(&tEnd);
const int nIterations = 100;
/* 1. naive CPU transpose */
// (CPU work bracketed by GPU events: measures wall time on the stream)
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
naiveCPU(h_matrix, h_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd); // waits for record to terminate
cudaEventElapsedTime(&duration, tStart, tEnd);
printf("\nNaive CPU: %f\n", duration / nIterations);
/* 2. naive GPU transpose */
cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
cudaMemset(d_transpose, 0, matSize);
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTranspose<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&duration, tStart, tEnd);
cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
printf("\nNaive GPU: %f\n", duration / nIterations);
/* 3. shared memory GPU transpose */
cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
cudaMemset(d_transpose, 0, matSize);
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeShared<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&duration, tStart, tEnd);
cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
printf("\nShared GPU: %f\n", duration / nIterations);
/* 4. shared memory GPU transpose without bank conflict */
cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
cudaMemset(d_transpose, 0, matSize);
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeSharedwBC<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&duration, tStart, tEnd);
cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
printf("\nSharedwBC GPU: %f\n", duration / nIterations);
duration = 0;
/* 5. unrolled GPU transpose */
// the unrolled kernel expects an 8-row block: each thread copies P/8 rows
dim3 blockDimUnroll(P, 8, 1);
cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
cudaMemset(d_transpose, 0, matSize);
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeUnrolled<<<gridDim,blockDimUnroll>>>(d_matrix, d_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&duration, tStart, tEnd);
cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
printf("\nUnrolled GPU: %f\n", duration / nIterations);
return 0;
}
|
12,209 | #include "matrix.cuh"
// Read element x of a row vector (a matrix_t with exactly one row).
float vector_get(matrix_t* v, unsigned int x)
{
    // Accessor is only defined for row vectors.
    assert(v->rows == 1);
    float value = v->matrix[x];
    return value;
}
// Write `value` into element x of a row vector (a matrix_t with one row).
void vector_set(matrix_t* v, unsigned int x, float value)
{
    // Mutator is only defined for row vectors.
    assert(v->rows == 1);
    v->matrix[x] = value;
}
|
12,210 | /* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Example showing the use of LTO callbacks with CUFFT to perform
* truncation with zero padding.
*
*/
#include <cufftXt.h>
// Parameters handed to the cuFFT callback through its `info` pointer.
struct cb_params {
unsigned window_N;     // number of leading samples to keep per signal
unsigned signal_size;  // length of each signal in the batch
};
// This is the load callback routine. It zeroes samples that fall outside
// a truncation window specified by the user (zero-padding on load)
// NOTE: unlike the non-LTO version, the callback device function
// must have the name cufftJITCallbackLoadComplex, it cannot be aliased
// cuFFT LTO load callback: returns element `index` of the input buffer,
// or a zero complex value when the sample's position within its signal
// (index modulo signal_size) lies beyond the truncation window window_N.
// The function name is mandated by cuFFT for LTO callbacks (see note above).
__device__ cufftComplex cufftJITCallbackLoadComplex(void *input,
size_t index,
void *info,
void *sharedmem) {
const cb_params* params = static_cast<const cb_params*>(info);
// NOTE(review): named cb_output, but this points at the load *input*.
cufftComplex* cb_output = static_cast<cufftComplex*>(input);
const unsigned sample = index % params->signal_size;
return (sample < params->window_N) ? cb_output[index] : cufftComplex{0.f, 0.f};
}
|
12,211 | #include "includes.h"
#pragma comment(lib,"cublas.lib")
using namespace std;
//==============================Function Prototypes================================
double getRand();
// Backprop delta for a hidden layer: scales the incoming delta by the
// logistic-sigmoid derivative sigma'(a) = sigma(a) * (1 - sigma(a)),
// where sigma(a) = 1 / (1 + exp(-a)) and a = Activation[n].
// No bounds check: launch exactly one thread per element.
// NOTE(review): exp() promotes to double; expf() would keep the math in
// float -- confirm precision requirements before changing.
__global__ void deltaCalcHidden(float *Activation,float *delta){
int n = blockIdx.x*blockDim.x + threadIdx.x;
delta[n] = delta[n] * (1 / (1 + exp(-Activation[n]))*(1 - 1 / (1 + exp(-Activation[n]))));
} |
12,212 | #include <stdio.h>
#include <math.h>
#include <iostream>
#define N (512*512)
#define THREADS_PER_BLOCK 128
using namespace std;
// Logistic-map step: z[i] = a * x[i] * (1 - x[i]) for every i < n.
__global__ void logistic(unsigned int n, float a, float *x, float *z) {
    unsigned int gid = blockDim.x*blockIdx.x + threadIdx.x;
    if (gid >= n)
        return;
    float xi = x[gid];
    z[gid] = a * xi * (1 - xi);
}
// In-place tree sum of x[0..n-1] by a single thread block; the total
// lands in x[0]. Works for non-power-of-two n: each pass folds the top
// m = n/2 elements onto the bottom n-m (x[i] += x[i + (n-m)]), so an odd
// middle element simply waits for a later pass.
// Requires blockDim.x >= n/2 and a one-block launch.
__device__ void reduce_sum_dev(unsigned int n, float *x) {
unsigned int myId = threadIdx.x;
for(unsigned int m = n >> 1; m > 0; m = n >> 1) {
n -= m;
// barrier is outside the divergent if: all threads reach it each pass
__syncthreads();
if(myId < m)
x[myId] += x[myId+n];
}
}
// Kernel wrapper: single-block in-place reduction of x[0..n-1] into x[0].
// Must be launched with one block and blockDim.x >= n/2.
__global__ void reduce_sum(uint n, float *x) {
reduce_sum_dev(n, x);
}
// Sequential reference: sum of the first n elements of x.
float reduce_sum_ref(uint n, float *x) {
    float total = 0.0f;
    for(uint i = 0; i < n; ++i) {
        total += x[i];
    }
    return total;
}
/******************************
* dotprod: squares x in place, then each block reduces its own slice and
* writes one partial sum per block to z; the caller must reduce z
* (e.g. with reduce_sum) to get the final dot product.
*******************************/
// Squared-norm partials: x[i] *= x[i] element-wise, then each block
// tree-reduces its blockDim.x-wide slice (m trims the ragged final block
// when n is not a multiple of blockDim.x) and thread 0 stores the
// block's partial sum into z[blockIdx.x]. Destroys the contents of x.
__global__ void dotprod(uint n, float *x, float *z) {
uint blockBase = blockDim.x * blockIdx.x;
uint myId = blockBase + threadIdx.x;
// number of valid elements in this block's slice
uint m = min(blockDim.x, n - blockBase);
if(myId < n)
x[myId] *= x[myId];
reduce_sum_dev(m, &(x[blockBase]));
if((myId < n) && (threadIdx.x == 0))
z[blockIdx.x] = x[myId];
}
// Dot product of x with itself (squared L2 norm): each block stages its
// THREADS_PER_BLOCK squared elements in shared memory, thread 0 sums them
// serially and adds the block partial into *dot with one atomicAdd.
// Fix: the shared staging buffer and the partial sum were declared int,
// silently truncating every float product; both are float now.
__global__ void my_dotprod(float *x, float *dot)
{
    __shared__ float product[THREADS_PER_BLOCK];
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    product[threadIdx.x] = x[index] * x[index];
    //Make sure every thread has finished
    __syncthreads();
    if(index==0) *dot = 0; //Ask one thread to set c to zero.
    // NOTE(review): other blocks may atomicAdd before thread 0 clears
    // *dot -- callers should zero *dot before launching instead.
    //Sum the elements serially to obtain dot product
    if( 0 == threadIdx.x ) //Every block to do c += sum
    {
        float sum = 0.0f;
        for(int j=0; j < THREADS_PER_BLOCK; j++) {
            sum += product[j];
        }
        atomicAdd(dot,sum);
    }
}
// Print up to the first 10 elements of x, labelled with `who`, formatting
// each element with printf format `fmt`; append ", ..." when truncated.
// Fix: the original printed all n elements and then appended ", ..."
// anyway -- the ellipsis only makes sense when the output is truncated.
void print_vec(float *x, unsigned int n, const char *fmt, const char *who) {
    printf("%s = ", who);
    for(unsigned int i = 0; i < n && i < 10; i++) {
        if(i > 0) printf(", ");
        printf(fmt, x[i]);
    }
    if(n > 10) printf(", ...");
    printf("\n");
}
// Squared L2 norm of x (length n), computed on the GPU: dotprod produces
// one partial per block, reduce_sum folds the partials in a second pass.
// n must be a multiple of THREADS_PER_BLOCK.
// Fixes vs. original: launch configs use n rather than the global N,
// buffers are sized with sizeof(float) (not sizeof(int)), the partials
// buffer holds only one float per block, and all allocations are freed.
float norm(float * x, unsigned int n) {
    float result = 0.0f;
    float *dev_x, *dev_dot;
    unsigned int nBlocks = n / THREADS_PER_BLOCK;
    size_t size = n * sizeof(float);
    // Allocating device memory
    cudaMalloc((void **)&dev_x, size);
    cudaMalloc((void **)&dev_dot, nBlocks * sizeof(float));
    // Copying input values
    cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice);
    dotprod<<< nBlocks, THREADS_PER_BLOCK >>>(n, dev_x, dev_dot);
    reduce_sum<<<1, nBlocks>>>(nBlocks, dev_dot);
    // blocking copy also synchronizes with the kernels above
    cudaMemcpy(&result, dev_dot, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(dev_x);
    cudaFree(dev_dot);
    return result;
}
// Sequential reference: squared L2 norm, sum of x[i]^2.
float seq_norm(float *x, unsigned int n) {
    float acc = 0.0f;
    for(unsigned int i = 0; i < n; ++i) {
        float xi = x[i];
        acc += xi * xi;
    }
    return (acc);
}
// Driver: builds a test vector, computes its squared norm on the GPU and
// on the CPU, and prints both for comparison.
// Fix: the original passed float* pointers to printf's %f conversions
// (undefined behavior) and leaked three N-sized host buffers used to
// hold single scalars; scalars are now plain floats and x is freed.
int main(void) {
    int size = N * sizeof(float);
    float *x = (float *) malloc ( size );
    for (int i = 0; i < N; ++i){
        x[i] = i;
    }
    print_vec(x, N, "%5.3f", "y");
    float p_norm = norm(x, N);
    float s_norm = seq_norm(x, N);
    printf("\nParallel = %f, Sequential = %f \n", p_norm, s_norm);
    free(x);
    return 0;
}
// int main( void ) {
// float *x, *y, *dot;
// float * dev_x , * dev_y , * dev_dot;
// float a;
// int size = N * sizeof (int );
// // allocate device copies of a, b, c
// cudaMalloc ( (void **)& dev_x , size );
// cudaMalloc ( (void **)& dev_y , size );
// cudaMalloc ( (void **)& dev_dot , sizeof (int ) );
// x = (float *) malloc ( size );
// y = (float *) malloc ( size );
// dot = (float *) malloc (sizeof (float ) );
// cout<<N;
// for (int i = 0; i < N; ++i){
// x[i] = i;
// }
// a = 1.0;
// cudaMemcpy (dev_x , x, size, cudaMemcpyHostToDevice );
// cudaMemcpy (dev_y , y, size, cudaMemcpyHostToDevice );
// // cudaMemcpy (dev_a , a, sizeof (int ), cudaMemcpyHostToDevice );
// // launch dot() kernel
// dotprod<<< N/THREADS_PER_BLOCK , THREADS_PER_BLOCK >>>(dev_x, dev_dot);
// logistic<<< N/THREADS_PER_BLOCK , THREADS_PER_BLOCK >>>(N, a, dev_x, dev_y);
// // copy device result back to host copy of c
// cudaMemcpy ( dot, dev_dot , sizeof (int ) , cudaMemcpyDeviceToHost );
// // fprintf(" Ans = %15.10d\n", c);
// // cout<< *c;
// printf("\ndot(a,b) = %f\n", *dot); //Output result
// print_vec(x, N, "%5.3f", "y");
// free( x ); free( y ); free( dot );
// cudaFree (dev_x);
// cudaFree (dev_y);
// cudaFree (dev_dot);
// return 0;
// }
|
12,213 | #include<iostream>
#include<cstdio>
using namespace std;
// Per-block partial sum of squared deviations from `mean`: block i sums
// (a[j]-mean)^2 over its 256-element slice a[256*i .. min(256*(i+1),n)-1]
// and stores the result in b[i]. Launched with one thread per block
// (each block works serially). Note the double accumulator is truncated
// to int when stored into b.
__global__ void var(int *a,int *b,int n,float mean)
{
int block=256*blockIdx.x;
double sum=0.0f;
for(int i=block;i<min(block+256,n);i++)
{
sum=sum+((a[i]-mean)*(a[i]-mean));
}
b[blockIdx.x]=sum;
}
// Per-block partial sum: block i adds up its 256-element slice
// a[256*i .. min(256*(i+1),n)-1] and writes the total to b[i].
// Launched with one thread per block; each block sums serially.
__global__ void sum(int *a,int *b,int n)
{
    int first = 256 * blockIdx.x;
    int last = min(first + 256, n);
    int acc = 0;
    for(int idx = first; idx < last; idx++)
    {
        acc += a[idx];
    }
    b[blockIdx.x] = acc;
}
// Reads n random values, then computes sum/mean/standard deviation both
// sequentially and with iterated per-block GPU reductions, timing each
// with CUDA events.
int main()
{
cout<<"Enter the no of elements:";
int n;
cin>>n;
// n1 drives the variance reduction; p1 keeps the original count
int n1=n,p1=n;
int *a=(int*)malloc(n*sizeof(int));
for(int i=0;i<n;i++)
{
a[i]=rand()%100;
cout<<a[i]<<"\t";
}
cudaEvent_t start1,end1;
cudaEventCreate(&start1);
cudaEventCreate(&end1);
cudaEventRecord(start1);
// ---- sequential pass (timed with GPU events) ----
int sum1=0;
for(int i=0;i<n;i++)
{
sum1+=a[i];
}
float mean1=0.0f;
mean1=sum1/(n*1.0f);
double s=0.0f;
for(int i=0;i<n;i++)
{
s=s+((a[i]-mean1)*(a[i]-mean1));
}
double sd1=sqrt(s/n*1.0f);
cout<<"\nAdd="<<s;
cudaEventRecord(end1);
cudaEventSynchronize(end1);
float time1=0;
cudaEventElapsedTime(&time1,start1,end1);
cout<<"\nSequential Processing:";
cout<<"\nSum="<<sum1;
cout<<"\nMean="<<mean1;
cout<<"\nStandard deviation="<<sd1;
cout<<"\nSequential time="<<time1<<endl;
// ---- parallel sum: repeatedly collapse 256-element slices ----
int *ad,*bd;
int size=n*sizeof(int);
cudaMalloc(&ad,size);
cudaMemcpy(ad,a,size,cudaMemcpyHostToDevice);
int grids=ceil(n*1.0f/256.0f);
cudaMalloc(&bd,grids*sizeof(int));
// grid stays sized for the original n; on later passes the extra
// blocks see an empty range and just write 0 into unused slots
dim3 grid(grids,1);
dim3 block(1,1);
int p=n;
cudaEvent_t start,end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
while(n>1)
{
sum<<<grid,block>>>(ad,bd,n);
n=ceil(n*1.0f/256.0f);
cudaMemcpy(ad,bd,n*sizeof(int),cudaMemcpyDeviceToDevice);
}
cudaEventRecord(end);
cudaEventSynchronize(end);
float time=0;
cudaEventElapsedTime(&time,start,end);
int add[2];
n=p;
// copies only the 4-byte total (add[1] stays uninitialized)
cudaMemcpy(add,ad,4,cudaMemcpyDeviceToHost);
cout<<"\nSum="<<add[0]<<endl;
float mean=0.0f;
mean=add[0]/(n*1.0f);
cout<<"Mean="<<mean<<endl;
// ---- parallel variance: same collapsing scheme with the var kernel ----
int *ad1,*bd1;
cudaMalloc(&ad1,size);
cudaMemcpy(ad1,a,size,cudaMemcpyHostToDevice);
int grids1=ceil(n1*1.0f/256.0f);
cudaMalloc(&bd1,grids1*sizeof(int));
dim3 grid1(grids1,1);
dim3 block1(1,1);
//var<<<grid,block>>>(ad,bd,n,mean);
//n=ceil(n*1.0f/256.0f);
//sum<<<grid,block>>>(bd,ad,n);
while(n1>1)
{
var<<<grid1,block1>>>(ad1,bd1,n1,mean);
n1=ceil(n1*1.0f/256.0f);
cudaMemcpy(ad1,bd1,n1*sizeof(int),cudaMemcpyDeviceToDevice);
}
long long int add1[2];
// NOTE(review): only 4 bytes are copied into a long long slot, so the
// upper half of add1[0] is uninitialized stack data -- confirm intent.
cudaMemcpy(add1,ad1,4,cudaMemcpyDeviceToHost);
cout<<"\nAdd="<<add1[0]<<endl;
float sd_=sqrt(add1[0]/(p1*1.0f));
cout<<"Standard deviation="<<sd_<<endl;
cout<<"Parallel time="<<time<<endl;
return cudaDeviceSynchronize();
}
|
12,214 | #include <stdio.h>
#include <cuda_runtime.h>
#include <cmath>
#include <algorithm>
constexpr auto c_blockSize = 256;
// Unnormalized isotropic 2D Gaussian centered at (center_x, center_y)
// with standard deviation eps, evaluated at (x, y).
__device__
float CudaIsotropicGaussKernel2D(float x, float y, float center_x, float center_y, float eps)
{
    float dx = x - center_x;
    float dy = y - center_y;
    float variance = eps * eps;
    float gx = ::expf(-0.5f * dx * dx / variance);
    float gy = ::expf(-0.5f * dy * dy / variance);
    return gx * gy;
}
// 2D kernel density estimate: one thread per output pixel of the
// numBins x numBins image, each summing a Gaussian contribution from
// every data point. Pixel (i, j) maps linearly into [minY,maxY] x
// [minX,maxX]. Launch with a 1D grid covering numBins*numBins threads.
__global__
void CudaKDE2D(const float* xData, const float* yData, size_t dataCount, float epsilon, float minX, float maxX, float minY, float maxY, float* kdeImage, size_t numBins)
{
int imageSize = static_cast<int>(numBins * numBins);
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id >= imageSize)
return;
// row (i) and column (j) of this thread's pixel
int i = id / numBins;
int j = id % numBins;
float rangeX = maxX - minX;
float rangeY = maxY - minY;
// pixel center in data coordinates
float y = float(i) / (numBins - 1)*rangeY + minY;
float x = float(j) / (numBins - 1)*rangeX + minX;
float sum = 0.0f;
// Version 1: read every data point straight from global memory
/*{
for (int dataIndex = 0; dataIndex < dataCount; dataIndex++)
{
sum += CudaIsotropicGaussKernel2D(x, y, xData[dataIndex], yData[dataIndex], epsilon);
}
}*/
// Version 2: stage the data in shared-memory tiles of c_blockSize points
{
__shared__ float xDataShared[c_blockSize];
__shared__ float yDataShared[c_blockSize];
for (int blockStart = 0; blockStart < dataCount; blockStart += c_blockSize)
{
// NOTE(review): every thread copies the whole tile serially (all
// writing identical values); a threadIdx-strided load would do the
// same work once per element -- confirm before optimizing.
for (int index = 0; index < c_blockSize && (blockStart + index) < dataCount; ++index)
{
xDataShared[index] = xData[blockStart + index];
yDataShared[index] = yData[blockStart + index];
}
__syncthreads();
for (int index = 0; index < c_blockSize && (blockStart + index) < dataCount; ++index)
{
sum += CudaIsotropicGaussKernel2D(x, y, xDataShared[index], yDataShared[index], epsilon);
}
// keep the tile stable until all threads finish reading it
__syncthreads();
}
}
kdeImage[id] = sum;
}
// Host wrapper: uploads the (x, y) samples, runs CudaKDE2D to fill a
// numBins x numBins density image into kdeImage, and returns the image
// maximum. No CUDA error checking is performed; the blocking
// cudaMemcpy after the launch also synchronizes with the kernel.
float CallKDE2D(const float* xData, const float* yData, size_t dataCount, float epsilon, float minX, float maxX, float minY, float maxY, float* kdeImage, size_t numBins)
{
size_t dataByteSize = dataCount * sizeof(float);
size_t imageByteSize = numBins * numBins * sizeof(float);
// Initialize GPU memory for the x-data:
float* cudaXData;
cudaMalloc(&cudaXData, dataByteSize);
cudaMemcpy(cudaXData, xData, dataByteSize, cudaMemcpyHostToDevice);
// Initialize GPU memory for the y-data:
float* cudaYData;
cudaMalloc(&cudaYData, dataByteSize);
cudaMemcpy(cudaYData, yData, dataByteSize, cudaMemcpyHostToDevice);
// Initialize GPU memory for KDE image with values 0:
float* cudaKDEImage;
cudaMalloc(&cudaKDEImage, imageByteSize);
cudaMemset(cudaKDEImage, 0, imageByteSize);
// Call kde 2d: one thread per pixel, rounded up to full blocks
int blockDimension = c_blockSize;
int gridDimension = static_cast<int>((numBins*numBins + blockDimension - 1) / blockDimension);
CudaKDE2D << <gridDimension, blockDimension >> > (cudaXData, cudaYData, dataCount, epsilon, minX, maxX, minY, maxY, cudaKDEImage, numBins);
// Copy memory of the KDE image from GPU to CPU (implicitly synchronizes):
cudaMemcpy(kdeImage, cudaKDEImage, imageByteSize, cudaMemcpyDeviceToHost);
// Free memory on GPU:
cudaFree(cudaKDEImage);
cudaFree(cudaYData);
cudaFree(cudaXData);
return *std::max_element(kdeImage, kdeImage + (numBins*numBins));
}
|
12,215 | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Round-trip a 32M-float buffer between host and device using pinned
// (page-locked) host memory, which allows faster DMA transfers.
void pined_memory()
{
    const int count = 1<<25;
    const int bytes = count * sizeof(float);
    float * h_buf;
    cudaMallocHost((float **)&h_buf, bytes);
    float * d_buf;
    cudaMalloc((float **)&d_buf, bytes);
    for(int idx = 0; idx < count; ++idx)
    {
        h_buf[idx] = 6;
    }
    cudaMemcpy(d_buf, h_buf, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(h_buf, d_buf, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_buf);
    cudaFreeHost(h_buf);
    cudaDeviceReset();
}
// Round-trip a 32M-float buffer between host and device using ordinary
// pageable host memory (baseline for comparison with pined_memory).
void paged_memory()
{
    const int count = 1<<25;
    const int bytes = count * sizeof(float);
    float * h_buf = (float *)malloc(bytes);
    float * d_buf;
    cudaMalloc((float **)&d_buf, bytes);
    for(int idx = 0; idx < count; ++idx)
    {
        h_buf[idx] = 6;
    }
    cudaMemcpy(d_buf, h_buf, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(h_buf, d_buf, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_buf);
    free(h_buf);
    cudaDeviceReset();
}
// Entry point: lets the user pick the pageable- or pinned-memory demo.
// Any input other than 1 or 2 does nothing.
int main()
{
int n;
printf("Enter 1 for paged and 2 for pinned : ");
scanf("%d",&n);
switch(n)
{
case 1: paged_memory();break;
case 2: pined_memory();break;
}
return 0;
} |
12,216 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
// Interleaved-addressing tree reduction (classic CUDA "reduce2" variant):
// each block sums its blockDim.x ints from g_idata into
// g_odata[blockIdx.x]. Launch with dynamic shared memory of
// blockDim.x * sizeof(int).
// Fix: the guard was `index < blockDim.x / s`, which skips most pairs
// once s > 1 and yields a wrong sum; the correct guard is
// `index < blockDim.x`.
__global__ void reduce2(int *g_idata, int *g_odata) {
    extern __shared__ volatile int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();
    // strided index keeps active threads contiguous (less divergence)
    for(unsigned int s = 1; s < blockDim.x; s *= 2) {
        int index = 2 * s * tid;
        if (index < blockDim.x) {
            sdata[index] += sdata[index + s];
        }
        __syncthreads();
#ifdef DEBUG
        if (tid == 0) {
            for(unsigned i = 0; i < 32; i++)
                printf("%d\n",sdata[i]);
        }
#endif
    }
    // thread 0 publishes the block total
    if (tid == 0) {
        g_odata[blockIdx.x] = sdata[0];
    }
}
// Sums the integers 0..31 with a one-block reduction and prints the
// result (496).
// Fix: the kernel was launched with only 32 *bytes* of dynamic shared
// memory; reduce2 needs one int per thread, i.e. 32 * sizeof(int) bytes,
// so the reduction read past its shared allocation.
int main() {
    const int n = 32;
    int * a = (int*)malloc(sizeof(int)*n);
    int * b = (int*)malloc(sizeof(int)*n);
    for(int i = 0; i < n; i++) {
        a[i] = i;
    }
    int *a_dev;
    int *b_dev;
    cudaSetDevice(0);
    cudaMalloc(&a_dev, sizeof(int)*n);
    cudaMalloc(&b_dev, sizeof(int)*n);
    cudaMemcpy(a_dev, a, sizeof(int)*n, cudaMemcpyHostToDevice);
    // third launch parameter: dynamic shared memory in BYTES
    reduce2 <<<1, n, n * sizeof(int)>>> (a_dev, b_dev);
    // blocking copy; also surfaces any kernel failure
    cudaMemcpy(b, b_dev, sizeof(int)*n, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    printf("%d\n", b[0]);
    cudaFree(a_dev);
    cudaFree(b_dev);
    free(a);
    free(b);
}
|
12,217 | #include "includes.h"
// In-place element-wise matrix add: mA_d += mB_d for a w x h row-major
// matrix. 2D launch; the guard makes out-of-range threads no-ops, so any
// grid covering the matrix is safe.
__global__ void addMat( float * mA_d, float * mB_d, std::size_t w, std::size_t h )
{
auto x = blockDim.x * blockIdx.x + threadIdx.x;
auto y = blockDim.y * blockIdx.y + threadIdx.y;
if( x < w && y < h )
{
mA_d[ y * w + x ] += mB_d[ y * w + x ];
}
} |
12,218 | #include <cstdio>
#include <cstdlib>
const int DIM = 128;
const int N = DIM*DIM;
const int CUDA_DIM = 128;
const int MOD = 5;
// Print the DIM x DIM matrix m, 4 characters per entry, one matrix row
// per output line. Compiled to a no-op when NO_PRINT is defined.
void printMat(int *m) {
#ifndef NO_PRINT
    for (int row = 0; row < DIM; ++row) {
        for (int col = 0; col < DIM; ++col) {
            printf("%4d", m[row*DIM + col]);
        }
        printf("\n");
    }
#endif
}
int *cache;
// Stack-style bump allocator over the global `cache` device buffer:
// construction claims n*DIM ints for a temporary sub-matrix, destruction
// rolls the pointer back to where this object started.
// NOTE(review): correctness relies on strict LIFO construct/destroy
// order of mat objects (guaranteed here by C++ scoping in matMul).
struct mat {
int *p;
mat(int n) {
p = cache;
cache += n*DIM;
}
// device-side element access into the claimed region
__device__ int& operator[](int n) {
return p[n];
}
// pointer offset, used to address quadrants within the region
int* operator+(int n) {
return p+n;
}
~mat() {
cache = p;
}
};
// One element-wise matrix job: c = a + b when add is true, c = a - b
// otherwise. A default-constructed instance (a == NULL) terminates a
// job list in matAddOrSub.
struct addOrSubInfo {
int *a, *b, *c;
bool add;
addOrSubInfo(int *a, int *b, int *c, bool add):
a(a), b(b), c(c), add(add) {}
addOrSubInfo(): a(NULL) {}
};
// Fixed-capacity add/sub job list passed by value to matAddOrSub:
// at most 10 jobs plus one NULL-terminator entry.
struct infoSet {
addOrSubInfo infoSet[11];
};
// One matrix multiplication job: c = a * b (sub-matrices with stride DIM).
struct MulInfo {
int *a, *b, *c;
MulInfo(int *a, int *b, int *c):
a(a), b(b), c(c) {}
MulInfo() {}
};
// Up to 7 multiplication jobs (one per Strassen product), passed by value.
struct MulInfoSet {
MulInfo infoSet[7];
};
// Run every add/sub job in the list until the NULL terminator (a == NULL).
// Launched as <<<dim, dim>>>: blockIdx.x is the row and threadIdx.x the
// column of a dim x dim sub-matrix; DIM is the backing row stride, so
// one thread handles the same offset across all jobs.
__global__ void matAddOrSub(infoSet addOrSubInfoSet) {
int x = blockIdx.x, y = threadIdx.x;
int offset = x*DIM + y;
addOrSubInfo *info = addOrSubInfoSet.infoSet;
while (info->a) {
info->c[offset] = info->add ?
info->a[offset]+info->b[offset]:info->a[offset]-info->b[offset];
++info;
}
}
// Naive multiplication of the sub-matrices in each MulInfo job.
// Launched with grid (jobs, dim) and blockDim.x == dim: blockIdx.x picks
// the job, blockIdx.y the output row, threadIdx.x the output column.
// DIM is the row stride of the full backing matrix; blockDim.x is the
// sub-matrix dimension.
__global__ void matMulCuda(MulInfoSet mulInfoSet) {
int idx = blockIdx.x;
int x = blockIdx.y, y = threadIdx.x;
int offset = x*DIM + y;
int dim = blockDim.x;
int *a = mulInfoSet.infoSet[idx].a, *b = mulInfoSet.infoSet[idx].b,
*c = mulInfoSet.infoSet[idx].c;
c[offset] = 0;
for (int i=0; i<dim; ++i) {
c[offset] += a[x*DIM+i]*b[i*DIM+y];
}
}
/* Strassen matrix multiplication: c = a * b for dim x dim sub-matrices
 * embedded in DIM-stride storage. At or below CUDA_DIM the naive kernel
 * is used directly; otherwise the operands are split into quadrants, the
 * ten Strassen sums s1..s10 and seven products m1..m7 are formed, and the
 * quadrants of c are assembled. Temporaries come from the `mat` bump
 * allocator; all kernels run on the default stream, so issue order alone
 * serializes the dependent launches. */
void matMul(int *a, int *b, int *c, int dim) {
if (dim <= CUDA_DIM) {
// base case: one naive multiplication job
dim3 d(1, dim);
MulInfoSet mulInfoSet;
mulInfoSet.infoSet[0] = MulInfo(a, b, c);
matMulCuda<<<d, dim>>>(mulInfoSet);
return;
}
mat p1(dim), p2(dim), p3(dim);
int half = dim/2;
// quadrant base pointers of a, b, and the s-temporaries
int *a11=a, *a12=a+half, *a21=a+half*DIM, *a22=a+half*DIM+half;
int *b11=b, *b12=b+half, *b21=b+half*DIM, *b22=b+half*DIM+half;
int *s1=p1+0, *s2=p1+half, *s3=p1+half*DIM, *s4=p1+half*DIM+half;
int *s5=p2+0, *s6=p2+half, *s7=p2+half*DIM, *s8=p2+half*DIM+half;
int *s9=p3+0, *s10=p3+half;
// batch the ten Strassen pre-sums into one kernel launch
infoSet addOrSubInfoSet;
addOrSubInfoSet.infoSet[0] = addOrSubInfo(a11, a22, s1, true);
addOrSubInfoSet.infoSet[1] = addOrSubInfo(b11, b22, s2, true);
addOrSubInfoSet.infoSet[2] = addOrSubInfo(a21, a22, s3, true);
addOrSubInfoSet.infoSet[3] = addOrSubInfo(b12, b22, s4, false);
addOrSubInfoSet.infoSet[4] = addOrSubInfo(b21, b11, s5, false);
addOrSubInfoSet.infoSet[5] = addOrSubInfo(a11, a12, s6, true);
addOrSubInfoSet.infoSet[6] = addOrSubInfo(a21, a11, s7, false);
addOrSubInfoSet.infoSet[7] = addOrSubInfo(b11, b12, s8, true);
addOrSubInfoSet.infoSet[8] = addOrSubInfo(a12, a22, s9, false);
addOrSubInfoSet.infoSet[9] = addOrSubInfo(b21, b22, s10, true);
addOrSubInfoSet.infoSet[10] = addOrSubInfo();
matAddOrSub<<<half, half>>>(addOrSubInfoSet);
mat q1(dim), q2(dim);
// the seven Strassen products m1..m7
int *m1=q1+0, *m2=q1+half, *m3=q1+half*DIM, *m4=q1+half*DIM+half;
int *m5=q2+0, *m6=q2+half, *m7=q2+half*DIM;
if (dim > CUDA_DIM*2) {
// halves are still too large for one kernel: recurse
matMul(s1, s2, m1, half);
matMul(s3, b11, m2, half);
matMul(a11, s4, m3, half);
matMul(a22, s5, m4, half);
matMul(s6, b22, m5, half);
matMul(s7, s8, m6, half);
matMul(s9, s10, m7, half);
} else {
// halves fit: batch all seven products into a single launch
MulInfoSet mulInfoSet;
mulInfoSet.infoSet[0] = MulInfo(s1, s2, m1);
mulInfoSet.infoSet[1] = MulInfo(s3, b11, m2);
mulInfoSet.infoSet[2] = MulInfo(a11, s4, m3);
mulInfoSet.infoSet[3] = MulInfo(a22, s5, m4);
mulInfoSet.infoSet[4] = MulInfo(s6, b22, m5);
mulInfoSet.infoSet[5] = MulInfo(s7, s8, m6);
mulInfoSet.infoSet[6] = MulInfo(s9, s10, m7);
dim3 d(7, half);
matMulCuda<<<d, half>>>(mulInfoSet);
}
// assemble c's quadrants from the products (second add/sub batch)
int *c11=c, *c12=c+half, *c21=c+half*DIM, *c22=c+half*DIM+half;
addOrSubInfoSet.infoSet[0] = addOrSubInfo(m1, m4, c11, true);
addOrSubInfoSet.infoSet[1] = addOrSubInfo(c11, m5, c11, false);
addOrSubInfoSet.infoSet[2] = addOrSubInfo(c11, m7, c11, true);
addOrSubInfoSet.infoSet[3] = addOrSubInfo(m3, m5, c12, true);
addOrSubInfoSet.infoSet[4] = addOrSubInfo(m2, m4, c21, true);
addOrSubInfoSet.infoSet[5] = addOrSubInfo(m1, m2, c22, false);
addOrSubInfoSet.infoSet[6] = addOrSubInfo(c22, m3, c22, true);
addOrSubInfoSet.infoSet[7] = addOrSubInfo(c22, m6, c22, true);
addOrSubInfoSet.infoSet[8] = addOrSubInfo();
matAddOrSub<<<half, half>>>(addOrSubInfoSet);
}
// Driver: multiplies two random DIM x DIM matrices with the Strassen GPU
// path, times it with CUDA events, and checks the result against a naive
// CPU product.
// NOTE(review): a, b, c, ans are 64KB stack arrays each (256KB total) --
// confirm the build's stack limit accommodates this.
int main() {
int a[N], b[N], c[N];
for (int i=0; i<N; ++i) {
a[i] = rand()%MOD;
b[i] = rand()%MOD;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// scratch arena for matMul's temporaries (sized for the recursion depth)
cudaMalloc(&cache, sizeof(int[N*5]));
int *devA, *devB, *devC;
cudaMalloc(&devA, sizeof(int[N]));
cudaMalloc(&devB, sizeof(int[N]));
cudaMalloc(&devC, sizeof(int[N]));
cudaMemcpy(devA, a, sizeof(int[N]), cudaMemcpyHostToDevice);
cudaMemcpy(devB, b, sizeof(int[N]), cudaMemcpyHostToDevice);
matMul(devA, devB, devC, DIM);
// blocking copy: also waits for all queued kernels to finish
cudaMemcpy(c, devC, sizeof(int[N]), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
printf("a:\n");
printMat(a);
printf("\nb:\n");
printMat(b);
printf("\nc:\n");
printMat(c);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("\nTime taken for matrix multiplication: %.3f ms", elapsedTime);
// CPU reference product for verification
int ans[N];
for (int i=0; i<DIM; ++i) {
for (int j=0; j<DIM; ++j) {
int offset = i*DIM+j;
ans[offset] = 0;
for (int k=0; k<DIM; ++k) {
ans[offset] += a[i*DIM+k]*b[k*DIM+j];
}
}
}
try {
for (int i=0; i<N; ++i) {
if (ans[i]!=c[i]) {
throw 1;
}
}
printf("\nSuccess!\n");
} catch (int) {
printf("\nFailed\n");
}
cudaFree(devA);
cudaFree(devB);
cudaFree(devC);
cudaFree(cache);
}
|
12,219 | #include "includes.h"
// A[i] = exp(A[i] - *bias), flattening a 2D grid of 1D blocks into a
// linear index; subtracting a bias before exponentiating is the usual
// numerical-stability shift for softmax-style computations.
// No bounds check: the launch must cover A exactly.
__global__ void kernExp(double* A, double* bias) {
int b = blockIdx.y * gridDim.x + blockIdx.x;
int i = b * blockDim.x + threadIdx.x;
A[i] = exp(A[i] - *bias);
} |
12,220 | #include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <math.h>
#include "cs_dbg.h"
#include "cs_cuda.h"
#include "cs_helper.h"
#include "cs_vector.h"
#define CUDA_DBG
#define CUDA_DBG1
// Zero out in1[tblp[t]] for every t < tbl_size, each thread striding by
// CUDA_MAX_THREADS.
// NOTE(review): assumes CUDA_MAX_THREADS equals the total number of
// launched threads (gridDim.x * blockDim.x) -- confirm against cs_cuda.h.
__global__ void d_do_vector_zero_some ( float *in1, int *tblp, int tbl_size )
{
int t_idx = blockIdx.x*blockDim.x + threadIdx.x;
while ( t_idx < tbl_size )
{
in1[ tblp[ t_idx ]] = 0 ;
t_idx += CUDA_MAX_THREADS ;
}
}
/*
h_do_vector_zero_some: will zero out the data fields in the vector d_datap,
these fields are indexed by
the entries in the d_inp. the size of d_inp is tbl_size.
*/
/* Host wrapper: launches d_do_vector_zero_some to zero the entries of the
 * device vector d_datap indexed by d_inp (tbl_size indices), then blocks
 * until the kernel completes.
 * Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
 * is its drop-in replacement. */
void
h_do_vector_zero_some ( float *d_datap, int *d_inp, int tbl_size )
{
    int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
    int nBlocks ; // computed by h_block_adj from tbl_size and block size
    h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
    d_do_vector_zero_some <<< nBlocks, nThreadsPerBlock >>> ( d_datap, d_inp, tbl_size ) ;
    cudaDeviceSynchronize() ;
}
|
12,221 | #include<stdio.h>
// Fill a[0..n-1] with the deterministic pattern (i * 77) mod m.
// Fix: the loop started at threadIdx.x but strode by
// blockDim.x * gridDim.x, so only the first blockDim.x indices of each
// stride window were ever written and the rest of `a` stayed
// uninitialized; a proper grid-stride loop starts from the global index.
__global__ void pattern(int *__restrict a, int n, int m) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        a[i] = (long long)i*77%m;
    }
}
// Memory-torture benchmark: gathers b at indices taken from a (a
// data-dependent, cache-hostile pattern) and sums adjacent pairs; each
// thread writes its private sum to c so the work cannot be optimized away.
// NOTE(review): when a[i] == m-1 the read b[aa+1] is one element past a
// buffer of m ints -- confirm b is allocated with one element of slack.
__global__ void torture2(int *__restrict a, int *__restrict b, int *__restrict c, int n) {
int s = 0;
int i = threadIdx.x + blockIdx.x * blockDim.x;
int gs = blockDim.x * gridDim.x;
// grid-stride gather-and-sum
for (i = i; i < n; i += gs) {
int aa = a[i];
s += b[aa];
s += b[aa+1];
/*s += b[aa+2];
s += b[aa+3];*/
}
c[threadIdx.x + blockIdx.x * blockDim.x] = s;
}
// Driver for the pattern/torture2 benchmark kernels.
int main(){
	int *a, *b, *c, d;
	int n = 100000, m = 100000;
	const int nThreadsTotal = 10 * 1024;   // matches the <<<10,1024>>> launches below
	cudaMalloc(&a, sizeof(int) * n);
	// Fix: torture2 reads b[a[i]+1] and a[i] can be m-1, so allocate one extra
	// element; also zero b, which was read uninitialized.
	cudaMalloc(&b, sizeof(int) * (m + 1));
	cudaMemset(b, 0, sizeof(int) * (m + 1));
	// Fix: torture2 writes c[0 .. 10*1024-1]; the original 10000-element
	// allocation was 240 ints short (out-of-bounds device writes).
	cudaMalloc(&c, sizeof(int) * nThreadsTotal);
	pattern<<<10,1024>>>(a, n, m);
	cudaDeviceSynchronize();
	for (int i = 0; i < 3000; i++)
		torture2<<<10,1024>>>(a, b, c, n);
	cudaMemcpy(&d, c, sizeof(int), cudaMemcpyDeviceToHost);
	// Fix: release device allocations (all three were leaked).
	cudaFree(a);
	cudaFree(b);
	cudaFree(c);
} |
12,222 | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include<time.h>
#define index(i, j, w) ((i)*(w)) + (j)
__global__ void blurKernel (unsigned char *, unsigned char *, int, int, int);
void cudaBlur(unsigned char * , int, int, int);
// Usage: <prog> width height numOfImages blurSize
int main(int argc, char * argv[]){
	// Fix: validate argc before indexing argv (the original dereferenced
	// argv[1..4] unconditionally).
	if (argc < 5) {
		fprintf(stderr, "usage: %s w h numOfImages blurSize\n", argv[0]);
		return 1;
	}
	int w = (int) atoi(argv[1]);
	int h = (int) atoi(argv[2]);
	int numOfImages = (int) atoi(argv[3]);
	int blurSize = (int) atoi(argv[4]);
	unsigned char * imageArray = (unsigned char *)calloc(w*h, sizeof(unsigned char));
	//assign random unsigned chars to imageArray
	// Fix: the original loop condition was i <= w*h, writing one byte past the
	// end of the allocation.
	for (int i = 0; i < w*h; i++) {
		imageArray[i] = (unsigned char) (rand() % 255 + 1);
	}
	for (int j = 0; j < numOfImages; j++) {
		cudaBlur(imageArray, w, h, blurSize);
	}
	free(imageArray);   // fix: the image buffer was leaked
	return 0;
}
// Blurs imageArray (w x h bytes) in place on the GPU with a box filter of
// half-width blurSize, timing the transfer+kernel round trip.
void cudaBlur(unsigned char * imageArray, int w, int h, int blurSize)
{
	unsigned int num_bytes = w*h*sizeof(unsigned char);
	unsigned char *d_inputArray;
	unsigned char *d_outputArray;
	cudaMalloc(&d_inputArray, num_bytes);
	cudaMalloc(&d_outputArray, num_bytes);
	// Fix: blurKernel indexes pixels in 2-D (Row from blockIdx.y/threadIdx.y),
	// but the original launch used 128x1 blocks on a 1-D grid, so Row was
	// always 0 and only the first image row was ever blurred. Launch a 2-D
	// grid that tiles the whole image (ceil-div on both axes; the original
	// also applied ceil() to an already-truncated integer division).
	dim3 threads_per_block( 16, 8, 1 );   // 128 threads, arranged 2-D
	dim3 blocks_in_grid( (w + threads_per_block.x - 1) / threads_per_block.x,
	                     (h + threads_per_block.y - 1) / threads_per_block.y, 1 );
	clock_t t = clock();
	// (the original also pre-copied a host copy into d_outputArray; that is
	// unnecessary since the kernel writes every in-bounds pixel)
	cudaMemcpy(d_inputArray, imageArray, num_bytes, cudaMemcpyHostToDevice);
	blurKernel<<<blocks_in_grid, threads_per_block>>>(d_inputArray, d_outputArray, w, h, blurSize);
	cudaMemcpy(imageArray, d_outputArray, num_bytes, cudaMemcpyDeviceToHost);
	t = clock() - t;
	double time_taken = ((double)t)/CLOCKS_PER_SEC;
	printf("kernel took %f seconds to execute \n", time_taken);
	cudaFree(d_outputArray);
	cudaFree(d_inputArray);
}
// Box-blur kernel: each thread averages the (2*blurSize+1)^2 neighborhood of
// its pixel (Row, Col), clamped to the image borders, and writes the result to
// d_outputArray. Requires a 2-D launch covering w x h pixels (Row comes from
// the y dimension, Col from the x dimension).
__global__ void blurKernel (unsigned char * d_inputArray, unsigned char * d_outputArray,
int w, int h, int blurSize){
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
if(Col<w && Row < h){
int pixVal = 0;
int pixels = 0;
for(int blurRow = -blurSize; blurRow < blurSize+1; ++blurRow){
for(int blurCol = -blurSize; blurCol < blurSize+1; ++blurCol){
int curRow = Row + blurRow;
int curCol = Col + blurCol;
//verify we have a valid image pixel
if(curRow > -1 && curRow < h && curCol > -1 && curCol < w){
pixVal += d_inputArray[curRow*w+curCol];
pixels++; // keep track of number of pixels in the avg
}
}
}
//write our new pixel value out (integer average over the in-bounds neighbors)
d_outputArray[Row*w+Col] = (unsigned char)(pixVal/pixels);
}
}
|
12,223 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math_constants.h"
#include"cuComplex.h"
#include <stdlib.h>
#include <stdio.h>
//A wrapper function to call cuda safely (A ton of error checks)
cudaError_t cudaDDFT(unsigned nbits, unsigned nthreads, cuDoubleComplex * out, cuDoubleComplex * in);
// Direct O(N^2) DFT, N = 2^nbits: each thread evaluates its contiguous slice
// of output bins via Horner's scheme, acc = acc * W[k] + in[N-2-j].
// Assumes a single block of nthreads threads with nthreads dividing N.
__global__ void ddftKernel(unsigned nbits, unsigned nthreads, cuDoubleComplex* out, cuDoubleComplex* in, cuDoubleComplex* W)
{
	long int N = 1 << nbits;
	long int Nloc = N / nthreads;      // bins per thread
	long int tid = threadIdx.x;
	long int istart = tid*Nloc;
	long int ifinish = (tid+1)*Nloc;
	for (int k = istart; k < ifinish; k++) {
		out[k] = in[N - 1];
		for (int j = 0; j < N - 1; j++) {
			// Complex multiply-accumulate. Bug fix: the original overwrote
			// out[k].x first and then used the *updated* real part when
			// computing out[k].y, corrupting every term; both components must
			// be computed from the previous value.
			double re = out[k].x * W[k].x - out[k].y * W[k].y + in[N - 2 - j].x;
			double im = out[k].x * W[k].y + out[k].y * W[k].x + in[N - 2 - j].y;
			out[k].x = re;
			out[k].y = im;
		}
	}
}
// Builds a one-period sine test signal, runs the GPU DFT, and dumps the input
// and output samples to init.txt / output.txt.
int main()
{
	unsigned nthreads = 16;
	unsigned nbits = 10;
	// Fix: "1 << nbits * sizeof(cuDoubleComplex)" parses as 1 << (nbits * 16),
	// an undefined over-wide shift that requested a nonsense allocation size;
	// the shift must be parenthesized.
	cuDoubleComplex* in = (cuDoubleComplex*)malloc((1 << nbits) * sizeof(cuDoubleComplex));
	cuDoubleComplex* out = (cuDoubleComplex*)malloc((1 << nbits) * sizeof(cuDoubleComplex));
	FILE *fh = fopen("init.txt", "w");
	for (unsigned i = 0; i < 1 << nbits; ++i)
	{
		in[i].x = sin(2. * i * CUDART_PI / (1 << nbits));
		in[i].y = 0;
		fprintf(fh, "%19.12e\t%19.12e\t%19.12e\n", 2.*CUDART_PI*i / (1 << nbits), in[i].x, in[i].y);
	}
	fclose(fh);
	cudaError_t cudaStatus = cudaDDFT(nbits, nthreads, out, in);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "addWithCuda failed!");
		free(in);
		free(out);
		return 1;
	}
	fh = fopen("output.txt", "w");
	for (unsigned i = 0; i < 1 << nbits; ++i)
	{
		fprintf(fh, "%19.12e\t%19.12e\t%19.12e\n", 2.*CUDART_PI*i / (1 << nbits), out[i].x, out[i].y);
	}
	fclose(fh);
	free(in);    // fix: host buffers were leaked
	free(out);
	// cudaDeviceReset must be called before exiting in order for profiling and
	// tracing tools such as Nsight and Visual Profiler to show complete traces.
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceReset failed!");
		return 1;
	}
	return 0;
}
// Helper function for using CUDA to compute discrete Fourier transform in parallel.
// Helper function for using CUDA to compute the discrete Fourier transform in
// parallel: allocates device buffers, precomputes twiddle factors, launches
// ddftKernel with a single block of nthreads threads, and copies the result back.
cudaError_t cudaDDFT(unsigned nbits, unsigned nthreads, cuDoubleComplex * out, cuDoubleComplex * in)
{
	// Fix: initialize device pointers to NULL so the cudaFree calls under
	// Error: are safe even when we jump there before the corresponding
	// cudaMalloc has run (cudaFree(NULL) is a no-op; freeing an uninitialized
	// pointer is not).
	cuDoubleComplex* dev_in = NULL;
	cuDoubleComplex* dev_out = NULL;
	cuDoubleComplex* dev_W = NULL;
	cuDoubleComplex* W = (cuDoubleComplex*)malloc((1 << nbits) * sizeof(cuDoubleComplex));
	// Serial O(N) precomputation of twiddle factors W[s].
	// NOTE(review): phs is already negative, so cos(-phs*s)/sin(-phs*s) gives
	// e^{+i 2*pi*s/N}; a forward DFT conventionally uses e^{-i 2*pi*s/N} --
	// confirm the intended sign convention.
	double phs = -2.0*CUDART_PI / (1 << nbits);
	for (int s = 0; s < (1 << nbits); s++)
	{
		W[s].x = cos(-phs*s);
		W[s].y = sin(-phs*s);
	}
	cudaError_t cudaStatus;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
		goto Error;
	}
	cudaStatus = cudaMalloc((void**)&dev_out, (1 << nbits) * sizeof(cuDoubleComplex));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
		goto Error;
	}
	cudaStatus = cudaMalloc((void**)&dev_in, (1 << nbits) * sizeof(cuDoubleComplex));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
		goto Error;
	}
	cudaStatus = cudaMalloc((void**)&dev_W, (1 << nbits) * sizeof(cuDoubleComplex));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
		goto Error;
	}
	// Copy input vector and twiddle factors to the device.
	cudaStatus = cudaMemcpy(dev_in, in, (1<<nbits) * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
		goto Error;
	}
	cudaStatus = cudaMemcpy(dev_W, W, (1 << nbits) * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
		goto Error;
	}
	ddftKernel<<<1, nthreads>>>(nbits, nthreads, dev_out, dev_in, dev_W);
	// Check for any errors launching the kernel.
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
		goto Error;
	}
	// cudaDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
		goto Error;
	}
	// Copy output vector from GPU buffer to host memory.
	cudaStatus = cudaMemcpy(out, dev_out, (1 << nbits) * sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
		goto Error;
	}
Error:
	cudaFree(dev_out);
	cudaFree(dev_in);
	cudaFree(dev_W);
	free(W);   // fix: the host twiddle buffer was leaked on every path
	return cudaStatus;
}
|
12,224 | /*#include <stdio.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include "book.h"
#include "cusparse.h"
*/
#define BlockDim 1024
// Adaptive CSR SpMV: each thread block processes the row range
// [d_rowBlocks[blockIdx.x], d_rowBlocks[blockIdx.x + 1]).
// Multi-row blocks use "CSR Stream" (stage the products in shared memory, then
// per-row partial sums); single-row blocks use "CSR Vector" (all threads
// cooperate on one row, then tree-reduce in shared memory).
// NOTE(review): the stream path stages at most BlockDim products and only
// guards the load with (i < nnz), so a multi-row block must contain <= BlockDim
// non-zeros -- the row-block builder is responsible for guaranteeing this.
// The tree reduction assumes blockDim.x is a power of two.
template <typename T>
__global__ void spmv_csr_adaptive_kernel(T * d_val, T * d_vector, int * d_cols, int * d_ptr, int N, int * d_rowBlocks, T * d_out)
{
int startRow = d_rowBlocks[blockIdx.x];
int nextStartRow = d_rowBlocks[blockIdx.x + 1];
int num_rows = nextStartRow - startRow;
int i = threadIdx.x;
__shared__ volatile T LDS[BlockDim];
// If the block consists of more than one row then run CSR Stream
if (num_rows > 1) {
int nnz = d_ptr[nextStartRow] - d_ptr[startRow];
int first_col = d_ptr[startRow];
// Each thread stages one val*vector product in shared memory
if (i < nnz)
{
LDS[i] = d_val[first_col + i] * d_vector[d_cols[first_col + i]];
}
__syncthreads();
// Threads that fall within a range sum up the partial results
for (int k = startRow + i; k < nextStartRow; k += blockDim.x)
{
T temp = 0;
for (int j = (d_ptr[k] - first_col); j < (d_ptr[k + 1] - first_col); j++) {
temp = temp + LDS[j];
}
d_out[k] = temp;
}
}
// If the block consists of only one row then run CSR Vector
else {
// Thread ID in warp
int rowStart = d_ptr[startRow];
int rowEnd = d_ptr[nextStartRow];
T sum = 0;
// Use all threads in a warp to accumulate multiplied elements
for (int j = rowStart + i; j < rowEnd; j += BlockDim)
{
int col = d_cols[j];
sum += d_val[j] * d_vector[col];
}
LDS[i] = sum;
__syncthreads();
// Reduce partial sums (power-of-two tree reduction)
for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (i < stride)
LDS[i] += LDS[i + stride];
}
// Write result
if (i == 0)
d_out[startRow] = LDS[i];
}
}
// Partitions the rows of a CSR matrix into blocks of at most BlockDim
// non-zeros each, writing the block boundaries into rowBlocks and returning
// the number of entries written (boundaries + trailing totalRows sentinel).
// NOTE(review): rows 1..totalRows-1 are inspected; the last row's nnz is
// folded into the final block regardless of size -- confirm this matches the
// kernel's capacity assumption.
int spmv_csr_adaptive_rowblocks(int *ptr, int totalRows, int *rowBlocks)
{
rowBlocks[0] = 0;
int ctr = 1;        // next write slot in rowBlocks
int nnzAcc = 0;     // non-zeros accumulated since the last boundary
int lastBreak = 0;  // row index of the most recent boundary
for (int row = 1; row < totalRows; ++row) {
nnzAcc += ptr[row] - ptr[row - 1];
if (nnzAcc == BlockDim) {
// exactly fills one block
lastBreak = row;
rowBlocks[ctr++] = row;
nnzAcc = 0;
} else if (nnzAcc > BlockDim) {
if (row - lastBreak > 1) {
// the latest row overflows the block: close it before this row and re-scan
rowBlocks[ctr++] = row - 1;
--row;
} else if (row - lastBreak == 1) {
// a single row larger than BlockDim gets its own block
rowBlocks[ctr++] = row;
}
lastBreak = row;
nnzAcc = 0;
}
}
rowBlocks[ctr++] = totalRows;
return ctr;
} |
12,225 | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Winter Semester 2013/2014, March 3 - April 4
// ###
// ###
// ### Evgeny Strekalovskiy, Maria Klodt, Jan Stuehmer, Mohamed Souiai
// ###
// ###
// ###
// ### Shiv, painkiller047@gmail.com, p053
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
// Aborts with a file/line-tagged message if a CUDA error is pending
// (invoked via the CUDA_CHECK macro after every CUDA call).
void cuda_check(string file, int line)
{
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    cout << endl << file << ", line " << line << ": " << cudaGetErrorString(err) << " (" << err << ")" << endl;
    exit(1);
}
// Returns the square of the element at index i of d_a (device-side helper).
__device__ float square_element(float *d_a, size_t i) {
    float v = d_a[i];
    return v * v;
}
// Kernel: squares the first n elements of d_a in place, one thread per element.
__global__ void square_array(float *d_a, size_t n) {
    // global 1-D thread id
    size_t idx = (size_t) blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;           // threads beyond the array do nothing
    d_a[idx] = square_element(d_a, idx);
}
// Allocates device memory, copies h_a to the GPU, squares it with
// square_array, and copies the result back into h_a. 1-D launch assumed.
void square_array_caller(float *h_a, size_t n) {
    // setting a block of 512 threads; enough blocks to cover n (ceil-div)
    dim3 block = dim3(512, 1, 1);
    dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);
    // alloc GPU memory and copy data
    float *d_a;
    size_t bytesSize = n * sizeof(float);   // fix: was int, overflows for large n
    cudaMalloc((void **) &d_a, bytesSize);
    CUDA_CHECK;
    cudaMemcpy(d_a, h_a, bytesSize, cudaMemcpyHostToDevice);
    CUDA_CHECK;
    // call kernel
    square_array<<<grid, block>>>(d_a, n);
    CUDA_CHECK;   // fix: per this file's convention, check for launch errors immediately
    // wait for kernel call to finish
    cudaDeviceSynchronize();
    CUDA_CHECK;
    // copy back data
    cudaMemcpy(h_a, d_a, bytesSize, cudaMemcpyDeviceToHost);
    CUDA_CHECK;
    // free GPU array
    cudaFree(d_a);
    CUDA_CHECK;
}
// Squares an array on the CPU and then on the GPU, printing both results.
int main(int argc,char **argv)
{
    // Host input: values 0..n-1.
    const int n = 10;
    float *a = new float[n];
    for (int i = 0; i < n; i++) a[i] = i;
    // CPU reference computation: square each element in place.
    for (int i = 0; i < n; i++)
        a[i] = a[i] * a[i];
    cout << "CPU:"<<endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << endl;
    cout << endl;
    // Reset the input, then square on the GPU via the kernel caller
    // (allocates, copies, launches, copies back, frees -- see square_array_caller).
    for (int i = 0; i < n; i++) a[i] = i;
    square_array_caller(a, n);
    cout << "GPU:" << endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << endl;
    cout << endl;
    // free CPU array
    delete[] a;
}
12,226 | #include "includes.h"
// Writes the identity mapping buffer[i] = i for the first `num` entries.
__global__ void initActiveSlices(int* buffer, int num)
{
	const int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx >= num)
		return;   // guard the grid tail
	buffer[idx] = idx;
} |
12,227 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Converts a[tid] (non-negative) to its octal representation stored as a
// base-10 number in b[tid] (e.g. 9 -> 11, 64 -> 100). One thread per element;
// single-block launch assumed (indexing uses threadIdx.x only).
__global__ void octal(int *a,int *b)
{
	int tid = threadIdx.x;
	int num = a[tid];
	// Bug fixes vs. the original:
	//  1. the most-significant digit (the value left once num <= 7) was never
	//     emitted, so e.g. 9 produced 0 instead of 11;
	//  2. digits were weighted by 10*i instead of the place value 10^i.
	long int result = 0;
	long int place = 1;   // decimal place value of the next octal digit
	while (num > 7)
	{
		result += (num % 8) * place;
		place *= 10;
		num = num / 8;
	}
	result += num * place;   // final (most-significant) octal digit
	b[tid] = result;
}
// Reads N integers, converts each to octal on the GPU, prints the results.
int main(void)
{
	int i,a[10000],b[10000],n;
	printf("Enter value of N:");
	scanf("%d",&n);
	// Fix: the kernel is launched as <<<1,n>>> (one block), so n is limited to
	// 1024 threads; the original accepted any n up to 10000 and silently
	// failed to launch for n > 1024 (and read out of bounds for larger n).
	if (n < 1 || n > 1024) {
		printf("N must be between 1 and 1024\n");
		return 1;
	}
	printf("Enter array elements of array A\n");
	for(i=0;i<n;i++)
		scanf("%d",&a[i]);
	int *d_a,*d_b;
	int size = sizeof(int);
	cudaMalloc((void **)&d_a,size*n);
	cudaMalloc((void **)&d_b,size*n);
	cudaMemcpy(d_a,a,size*n,cudaMemcpyHostToDevice);
	// (no copy of b to the device: it was uninitialized and the kernel
	// overwrites every element of d_b anyway)
	octal<<<1,n>>>(d_a,d_b);
	cudaMemcpy(b,d_b,size*n,cudaMemcpyDeviceToHost);
	printf("Octal array is :");
	for(i=0;i<n;i++)
	{
		printf("%d\t",b[i]);
	}
	cudaFree(d_a);
	cudaFree(d_b);
	return 0;
}
|
12,228 | #include "includes.h"
// In-place conversion of a pitched disparity image: a zero disparity becomes
// NaN, any other value becomes -depth_scale / disparity. 2-D launch over
// width x height; `pitch` is the row stride of d_disparity in bytes.
__global__ void convertKinectDisparityInPlace_kernel(float *d_disparity, int pitch, int width, int height, float depth_scale) {
	const int x = blockIdx.x * blockDim.x + threadIdx.x;
	const int y = blockIdx.y * blockDim.y + threadIdx.y;
	if (x >= width || y >= height)
		return;   // outside the image
	// byte-pitched row addressing
	float *pixel = (float *)((char *)d_disparity + y * pitch) + x;
	const float d = *pixel;
	*pixel = (d == 0.0f) ? nanf("") : (-depth_scale / d);
} |
12,229 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
int n = 512;
float
a,
beta_old = 1.0f,
beta = 0.0f,
*x, *y,
ydot,
thr = 1e-5f;
float
* dx, * dy;
#define I (i+1)
#define J (j+1)
#define BLOCKSIZE 256
/* Notice that for n < BLOCKSIZE the program will fail
 *
 * Can be solved by choosing bigger n
 * or by having BLOCKSIZE as variable and add a test
 */
// y = A*x for the implicitly defined matrix a_ij = 1/(0.5*(i+j-1)*(i+j-2)+i)
// (1-based indices via the I/J macros; the matrix is never stored).
// One thread per output row; assumes the launch supplies exactly n threads
// (n divisible by BLOCKSIZE -- no bounds guard on i).
__global__ void cu_mult (float * dx, float * dy, int n)
{
int i = blockIdx.x * BLOCKSIZE + threadIdx.x;
dy[i] = 0.0f;
for (int j = 0; j < n; ++j)
{
float a = 1.0f / (0.5f*(I+J-1)*(I+J-2)+I);
dy[i] += a * dx[j];
}
}
// Power-iteration renormalization step: x[i] = y[i] / sqrt(ydot), i.e. the
// next iterate is y scaled by its 2-norm (ydot = sum of y[j]^2, computed on
// the host). Same launch assumptions as cu_mult: exactly n threads, no guard.
__global__ void cu_divide_ydot (float * dx, float * dy, float ydot)
{
int i = blockIdx.x * BLOCKSIZE + threadIdx.x;
dx[i] = dy[i] / sqrt(ydot);
}
// Power iteration for the dominant eigenvalue (beta) of the implicit matrix
// applied by cu_mult; iterates y = A*x, x = y/||y|| until beta converges.
int
main ( int argc, char **argv )
{
	printf("yoyo");
	// Optional argument: log2 of the problem size.
	if ( argc > 1 )
		n = (1 << strtol ( argv[1], NULL, 10 ));
	// Allocating space on CPU
	x = (float *) malloc ( n*sizeof(float) );
	y = (float *) malloc ( n*sizeof(float) );
	// Allocating space on GPU
	cudaMalloc ( (void**) &dx, n*sizeof(float) );
	cudaMalloc ( (void**) &dy, n*sizeof(float) );
	cudaMemset ((void *) dy , 0, n*sizeof(float));
	// Initial vector: e_0.
	memset ( x, 0, n*sizeof(float) );
	x[0] = 1.0f;
	cudaMemcpy ( dx, x, n*sizeof(float), cudaMemcpyHostToDevice );
	dim3 gridBlock ( n/BLOCKSIZE );
	dim3 threadBlock ( BLOCKSIZE );
	do
	{
		cu_mult <<< gridBlock, threadBlock >>> ( dx, dy, n );
		cudaMemcpy ( y , dy, n*sizeof(float), cudaMemcpyDeviceToHost );
		if ( fabs(beta_old-beta) < thr )
			break;
		cudaMemcpy ( x , dx, n*sizeof(float), cudaMemcpyDeviceToHost );
		beta_old = beta;
		beta = 0.0f;
		ydot = 0.0f;
		for ( int j=0; j<n; j++ )
		{
			beta += y[j] * x[j];   // Rayleigh-quotient numerator
			ydot += y[j] * y[j];   // squared 2-norm of y for renormalization
		}
		cu_divide_ydot <<< gridBlock, threadBlock >>> ( dx, dy, ydot );
	}
	while ( 1 );
	printf ( "%e\n", beta );
	free ( x ), free ( y );
	// Fix: cudaFree takes the device pointer itself; the original passed
	// &dx/&dy -- the addresses of the host pointer variables -- which is an
	// invalid argument and leaked the device memory.
	cudaFree ( dx ), cudaFree ( dy );
}
|
12,230 | #include <iostream>
#include <chrono>
#include<float.h>
using namespace std;
using namespace std::chrono;
#define max(a, b) a>b?a:b
// Block-level max reduction: each block loads blockDim.x inputs into dynamic
// shared memory and reduces them, writing the block maximum to
// device_output[blockIdx.x]. Requires blockDim.x floats of dynamic shared
// memory and an input length that is an exact multiple of blockDim.x (the
// initial load has no bounds guard).
__global__ void compute_max_gpu(float *device_input, float *device_output){
extern __shared__ float sm[];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
sm[tid] = device_input[i];
__syncthreads();
// interleaved-addressing reduction; the tid % (2*s) predicate leaves most
// lanes idle each round (correct for power-of-two blockDim, but divergent)
for(int s = 1;s < blockDim.x; s*= 2){
if(tid % (2 * s) == 0){
sm[tid] = max(sm[tid], sm[tid+s]);
}
__syncthreads();
}
if(tid == 0) device_output[blockIdx.x] = sm[0];
}
// Sequential maximum of cpu_input[0..n), stored into cpu_output[0]; prints the
// result and the elapsed time in microseconds.
void compute_max_cpu(float *cpu_input, float *cpu_output, unsigned int n){
    float best = FLT_MIN;
    auto t0 = high_resolution_clock::now();
    for (unsigned int idx = 0; idx < n; ++idx) {
        if (best < cpu_input[idx])
            best = cpu_input[idx];
    }
    auto t1 = high_resolution_clock::now();
    cpu_output[0] = best;
    auto time_req = duration_cast<microseconds>(t1 - t0).count();
    cout << endl << " Maximum from CPU is : " << cpu_output[0] << endl;
    cout << endl << " Time required for CPU is : " << time_req << " microseconds" << endl;
}
// Compares a CPU max-scan against a GPU block-reduction over 2^30 floats.
int main(){
	float *cpu_input, *cpu_output;
	float *device_input, *device_output, *transfer_output;
	unsigned int n = 1024*1024*1024;
	size_t size = n * sizeof(float);
	unsigned int numThreadsPerBlock = 1024;
	unsigned int blocksPerGrid = int(n / numThreadsPerBlock);
	// Allocate memories
	cpu_input = (float *)malloc(size);
	cpu_output = (float *)malloc(sizeof(float));
	transfer_output = (float *)malloc(blocksPerGrid * sizeof(float));
	cudaMalloc(&device_input, size);
	cudaMalloc(&device_output, blocksPerGrid * sizeof(float));
	// Fill the input with random values in [10, 10+n)
	for(unsigned int i=0;i<n;i++){
		cpu_input[i] = 10.0f + n*float(rand()) / RAND_MAX;
	}
	cudaMemcpy(device_input, cpu_input, size, cudaMemcpyHostToDevice);
	//Execute CPU code for maximum
	compute_max_cpu(cpu_input, cpu_output, n);
	//Execute GPU code for maximum
	dim3 grid_size(blocksPerGrid, 1, 1);
	dim3 block_size(numThreadsPerBlock, 1, 1);
	auto start = high_resolution_clock::now();
	compute_max_gpu<<<grid_size, block_size, numThreadsPerBlock * sizeof(float)>>>(device_input, device_output);
	cudaMemcpy(transfer_output, device_output, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost);
	// Reduce the per-block maxima on the host.
	// Fix: FLT_MIN is the smallest *positive* float, not the most negative
	// value; -FLT_MAX is the correct identity for a max-reduction.
	float gpu_max = -FLT_MAX;
	for(unsigned int i = 0;i < blocksPerGrid;i ++){
		gpu_max = (gpu_max > transfer_output[i]) ? gpu_max : transfer_output[i];
	}
	auto stop = high_resolution_clock::now();
	auto time_req = duration_cast<microseconds>(stop - start).count();
	cout << endl << " Maximum from GPU is : " << gpu_max << endl;
	cout << endl << " Time required for GPU is : " << time_req << " microseconds" << endl;
	// Fix: release all host and device allocations (the original leaked everything).
	cudaFree(device_input);
	cudaFree(device_output);
	free(cpu_input);
	free(cpu_output);
	free(transfer_output);
}
|
12,231 | // Tests that "sm_XX" gets correctly converted to "compute_YY" when we invoke
// fatbinary.
//
// REQUIRES: clang-driver
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_20 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM20 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_21 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM21 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_30 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM30 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_32 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM32 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_35 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM35 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_37 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM37 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_50 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM50 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_52 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM52 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_53 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM53 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_60 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM60 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_61 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM61 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_62 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM62 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_70 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=COMMON,SM70 %s
// COMMON: ptxas
// COMMON-SAME: -m64
// COMMON: fatbinary
// SM20:--image=profile=sm_20{{.*}}--image=profile=compute_20
// SM21:--image=profile=sm_21{{.*}}--image=profile=compute_20
// SM30:--image=profile=sm_30{{.*}}--image=profile=compute_30
// SM32:--image=profile=sm_32{{.*}}--image=profile=compute_32
// SM35:--image=profile=sm_35{{.*}}--image=profile=compute_35
// SM37:--image=profile=sm_37{{.*}}--image=profile=compute_37
// SM50:--image=profile=sm_50{{.*}}--image=profile=compute_50
// SM52:--image=profile=sm_52{{.*}}--image=profile=compute_52
// SM53:--image=profile=sm_53{{.*}}--image=profile=compute_53
// SM60:--image=profile=sm_60{{.*}}--image=profile=compute_60
// SM61:--image=profile=sm_61{{.*}}--image=profile=compute_61
// SM62:--image=profile=sm_62{{.*}}--image=profile=compute_62
// SM70:--image=profile=sm_70{{.*}}--image=profile=compute_70
|
12,232 | #include <iostream>
#include <vector>
#include <algorithm>
#include <numeric>
#include <random>
#include <chrono>
#include<cfloat>
#include <thrust/sort.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/random.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
using namespace std;
// compare radix sort
// Stable-ish argsort: returns the indices that would sort v ascending.
template<typename T>
std::vector<std::size_t> tag_sort(const std::vector<T>& v)
{
    std::vector<std::size_t> result(v.size());
    std::iota(std::begin(result), std::end(result), 0);
    // Fix: the comparator previously took its indices as `const double&`,
    // silently converting size_t -> double (lossy for indices above 2^53) and
    // forcing floating-point comparison of integer indices.
    std::sort(std::begin(result), std::end(result),
              [&v](std::size_t lhs, std::size_t rhs)
              {
                  return v[lhs] < v[rhs];
              });
    return result;
}
// Benchmarks an indexed sort of vec_size uniform random doubles: CPU argsort
// via tag_sort() vs. GPU thrust::sort_by_key; prints both timings in ms.
// NOTE(review): the default_random_engine is default-seeded, so every call
// sorts the same value sequence -- confirm that is intended for benchmarking.
void compare_sort(int vec_size){
// int vec_size = 100000;
vector<double> values;
vector<int> indices;
double lower_bound = 0;
double upper_bound = 10000;
std::default_random_engine re;
std::uniform_real_distribution<double> unif(lower_bound,upper_bound);
for(int i = 0; i < vec_size; i++){
double a_random_double = unif(re);
values.push_back(a_random_double);
indices.push_back(i);
}
// generating values
//for(double i : values)
// cout << i << " ";
//cout << endl;
vector<double> values_cpu(values);
// sort using c++ stl vector
auto start_cpu = std::chrono::high_resolution_clock::now();
auto idices_cpu = tag_sort(values_cpu);
auto finish_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_cpu = finish_cpu - start_cpu;
cout <<" CPU SP executing time (ms): " << elapsed_cpu.count()*1000 << endl;
// output
//for (auto && elem:idxs)
// std::cout << elem << " : " << values[elem] << std::endl;
// GPU timing includes the key/value sort plus the device->host index copy
thrust::device_vector<double > values_gpu(values);
thrust::device_vector<int> indices_gpu(indices);
thrust::host_vector<double > indices_cpu(indices);
// sort using cuda gpu
auto start_gpu = std::chrono::high_resolution_clock::now();
thrust::sort_by_key(values_gpu.begin(), values_gpu.end(), indices_gpu.begin());
thrust::copy(indices_gpu.begin(), indices_gpu.end(), indices_cpu.begin());
auto finish_gpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_gpu = finish_gpu - start_gpu;
cout <<" GPU CUDA executing time (ms): " << elapsed_gpu.count()*1000 << endl;
//for(int i = 0; i < vec_size; i++)
// cout << indices[i] << " : " << values[i] << endl;
/*
bool are_equal = true;
for(int i = 0; i < vec_size; i++)
if (idices_cpu[i] != indices_gpu[i]){
are_equal = false;
break;
}
cout << are_equal << endl;
*/
}
// m should be the power of 2 rank version
#define BlockSize 4 // for keper architecture maxwell will be 16?
// Tiled pairwise Euclidean distance: out[i*n + j] = ||row_i - row_j|| for the
// n x dim row-major matrix `in`, written to the n x n matrix `out`.
// Launch: BlockSize x BlockSize threads; grid (n/BlockSize, n/BlockSize).
// NOTE(review): there are no tail guards, so n and dim must both be exact
// multiples of BlockSize -- confirm at call sites.
__global__ void gpu_euclidian_distances(float *out, float *in, int n, int dim){
__shared__ float Xs[BlockSize][BlockSize];
__shared__ float Ys[BlockSize][BlockSize];
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
// starting offsets of the two row-tiles this block compares
int xBegin = bx * BlockSize * dim;
int yBegin = by * BlockSize * dim;
int yEnd = yBegin + dim - 1;
int x, y, k, outIdx;
float s = 0.0, tmp;
// march both tiles across the dim axis, BlockSize columns at a time
for(y = yBegin, x = xBegin; y <= yEnd; y += BlockSize, x += BlockSize){
Ys[ty][tx] = in[y + ty*dim + tx];
Xs[tx][ty] = in[x + ty*dim + tx];   // note: stored transposed
__syncthreads();   // tiles fully loaded before use
for(k = 0; k < BlockSize; k++){
tmp = Ys[ty][k] - Xs[k][tx];
s += tmp*tmp;
}
__syncthreads();   // finish reading before the next iteration overwrites the tiles
}
outIdx = by*BlockSize*n + ty*n + bx*BlockSize + tx;
out[outIdx] = sqrtf(s);
}
// Fills the w*h-entry buffer with pseudo-random floats in [0, high]
// (uses rand(); not reseeded here, so the sequence depends on global state).
void filling(float* matrix, int w, int h, float high){
    const float low = 0.0f;
    const int count = w * h;
    for (int idx = 0; idx < count; ++idx)
        matrix[idx] = low + static_cast<float>(rand() / static_cast<float>(RAND_MAX/(high - low)));
}
// CPU reference: dense symmetric pairwise Euclidean distance matrix for n
// points of `dim` coordinates (row-major in `in`), written to the n x n `out`.
void cpu_euclidian_distances(float* in, int n, int dim, float* out){
    for (int row = 0; row < n; ++row) {
        out[row*n + row] = 0.0;   // distance to self
        for (int col = row + 1; col < n; ++col) {
            float sq = 0.0;
            for (int d = 0; d < dim; ++d) {
                float diff = in[row*dim + d] - in[col*dim + d];
                sq += diff * diff;
            }
            // fill both halves of the symmetric matrix
            float dist = sqrtf(sq);
            out[row*n + col] = dist;
            out[col*n + row] = dist;
        }
    }
}
// Benchmarks pairwise-distance computation (vec_size points of E coordinates):
// GPU tiled kernel (including transfers) vs. CPU reference; prints both times.
// Requires vec_size to be a multiple of BlockSize (no tail blocks launched).
void compare_distances(int vec_size, int E){
	// pinned host buffers for the input matrix and the vec_size^2 result
	float *h_A, *h_result;
	cudaMallocHost((void**)&h_A, vec_size * E * sizeof(float));
	cudaMallocHost((void**)&h_result, vec_size * vec_size * sizeof(float));
	filling(h_A, vec_size, E, 10.0f);
	// device memory allocation
	float *d_A, *d_result;
	cudaMalloc((void**)&d_A, vec_size * E * sizeof(float));
	cudaMalloc((void**)&d_result, vec_size * vec_size * sizeof(float));
	auto start_gpu = std::chrono::high_resolution_clock::now();
	// data transfer to GPU
	cudaMemcpy(d_A, h_A, vec_size * E * sizeof(float), cudaMemcpyHostToDevice);
	// kernel launch
	dim3 block(BlockSize, BlockSize);
	dim3 grid(vec_size / BlockSize, vec_size / BlockSize);
	gpu_euclidian_distances <<<grid, block>>> (d_result, d_A, vec_size, E);
	// data transfer to CPU (blocking copy also synchronizes with the kernel)
	cudaMemcpy(h_result, d_result, vec_size * vec_size * sizeof(float), cudaMemcpyDeviceToHost);
	auto finish_gpu = std::chrono::high_resolution_clock::now();
	std::chrono::duration<double> elapsed_gpu = finish_gpu - start_gpu;
	cout <<" GPU CUDA executing time (ms): " << elapsed_gpu.count()*1000 << endl;
	cudaFree(d_A);
	cudaFree(d_result);
	// CPU reference timing (overwrites h_result)
	auto start_cpu = std::chrono::high_resolution_clock::now();
	cpu_euclidian_distances(h_A, vec_size, E, h_result);
	auto finish_cpu = std::chrono::high_resolution_clock::now();
	std::chrono::duration<double> elapsed_cpu = finish_cpu - start_cpu;
	cout <<" CPU SP executing time (ms): " << elapsed_cpu.count()*1000 << endl;
	// Fix: the pinned host buffers were leaked; memory from cudaMallocHost
	// must be released with cudaFreeHost.
	cudaFreeHost(h_A);
	cudaFreeHost(h_result);
}
// Tiled pairwise Pearson correlation: out[i*n + j] = rho(row_i, row_j) for the
// n x dim row-major matrix `in`. Same tiling scheme as
// gpu_euclidian_distances: BlockSize x BlockSize threads, grid
// (n/BlockSize, n/BlockSize); accumulates the five running sums per pair.
// NOTE(review): no tail guards -- n and dim must be exact multiples of
// BlockSize; variance/covariance use the (dim - 1) sample normalization, and
// there is no guard against a zero variance (rho becomes inf/NaN) -- confirm
// inputs are non-constant rows.
__global__ void pairwise_pearson_correlation(float *out, float *in, int n, int dim){
__shared__ float Xs[BlockSize][BlockSize];
__shared__ float Ys[BlockSize][BlockSize];
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
int xBegin = bx * BlockSize * dim;
int yBegin = by * BlockSize * dim;
int yEnd = yBegin + dim - 1;
int x, y, k, outIdx;
float sumX, sumY, sumX2, sumY2, sumXY;
float avgX, avgY, varX, varY, cov, rho;
sumX = sumY = sumX2 = sumY2 = sumXY = 0.0;
// march both row-tiles across the dim axis, accumulating the moment sums
for(y = yBegin, x = xBegin; y <= yEnd; y += BlockSize, x += BlockSize){
Ys[ty][tx] = in[y + ty*dim + tx];
Xs[tx][ty] = in[x + ty*dim + tx];   // stored transposed
__syncthreads();
for(k = 0; k < BlockSize; k++){
sumX += Xs[k][tx];
sumY += Ys[ty][k];
sumX2 += Xs[k][tx]*Xs[k][tx];
sumY2 += Ys[ty][k]*Ys[ty][k];
sumXY += Xs[k][tx]*Ys[ty][k];
}
__syncthreads();
}
// sample variance/covariance and the correlation coefficient
avgX = sumX/dim;
avgY = sumY/dim;
varX = (sumX2 - avgX*avgX*dim) / (dim - 1);
varY = (sumY2 - avgY*avgY*dim) / (dim - 1);
cov = (sumXY - avgX*avgY*dim) / (dim - 1);
rho = cov / sqrtf(varX*varY);
outIdx = by*BlockSize*n + ty*n + bx*BlockSize + tx;
out[outIdx] = rho;
}
// Row-wise Pearson correlation: out[idx] = rho(A_row_idx, B_row_idx) for n
// row pairs of length dim (row-major). One thread per row pair, 1-D launch,
// bounds-guarded. A zero (or tiny) denominator yields rho = 0 rather than NaN.
// NOTE(review): `abs` here relies on CUDA's device float overload; the host
// twin (cpu_pearson_correlation) needs fabsf instead -- keep them in sync.
__global__ void gpu_pearson_correlation(float* out, float* A, float* B, int n, int dim){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < n){
// accumulate the five moment sums over the row
float sumX = 0.0, sumY = 0.0, sumX2 = 0.0, sumY2 = 0.0, sumXY = 0.0;
float rho = 0.0;
for(int k = 0; k < dim; k++){
sumX += A[idx*dim + k];
sumY += B[idx*dim + k];
sumX2 += A[idx*dim + k] * A[idx*dim + k];
sumY2 += B[idx*dim + k] * B[idx*dim + k];
sumXY += A[idx*dim + k] * B[idx*dim + k];
}
float denominator = sqrtf((sumX2 * dim - sumX * sumX) * (sumY2 * dim - sumY * sumY));
float numerator = sumXY*dim - sumX*sumY;
if(abs(denominator) > 1e-10)
rho = numerator / denominator;
out[idx] = rho;
}
}
// CPU reference: Pearson correlation between corresponding rows of A and B
// (n row pairs of length dim, row-major); out[i] = rho, or 0 when the
// denominator is (near) zero.
void cpu_pearson_correlation(float* out, float* A, float* B, int n, int dim){
	for (int i = 0; i < n; i++) {
		float sumX = 0.0f, sumY = 0.0f, sumX2 = 0.0f, sumY2 = 0.0f, sumXY = 0.0f;
		for (int k = 0; k < dim; k++) {
			float xv = A[i*dim + k];
			float yv = B[i*dim + k];
			sumX += xv;
			sumY += yv;
			sumX2 += xv*xv;
			sumY2 += yv*yv;
			sumXY += xv*yv;
		}
		float rho = 0.0f;
		float denominator = sqrtf((sumX2 * dim - sumX * sumX) * (sumY2 * dim - sumY * sumY));
		float numerator = sumXY*dim - sumX*sumY;
		// Fix: on the host, unqualified `abs` can resolve to the integer
		// overload, truncating the denominator (e.g. 0.5 -> 0) and wrongly
		// reporting rho = 0; fabsf keeps the comparison in float.
		if (fabsf(denominator) > 1e-10f)
			rho = numerator / denominator;
		out[i] = rho;
	}
}
// Benchmarks row-wise Pearson correlation of vec_size row pairs of length d:
// GPU kernel (including transfers) vs. CPU reference; prints both times.
// Requires vec_size to be a multiple of BlockSize.
void compare_rho(int vec_size, int d){
	// pinned host buffers for the two input matrices and the per-row result
	float *h_A, *h_B, *h_result;
	cudaMallocHost((void**)&h_A, vec_size * d * sizeof(float));
	cudaMallocHost((void**)&h_B, vec_size * d * sizeof(float));
	cudaMallocHost((void**)&h_result, vec_size * sizeof(float));
	filling(h_A, vec_size, d, 10.0f);
	filling(h_B, vec_size, d, 10.0f);
	// device memory allocation
	float *d_A, *d_B, *d_result;
	cudaMalloc((void**)&d_A, vec_size * d * sizeof(float));
	cudaMalloc((void**)&d_B, vec_size * d * sizeof(float));
	cudaMalloc((void**)&d_result, vec_size * sizeof(float));
	auto start_gpu = std::chrono::high_resolution_clock::now();
	// data transfer to GPU
	cudaMemcpy(d_A, h_A, vec_size * d * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, h_B, vec_size * d * sizeof(float), cudaMemcpyHostToDevice);
	// kernel launch (1-D: one thread per row pair)
	dim3 block(BlockSize);
	dim3 grid(vec_size / BlockSize);
	gpu_pearson_correlation <<<grid, block>>> (d_result, d_A, d_B, vec_size, d);
	// data transfer to CPU (blocking copy also synchronizes with the kernel)
	cudaMemcpy(h_result, d_result, vec_size * sizeof(float), cudaMemcpyDeviceToHost);
	auto finish_gpu = std::chrono::high_resolution_clock::now();
	std::chrono::duration<double> elapsed_gpu = finish_gpu - start_gpu;
	cout <<" GPU CUDA executing time (ms): " << elapsed_gpu.count()*1000 << endl;
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_result);
	// CPU reference timing (overwrites h_result)
	auto start_cpu = std::chrono::high_resolution_clock::now();
	cpu_pearson_correlation(h_result, h_A, h_B, vec_size, d);
	auto finish_cpu = std::chrono::high_resolution_clock::now();
	std::chrono::duration<double> elapsed_cpu = finish_cpu - start_cpu;
	cout <<" CPU SP executing time (ms): " << elapsed_cpu.count()*1000 << endl;
	// Fix: the pinned host buffers were leaked; memory from cudaMallocHost
	// must be released with cudaFreeHost.
	cudaFreeHost(h_A);
	cudaFreeHost(h_B);
	cudaFreeHost(h_result);
}
// Driver: runs each benchmark suite over every (dimension, size) combination.
int main(){
    const vector<int> sizes = {16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192};
    const vector<int> dimensions = {2, 4, 8};
    cout << "comparing the performance difference of pairwise distances calculation" << endl;
    for (size_t di = 0; di < dimensions.size(); ++di) {
        cout << "dimension: " << dimensions[di] << endl;
        for (size_t si = 0; si < sizes.size(); ++si) {
            compare_distances(sizes[si], dimensions[di]);
        }
    }
    cout << "comparing the performance difference of radix sort" << endl;
    for (size_t si = 0; si < sizes.size(); ++si) {
        compare_sort(sizes[si]);
    }
    cout << "comparing the performance difference of Pearson correlation" << endl;
    for (size_t di = 0; di < dimensions.size(); ++di) {
        cout << "dimension: " << dimensions[di] << endl;
        for (size_t si = 0; si < sizes.size(); ++si) {
            compare_rho(sizes[si], dimensions[di]);
        }
    }
    return 0;
}
|
12,233 | #include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
#define ITER 65535
// Version CPU de la función suma de vectores
// CPU reference version of vector addition: c[i] = a[i] + b[i] for i in [0, n).
void vector_add_cpu(int *a, int *b, int *c, int n) {
    for (int idx = 0; idx < n; ++idx) {
        c[idx] = a[idx] + b[idx];
    }
}
// Versión GPU de la función suma de vectores
// GPU version of vector addition: gpu_c[i] = gpu_a[i] + gpu_b[i].
// FIX: the original indexed with threadIdx.x only, which is wrong for any
// multi-block launch (and the caller's single-block launch exceeded the
// 1024 threads-per-block hardware limit). Use the global thread index and
// guard the tail so any <<<blocks, threads>>> configuration is correct.
__global__ void vector_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        gpu_c[i] = gpu_a[i] + gpu_b[i];
    }
}
// Compare CPU vs GPU vector addition over ITER elements.
// FIXES vs the original:
//  - the managed device inputs were never initialized (the GPU added garbage);
//  - <<<1, ITER>>> with ITER = 65535 exceeds the 1024 threads-per-block limit,
//    so the kernel never launched — use a valid multi-block configuration;
//  - cudaFree() was applied to the malloc'd host arrays instead of the
//    cudaMallocManaged pointers.
int main() {
    int *a, *b, *c;
    int *gpu_a, *gpu_b, *gpu_c;
    a = (int *)malloc(ITER * sizeof(int));
    b = (int *)malloc(ITER * sizeof(int));
    c = (int *)malloc(ITER * sizeof(int));
    // Managed memory is accessible from both host and device.
    cudaMallocManaged(&gpu_a, ITER * sizeof(int));
    cudaMallocManaged(&gpu_b, ITER * sizeof(int));
    cudaMallocManaged(&gpu_c, ITER * sizeof(int));
    for (int i = 0; i < ITER; ++i) {
        a[i] = i;
        b[i] = i;
        c[i] = i;
        // fill the device inputs with the same data as the host arrays
        gpu_a[i] = i;
        gpu_b[i] = i;
    }
    // Time the CPU version
    auto cpu_start = Clock::now();
    vector_add_cpu(a, b, c, ITER);
    auto cpu_end = Clock::now();
    std::cout << "vector_add_cpu: "
    << std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count()
    << " nanoseconds.\n";
    // Time the GPU version with a valid launch configuration
    int threads = 256;
    int blocks = (ITER + threads - 1) / threads;  // ceiling division
    auto gpu_start = Clock::now();
    vector_add_gpu <<<blocks, threads>>> (gpu_a, gpu_b, gpu_c, ITER);
    cudaDeviceSynchronize();
    auto gpu_end = Clock::now();
    std::cout << "vector_add_gpu: "
    << std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count()
    << " nanoseconds.\n";
    // Release the managed (device-visible) allocations
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_c);
    // Release the host allocations
    free(a);
    free(b);
    free(c);
    return 0;
}
|
12,234 | /****
Author: David Tran
File: minDistSOAGPU.cu
Compilation: nvcc minDistSOAGPU.cu -Wno-deprecated-gpu-targets -o minDistSOAGPU
Execution: dtran7.csc656@tiger:~$ ./minDistSOAGPU
***/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <cuda.h>
// You may edit NUMPARTICLES and THREADSPERBLOCK for measurements
#define NUMPARTICLES 32768
#define THREADSPERBLOCK 4
void initPos(float *);
void findMinsG(float *pos, int *minIndex, float *minDistance);
void dumpResults(int index[], float d[]);
__global__ void findMinsGPU(float *p, int *minI, float *minD);
__device__ float findDistanceGPU(float *p, int i, int j);
// You are not allowed to change main()!
// Allocates SOA particle positions plus per-particle result arrays, times the
// whole GPU pipeline (transfers + kernel) with CUDA events, and writes the
// nearest-neighbour results to dump.out. (Per the assignment, unmodified.)
int main() {
cudaEvent_t start, stop;
float time;
float *pos;
int *minIndex;
float *minDistance;
// pos holds x, y, z planes of NUMPARTICLES floats each (structure of arrays)
pos = (float *) malloc(NUMPARTICLES * 3 * sizeof(float));
minIndex = (int *) malloc(NUMPARTICLES * sizeof(int));
minDistance = (float *) malloc(NUMPARTICLES * sizeof(float));
initPos(pos);
// create timer events
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
findMinsG(pos, minIndex, minDistance);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// elapsed time in milliseconds between the two events
cudaEventElapsedTime(&time, start, stop);
printf("%d particles, %d threads per block\n", NUMPARTICLES, THREADSPERBLOCK);
printf("Elapsed time = %f\n", time);
dumpResults(minIndex, minDistance);
}
// Fill the particle positions with uniform random coordinates in [0, 1].
// SOA layout: x in p[0..N), y in p[N..2N), z in p[2N..3N).
void initPos(float *p) {
    float *px = p;
    float *py = p + NUMPARTICLES;
    float *pz = p + NUMPARTICLES * 2;
    for (int n = 0; n < NUMPARTICLES; n++) {
        // keep the rand() call order: x, then y, then z, per particle
        px[n] = rand() / (float) RAND_MAX;
        py[n] = rand() / (float) RAND_MAX;
        pz[n] = rand() / (float) RAND_MAX;
    }
}
// Host wrapper: copies the SOA positions to the device, launches findMinsGPU
// (one thread per particle), and copies the per-particle nearest-neighbour
// index/distance back to the host arrays.
// FIXES: the three device allocations previously leaked (cleanup was commented
// out with the wrong variable names), and the deprecated cudaThreadSynchronize
// is replaced by cudaDeviceSynchronize.
void findMinsG(float *pos, int *minIndex, float *minDistance) {
    float *dPos, *mDistance;
    int *mIndex;
    cudaMalloc((void **) &dPos, NUMPARTICLES * 3 * sizeof(float));
    cudaMalloc((void **) &mDistance, NUMPARTICLES * sizeof(float));
    cudaMalloc((void **) &mIndex, NUMPARTICLES * sizeof(int));
    cudaMemcpy(dPos, pos, NUMPARTICLES * 3 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(mDistance, minDistance, NUMPARTICLES * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(mIndex, minIndex, NUMPARTICLES * sizeof(int), cudaMemcpyHostToDevice);
    // one thread per particle; NUMPARTICLES is a multiple of THREADSPERBLOCK
    findMinsGPU<<<NUMPARTICLES/THREADSPERBLOCK, THREADSPERBLOCK>>>(dPos, mIndex, mDistance);
    cudaDeviceSynchronize();
    cudaMemcpy(minIndex, mIndex, NUMPARTICLES * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(minDistance, mDistance, NUMPARTICLES * sizeof(float), cudaMemcpyDeviceToHost);
    // release device memory (previously leaked)
    cudaFree(dPos);
    cudaFree(mDistance);
    cudaFree(mIndex);
}
/* device function to find distances */
/* Device helper: squared Euclidean distance between particles i and j.
   SOA layout: x at p[0..N), y at p[N..2N), z at p[2N..3N). */
__device__ float findDistanceGPU(float *p, int i, int j) {
    float dx = p[i] - p[j];
    float dy = p[NUMPARTICLES + i] - p[NUMPARTICLES + j];
    float dz = p[NUMPARTICLES * 2 + i] - p[NUMPARTICLES * 2 + j];
    return dx * dx + dy * dy + dz * dz;
}
/* kernel function that calculates the minimum distance */
/* One thread per particle: scan all other particles and record the index of
   the nearest neighbour (minI) and its squared distance (minD). */
__global__ void findMinsGPU(float *p, int *minI, float *minD) {
    int self = blockDim.x * blockIdx.x + threadIdx.x;
    // seed the running minimum with some particle other than self
    int best = (self != 0) ? 0 : 1;
    float bestDist = findDistanceGPU(p, self, best);
    for (int other = 0; other < NUMPARTICLES; other++) {
        if (other == self) continue;
        float d2 = findDistanceGPU(p, self, other);
        if (d2 < bestDist) {
            bestDist = d2;
            best = other;
        }
    }
    minI[self] = best;
    minD[self] = bestDist;
}
/* Write one line per particle ("i nearestIndex squaredDistance") to ./dump.out.
   FIX: now checks that the file actually opened before writing (the original
   would pass NULL to fprintf on failure). */
void dumpResults(int index[], float d[]) {
    FILE *fp = fopen("./dump.out", "w");
    if (fp == NULL) {
        perror("./dump.out");
        return;
    }
    for (int i = 0; i < NUMPARTICLES; i++) {
        fprintf(fp, "%d %d %f\n", i, index[i], d[i]);
    }
    fclose(fp);
}
|
12,235 | #include "includes.h"
// For each i < nwl, extract one element from a strided record in lst:
// stt[i] = lst[dim+1 + i*(dim+4)]. Records are (dim+1+1+1+1) floats wide and
// the extracted field sits at offset dim+1 inside each record.
__global__ void setStatisticAtLast ( const int dim, const int nwl, const float *lst, float *stt ) {
    const int i = threadIdx.x + blockDim.x * blockIdx.x;
    if ( i >= nwl ) return;
    const int stride = dim + 1 + 1 + 1 + 1;  // keep the original arithmetic visible
    stt[i] = lst[dim + 1 + i * stride];
} |
12,236 | #include "includes.h"
using namespace std;
// Sum each column of a numRows x numCols row-major matrix; one thread per
// column: d_result[col] = sum over rows of d_matrix[row][col].
// FIX: added a bounds guard — the original had none, so any launch whose
// total thread count exceeded numCols read and wrote out of bounds.
__global__ void sumCols(int* d_matrix, int* d_result, int numRows, int numCols) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numCols) return;
    int sum = 0;
    for (int i = 0; i < numRows; i++) {
        sum += d_matrix[idx + (numCols * i)];
    }
    d_result[idx] = sum;
} |
12,237 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define BLOCK_DIM 16
// Row Size & Column Size
const int N = 2;
const int SIZE = N * N;
// One thread per output element of an n x n row-major product:
// c[row][col] = sum_k a[row][k] * b[k][col]. Threads past the edge do nothing.
__global__ void matrixMult(int *c, int *a, int *b, int n)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= n || col >= n)
        return;
    int acc = 0;
    for (int k = 0; k < n; ++k)
        acc += a[row * n + k] * b[k * n + col];
    c[row * n + col] = acc;
}
// Multiply two fixed 2x2 integer matrices on the GPU and print the result.
// FIX: the grid was sized with ceil(N / dimBlock.x), where N / dimBlock.x is
// INTEGER division evaluated before ceil — too few blocks whenever N is not a
// multiple of the block dimension. Use integer ceiling division instead.
int main()
{
    srand(time(NULL));  // kept for parity with the original (inputs are fixed below)
    int a[N][N] = { { 1, 2 },
    { 2, 1 } };
    int b[N][N] = { { 1, 2 },
    { 2, 1 } };
    int *c;
    int *dev_a, *dev_b, *dev_c;
    c = (int *)malloc(SIZE * sizeof(int));
    cudaMalloc((void **)&dev_a, SIZE * sizeof(int));
    cudaMalloc((void **)&dev_b, SIZE * sizeof(int));
    cudaMalloc((void **)&dev_c, SIZE * sizeof(int));
    cudaMemcpy(dev_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    dim3 dimBlock(N, N);
    // integer ceiling division: correct for any N vs block-dimension ratio
    dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x, (N + dimBlock.y - 1) / dimBlock.y);
    matrixMult <<< dimGrid, dimBlock >>>(dev_c, dev_a, dev_b, N);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(c, dev_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < SIZE; ++i)
        printf("c[%d] = %d\n", i, c[i]);
    // Free the Host array memory
    free(c);
    // Free the Device array memory
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
12,238 | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <iostream>
#include <time.h>
#include <vector>
#include <cstddef>
#include <stddef.h>
/*
Small matrix library to perform various required matrix calculations.
*/
// Structure to define the matrix features. Elements stored in row major order.
// Matrix container. Elements are stored in row-major order on the heap.
// NOTE: there is no destructor and no deep copy — Matrix values share their
// `elements` pointer when copied, and the storage is never freed (this sample
// leaks by design). `stride` is not used by the visible code — TODO confirm
// its intended role before relying on it.
struct Matrix {
int height;
int width;
float* elements;
int stride;
// Default constructor: leaves all fields uninitialized.
Matrix() {}
// Constructor for a matrix structure: allocates height*width floats
// (contents uninitialized).
Matrix(int height, int width) {
this->height = height;
this->width = width;
this->elements = (float*)malloc(height * width * sizeof(float));
}
};
// Function to fill a matrix with random values between 0 and 1
// Build a new matrix of A's shape filled with uniform random values in [0, 1).
static Matrix matRand(Matrix A) {
    Matrix C(A.height, A.width);
    const int total = C.height * C.width;
    // flat row-major traversal preserves the original rand() call order
    for (int idx = 0; idx < total; idx++)
        C.elements[idx] = ((double)rand() / (RAND_MAX + 1.0));
    return C;
}
// Function to multiply two matrices together
// Matrix product C = A * B (A.width must equal B.height).
static Matrix matMult(const Matrix A, const Matrix B) {
    Matrix C(A.height, B.width);
    for (int row = 0; row < A.height; row++) {
        for (int col = 0; col < B.width; col++) {
            float acc = 0;
            for (int k = 0; k < A.width; k++)
                acc += A.elements[row * A.width + k] * B.elements[col + k * B.width];
            C.elements[row * B.width + col] = acc;
        }
    }
    return C;
}
// Function to subtract one matrix from another element wise
// Element-wise difference C = A - B (shapes assumed equal).
static Matrix matSub(const Matrix A, const Matrix B) {
    Matrix C(A.height, A.width);
    const int total = C.height * C.width;
    for (int idx = 0; idx < total; idx++)
        C.elements[idx] = A.elements[idx] - B.elements[idx];
    return C;
}
// Function to transpose a matrix
// Transpose: C (width x height) with C[c][r] = A[r][c].
static Matrix matTranspose(const Matrix A) {
    Matrix C(A.width, A.height);
    for (int c = 0; c < A.width; c++)
        for (int r = 0; r < A.height; r++)
            C.elements[c * C.width + r] = A.elements[r * A.width + c];
    return C;
}
// Function to add two matrices together element wise
// Element-wise sum C = A + B (shapes assumed equal).
static Matrix matAdd(const Matrix A, const Matrix B) {
    Matrix C(A.height, A.width);
    const int total = C.height * C.width;
    for (int idx = 0; idx < total; idx++)
        C.elements[idx] = A.elements[idx] + B.elements[idx];
    return C;
}
// Function to multiply each matrix element by a given value
// Scalar multiply: C = A * B, applied to every element.
static Matrix matScale(const Matrix A, float B) {
    Matrix C(A.height, A.width);
    const int total = C.height * C.width;
    for (int idx = 0; idx < total; idx++)
        C.elements[idx] = A.elements[idx] * B;
    return C;
}
// Function to multiply two matrices together element wise
// Hadamard (element-wise) product C = A .* B (shapes assumed equal).
static Matrix matElementMult(const Matrix A, const Matrix B) {
    Matrix C(A.height, A.width);
    const int total = C.height * C.width;
    for (int idx = 0; idx < total; idx++)
        C.elements[idx] = A.elements[idx] * B.elements[idx];
    return C;
}
// Function to run sigmoid activation to each element in the matrix
// Logistic sigmoid 1 / (1 + e^-x) applied to every element.
static Matrix activation(const Matrix A) {
    Matrix C(A.height, A.width);
    const int total = A.height * A.width;
    for (int idx = 0; idx < total; idx++)
        C.elements[idx] = 1 / (1 + exp(-A.elements[idx]));
    return C;
}
// Function to calculate the sigmoid derivative for each element in the matrix
// Sigmoid derivative a * (1 - a), assuming A already holds sigmoid outputs.
static Matrix derivative(const Matrix A) {
    Matrix C(A.height, A.width);
    const int total = A.height * A.width;
    for (int idx = 0; idx < total; idx++) {
        const float a = A.elements[idx];
        C.elements[idx] = a * (1 - a);
    }
    return C;
} |
12,239 | /*
* Almost the minimal CUDA C++ example.
*
* Compile with `nvcc -o cuda1 cuda1.cu`
*/
#include <iostream>
using namespace std;
/*
* Square all elements of array
*/
/* Square every element in place; the caller launches exactly one thread
   per element, so no bounds guard is used here. */
static __global__
void square(float *x) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    x[idx] *= x[idx];
}
// Minimal CUDA round trip: square 256 floats on the device and print them.
// FIX: host_data was allocated with new[] but released with free(), which is
// undefined behavior — it must be released with delete[].
int main(int argc, char **argv) {
    /*
     * From CUDA documentation:
     * "There is no explicit initialization function for the runtime;
     * it initializes the first time a runtime function is called"
     */
    /* Create data on CPU */
    float *host_data = new float[256];
    for (int i = 0; i < 256; ++i) {
        host_data[i] = float(i);
    }
    /* Copy data to GPU */
    float *device_data;
    cudaMalloc(&device_data, 256*sizeof(float));
    cudaMemcpy(device_data, host_data, 256*sizeof(float), cudaMemcpyHostToDevice);
    /*
     * Call `square` with the array.
     * 2 blocks x 128 threads covers all 256 elements exactly.
     */
    square<<<2, 128>>>(device_data);
    /* Return data to CPU (blocking copy also synchronizes with the kernel) */
    cudaMemcpy(host_data, device_data, 256*sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 256; ++i) {
        cout << host_data[i] << ' ';
    }
    cout << endl;
    cudaFree(device_data);
    // new[] allocation must be released with delete[], not free()
    delete[] host_data;
}
|
12,240 |
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/* Element-wise vector addition C = A + B; one thread per element with a
   guard so partial final blocks are safe. */
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    C[idx] = A[idx] + B[idx];
}
#define numElements (50000)
#define size (numElements * sizeof(float))
#define threadsPerBlock (256)
#define blocksPerGrid ((numElements + threadsPerBlock - 1) / threadsPerBlock)
static float h_A[numElements];
static float h_B[numElements];
static float h_C[numElements];
static float *d_A = 0;
static float *d_B = 0;
static float *d_C = 0;
// Add two 50000-element vectors on the GPU and verify C[i] == 3*i.
// Returns 0 on success, 1 on mismatch (same exit codes as before).
// FIX: the device buffers were never freed on either exit path; release them
// before returning.
int main(void)
{
    // Initialize the host input vectors: A[i] = i, B[i] = 2i, expect C[i] = 3i
    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = i;
        h_B[i] = i * 2;
    }
    cudaMalloc((void **)&d_A, size);
    cudaMalloc((void **)&d_B, size);
    cudaMalloc((void **)&d_C, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    int status = 0;
    for (int i = 0; i < numElements; ++i)
    {
        if (h_C[i] != i * 3)
        {
            status = 1;
            break;
        }
    }
    // free device memory on every exit path (previously leaked)
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return status;
}
|
12,241 | #include <stdio.h>
#define N 256
#define TPB 256
/* Each thread prints a greeting tagged with its global thread index. */
__global__ void cuda_hello()
{
    int globalId = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Hello World! My threadId is %d\n", globalId);
}
int main()
{
    // One greeting per thread: N/TPB blocks of TPB threads (N == TPB here,
    // so a single full block).
    cuda_hello<<<N/TPB, TPB>>>();
    // Wait for the kernel so its printf output is flushed before exit.
    cudaDeviceSynchronize();
    return 0;
}
|
12,242 | // author: yohanes.gultom@gmail.com
// partial source: https://gist.github.com/wh5a/4313739
#include <stdio.h>
#include <time.h>
#define TILE_WIDTH 20
// create random matrix row-major-format
// Allocate a row x col row-major matrix; entries are random integers in
// [0, max) cast to float, or 0.0f everywhere when max <= 0.
// Caller owns the returned buffer (free()).
float* create_flat_matrix_rand(int row, int col, int max)
{
    float* m = (float*)malloc(row * col * sizeof(float));
    const int total = row * col;
    // flat traversal visits elements in the same row-major order as before,
    // so the rand() call sequence is unchanged
    for (int idx = 0; idx < total; idx++) {
        m[idx] = (max > 0) ? (float)(rand() % max) : 0.0f;
    }
    return m;
}
// Allocate a row x col row-major matrix with every entry set to val.
// Caller owns the returned buffer (free()).
float* create_flat_matrix(int row, int col, float val)
{
    float* m = (float*)malloc(row * col * sizeof(float));
    const int total = row * col;
    for (int idx = 0; idx < total; idx++) {
        m[idx] = val;
    }
    return m;
}
// print matrix row-major-format
// Print a row-major matrix: two decimals per entry, one matrix row per line.
void print_flat_matrix(float *m, int row, int col)
{
    for (int r = 0; r < row; r++) {
        for (int c = 0; c < col; c++) {
            printf("%.2f ", m[col * r + c]);
        }
        printf("\n");
    }
}
// Single-thread naive multiply: multiply = first * second, where first is
// m x p, second is p x q, all row-major. Intended for a <<<1,1>>> launch.
// FIX: row indexing used first[c*m + k]; the row stride of an m x p matrix is
// p, so the correct index is first[c*p + k]. The old code read the wrong
// elements (and out of bounds) whenever m != p.
__global__ void mmul_d(float *first, int m, int p, float *second, int q, float *multiply)
{
    for (int c = 0; c < m; c++) {
        for (int d = 0; d < q; d++) {
            float sum = .0f;
            for (int k = 0; k < p; k++) {
                sum = sum + first[c*p+k] * second[k*q+d];
            }
            multiply[c*q+d] = sum;
        }
    }
}
// One thread per OUTPUT ROW of multiply = first (m x p) * second (p x q).
// FIXES: (1) row indexing used first[c*m + k] instead of first[c*p + k]
// (wrong stride whenever m != p); (2) added a row guard so threads beyond m
// no longer write past the end of the m x q output buffer.
__global__ void mmul_d_thread(float *first, int m, int p, float *second, int q, float *multiply)
{
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c >= m) return;
    for (int d = 0; d < q; d++) {
        float sum = .0f;
        for (int k = 0; k < p; k++) {
            sum = sum + first[c*p+k] * second[k*q+d];
        }
        multiply[c*q+d] = sum;
    }
}
// Compute C = A * B
// Tiled shared-memory matrix multiply: C = A * B, all matrices row-major.
// Expects a 2D launch with blockDim = (TILE_WIDTH, TILE_WIDTH) and a grid
// covering numCColumns x numCRows. Out-of-range tile elements are zero-filled,
// so sizes need not be multiples of TILE_WIDTH.
__global__ void matrixMultiply(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
// per-block tiles of A and B staged in shared memory
__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x, by = blockIdx.y,
tx = threadIdx.x, ty = threadIdx.y,
Row = by * TILE_WIDTH + ty,
Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// walk the tiles along A's columns / B's rows
for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) {
if (Row < numARows && m*TILE_WIDTH+tx < numAColumns)
ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx];
else
ds_M[ty][tx] = 0;
if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows)
ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col];
else
ds_N[ty][tx] = 0;
// barrier: tiles fully loaded before any thread reads them
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += ds_M[ty][k] * ds_N[k][tx];
// barrier: all reads done before the next iteration overwrites the tiles
__syncthreads();
}
if (Row < numCRows && Col < numCColumns)
C[Row*numCColumns+Col] = Pvalue;
}
// Benchmark driver: times `reps` repetitions of (allocate, copy, multiply,
// copy back, free) for A (m x n) * B (n x q) = C (m x q), using either the
// tiled kernel (optimized == 1) or the one-thread-per-row kernel.
// FIX: nBlocks was sized from m * n elements, but mmul_d_thread assigns one
// thread per OUTPUT ROW — the old sizing launched far too many threads and
// (without a kernel-side guard) made rows >= m write out of bounds. Size the
// grid to cover m threads with ceiling division instead.
int main(int argc, char** argv)
{
    if (argc < 6) {
        printf("insufficient args. for A x B = C, required args: [row num A] [col num A/row num B] [col num B] [cuda block size] [reps] [optimized] [compare]\n");
        return EXIT_FAILURE;
    }
    int m, n, p, q = 0;
    m = atoi(argv[1]);
    n = atoi(argv[2]);
    p = n;                      // inner dimension shared by A and B
    q = atoi(argv[3]);
    int blockSize = atoi(argv[4]);
    // one thread per output row: ceil(m / blockSize) blocks
    int nBlocks = (blockSize > 0) ? (m + blockSize - 1) / blockSize : 0;
    int reps = atoi(argv[5]);
    // optimized = use the tiled kernel and ignore blockSize/nBlocks
    int optimized = (argc >= 7) ? atoi(argv[6]) : 0;
    int compare = (argc >= 8) ? atoi(argv[7]) : 0;
    // grid/block for the tiled kernel
    dim3 dimGrid((q-1)/TILE_WIDTH+1, (m-1)/TILE_WIDTH+1, 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
    float *first_d, *second_d, *multiply_d;
    float *first, *second, *multiply;
    double total_time = 0.0;
    for (int i = 0; i < reps; i++) {
        // allocation/copy cost is deliberately part of the timed region
        double exec_time = ((double) clock()) * -1;
        first = create_flat_matrix(m, n, 1);
        second = create_flat_matrix(p, q, 2);
        multiply = create_flat_matrix(m, q, 0);
        cudaMalloc((void **) &first_d, m * n * sizeof(float));
        cudaMalloc((void **) &second_d, p * q * sizeof(float));
        cudaMalloc((void **) &multiply_d, m * q * sizeof(float));
        cudaMemcpy(first_d, first, m * n * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(second_d, second, p * q * sizeof(float), cudaMemcpyHostToDevice);
        if (optimized == 1) {
            matrixMultiply<<<dimGrid, dimBlock>>>(first_d, second_d, multiply_d, m, n, p, q, m, q);
        } else {
            mmul_d_thread <<< nBlocks, blockSize >>> (first_d, m, n, second_d, q, multiply_d);
        }
        // blocking copy synchronizes with the kernel
        cudaMemcpy(multiply, multiply_d, m * q * sizeof(float), cudaMemcpyDeviceToHost);
        if (compare == 1) {
            printf("first:\n");
            print_flat_matrix(first, m, n);
            printf("second:\n");
            print_flat_matrix(second, p, q);
            printf("multiply:\n");
            print_flat_matrix(multiply, m, q);
        }
        free(multiply); free(second); free(first);
        cudaFree(first_d); cudaFree(second_d); cudaFree(multiply_d);
        total_time = total_time + ((exec_time + ((double)clock())) / CLOCKS_PER_SEC);
    }
    printf("%d\t%d\t%d\t%d\t%d\t%.6f\n", m, n, q, blockSize, reps, (total_time / reps));
    return EXIT_SUCCESS;
}
|
12,243 | // Use Thrust library to sort a vector of elements.
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/sort.h>
#include<thrust/copy.h>
// Sort 10^7 random longs on the GPU with Thrust, then print them.
int main() {
    const long n = pow(10, 7);
    srand(time(0));
    // generate n random keys on the host
    thrust::host_vector<long> keys(n);
    for (long i = 0; i < n; i++) {
        keys[i] = rand() % (8 * n);
    }
    // copy to device, sort there, copy back
    thrust::device_vector<long> dkeys = keys;
    thrust::sort(dkeys.begin(), dkeys.end());
    thrust::copy(dkeys.begin(), dkeys.end(), keys.begin());
    // print five values per line (original format, newline after i % 5 == 0)
    for (long i = 0; i < n; i++) {
        printf("%ld ", keys[i]);
        if (!(i % 5)) puts("");
    }
    puts("");
    return 0;
}
|
12,244 | /*
Update 29/08/2020
Generalized image:
g(xT,xR,z) = -1/(2pi c) SS z^2 / rT^1.5 / rR^1.5 u_t(xT',xR',t=(rT+rR)/c) dxT'dxR'
where u_t is d/dt(u), or alternatively filtered by F(w)=jw
Input: u_t
*/
#define pi 3.141592f
// Forward imaging with FMC
// Delay-and-sum forward imaging from Full Matrix Capture (FMC) data.
// One thread per output voxel (zn, xRn, xTn); pOut is indexed z-fastest as
// zn + xRn*Nz + xTn*Nz*Nx, pIn is indexed time-fastest as t + rx*Nt + tx*Nt*N.
// For every physical Tx/Rx element pair the thread samples the complex trace
// at the two-way time of flight (linear interpolation in t) and accumulates
// it with a beamspread weight z / r^2 * sqrt(r) per leg. angFilt (degrees)
// rejects contributions whose propagation angle is too far off-axis; 0
// disables the filter. Array element x-positions are tx*d - (N-1)*d/2.
__global__ void BP_DAS_FWD_FMC_complex(float2 * pOut, const float2 * pIn, int N, int Nx, int Nz, int Nt, float d, float dx, float dz, float dt, float c, float x0, float z0, float t0, float angFilt)
{
int zn = threadIdx.x + blockDim.x * blockIdx.x;
int xRn = threadIdx.y + blockDim.y * blockIdx.y;
int xTn = threadIdx.z + blockDim.z * blockIdx.z;
float xT = xTn * dx + x0;
float xR = xRn * dx + x0;
float z = zn * dz + z0;
if( xTn<Nx && xRn<Nx && zn<Nz )
{
int idxOut = zn + xRn*Nz + xTn*Nz*Nx;
float Areal = 0;
float Aimag = 0;
// loop over transmitting elements
for(int tx = 0; tx<N; tx++)
{
float Xtx = (float) tx*d - (N-1)*d/2;
float Rtx = sqrtf( (xT-Xtx)*(xT-Xtx) + z*z );
float invRtx2 = 1/(Rtx*Rtx);
float sqrtRtx = sqrtf(Rtx);
float BeamspreadTx = 1;
if( Rtx>0 ) BeamspreadTx = z * invRtx2 * sqrtRtx ;
// angular aperture filter on the transmit leg (z/R = cos of the angle)
if( (z/Rtx > __cosf(angFilt*pi/180)) || angFilt==0 )
// loop over receiving elements
for(int rx = 0; rx<N; rx++)
{
float Xrx = (float) rx*d - (N-1)*d/2;
float Rrx = sqrtf( (xR-Xrx)*(xR-Xrx) + z*z );
// two-way time of flight, and its floor sample index in the trace
float ToF = (Rtx + Rrx)/c;
int ToFn = floorf((ToF-t0)/dt);
float ToFi = ToFn*dt + t0;
if( (z/Rrx > __cosf(angFilt*pi/180)) || angFilt==0 )
if(ToFn>0 && ToFn<Nt-1)
{
int idxIn = ToFn + rx*Nt + tx*Nt*N;
int idxIn1 = (ToFn+1) + rx*Nt + tx*Nt*N;
if( idxIn>0 && idxIn<N*N*Nt-1)
{
float invRrx2 = 1/(Rrx*Rrx);
float sqrtRrx = sqrtf(Rrx);
float BeamspreadRx = 1;
if( Rrx>0 ) BeamspreadRx = z * invRrx2 * sqrtRrx ;
float coeff = BeamspreadTx * BeamspreadRx;
// linear interpolation between the two neighbouring time samples
Areal += (pIn[idxIn].x + (pIn[idxIn1].x-pIn[idxIn].x)/dt*(ToF - ToFi))*coeff;
Aimag += (pIn[idxIn].y + (pIn[idxIn1].y-pIn[idxIn].y)/dt*(ToF - ToFi))*coeff;
}
}
}
}
// overall scaling from the imaging formula: -1/(2 pi c) dxT' dxR'
pOut[idxOut].x = -Areal*d*d/2/pi/c;
pOut[idxOut].y = -Aimag*d*d/2/pi/c;
//__syncthreads();
}
}
// Forward imaging with HMC
// Delay-and-sum forward imaging from Half Matrix Capture (HMC) data.
// Same scheme as the FMC kernel, but pIn stores only the upper triangle of
// Tx/Rx pairs (rx >= tx), addressed via idxHMC = tx*N+rx - (tx+1)*tx/2.
// Each stored pair also contributes its reciprocal (swapped Tx/Rx) term via
// the *_sym variables, and the output is mirrored across (xTn, xRn).
__global__ void BP_DAS_FWD_HMC_complex(float2 * pOut, const float2 * pIn, int N, int Nx, int Nz, int Nt, float d, float dx, float dz, float dt, float c, float x0, float z0, float t0, float angFilt)
{
int zn = threadIdx.x + blockDim.x * blockIdx.x;
int xRn = threadIdx.y + blockDim.y * blockIdx.y;
int xTn = threadIdx.z + blockDim.z * blockIdx.z;
float xT = xTn * dx + x0;
float xR = xRn * dx + x0;
float z = zn * dz + z0;
if( xTn<Nx && xRn<Nx && zn<Nz )
{
int idxOut = zn + xRn*Nz + xTn*Nz*Nx;
// mirrored voxel index with xT and xR swapped
int idxOut_sym = zn + xTn*Nz + xRn*Nz*Nx;
float Areal = 0;
float Aimag = 0;
for(int tx = 0; tx<N; tx++)
{
float Xtx = (float) tx*d - (N-1)*d/2;
float Rtx = sqrtf( (xT-Xtx)*(xT-Xtx) + z*z );
float invRtx2 = 1/(Rtx*Rtx);
float sqrtRtx = sqrtf(Rtx);
float BeamspreadTx = 1;
if( Rtx>0 ) BeamspreadTx = z * invRtx2 * sqrtRtx ;
if( (z/Rtx > __cosf(angFilt*pi/180)) || angFilt==0 )
// HMC: only pairs with rx >= tx are stored
for(int rx = tx; rx<N; rx++)
{
float Xrx = (float) rx*d - (N-1)*d/2;
float Rrx = sqrtf( (xR-Xrx)*(xR-Xrx) + z*z );
float ToF = (Rtx + Rrx)/c;
int ToFn = floorf((ToF-t0)/dt);
float ToFi = ToFn*dt + t0;
// NOTE(review): sqrt() below is the double-precision overload, unlike
// the sqrtf() used everywhere else in this kernel — possibly
// unintended; confirm before relying on bit-exact output.
float Rtx_sym = sqrt( (xT-Xrx)*(xT-Xrx) + z*z );
float Rrx_sym = sqrt( (xR-Xtx)*(xR-Xtx) + z*z );
float ToF_sym = (Rtx_sym + Rrx_sym)/c;
int ToFn_sym = floorf((ToF_sym-t0)/dt);
float ToFi_sym = ToFn_sym*dt + t0;
if( (z/Rrx > __cosf(angFilt*pi/180)) || angFilt==0 )
if(ToFn>0 && ToFn<Nt-1 && ToFn_sym>0 && ToFn_sym<Nt-1)
{
// linear index of the (tx, rx) pair in the packed upper triangle
int idxHMC = tx*N+rx - (tx+1)*tx/2;
int idxIn = ToFn + idxHMC*Nt;
int idxIn1 = (ToFn+1) + idxHMC*Nt;
int idxIn_sym = ToFn_sym + idxHMC*Nt;
int idxIn1_sym = (ToFn_sym+1) + idxHMC*Nt;
if( idxIn>0 && idxIn1<N*(N+1)/2*Nt && idxIn_sym>0 && idxIn1_sym<N*(N+1)/2*Nt)
{
float invRrx2 = 1/(Rrx*Rrx);
float invRtx_sym2 = 1/(Rtx_sym*Rtx_sym);
float invRrx_sym2 = 1/(Rrx_sym*Rrx_sym);
float sqrtRrx = sqrtf(Rrx);
float sqrtRtx_sym = sqrtf(Rtx_sym);
float sqrtRrx_sym = sqrtf(Rrx_sym);
float BeamspreadRx = 1;
if( Rrx>0 ) BeamspreadRx = z * invRrx2 * sqrtRrx ;
float BeamspreadRx_sym = 1;
if( Rrx_sym>0 ) BeamspreadRx_sym = z * invRrx_sym2 * sqrtRrx_sym ;
float BeamspreadTx_sym = 1;
if( Rtx_sym>0 ) BeamspreadTx_sym = z * invRtx_sym2 * sqrtRtx_sym ;
float coeff = BeamspreadTx * BeamspreadRx;
float coeff_sym = BeamspreadTx_sym * BeamspreadRx_sym;
Areal += (pIn[idxIn].x + (pIn[idxIn1].x-pIn[idxIn].x)/dt*(ToF - ToFi))*coeff;
Aimag += (pIn[idxIn].y + (pIn[idxIn1].y-pIn[idxIn].y)/dt*(ToF - ToFi))*coeff;
// add the reciprocal (Tx/Rx swapped) contribution once per off-diagonal pair
if(rx!=tx)
{
Areal += (pIn[idxIn_sym].x + (pIn[idxIn1_sym].x-pIn[idxIn_sym].x)/dt*(ToF_sym - ToFi_sym))*coeff_sym;
Aimag += (pIn[idxIn_sym].y + (pIn[idxIn1_sym].y-pIn[idxIn_sym].y)/dt*(ToF_sym - ToFi_sym))*coeff_sym;
}
}
}
}
}
pOut[idxOut].x = -Areal*d*d/2/pi/c;
pOut[idxOut].y = -Aimag*d*d/2/pi/c;
// mirror the result to the swapped (xR, xT) voxel
if(xTn!=xRn)
{
pOut[idxOut_sym].x = -Areal*d*d/2/pi/c;
pOut[idxOut_sym].y = -Aimag*d*d/2/pi/c;
}
//__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
Update 12/03/2020
Recovered data:
u(xT,xR,t) = 1/(2pi c) d/dt SS 1 / rT^0.5 / rR^0.5 g(xT',xR',z=zeta) dxT'dxR'
where zeta = sqrt( (ct+(DxT+DxR))*(ct-(DxT+DxR))*(ct+(DxT-DxR))*(ct-(DxT-DxR)) ) / (2ct)
Output: 1/(2pi c) SS 1 / rT^0.5 / rR^0.5 g(xT',xR',z=zeta) dxT'dxR'
Calculate the derivative outside.
*/
// Inverse imaging with FMC
// Inverse imaging (data recovery) from an FMC image volume.
// One thread per output time sample (tn, rx, tx); pOut is indexed
// tn + rx*Nt + tx*Nt*N, pIn (the image) is indexed zn + xRn*Nz + xTn*Nz*Nx.
// For every image column pair (xT', xR') the thread finds the depth z whose
// two-way travel time matches t (via ch1/ch2 below), samples the image there
// with linear interpolation in z, and accumulates with a 1/sqrt(rT*rR)
// weight. The time window |t - tc| < Twidth/2 limits work to a gate centred
// on the round-trip time of the point (posX, posZ).
__global__ void BP_DAS_INV_FMC(float * pOut, const float * pIn, int N, int Nx, int Nz, int Nt, float d, float dx, float dz, float dt, float c, float x0, float z0, float t0, float Twidth, float posX, float posZ)
{
int tn = threadIdx.x + blockDim.x * blockIdx.x;
int rx = threadIdx.y + blockDim.y * blockIdx.y;
int tx = threadIdx.z + blockDim.z * blockIdx.z;
float Xtx = (float) tx*d - (N-1)*d/2;
float Xrx = (float) rx*d - (N-1)*d/2;
float t = (float) tn*dt + t0;
// round-trip time from (posX, posZ) to this Tx/Rx pair: centre of the gate
float tc = (sqrtf( (Xtx-posX)*(Xtx-posX) + posZ*posZ ) + sqrtf( (Xrx-posX)*(Xrx-posX) + posZ*posZ))/c ;
if( abs( t-tc) < Twidth/2 )
if( tx<N && rx<N && tn<Nt )
{
int idxOut = tn + rx*Nt + tx*Nt*N;
float Amplitude = 0;
for(int xTn = 0; xTn<Nx; xTn++)
{
float xT = xTn * dx + x0;
for(int xRn = 0; xRn<Nx; xRn++)
{
float xR = xRn * dx + x0;
// solve for the depth whose total path equals c*t (isochrone)
float ch1 = (c*c*t*t + (xR-Xrx)*(xR-Xrx) - (xT-Xtx)*(xT-Xtx))/(2*c*t);
float ch2 = ch1*ch1 - (xR-Xrx)*(xR-Xrx);
if( ch2>=0 )
{
float z = sqrtf( ch2 );
int zn = floorf((z-z0)/dz);
float zi = zn*dz + z0;
if(zn>0 && zn<Nz-1)
{
float Rtx = sqrtf( (xT-Xtx)*(xT-Xtx) + z*z );
float Rrx = sqrtf( (xR-Xrx)*(xR-Xrx) + z*z );
float coeff = 1 / sqrtf(Rtx*Rrx);
int idxIn = zn + xRn*Nz + xTn*Nz*Nx;
int idxIn1 = zn+1 + xRn*Nz + xTn*Nz*Nx;
if( idxIn>0 && idxIn<Nx*Nx*Nz-1)
{
// linear interpolation between the two neighbouring depth samples
Amplitude += (pIn[idxIn] + (pIn[idxIn1]-pIn[idxIn])/dz*(z - zi)) *coeff;
}
}
}
}
}
// overall scaling from the recovery formula: 1/(2 pi c) dxT' dxR'
pOut[idxOut] = Amplitude*dx*dx/2/pi/c;
//__syncthreads();
}
}
// Inverse imaging with HMC
// Inverse imaging (data recovery) producing Half Matrix Capture output.
// Like BP_DAS_INV_FMC, but only pairs with tx <= rx are computed and the
// output is packed into the upper triangle via tx*N+rx - (tx+1)*tx/2. The
// image is likewise traversed over its upper triangle (xRn >= xTn), and each
// off-diagonal image pair contributes both its direct and its Tx/Rx-swapped
// (*_sym) term.
__global__ void BP_DAS_INV_HMC(float * pOut, const float * pIn, int N, int Nx, int Nz, int Nt, float d, float dx, float dz, float dt, float c, float x0, float z0, float t0, float Twidth, float posX, float posZ)
{
int tn = threadIdx.x + blockDim.x * blockIdx.x;
int rx = threadIdx.y + blockDim.y * blockIdx.y;
int tx = threadIdx.z + blockDim.z * blockIdx.z;
float Xtx = (float) tx*d - (N-1)*d/2;
float Xrx = (float) rx*d - (N-1)*d/2;
float t = (float) tn*dt + t0;
// round-trip time from (posX, posZ): centre of the processing gate
float tc = (sqrtf( (Xtx-posX)*(Xtx-posX) + posZ*posZ ) + sqrtf( (Xrx-posX)*(Xrx-posX) + posZ*posZ))/c ;
if( abs( t-tc) < Twidth/2 )
if( tx<N && tx<=rx && tn<Nt )
{
//int idxOut = tn + rx*Nt + tx*Nt*N;
//int idxOut = tn + ((tx+1)*(tx+2)/2-(tx-rx)-1) * Nt;
// packed upper-triangle index of the (tx, rx) pair
int idxOut = tn + (tx*N+rx - (tx+1)*tx/2) * Nt;
// int idxOut = tn + (tx*N+rx) * Nt;
float Amplitude = 0;
for(int xTn = 0; xTn<Nx; xTn++)
{
float xT = xTn * dx + x0;
for(int xRn = xTn; xRn<Nx; xRn++)
{
float xR = xRn * dx + x0;
// isochrone depth for the direct (xT, xR) pairing...
float ch1 = (c*c*t*t + (xR-Xrx)*(xR-Xrx) - (xT-Xtx)*(xT-Xtx))/(2*c*t);
float ch2 = ch1*ch1 - (xR-Xrx)*(xR-Xrx);
// ...and for the swapped (xR, xT) pairing
float ch1_sym = (c*c*t*t + (xT-Xrx)*(xT-Xrx) - (xR-Xtx)*(xR-Xtx))/(2*c*t);
float ch2_sym = ch1_sym*ch1_sym - (xT-Xrx)*(xT-Xrx);
if( ch2>=0 && ch2_sym>=0)
{
float z = sqrtf( ch2 );
int zn = floorf((z-z0)/dz);
float z_sym = sqrtf( ch2_sym );
int zn_sym = floorf((z_sym-z0)/dz);
if(zn>0 && zn<Nz-1 && zn_sym>0 && zn_sym<Nz-1)
{
float Rtx = sqrtf( (xT-Xtx)*(xT-Xtx) + z*z );
float Rrx = sqrtf( (xR-Xrx)*(xR-Xrx) + z*z );
float Rtx_sym = sqrtf( (xR-Xtx)*(xR-Xtx) + z_sym*z_sym );
float Rrx_sym = sqrtf( (xT-Xrx)*(xT-Xrx) + z_sym*z_sym );
float coeff = 1 / sqrtf(Rtx*Rrx);
float coeff_sym = 1 / sqrtf(Rtx_sym*Rrx_sym);
int idxIn = zn + xRn*Nz + xTn*Nz*Nx;
int idxIn1 = zn+1 + xRn*Nz + xTn*Nz*Nx;
int idxIn_sym = zn_sym + xRn*Nz + xTn*Nz*Nx;
int idxIn1_sym = zn_sym+1 + xRn*Nz + xTn*Nz*Nx;
if( idxIn>0 && idxIn<Nx*Nx*Nz-2 && idxIn_sym>0 && idxIn_sym<Nx*Nx*Nz-2)
{
// depth-interpolated samples for the direct and swapped terms
float vi = (pIn[idxIn] + (pIn[idxIn1]-pIn[idxIn])/dz*(z - zn*dz))*coeff;
float vi_sym = (pIn[idxIn_sym] + (pIn[idxIn1_sym]-pIn[idxIn_sym])/dz*(z_sym - zn_sym*dz))*coeff_sym;
// diagonal image pairs contribute once; off-diagonal contribute both terms
if(xTn!=xRn) Amplitude += vi + vi_sym;
else Amplitude += vi;
}
}
}
}
}
pOut[idxOut] = Amplitude*dx*dx/2/pi/c;
}
|
12,245 | // Tests that "sm_XX" gets correctly converted to "compute_YY" when we invoke
// fatbinary.
//
// REQUIRES: clang-driver
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_20 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM20 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_21 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM21 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_30 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM30 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_32 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM32 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_35 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM35 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_37 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM37 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_50 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM50 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_52 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM52 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_53 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM53 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_60 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM60 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_61 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM61 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_62 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM62 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_70 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=CUDA,SM70 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx600 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX600 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx601 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX601 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx602 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX602 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx700 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX700 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx701 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX701 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx702 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX702 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx703 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX703 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx704 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX704 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx705 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX705 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx801 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX801 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx802 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX802 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx803 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX803 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx805 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX805 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx810 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX810 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx900 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX900 %s
// RUN: %clang -x hip -### -target x86_64-linux-gnu -c --cuda-gpu-arch=gfx902 %s 2>&1 \
// RUN: | FileCheck -check-prefixes=HIP,GFX902 %s
// CUDA: ptxas
// CUDA-SAME: -m64
// CUDA: fatbinary
// HIP: clang-offload-bundler
// SM20:--image=profile=sm_20{{.*}}--image=profile=compute_20
// SM21:--image=profile=sm_21{{.*}}--image=profile=compute_20
// SM30:--image=profile=sm_30{{.*}}--image=profile=compute_30
// SM32:--image=profile=sm_32{{.*}}--image=profile=compute_32
// SM35:--image=profile=sm_35{{.*}}--image=profile=compute_35
// SM37:--image=profile=sm_37{{.*}}--image=profile=compute_37
// SM50:--image=profile=sm_50{{.*}}--image=profile=compute_50
// SM52:--image=profile=sm_52{{.*}}--image=profile=compute_52
// SM53:--image=profile=sm_53{{.*}}--image=profile=compute_53
// SM60:--image=profile=sm_60{{.*}}--image=profile=compute_60
// SM61:--image=profile=sm_61{{.*}}--image=profile=compute_61
// SM62:--image=profile=sm_62{{.*}}--image=profile=compute_62
// SM70:--image=profile=sm_70{{.*}}--image=profile=compute_70
// GFX600:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx600
// GFX601:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx601
// GFX602:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx602
// GFX700:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx700
// GFX701:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx701
// GFX702:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx702
// GFX703:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx703
// GFX704:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx704
// GFX705:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx705
// GFX801:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx801
// GFX802:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx802
// GFX803:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx803
// GFX805:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx805
// GFX810:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx810
// GFX900:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx900
// GFX902:-targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx902
|
12,246 | #include "includes.h"
// Microbenchmark kernel: per thread, 1 shared write, 4 shared reads,
// ~10 arithmetic ops, 2 global reads and 1 global write.
// Assumes the kernel is launched with blockDim.x == 512 (Smem is sized and
// indexed by threadIdx.x with a modulo-512 wrap).
__global__ void shared4R1Ws10ops2RG1WG(float *A, float *B, float *C, const int N)
{
    __shared__ float Smem[512];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Stage this thread's element; zero-fill out-of-range slots so the
    // wrapped reads below never observe uninitialized shared memory
    // (the original left tail-block slots as garbage).
    Smem[threadIdx.x] = (i < N) ? A[i] : 0.0f;
    __syncthreads();
    if (i < N) {
        C[i] = A[i] + B[i] - A[i]*A[i] + 3*B[i] - 4*A[i]*B[i] + B[i]*B[i]*7- 8+Smem[(threadIdx.x+1)%512]+Smem[(threadIdx.x+2)%512]+Smem[(threadIdx.x+3)%512]+Smem[(threadIdx.x+4)%512];
    }
}
12,247 | #include <stdio.h>
#include <stdlib.h>
// Element-wise vector add with one block per element:
// c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x].
__global__ void add(int *a, int *b, int *c){
    int idx = blockIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Fill nums[0..size) with the constant 1 (placeholder "random" input so the
// expected vector-sum result is known).
void random_ints(int *nums, int size){
    int idx = 0;
    while (idx < size) {
        nums[idx] = 1;
        ++idx;
    }
}
#define N 512
// Driver: fills two N-element host vectors with 1s, adds them on the GPU
// (one block per element), prints the result, and frees all resources.
int main(void){
    int *a, *b, *c;       // host copies of a, b, c
    int *d_a, *d_b, *d_c; // device copies of a, b, c
    int size = N * sizeof(int);
    // Allocate space for device copies of a, b, c.
    // (The original checked none of the CUDA calls; failures were silent.)
    if (cudaMalloc((void **) &d_a, size) != cudaSuccess ||
        cudaMalloc((void **) &d_b, size) != cudaSuccess ||
        cudaMalloc((void **) &d_c, size) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }
    // Alloc space for host copies of a, b, c and setup input values
    a = (int *)malloc(size);
    random_ints(a, N);
    b = (int *)malloc(size);
    random_ints(b, N);
    c = (int *)malloc(size);
    //copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    //launch add() kernel on GPU with N blocks (one element per block)
    add<<<N,1>>>(d_a, d_b, d_c);
    if (cudaGetLastError() != cudaSuccess) {
        fprintf(stderr, "kernel launch failed\n");
        return 1;
    }
    // Copy result back to host; the blocking cudaMemcpy also synchronizes
    // with the kernel, so c is complete when it returns.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    //print results
    for (int i = 0; i < N; i++){
        printf("c[%d]=%d\n", i, c[i]);
    }
    //Cleanup
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
12,248 | #include "includes.h"
// Element-wise add over a flat 1-D grid. NOTE: there is no bounds check,
// so the caller must launch exactly one thread per element.
__global__ void add(int *a, int *b, int *c ) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
12,249 | #include "includes.h"
// Fully parenthesized min/max macros. The original unparenthesized versions
// mis-expand inside larger expressions (e.g. `1 + max(1, 2)` evaluated to 2
// because `?:` bound the whole `1 + 1 > 2` as its condition).
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
// An edge packed into one 64-bit word: high 32 bits = u, low 32 bits = v.
struct Edge{
long long int x;
};
// One thread per edge: unpack (u, v) and point the larger-numbered
// endpoint's parent at the smaller one (initial hooking step of a
// connected-components style algorithm).
__global__ void hook_init(int* parent, Edge* edge_list, int e){
int bid = blockIdx.x;
int id = bid*blockDim.x + threadIdx.x;
long long int x;
int u, v, mx, mn;
if(id < e){
x = edge_list[id].x;
v = (int) x & 0xFFFFFFFF; // low 32 bits
u = (int) (x >> 32);      // high 32 bits
mx = max(u, v);
mn = u + v - mx;          // the other endpoint
parent[mx] = mn;
}
return;
}
12,250 | // This is a simple reduction code to run on gpu with the host checker. here the final reduction over the blocks happens on cpu.
// We can iteratively launch the reductions on GPU to handle that scenario. This is not coded here.
// Uses the partition and summarize approach
#include<stdio.h>
#include<stdlib.h>
#define block_size 128
__global__
// Each block sums 2*block_size consecutive inputs into d_out[blockIdx.x]
// using an interleaved tree reduction in shared memory.
void gpu_reduceSum(float *d_out, float *d_in, int din_size){
    int num_elements = 2*block_size;
    __shared__ float shmem[2*block_size];
    int start = blockIdx.x*num_elements;
    int tx = threadIdx.x;
    // Stage inputs; zero-fill slots past the end of the array so a partial
    // tail block does not sum uninitialized shared memory (the original
    // simply skipped the write, leaving garbage).
    shmem[tx] = (start + tx < din_size) ? d_in[start + tx] : 0.0f;
    shmem[block_size + tx] = (start + tx + block_size < din_size) ? d_in[start + block_size + tx] : 0.0f;
    for(int stride=1; stride < num_elements; stride*=2){
        __syncthreads();
        if(tx % stride == 0)
            shmem[2*tx] += shmem[2*tx + stride];
    }
    // Barrier before reading the result: the final addition into shmem[0]
    // is done by thread 0 alone after the loop's last sync, so without this
    // barrier other threads raced and could publish a stale value.
    __syncthreads();
    if (tx == 0)
        d_out[blockIdx.x] = shmem[0];
}
// Set all N entries of arr to val.
void init(float *arr, int N, float val){
    for (int idx = 0; idx < N; ++idx)
        arr[idx] = val;
}
// Accumulate the sum of h_in[0..din_size) into *h_out.
// The caller is expected to pre-initialize *h_out (it is added to, not set).
void host_reduceSum(float *h_out, float *h_in, int din_size){
    float acc = *h_out;
    for (int k = 0; k < din_size; ++k)
        acc += h_in[k];
    *h_out = acc;
}
// Driver: reduces a 1024-element array of 1.0f on the GPU (one partial sum
// per block, final accumulation on the host) and checks it against a CPU
// reference sum.
int main(){
    float *d_in, *d_out;
    float *h_in, *h_out; // host mirror, only used to verify the GPU result
    int din_size = 1024;
    int elements = 2*block_size;           // inputs consumed per block
    int dout_size = (din_size-1)/elements + 1; // one partial sum per block
    //calculate memory
    size_t size_in = din_size*sizeof(float);
    size_t size_out = dout_size*sizeof(float);
    //allocate memory in host
    h_in = (float*) malloc (size_in);
    h_out = (float*) malloc (1*sizeof(float));
    //allocate memory using UVM (host can read d_out directly after sync)
    cudaMallocManaged(&d_in, size_in);
    cudaMallocManaged(&d_out, size_out);
    //Initialize device parameters
    init(d_in, din_size, 1.0);
    init(d_out, dout_size, 0.0);
    int num_threads = block_size;
    int num_blocks = (din_size-1)/elements + 1;
    //gpu sum reduction
    gpu_reduceSum<<<num_blocks, num_threads>>>(d_out, d_in, din_size);
    cudaDeviceSynchronize();
    //cpu reference sum
    init(h_in, din_size, 1.0);
    // BUG FIX: h_out holds exactly one float; the original initialized
    // dout_size entries here, writing past the end of the allocation.
    init(h_out, 1, 0.0);
    host_reduceSum(h_out, h_in, din_size);
    float final_out = 0.0;
    for (int i=0; i<num_blocks; i++){
        final_out += d_out[i];
    }
    int ok = (final_out == *h_out);
    // Release all resources (the original leaked every allocation).
    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    if (!ok){
        printf("Failure!!\n");
        return 0;
    }
    printf("Success!!\n");
    return 0;
}
|
12,251 | #include "includes.h"
__global__ void matrixMult(int* m,int* n, int* p, int size)
{
    // Computes p = m x n for square size-by-size integer matrices, one
    // output element per thread.
    // Launch example: matrixMult<<< grid_size, block_size >>>(d_m, d_n, d_p, n);
    // NOTE(review): n is indexed as n[col*size + i], i.e. read as if
    // transposed/column-major -- confirm against the host-side data layout.
    int row=blockIdx.y*blockDim.y+threadIdx.y;
    int col=blockIdx.x*blockDim.x+threadIdx.x;
    // Guard the grid tail: grids rarely divide `size` evenly, and the
    // original kernel wrote out of bounds for those threads.
    if (row >= size || col >= size)
        return;
    int p_sum = 0; // BUG FIX: was uninitialized, producing garbage results
    for(int i=0;i<size;i++){
        p_sum += m[row*size+i] * n[col*size+i];
    }
    p[row*size+col]=p_sum;
}
12,252 | #ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_
#include <stdio.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
// Cast-helper: lets a templated kernel view the single extern dynamic
// shared-memory allocation as an array of T, avoiding linker errors from
// multiple typed extern __shared__ declarations.
template<class T>
struct SharedMemory
{
// Mutable view of the dynamic shared-memory buffer.
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
// Read-only view of the same buffer.
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
// Specialization for double: declares the extern array with double type so
// the buffer is 8-byte aligned (casting an int-typed array would trigger
// unaligned-access compile errors).
template<>
struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
// True iff x is a power of two. Zero is explicitly rejected: the classic
// x & (x-1) trick alone incorrectly reports 0 as a power of two.
bool isPow2(unsigned int x)
{
return x != 0 && ((x&(x-1))==0);
}
// Smallest power of two >= x, via the standard bit-smearing trick.
// Assumes 1 <= x <= 2^31 so the result does not overflow.
unsigned int nextPow2(unsigned int x)
{
x -= 1;
for (unsigned int shift = 1; shift <= 16; shift <<= 1)
x |= x >> shift;
return x + 1;
}
#ifndef MIN
#define MIN(x,y) ((x < y) ? x : y)
#endif
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
// Requires blockDim.x == blockSize (template parameter) and at least
// 64*sizeof(T) bytes of dynamic shared memory (see the note above).
// Writes one partial sum per block to g_odata[blockIdx.x]; the caller
// re-launches until a single value remains.
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void
reduce6(T *g_idata, T *g_odata, unsigned int n)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
mySum += g_idata[i+blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
cg::sync(cta);
// do reduction in shared mem; each step halves the active threads, and the
// blockSize guards let dead branches compile out for small block sizes
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
cg::sync(cta);
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
cg::sync(cta);
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
cg::sync(cta);
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
if (cta.thread_rank() < 32)
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = tile32.size()/2; offset > 0; offset /= 2)
{
mySum += tile32.shfl_down(mySum, offset);
}
}
// write result for this block to global mem
if (cta.thread_rank() == 0) g_odata[blockIdx.x] = mySum;
}
// Host-side launcher for reduce6: picks the instantiation whose blockSize
// template argument matches the runtime thread count. `threads` must be a
// power of two in [1, 512].
template <class T>
void
reduce(int size, int threads, int blocks,
T *d_idata, T *d_odata)
{
    dim3 dimBlock(threads, 1, 1);
    dim3 dimGrid(blocks, 1, 1);
    // when there is only one warp per block, we need to allocate two warps
    // worth of shared memory so that we don't index shared memory out of bounds
    int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);

    // One case per supported block size; the macro keeps the two dispatch
    // tables (pow2 / non-pow2 input length) from being written out twice.
#define REDUCE6_CASE(BS, POW2)                                                         \
    case BS:                                                                           \
        reduce6<T, BS, POW2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); \
        break;

    if (isPow2(size))
    {
        switch (threads)
        {
            REDUCE6_CASE(512, true)
            REDUCE6_CASE(256, true)
            REDUCE6_CASE(128, true)
            REDUCE6_CASE(64, true)
            REDUCE6_CASE(32, true)
            REDUCE6_CASE(16, true)
            REDUCE6_CASE(8, true)
            REDUCE6_CASE(4, true)
            REDUCE6_CASE(2, true)
            REDUCE6_CASE(1, true)
        }
    }
    else
    {
        switch (threads)
        {
            REDUCE6_CASE(512, false)
            REDUCE6_CASE(256, false)
            REDUCE6_CASE(128, false)
            REDUCE6_CASE(64, false)
            REDUCE6_CASE(32, false)
            REDUCE6_CASE(16, false)
            REDUCE6_CASE(8, false)
            REDUCE6_CASE(4, false)
            REDUCE6_CASE(2, false)
            REDUCE6_CASE(1, false)
        }
    }
#undef REDUCE6_CASE
}
// Compute a (blocks, threads) launch configuration for reducing n elements,
// where each thread consumes two elements. Device properties are queried
// once and cached. NOTE: the function-local statics make this non-thread-safe.
void getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
    static int init = 0;
    static int device;
    static cudaDeviceProp prop;
    //get device capability, to avoid block/grid size exceed the upper bound
    if(init == 0){
        cudaGetDevice(&device);
        cudaGetDeviceProperties(&prop, device);
        init = 1;
        fprintf(stderr, "getNumBlocksAndThreads\n");
    }
    // Round threads up to a power of two covering n/2, capped at maxThreads.
    threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
    blocks = (n + (threads * 2 - 1)) / (threads * 2);
    if ((float)threads*blocks > (float)prop.maxGridSize[0] * prop.maxThreadsPerBlock)
    {
        fprintf( stderr, "n is too large, please choose a smaller number!\n");
        exit(1);
    }
    // Repeatedly trade grid size for block size until the grid fits.
    // (The original applied this adjustment exactly once, which is not
    // guaranteed to be enough for very large n.)
    while (blocks > prop.maxGridSize[0])
    {
        fprintf(stderr, "Grid size <%d> exceeds the device capability <%d>, set block size as %d (original %d)\n",
        blocks, prop.maxGridSize[0], threads*2, threads);
        blocks /= 2;
        threads *= 2;
    }
    blocks = MIN(maxBlocks, blocks);
}
// Instantiate the reduction function for 3 types
template void
reduce<int>(int size,int threads, int blocks,
int *d_idata, int *d_odata);
template void
reduce<float>(int size,int threads, int blocks,
float *d_idata, float *d_odata);
template void
reduce<double>(int size,int threads, int blocks,
double *d_idata, double *d_odata);
// Iteratively launch reduce() until a single value remains, ping-ponging
// between d_idata and d_odata, then copy that value to *result on the host.
// Both device buffers are used as scratch; their contents are clobbered.
template <class T>
void ParallelReduction( int n,
T *d_idata,
T *d_odata,
T* result)
{
    static int maxThreads = 256; // number of threads per block
    static int maxBlocks = 64;
    // Buffer currently holding the data to read back. Starting it at
    // d_idata fixes the original bug where, for n <= 1, a stale static
    // pointer (NULL on the first call) was passed to cudaMemcpy.
    T *tmp = d_idata;
    int s=n;
    while (s > 1)
    {
        int threads = 0, blocks = 0;
        getNumBlocksAndThreads(s, maxBlocks, maxThreads, blocks, threads);
        reduce<T>(s, threads, blocks, d_idata, d_odata);
        // Ping-pong: the freshly written partial sums become the next input.
        tmp = d_odata;
        d_odata = d_idata;
        d_idata = tmp;
        s = (s + (threads*2-1)) / (threads*2);
    }
    // tmp points at the buffer holding the final (or only) element.
    cudaMemcpy( result, tmp, sizeof(T), cudaMemcpyDeviceToHost);
    return;
}
// Instantiate the reduction function for 3 types
template void
ParallelReduction<int>( int n,
int *d_idata,
int *d_odata,
int *result);
template void
ParallelReduction<float>( int n,
float *d_idata,
float *d_odata,
float *result);
template void
ParallelReduction<double>( int n,
double *d_idata,
double *d_odata,
double*result);
#endif // #ifndef _REDUCE_KERNEL_H_
|
12,253 | /*
* @author Connie Shi
* Lab 3: Write a reduction program in CUDA that finds the maximum of
* an array of M integers.
* Part 3: Write a CUDA version that makes use of shared memory,
* prefetching, coalesing and different granularities.
*
* Should be run on cuda1 machine with 1024 max threads per block.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#define THREADS_PER_BLOCK 1024
#define WARP 32
/* Function Declarations */
void generate_random(int random[], int num_elements);
__global__ void max_in_block(int random[], int num_elements);
__device__ void sequential(int random[], int sdata[], int n_els, int el_per_thread);
__device__ void check_warp(int sdata[], int num_elements);
/* Generate M random numbers from 1 to 100000*/
/* Fill random[] with num_elements values drawn uniformly from [0, 100000],
 * seeding the C PRNG from the current wall-clock time. */
void generate_random(int random[], int num_elements){
    time_t now;
    srand((unsigned)time(&now));
    for (int k = 0; k < num_elements; k++) {
        random[k] = (int)(((double)rand()/(double)RAND_MAX)*100000);
    }
}
/* global function called from host and executed on kernel
* Uses a tree-like structure that avoids branch divergence. Each thread
* sequentially searches over its own assigned elements_per_thread
* and puts the max of the subset in array stored in shared memory.
*/
__global__
// Per-block max reduction. Assumes the kernel is launched with
// blockDim.x == THREADS_PER_BLOCK, since sdata is sized and indexed by
// threadIdx.x. The block's result overwrites random[blockIdx.x], so the
// input array doubles as the per-block output buffer.
void max_in_block(int random[], int num_elements, int elements_per_thread) {
// Find max of subset and store in shared memory
__shared__ int sdata[THREADS_PER_BLOCK];
sequential(random, sdata, num_elements, elements_per_thread);
__syncthreads();
unsigned int tid = threadIdx.x;
unsigned int stride;
// Tree reduction on array in shared memory for max; the loop stops at
// WARP, leaving the last 32 candidates for check_warp() below.
for (stride = blockDim.x/2; stride >= WARP; stride >>= 1) {
if (tid < stride && tid + stride < blockDim.x) {
int current = sdata[tid + stride];
if (sdata[tid] < current) {
sdata[tid] = current;
}
}
__syncthreads();
}
__syncthreads();
// Thread 0 scans the surviving WARP entries and publishes the block max.
if (tid == 0) {
check_warp(sdata, num_elements);
random[blockIdx.x] = sdata[0];
}
}
/* Sequential searches through elements_per_thread for each thread
* A subset of the data is assigned to each thread to check sequentially
* Stores the result in shared memory array in threadIdx.x position
*/
/* Each thread scans its own contiguous chunk of elements_per_thread inputs
 * (clamped to num_elements) and writes the chunk maximum into
 * sdata[threadIdx.x]. Inputs are assumed non-negative (max starts at 0). */
__device__
void sequential(int random[], int sdata[], int num_elements, int elements_per_thread) {
    unsigned int begin = (threadIdx.x + blockIdx.x * blockDim.x) * elements_per_thread;
    int best = 0;
    for (int k = begin; k < begin + elements_per_thread && k < num_elements; k++) {
        if (random[k] > best) {
            best = random[k];
        }
    }
    sdata[threadIdx.x] = best;
}
/* Check warp size for the max thread and put in sdata[0] position
*/
/* Scan the WARP entries of sdata starting at this thread's tid (bounded by
 * num_elements) and leave the maximum in sdata[0]. Intended to be invoked by
 * thread 0 only (see max_in_block). Values are assumed non-negative. */
__device__
void check_warp(int sdata[], int num_elements) {
    int tid = threadIdx.x;
    int best = 0;
    for (int k = tid; k < tid + WARP && k < num_elements; k++) {
        if (sdata[k] > best) {
            best = sdata[k];
        }
    }
    sdata[0] = best;
}
/**************************************************************/
// Driver: finds the maximum of M random ints, reducing the WARP-aligned
// prefix on the GPU while the host handles the leftover tail, then merges
// the per-block results on the host.
int main(int argc, char*argv[]) {
    int* h_random;
    int* d_random;
    int i;
    int largest = 0;
    clock_t start, end;
    if (argc != 2) {
        printf("Invalid number of commands: usage ./cudashared M\n");
        exit(1);
    }
    // Generate array of random elements
    int num_elements = atoi(argv[1]);
    h_random = (int*)malloc(sizeof(int)*num_elements);
    if (h_random == NULL) { // added: malloc can fail for large M
        printf("malloc failure\n");
        exit(1);
    }
    generate_random(h_random, num_elements);
    start = clock();
    // Calculate grid dimensions. Integer ceiling division replaces the
    // original calls to ceil(), which depended on math.h that this file
    // never includes.
    int leftover = num_elements % WARP;
    int d_elements = num_elements - leftover;
    int elements_per_thread = THREADS_PER_BLOCK;
    int n_threads = (d_elements + elements_per_thread - 1) / elements_per_thread;
    int n_blocks = (n_threads + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    // Allocate space on device and copy over
    cudaError_t err = cudaMalloc((void**)&d_random, sizeof(int) * d_elements);
    if (err != cudaSuccess) {
        printf("cudaMalloc failure\n");
    }
    err = cudaMemcpy(d_random, h_random, sizeof(int) * d_elements, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        printf("cudaMemcpy failure\n");
    }
    // Execute kernel
    max_in_block<<<n_blocks, THREADS_PER_BLOCK>>>(d_random, d_elements, elements_per_thread);
    // While kernel is executing, find the max in leftover elements
    for (i = d_elements; i < num_elements; i++) {
        if (largest < h_random[i]) {
            largest = h_random[i];
        }
    }
    // Retrieve reduction results, only the first n_blocks elements
    // (blocking cudaMemcpy also synchronizes with the kernel)
    cudaMemcpy(h_random, d_random, sizeof(int) * n_blocks, cudaMemcpyDeviceToHost);
    // Check through n_blocks elements for the max
    for (i = 0; i < n_blocks; i++) {
        if (h_random[i] > largest) {
            largest = h_random[i];
        }
    }
    end = clock();
    printf("Time to find max %f\n", (double)(end-start)/CLOCKS_PER_SEC);
    printf("Largest: %d\n", largest);
    // Clean up resources
    cudaFree(d_random);
    free(h_random);
    return 0;
}
|
12,254 | #include "includes.h"
#define NX 100 // No. of cells in x direction
#define NY 100 // No. of cells in y direction
#define NZ 100 // No. of cells in z direction
#define N (NX*NY*NZ) // N = total number of cells in domain
#define L 100 // L = length of domain (m)
#define H 100 // H = Height of domain (m)
#define W 100 // W = Width of domain (m)
#define DX (L/NX) // DX, DY, DZ = grid spacing in x,y,z.
#define DY (H/NY)
#define DZ (W/NZ)
#define DT 0.001 // Time step (seconds)
#define R (1.0) // Dimensionless specific gas constant
#define GAMA (7.0/5.0) // Ratio of specific heats
#define CV (R/(GAMA-1.0)) // Cv
#define CP (CV + R) // Cp
//#define DEBUG_VALUE
// Host- and device-side field arrays for the flow solver (d_* are the
// device mirrors of the host pointers above them).
float *dens; //density
float *temperature; //temperature
float *xv; //velocity in x
float *yv; //velocity in y
float *zv; //velocity in z
float *press; //pressure
float *d_dens; //density (device)
float *d_temperature; //temperature (device)
float *d_xv; //velocity in x (device)
float *d_yv; //velocity in y (device)
float *d_zv; //velocity in z (device)
float *d_press; //pressure (device)
// NOTE(review): U/U_new/E/F/G and FF/FB/FR/FL/FU/FD are presumably the
// conserved-variable state, directional flux arrays, and per-face fluxes
// (Front/Back/Right/Left/Up/Down) of a finite-volume scheme -- confirm
// against the full solver source; nothing in this excerpt uses them.
float *U;
float *U_new;
float *E;
float *F;
float *G;
float *FF;
float *FB;
float *FR;
float *FL;
float *FU;
float *FD;
float *h_body; // host/device body (obstacle) mask -- TODO confirm semantics
float *d_body;
int total_cells = 0; // A counter for computed cells
// Placeholder time-step kernel: the body is empty and all parameters are
// unused in this excerpt.
__global__ void GPUTimeStepFunction(float *a, float *b, int *body){
} |
12,255 | #include <iostream>
#include <math.h>
#include <ctime>
#include <cmath>
#include <stdlib.h>
#include <fstream>
#include <vector>
#include <sstream>
#include <iomanip>
#include <algorithm>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#define PI 3.14159265358979323846
//this function initialises the kernel which constructs the mesh according to the independent path construction.
void mesh_generation(int b, int num_assets, double m, double X0[], double sigma[], double delta[], double asset_amount[], double* X, double strike, double r, double delta_t, curandState_t* States, curandState_t* statesi, int threads);
//this function initialises all the mesh weights kernels
void meshweights(double* W, double m, int b, double sigma[], double delta[], double r, double delta_t, double* X, int num_assets, double* weight_denominator);
//this function converts one dim vectors to one dim arrays
void one_dim_array(std::vector< double > &vals, double array[], int N);
//this function provides an indexing interface for 3-d matrices stored in 1 dim arrays
double* three_dim_index(double* matrix, int i, int j, int k, double m, int b, int num_assets);
//this function provides an indexing interface for 2-d matrices stored in 1 dim arrays
double* two_dim_index(double* vector, int i, int j, double m, int b);
//this function initialises the mesh estimator kernel
double MeshEstimator(double strike, double r, double delta_t, int b, double m, double* X, double* W, double* V, double asset_amount[], int num_assets);
//this function initialises the pathestimator kernel.
double PathEstimator(double strike, double r, double delta_t, int b, double m, double sigma[], double delta[], double X0[], double* X, double* weight_denominator, double* V, double asset_amount[], int num_assets, int Path_estimator_iterations, int iterator, int Final_iteration, curandState_t* States, curandState_t* states, int threads);
//this function prints results for 1-dim options to 'OneDimHighPayoff.txt'. These results can be plotted using the program 'OneDimPayoff.py'
void print_high_payoff(int b, double m, double* X, double* V, double asset_amount[], double* W );
//this function prints results for 1-dim options to 'stocksimulation.txt'. These results can be plotted using the program 'simulation.py'
void SimulationPaths(int b, double m, double* X, double* V, double asset_amount[], double* W, double x );
//this kernel initialises the random seeds on each thread
// Seed one curand state per thread so later kernels can each draw from an
// independent random stream.
__global__ void initialise(unsigned int seed, curandState_t* states) {
    int idx = blockDim.x*blockIdx.x + threadIdx.x;
    curand_init(seed, idx, 0, &states[idx]);
}
// Stochastic-mesh American option pricer driver: reads parameters from
// settings.txt (every second line holds a value), builds N meshes, computes
// high- and low-biased price estimates per mesh, and appends summary rows
// to results.txt / latexresults.txt.
int main(){
srand((unsigned)time(NULL));
//begin timer
clock_t begin=clock();
//read in parameters from setting.txt
std::ifstream setting( "settings.txt" );
std::string line;
std::vector<std::string> settings;
int linenumber=0;
while(std::getline( setting, line))
{
if(linenumber%2==1)
settings.push_back(line);
linenumber++;
}
setting.close();
int integer;
std::vector < double > X0V;
std::vector < double > deltaV;
std::vector <double> sigmaV;
std::vector <double> asset_amountV;
std::istringstream ss(settings[0]);
std::string token;
while(std::getline(ss, token, ','))
{
X0V.push_back(atof(token.c_str()));
}
double T = atof(settings[1].c_str());
double m = atof(settings[2].c_str());
double delta_t=T/m;
double v_0, V_0, vtotal_sum=0, Vtotal_sum=0;
double r= atof(settings[3].c_str());
std::istringstream ss2(settings[4]);
while(std::getline(ss2, token, ','))
{
deltaV.push_back(atof(token.c_str()));
}
std::istringstream ss3(settings[5]);
while(std::getline(ss3, token, ','))
{
sigmaV.push_back(atof(token.c_str()));
}
int Path_estimator_iterations=atof(settings[6].c_str());
double strike=atof(settings[7].c_str());
int b=atoi(settings[8].c_str());
int N=atoi(settings[9].c_str());
double quantile=atof(settings[10].c_str());
int num_assets=atof(settings[11].c_str());
std::istringstream ss4(settings[12]);
while(std::getline(ss4, token, ','))
{
asset_amountV.push_back(atof(token.c_str()));
}
// Sanity check: every per-asset parameter list must have num_assets entries.
if(X0V.size() != num_assets || sigmaV.size() != num_assets || deltaV.size() !=num_assets || asset_amountV.size() !=num_assets){
std::cout<<"Either the starting price, volatility, number of assets or dividend yield was not specified for all assets"<<std::endl;
exit (EXIT_FAILURE);
}
std::cout<<"The parameters of this simulation are:"<<std::endl;
//Print these values to screen
for(integer=0; integer<X0V.size(); integer++){
std::cout<<"Starting Price of Asset "<<integer<<" = "<<X0V[integer]<<std::endl;
}
std::cout<<"Time to Expiry="<<T<<"\n"<<"Number of Time Steps="<<m<<"\n"<<"Interest Rate="<<r<<std::endl;
for(integer=0; integer<sigmaV.size(); integer++){
std::cout<<"Volatility of Asset "<<integer<<" = "<<sigmaV[integer]<<std::endl;
}
for(integer=0; integer<deltaV.size(); integer++){
std::cout<<"Dividend Yield of Asset "<<integer<<" ="<<deltaV[integer]<<std::endl;
}
std::cout<<"Number of Iterations Over the Path Estimator="<<Path_estimator_iterations<<"\n"<<"Strike Price="<<strike<<"\n"<<"Nodes per Time Step="<<b<<"\n"<<"Number Mesh Replications="<<N<<"\n"<<"Number of Assets="<<num_assets<<std::endl;
for(integer=0; integer<asset_amountV.size(); integer++){
std::cout<<"Amount of Asset "<<integer<<" = "<<asset_amountV[integer]<<std::endl;
}
// CONVERT TO ARRAYS
double X0 [num_assets];
double delta [num_assets];
double sigma [num_assets];
double asset_amount [num_assets];
one_dim_array(X0V, X0, num_assets);
one_dim_array(deltaV, delta, num_assets);
one_dim_array(sigmaV, sigma, num_assets);
one_dim_array(asset_amountV, asset_amount, num_assets);
//V values from each iteration over meshes
std::vector< double > Vvector;
//v values from each iteration over meshes
std::vector< double > vvector;
//asset vector
std::vector< double > assets;
//1 d vector in Weightsgen for-loop
std:: vector<double> dim1temp;
std::vector<double> sortvector;
int m_int= (int)m;
//mesh matrix
double* X;
int X_dim = (m_int) * b * (num_assets);
X= new double[X_dim];
//weight matrix
double* W;
int W_dim = (m_int) * b * b;
W= new double[W_dim];
//high values matrix
double* V;
int V_dim = (m_int) * b;
V = new double[V_dim];
//weight denominator values
double* weight_denominator;
int denom_dim = (m_int-1) * b;
weight_denominator =new double[denom_dim];
//convert to log prices
for(int init=0; init<num_assets; init++){
X0[init]=log(X0[init]);
}
// Thread count must cover both the path-estimator iterations and the mesh
// width, since both kernels share the same RNG state array.
int threads=Path_estimator_iterations;
if(b>Path_estimator_iterations){
threads=b;
}
curandState_t* States;
States= new curandState_t[threads];
//for-loop over different meshes
for(int iterator=0; iterator<N; iterator++){
curandState_t* states;
cudaMalloc((void**) &states, threads * sizeof(curandState_t));
// Seed RNG states once, then restore the host-side snapshot on later
// iterations (the device context is reset at the end of each pass).
if(iterator==0){
dim3 gridDim((int)ceil(threads/512.0));
dim3 blockDim(512.0);
initialise<<<gridDim, blockDim>>>(time(0), states);
cudaDeviceSynchronize();
cudaMemcpy(States, states, sizeof(curandState_t)*threads, cudaMemcpyDeviceToHost);
}
else{cudaMemcpy(states, States, threads*sizeof(curandState_t), cudaMemcpyHostToDevice);}
mesh_generation(b, num_assets, m, X0, sigma, delta, asset_amount, X, strike, r, delta_t, States, states, threads);
meshweights(W, m, b, sigma, delta, r, delta_t, X, num_assets, weight_denominator);
double check=0;
//check all the weights from X0 are 1
for(int e=0; e<b; e++){
if(*three_dim_index(W, 0, e, 0, m, b, b)!=1){
std::cout<<"there is an error with the weights. check that W[0][k][0]'s =1"<<std::endl;
}
}
//check that the weights going into a node sum to 1
for(int q=1; q<m; q++){
for(int a=0; a<b; a++){
check=0;
for(int E=0; E<b; E++){
check+=*three_dim_index(W, (q), a, E, m, b, num_assets);
}
}
}
V_0=MeshEstimator(strike, r, delta_t, b, m, X, W, V, asset_amount, num_assets);
Vvector.push_back(V_0);//vector containing high bias option prices
Vtotal_sum+=V_0;
std::cout<<"High Bias price (V_0) for mesh iteration "<<iterator<<" is "<<V_0<<std::endl;
v_0=PathEstimator(strike, r, delta_t, b, m, sigma, delta, X0, X, weight_denominator, V, asset_amount, num_assets, Path_estimator_iterations, iterator, N, States, states, threads);
cudaDeviceReset();
vvector.push_back(v_0);
vtotal_sum+=v_0;
std::cout<<"Low Bias price (v_0) for mesh iteration "<<iterator<<" is "<<v_0<<std::endl;
}//this is the end of the loop over the whole process.
if(num_assets==1){
print_high_payoff(b, m, X, V, asset_amount,W);
SimulationPaths(b, m, X, V, asset_amount, W, X0[0] );
}
//Calculate V(N) and v(N)
V_0=(1/double(N))*Vtotal_sum;
v_0=(1/double(N))*vtotal_sum;
//calculate errors
double std_div_V=0, std_div_v=0, squaresumV=0, squaresumv=0, Verror=0, verror=0;
for(int h=0; h<N; h++){
squaresumV+=(Vvector[h]-V_0)*(Vvector[h]-V_0);
squaresumv+=(vvector[h]-v_0)*(vvector[h]-v_0);
}
std_div_V=sqrt((1/double(N))*squaresumV); //standard deviation of V
std_div_v=sqrt((1/double(N))*squaresumv); //standard deviation of v
double standardErrorV=std_div_V*(1/sqrt(double(N)));
double standardErrorv=std_div_v*(1/sqrt(double(N)));
Verror=quantile*standardErrorV;
verror=quantile*standardErrorv;
std::cout<<"V(N)_0="<<V_0<<std::endl;
std::cout<<"v(N)_0="<<v_0<<std::endl;
double pointEst=(V_0+v_0)/2;
double EstimatedError=((Verror+V_0)-(v_0-verror))/(2*pointEst);
clock_t end =clock();
double elapsedtime=double(end-begin) / CLOCKS_PER_SEC;
std::ofstream outFile("results.txt", std::ios_base::app | std::ios_base::out);
outFile << N <<"\t"<< b <<"\t"<< Path_estimator_iterations<<"\t"<<exp(X0[0])<<"\t" << v_0 <<"\t"<< standardErrorv <<"\t"<< V_0 <<"\t"<< standardErrorV <<"\t"<< v_0-verror<<"\t"<<Verror+V_0 <<"\t"<<pointEst<<"\t"<<EstimatedError<<"\t" <<elapsedtime<< std::endl;
outFile.close();
std::ofstream LoutFile("latexresults.txt", std::ios_base::app | std::ios_base::out);
LoutFile <<std::fixed<<std::setprecision(3) << N <<"&"<< b <<"&"<< Path_estimator_iterations<<"&"<<exp(X0[0])<<"&" << v_0 <<"&"<< standardErrorv <<"&"<< V_0 <<"&"<< standardErrorV <<"&"<< v_0-verror<<"&"<<Verror+V_0 <<"&"<<pointEst<<"&"<<EstimatedError<<"&" <<elapsedtime<< std::endl;
LoutFile.close(); // BUG FIX: the original closed outFile a second time here; LoutFile was only flushed by its destructor
delete[] X;
delete[] W;
delete[] V;
delete[] weight_denominator;
delete[] States;
return 0;
}
|
// Element-wise combine: v1[i] += v2[i] + 1.0f for i in [0, N).
// NOTE(review): the "+ 1.0f" term is preserved from the original code; it makes
// this more than a plain vector sum — confirm it is intentional.
__global__ void gSum_vec(float* v1, float* v2, int N){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // Guard the grid tail: the host launch rounds the grid up to a whole number
    // of 512-thread blocks, so threads with i >= N must not touch memory.
    // (The original had no guard and read/wrote out of bounds whenever N was
    // not a multiple of the block size.)
    if (i < N)
        v1[i] += v2[i] + 1.0f;
}
// Reports a failed CUDA call and aborts; without this, errors from the
// allocations/copies below would silently produce garbage in 'w'.
static void sumVecCheck(cudaError_t err, const char* what){
    if (err != cudaSuccess) {
        fprintf(stderr, "Sum_vec: %s failed: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// Host wrapper: copies v1 and v2 (N floats each) to the device, runs
// gSum_vec, and copies the result back into w. v1/v2/w are HOST pointers.
void Sum_vec(float* v1, float* v2, float *w, int N){
    float *u1, *u2;
    sumVecCheck(cudaMalloc((void **) &u1, N * sizeof(float)), "cudaMalloc u1");
    sumVecCheck(cudaMalloc((void **) &u2, N * sizeof(float)), "cudaMalloc u2");
    sumVecCheck(cudaMemcpy(u1, v1, N * sizeof(float), cudaMemcpyHostToDevice), "H2D v1");
    sumVecCheck(cudaMemcpy(u2, v2, N * sizeof(float), cudaMemcpyHostToDevice), "H2D v2");
    // Ceiling-divide N by the 512-thread block size to cover every element.
    gSum_vec<<<dim3(N/512 + ((N % 512) ? 1 : 0)), dim3(512)>>>(u1, u2, N);
    sumVecCheck(cudaGetLastError(), "gSum_vec launch");
    sumVecCheck(cudaDeviceSynchronize(), "gSum_vec execution");
    sumVecCheck(cudaMemcpy(w, u1, N * sizeof(float), cudaMemcpyDeviceToHost), "D2H w");
    cudaFree(u1);
    cudaFree(u2);
}
|
12,257 | // C++ Libraries.
#include <iostream>
// CUDA libraries.
#include <cuda.h>
#include <cuda_runtime.h>
#include "cuComplex.h"
// Define max number of concurrent threads
#define MAX_BLOCKSIZE 512
////////////////////////////////////////////////////////////////////////////////
/// 1. Strided Offset N Search ///
////////////////////////////////////////////////////////////////////////////////
/**
* Searches dev_Array for the given unique value by having each thread search 'offset' number of elements from
* the previous thread's search (strided offset).
* @param dev_Array Array to be searched.
* @param uniqueValue Unique value to be searched for.
* @param offset Number of elements each thread will search, and the separation between each thread's starting index.
* @param arraySize Number of elements in the given array to search.
* @param dev_foundIndex Output index of the found unique value.
*/
/**
 * Searches dev_Array for uniqueValue. Thread t scans the contiguous chunk
 * [t*offset, t*offset + offset); because the value is unique, at most one
 * thread ever writes *dev_foundIndex, so no atomics are needed.
 */
__global__ void dev_Strided_Offset_N_Search(int *dev_Array, int uniqueValue, int offset, int arraySize, int *dev_foundIndex){
    // Global thread id and the first element this thread owns.
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int base = tid * offset;
    // Walk this thread's chunk of 'offset' adjacent elements.
    for (int k = 0; k < offset; ++k){
        const int idx = base + k;
        // idx grows monotonically, so once past the end we can stop.
        if (idx >= arraySize)
            break;
        if (dev_Array[idx] == uniqueValue)
            *dev_foundIndex = idx;
    }
}
/**
* Wrapper function to call the CUDA kernel device function dev_Strided_Offset_N_Search.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param offset Number of elements each thread will search, and the separation between each thread's starting index.
* @param arraySize Number of elements in the given array to search.
* @return Return the index of the unique value.
*/
/**
 * Wrapper for dev_Strided_Offset_N_Search: times one kernel run with CUDA
 * events, prints "offset milliseconds", and returns the found index (-1 if
 * the value was not present).
 * @param dev_Array DEVICE pointer to the array to search.
 */
int Strided_Offset_N_Search(int *dev_Array, int uniqueValue, int offset, int arraySize){
    // Host-side result; -1 means "not found". Copied to the device so the
    // kernel only overwrites it on a hit.
    int foundIndex = -1;
    int *dev_foundIndex;
    cudaMalloc((void**)&dev_foundIndex, sizeof(int));
    cudaMemcpy(dev_foundIndex, &foundIndex, sizeof(int), cudaMemcpyHostToDevice);
    // One thread per 'offset'-sized chunk; +1 covers the remainder chunk.
    int numOfThreads = arraySize / offset + 1;
    // Event timers around the launch.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    dim3 blockSize(MAX_BLOCKSIZE, 1, 1);
    dim3 gridSize(numOfThreads / MAX_BLOCKSIZE + 1, 1);
    cudaEventRecord(start);
    dev_Strided_Offset_N_Search<<<gridSize, blockSize>>>(dev_Array, uniqueValue, offset, arraySize, dev_foundIndex);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << offset << " " << milliseconds << std::endl;
    cudaMemcpy(&foundIndex, dev_foundIndex, sizeof(int), cudaMemcpyDeviceToHost);
    // Release the device scratch int and both events: the original leaked all
    // three on every call, and this wrapper runs once per tested offset.
    cudaFree(dev_foundIndex);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return foundIndex;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// 2. Coalesced N Element Search ///
////////////////////////////////////////////////////////////////////////////////
/**
* Searches dev_Array for the given unique value by having each thread search adjacent to each other in
* a coalesced fashion, followed by searching adjacent to the other threads again but offset by the total
* number of threads.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param numToCheck Number of elements each thread will check.
* @param numOfThreads Total number of threads searching the array.
* @param arraySize Number of elements in the given array to search.
* @param dev_foundIndex Output index of the found unique value.
*/
/**
 * Searches dev_Array for uniqueValue with coalesced accesses: on pass N every
 * thread reads element numOfThreads*N + tid, so a warp always touches 32
 * adjacent words. The value is unique, so at most one thread writes the result.
 */
__global__ void dev_Coalesced_N_Search(int *dev_Array, int uniqueValue, int numToCheck, int numOfThreads, int arraySize, int *dev_foundIndex){
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    // Each pass advances all threads by the total thread count.
    for (int pass = 0; pass < numToCheck; ++pass){
        const int idx = numOfThreads * pass + tid;
        if (idx < arraySize && dev_Array[idx] == uniqueValue)
            *dev_foundIndex = idx;
    }
}
/**
* Wrapper function to call the CUDA kernel device function dev_Coalesced_N_Search.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param numToCheck Number of elements each thread will check.
* @param arraySize Number of elements in the given array to search.
* @return Return the index of the unique value.
*/
/**
 * Wrapper for dev_Coalesced_N_Search: times one kernel run with CUDA events,
 * prints "numToCheck milliseconds", and returns the found index (-1 if the
 * value was not present).
 * @param dev_Array DEVICE pointer to the array to search.
 */
int Coalesced_N_Search(int *dev_Array, int uniqueValue, int numToCheck, int arraySize) {
    // Host-side result; -1 means "not found".
    int foundIndex = -1;
    int *dev_foundIndex;
    cudaMalloc((void**)&dev_foundIndex, sizeof(int));
    cudaMemcpy(dev_foundIndex, &foundIndex, sizeof(int), cudaMemcpyHostToDevice);
    // Each thread checks numToCheck elements; +1 covers the remainder.
    int numOfThreads = arraySize / numToCheck + 1;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    dim3 blockSize(MAX_BLOCKSIZE, 1, 1);
    dim3 gridSize(numOfThreads / MAX_BLOCKSIZE + 1, 1);
    cudaEventRecord(start);
    dev_Coalesced_N_Search<<<gridSize, blockSize>>>(dev_Array, uniqueValue, numToCheck, numOfThreads, arraySize, dev_foundIndex);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << numToCheck << " " << milliseconds << std::endl;
    cudaMemcpy(&foundIndex, dev_foundIndex, sizeof(int), cudaMemcpyDeviceToHost);
    // Free device scratch and events (leaked by the original on every call).
    cudaFree(dev_foundIndex);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return foundIndex;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// 3. Unrolled Coalesced N Element Search ///
////////////////////////////////////////////////////////////////////////////////
/**
* Searches dev_Array for the given unique value by having each thread search adjacent to each other in
* a coalesced fashion, followed by searching adjacent to the other threads again but offset by the total
* number of threads. All for loops are unrolled with #pragma.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param numToCheck Number of elements each thread will check.
* @param numOfThreads Total number of threads searching the array.
* @param arraySize Number of elements in the given array to search.
* @param dev_foundIndex Output index of the found unique value.
*/
// Coalesced search identical to dev_Coalesced_N_Search, but with the loop
// annotated #pragma unroll for comparison in the timing experiment.
// NOTE(review): the trip count (numToCheck) is a runtime value, so the
// compiler cannot fully unroll this loop; the pragma is advisory here.
__global__ void dev_Unrolled_Coalesced_N_Search(int *dev_Array, int uniqueValue, int numToCheck, int numOfThreads, int arraySize, int *dev_foundIndex){
// Calculate thread id.
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Initialize currentValue and actualIndex to register memory.
int currentValue, actualIndex;
// Iterate through offset N number of adjacent elements.
#pragma unroll
for (int N = 0; N < numToCheck; N++){
// Calculate actual array index: all threads advance together by the total
// thread count, keeping each warp's reads adjacent (coalesced).
actualIndex = numOfThreads * N + tid;
// Ensure thread is not out of bounds.
if ( actualIndex < arraySize ) {
// Retrieve current value from global memory to be checked.
currentValue = dev_Array[actualIndex];
// Check if current value is the unique value.
if ( currentValue == uniqueValue ) {
// Unique value found, store its index in the foundIndex global memory
// variable (the value is unique, so at most one thread writes).
*dev_foundIndex = actualIndex;
}
}
}
}
/**
* Wrapper function to call the CUDA kernel device function dev_Coalesced_N_Search.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param numToCheck Number of elements each thread will check.
* @param arraySize Number of elements in the given array to search.
* @return Return the index of the unique value.
*/
/**
 * Wrapper for dev_Unrolled_Coalesced_N_Search: times one kernel run with CUDA
 * events, prints "numToCheck milliseconds", and returns the found index (-1 if
 * the value was not present).
 * @param dev_Array DEVICE pointer to the array to search.
 */
int Unrolled_Coalesced_N_Search(int *dev_Array, int uniqueValue, int numToCheck, int arraySize) {
    // Host-side result; -1 means "not found".
    int foundIndex = -1;
    int *dev_foundIndex;
    cudaMalloc((void**)&dev_foundIndex, sizeof(int));
    cudaMemcpy(dev_foundIndex, &foundIndex, sizeof(int), cudaMemcpyHostToDevice);
    // Each thread checks numToCheck elements; +1 covers the remainder.
    int numOfThreads = arraySize / numToCheck + 1;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    dim3 blockSize(MAX_BLOCKSIZE, 1, 1);
    dim3 gridSize(numOfThreads / MAX_BLOCKSIZE + 1, 1);
    cudaEventRecord(start);
    dev_Unrolled_Coalesced_N_Search<<<gridSize, blockSize>>>(dev_Array, uniqueValue, numToCheck, numOfThreads, arraySize, dev_foundIndex);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << numToCheck << " " << milliseconds << std::endl;
    cudaMemcpy(&foundIndex, dev_foundIndex, sizeof(int), cudaMemcpyDeviceToHost);
    // Free device scratch and events (leaked by the original on every call).
    cudaFree(dev_foundIndex);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return foundIndex;
}
//////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// 4. Full Coalesced Element Search ///
////////////////////////////////////////////////////////////////////////////////
/**
* Searches dev_Array for the given unique value by having each thread search adjacent to each other in
* a coalesced fashion.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param arraySize Number of elements in the given array to search.
* @param dev_foundIndex Output index of the found unique value.
*/
/**
 * Fully coalesced search: one thread per element, each thread checks exactly
 * one value. The value is unique, so at most one thread writes the result.
 */
__global__ void dev_Full_Coalesced_Search(int *dev_Array, int uniqueValue, int arraySize, int *dev_foundIndex){
    // Calculate thread id.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    // The wrapper rounds the grid up to a whole number of blocks, so tail
    // threads fall past the array end. The original had no guard and read
    // out of bounds for up to MAX_BLOCKSIZE elements.
    if ( tid >= arraySize )
        return;
    if ( dev_Array[tid] == uniqueValue ) {
        *dev_foundIndex = tid;
    }
}
/**
* Wrapper function to call the CUDA kernel device function dev_Coalesced_N_Search.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param arraySize Number of elements in the given array to search.
* @return Return the index of the unique value.
*/
/**
 * Wrapper for dev_Full_Coalesced_Search: times one kernel run with CUDA
 * events, prints "1 milliseconds" (each thread checks one element), and
 * returns the found index (-1 if the value was not present).
 * @param dev_Array DEVICE pointer to the array to search.
 */
int Full_Coalesced_Search(int *dev_Array, int uniqueValue, int arraySize) {
    // Host-side result; -1 means "not found".
    int foundIndex = -1;
    int *dev_foundIndex;
    cudaMalloc((void**)&dev_foundIndex, sizeof(int));
    cudaMemcpy(dev_foundIndex, &foundIndex, sizeof(int), cudaMemcpyHostToDevice);
    // One thread per element.
    int numOfThreads = arraySize;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    dim3 blockSize(MAX_BLOCKSIZE, 1, 1);
    dim3 gridSize(numOfThreads / MAX_BLOCKSIZE + 1, 1);
    cudaEventRecord(start);
    dev_Full_Coalesced_Search<<<gridSize, blockSize>>>(dev_Array, uniqueValue, arraySize, dev_foundIndex);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << "1" << " " << milliseconds << std::endl;
    cudaMemcpy(&foundIndex, dev_foundIndex, sizeof(int), cudaMemcpyDeviceToHost);
    // Free device scratch and events (leaked by the original on every call).
    cudaFree(dev_foundIndex);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return foundIndex;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Driver: builds a 500k-element array of zeros with one unique value planted
// at a fixed index, uploads it once, then benchmarks the four search kernels
// over a range of per-thread work sizes, printing "param milliseconds" lines.
int main(){
    // Define unique value to search for.
    const int uniqueValue = 5;
    // Define random index the unique value will be for constructing the searchable array.
    const int randomIndex = 68;
    // Define the size of our array.
    const int arraySize = 500000;
    // Heap-allocate and zero-initialize: the original declared ~2 MB as a
    // local array, which can overflow the default stack.
    int *testArray = new int[arraySize]();
    // Set random index to value to search for.
    testArray[randomIndex] = uniqueValue;
    // CUDA ALLOCATIONS //
    // The original also declared a bogus 'int d_foundIndex' (an int, not a
    // pointer) and cudaMalloc'd through its address; each wrapper allocates
    // its own result slot, so that variable is removed entirely.
    int *d_testArray;
    cudaMalloc((void**)&d_testArray, arraySize * sizeof(int));
    // Transfer test array from local host memory to device.
    cudaMemcpy(d_testArray, testArray, arraySize * sizeof(int), cudaMemcpyHostToDevice);
    // Find unique values //
    int foundIndex = -1;
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // 1. Each thread searches N adjacent elements, starting N elements after
    // the previous thread (strided offsets).
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << "-- Strided Offset N Search --" << std::endl;
    for (int offset = 1; offset < 65; offset += 1) {
        foundIndex = Strided_Offset_N_Search(d_testArray, uniqueValue, offset, arraySize);
    }
    std::cout << "Located unique value at index = " << foundIndex << std::endl;
    /////////////////////////////////////////////////////////////////////////////////////////////////////
    // 2. Coalesced search: threads read adjacent elements each pass, advancing
    // together by the total thread count.
    /////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << "-- Coalesced N Search --" << std::endl;
    for (int numToCheck = 1; numToCheck < 65; numToCheck += 1) {
        foundIndex = Coalesced_N_Search(d_testArray, uniqueValue, numToCheck, arraySize);
    }
    std::cout << "Located unique value at index = " << foundIndex << std::endl;
    /////////////////////////////////////////////////////////////////////////////////////////////////////
    // 3. Same coalesced search with the loop unrolled via #pragma.
    // BUG FIX: the original passed a hard-coded 12 instead of the loop
    // variable, so all 64 iterations timed the same configuration.
    /////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << "-- Unrolled Coalesced N Search --" << std::endl;
    for (int numToCheck = 1; numToCheck < 65; numToCheck += 1) {
        foundIndex = Unrolled_Coalesced_N_Search(d_testArray, uniqueValue, numToCheck, arraySize);
    }
    std::cout << "Located unique value at index = " << foundIndex << std::endl;
    ///////////////////////////////////////////////////////////////////////////////////////////////////
    // 4. Fully coalesced search: one element per thread, repeated 64 times so
    // the timing columns line up with the other experiments.
    ///////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << "-- Full Coalesced Search --" << std::endl;
    for (int rep = 1; rep < 65; rep += 1) {
        foundIndex = Full_Coalesced_Search(d_testArray, uniqueValue, arraySize);
    }
    std::cout << "Located unique value at index = " << foundIndex << std::endl;
    // Release resources (the original leaked d_testArray).
    cudaFree(d_testArray);
    delete[] testArray;
    return 0;
}
|
12,258 | #include "includes.h"
// Reduces each column of every (num_rows x num_cols) plane by summation:
// out[plane][col] = sum over rows of in[plane][row][col].
// One thread per (plane, col) pair; gid enumerates the pairs row-major, and
// the caller must launch at least num_planes * num_cols threads.
__global__ void ColumnReduceSimpleKernel(const float* in, float* out, int num_planes, int num_rows, int num_cols) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    const int elems_per_plane = num_rows * num_cols;
    const int plane = gid / num_cols;
    const int col = gid % num_cols;
    if (plane >= num_planes)
        return;
    // Accumulate the column top-down. The original seeded the sum by reading
    // rows 0 and 1 unconditionally, which reads out of bounds whenever
    // num_rows < 2; starting from 0.0f is safe for any num_rows >= 0 and adds
    // the rows in the same order, so float results are bit-identical.
    float sum = 0.0f;
    for (int row = 0; row < num_rows; ++row) {
        sum += in[plane * elems_per_plane + row * num_cols + col];
    }
    out[plane * num_cols + col] = sum;
}
12,259 | #include "includes.h"
// For each row i of the dancing-links matrix, accumulates into row_group[i]
// the 1-based column index (j + 1) of every set entry among the first
// vertex_num columns. Threads of the calling block stride over rows, so any
// blockDim.x works and each row is owned by exactly one thread.
// NOTE(review): row_group is accumulated with '+=' and never cleared here —
// this assumes the caller zero-initializes it first; confirm at call sites.
__device__ void get_vertex_row_group(int *row_group, bool *dl_matrix, const int vertex_num, const int total_dl_matrix_row_num, const int total_dl_matrix_col_num) {
// printf("%d %d\n", vertex_num, total_dl_matrix_row_num);
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
// cur_index walks the first vertex_num entries of row i in the flat matrix.
for (int j = 0, cur_index = i * total_dl_matrix_col_num; j < vertex_num;
j++, cur_index++) {
row_group[i] += (int)(dl_matrix[cur_index]) * (j + 1);
}
}
}
// Kernel overload of get_vertex_row_group for an int matrix: for each row,
// accumulates the 1-based column index of every nonzero entry among the first
// vertex_num columns into row_group[row].
// NOTE(review): accumulates with '+=' — assumes the caller zeroed row_group.
__global__ void get_vertex_row_group(int *row_group, int *dl_matrix, const int vertex_num, const int total_dl_matrix_row_num, const int total_dl_matrix_col_num) {
// Each thread owns rows threadIdx.x, threadIdx.x + blockDim.x, ...
for (int row = threadIdx.x; row < total_dl_matrix_row_num; row += blockDim.x) {
const int row_base = row * total_dl_matrix_col_num;
int acc = row_group[row];
for (int v = 0; v < vertex_num; ++v)
acc += dl_matrix[row_base + v] * (v + 1);
row_group[row] = acc;
}
}
12,260 | #include <iostream>
#include <vector>
#include <string>
#include <fstream>
using namespace std;
#define N 1024
#define BLOCK_SIZE 16
// Block-cooperative minimum: each thread folds a strided slice of A into a
// register, the block combines via atomicMin on shared memory, and thread 0
// writes the block's minimum to min[blockIdx.x].
// NOTE(review): the stride is blockDim.x (not gridDim.x * blockDim.x), so
// block 0 scans the entire array and min[0] is the global minimum; later
// blocks redundantly scan suffixes. Kept as-is because the host reads min[0].
__global__ void arrMin(int *min, int *A, int *size)
{
    __shared__ int sharedMin;
    int tid = threadIdx.x;
    if (tid == 0)
    {
        // BUG FIX: the original seeded with 1000, which silently reports 1000
        // for any array whose true minimum exceeds it. Seed with INT_MAX.
        sharedMin = 2147483647;
    }
    __syncthreads();
    int localMin = 2147483647;
    for (int i = blockIdx.x * blockDim.x + tid; i < *size; i += blockDim.x)
    {
        int val = A[i];
        if (localMin > val)
        {
            localMin = val;
        }
    }
    atomicMin(&sharedMin, localMin);
    __syncthreads();
    if (tid == 0)
    {
        // NOTE(review): writes one slot per block — the host must allocate
        // gridDim.x ints for 'min', not a single int.
        min[blockIdx.x] = sharedMin;
    }
}
// Writes B[i] = A[i] mod 10 (the last decimal digit) for every valid index.
__global__ void makeB(int *A, int *B, int size)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return; // grid tail guard
    B[idx] = A[idx] % 10;
}
// Reads comma-separated integers from inp.txt, computes their minimum and the
// array of last digits on the GPU, and writes the results to q1a.txt/q1b.txt.
int main()
{
    vector<int> data;
    int size = 0; // byte count of the input array (stays 0 if the file fails to open)
    ifstream infile;
    infile.open("inp.txt");
    // Read file input and push to vector
    if (infile.is_open())
    {
        while (infile.good())
        {
            char cNum[10];
            // BUG FIX: the original asked getline for up to 256 chars into a
            // 10-byte buffer — a stack buffer overflow on any long token.
            infile.getline(cNum, sizeof(cNum), ',');
            data.push_back(atoi(cNum));
        }
        size = data.size() * sizeof(int);
        infile.close();
    }
    else
    {
        cout << "Error opening file";
    }
    const int count = (int)data.size();
    const int numBlocks = count / BLOCK_SIZE + 1;
    // Host buffers. (The original also malloc'd an unused 'A'; dropped.)
    int *minPerBlock = (int *)malloc(numBlocks * sizeof(int));
    int *B = (int *)malloc(size);
    // Device buffers. BUG FIX: arrMin writes min[blockIdx.x] for every block,
    // but the original allocated d_min as a single int, so every block past
    // the first wrote out of bounds. Allocate one slot per block.
    int *d_min, *d_A, *d_B, *d_size;
    cudaMalloc((void **)&d_min, numBlocks * sizeof(int));
    cudaMalloc((void **)&d_A, size);
    cudaMalloc((void **)&d_B, size);
    cudaMalloc((void **)&d_size, sizeof(int));
    // Copy inputs to device
    cudaMemcpy(d_A, data.data(), size, cudaMemcpyHostToDevice);
    int temp = count;
    cudaMemcpy(d_size, &temp, sizeof(int), cudaMemcpyHostToDevice);
    arrMin<<<numBlocks, BLOCK_SIZE>>>(d_min, d_A, d_size);
    makeB<<<numBlocks, BLOCK_SIZE>>>(d_A, d_B, temp);
    cudaDeviceSynchronize();
    // Copy results back and reduce the per-block minima on the host.
    cudaMemcpy(minPerBlock, d_min, numBlocks * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(B, d_B, size, cudaMemcpyDeviceToHost);
    int theMin = minPerBlock[0];
    for (int i = 1; i < numBlocks; ++i)
        if (minPerBlock[i] < theMin)
            theMin = minPerBlock[i];
    ofstream outfile;
    outfile.open("q1a.txt");
    if (outfile.is_open())
    {
        outfile << "The min is " << theMin << '\n';
        outfile.close();
    }
    else
    {
        cout << "Error opening file";
    }
    outfile.open("q1b.txt");
    if (outfile.is_open())
    {
        for (int i = 0; i < count; ++i)
        {
            outfile << B[i] << ' ';
        }
        outfile.close();
    }
    else
    {
        cout << "Error opening file";
    }
    cout << '\n';
    // Cleanup — the original leaked B, d_B and d_size.
    free(minPerBlock);
    free(B);
    cudaFree(d_min);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_size);
    return 0;
}
|
12,261 | /* Derived from MLIFE exercise */
/* To build: nvcc -o main main.cu */
/* To run with a grid of 64x128: ./main 64 128 */
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define BORN 1
#define DIES 0
#define id(r,c) ((r)*Ncolumns+(c))
/* build board */
/* Allocates both HOST boards (zeroed via calloc), forces every border cell to
 * DIES on both, and seeds the interior of *board 50/50 alive/dead with a fixed
 * drand48 seed so runs are reproducible. The DEVICE boards c_board/c_newboard
 * are exercise placeholders: EX01/EX02 below are intentionally unimplemented,
 * so no device allocation or upload happens here yet. */
void init(int Nrows, int Ncolumns, int **board, int **newboard, int **c_board, int **c_newboard){
int r,c,n;
*board = (int*) calloc(Nrows*Ncolumns, sizeof(int));
*newboard = (int*) calloc(Nrows*Ncolumns, sizeof(int));
/* death at the border */
for(r=0;r<Nrows;++r){
(*board)[id(r,0)] = DIES;
(*board)[id(r,Ncolumns-1)] = DIES;
(*newboard)[id(r,0)] = DIES;
(*newboard)[id(r,Ncolumns-1)] = DIES;
}
for(c=0;c<Ncolumns;++c){
(*board)[id(0,c)] = DIES;
(*board)[id(Nrows-1,c)] = DIES;
(*newboard)[id(0,c)] = DIES;
(*newboard)[id(Nrows-1,c)] = DIES;
}
/* random life: each interior cell is BORN with probability 0.5 */
srand48(12345);
for(r=1;r<Nrows-1;++r){
for(c=1;c<Ncolumns-1;++c){
double rn = drand48();
(*board)[id(r,c)] = BORN*(rn<0.5) + DIES*(rn>=0.5);
}
}
/* EX01: allocate 1D DEVICE arrays with Nrows*Ncolumns ints for c_board and c_newboard here using cudaMalloc */
/* EX02a: copy board state from HOST board to DEVICE c_board using cudaMemcpy */
/* EX02b: copy newboard state from HOST newboard to DEVICE c_newboard using cudaMemcpy */
}
/* Releases the two HOST boards allocated by init(). Any DEVICE buffers
 * created for the exercises must be freed separately with cudaFree. */
void destroy(int *board, int *newboard){
free(newboard);
free(board);
}
/* EX03: convert this to a CUDA kernel */
/* EX03a: annotate to indicate a kernel */
/* One Conway life step over the interior cells: for each (r,c), s is the
 * count of the 8 live neighbours, and the branchless rule expression encodes
 * s<2 dies, s==2 keeps its state, s==3 is born, s>3 dies. Border cells are
 * never written (they stay DIES). Currently a HOST function operating on host
 * pointers; converting it to a CUDA kernel is exercise EX03. */
/* EX03: convert this to a CUDA kernel */
/* EX03a: annotate to indicate a kernel */
void update(int Nrows, int Ncolumns, int *board, int *newboard){
/* EX03b: replace double loop with 2D thread array */
for(int r=1;r<Nrows-1;++r)
for(int c=1;c<Ncolumns-1;++c){
/* EX03c: convert thread indices and block indices into r,c */
/* EX03d: need to make sure indices r,c are in range 1<=r<Nrows-1, 1<=c<Ncolumns-1 */
int s =
board[id(r-1,c-1)]+board[id(r-1,c-0)]+board[id(r-1,c+1)]+
board[id(r+0,c-1)]+ board[id(r+0,c+1)]+
board[id(r+1,c-1)]+board[id(r+1,c-0)]+board[id(r+1,c+1)];
newboard[id(r,c)]
= (s<2)*DIES + (s==2)*board[id(r,c)] + (s==3)*BORN + (s>3)*DIES;
}
}
/* EX04: add a copy from DEVICE to HOST using cudaMemcpy */
void print(int Nrows, int Ncolumns, int *board, int *c_board){
/* EX04: put cudaMemcpy here to copy from DEVICE c_board to HOST board*/
/* No need tochange this bit */
system("clear");
for(int r=0;r<Nrows;++r){
for(int c=0;c<Ncolumns;++c){
if(board[id(r,c)]==BORN) printf("*");
else printf(" ");
}
printf("\n");
}
}
/* Driver: parses the grid size from argv, initializes the boards, then runs
 * 100 double-buffered life iterations (board -> newboard -> board), printing
 * after each pair.
 * NOTE(review): in this exercise skeleton, update() is called with
 * c_board/c_newboard, which are uninitialized until EX01 allocates them on the
 * device — EX05b/EX05c replace these calls with kernel launches. bDim/gDim are
 * likewise left unset for EX05a. Do not run this as-is. */
int main(int argc, char **argv){
if(argc<3){
printf("usage: main [Nrows] [Ncolumns]\n");
exit(1);
}
/* initialize board */
int Nrows = atoi(argv[1]);
int Ncolumns = atoi(argv[2]);
int *board, *newboard;
int *c_board, *c_newboard;
init(Nrows, Ncolumns, &board, &newboard, &c_board, &c_newboard);
/* run some iterations */
int Nit = 100;
for(int it=0;it<Nit;++it){
/* EX05a: define thread-block size and grid size here using 16x16 thread-blocks*/
int T = 16;
dim3 bDim;
dim3 gDim;
/* EX05b: add kernel launch syntax here */
update(Nrows, Ncolumns, c_board, c_newboard);
/* EX05c: add kernel launch syntax here */
update(Nrows, Ncolumns, c_newboard, c_board);
print(Nrows, Ncolumns, board, c_board);
}
destroy(board, newboard);
exit(0);
return 0;
}
|
12,262 | #include "includes.h"
// For each of 'length' items, looks up the offsets slot selected by
// prefixed_index (1-based exclusive prefix, hence the -1) and emits:
//   starts_out[t] = offsets_in[p]
//   missing item (index_in[t] < 0): mask_out[t] = -1, empty range (stop == start)
//   present item:                   mask_out[t] = t,  stop = offsets_in[p + 1]
__global__ void awkward_Content_getitem_next_missing_jagged_getmaskstartstop_kernel( int64_t* prefixed_index, int64_t* index_in, int64_t* offsets_in, int64_t* mask_out, int64_t* starts_out, int64_t* stops_out, int64_t length) {
// Linearize the (possibly 3-D) grid of blocks into one thread id.
int64_t flat_block =
blockIdx.x + gridDim.x * (blockIdx.y + (int64_t)gridDim.y * blockIdx.z);
int64_t tid = flat_block * blockDim.x + threadIdx.x;
if (tid >= length)
return;
const int64_t p = prefixed_index[tid] - 1;
starts_out[tid] = offsets_in[p];
const bool missing = (index_in[tid] < 0);
mask_out[tid] = missing ? -1 : tid;
stops_out[tid] = missing ? offsets_in[p] : offsets_in[p + 1];
}
12,263 | #include "dense.cuh"
// Threads per block used by every kernel launch in this file.
#define BLOCK_SIZE 1024
// Ceiling division (assumes A + B - 1 does not overflow int).
#define DIVIDE(A,B) ((A+B-1)/B)
// Number of BLOCK_SIZE-thread blocks needed to cover N elements.
#define BLOCKS(N) DIVIDE(N,BLOCK_SIZE)
/* PARALLEL FUNCTION:
* Objective: Applies the activation function to a set of neurons
* Runtime: O(n) parallel
* Arguments:
* matrix: the neurons the activation function to apply to
* size: the number of neurons in matrix
* activation_func: the activation function
* Requirements: activation_func MUST be a __device__ function
* Exceptions: None. The case where activation_func is not a __device__ function is UNHANDLED.
*/
/* PARALLEL FUNCTION:
 * Applies activation_func in place to each of 'size' neurons in 'matrix'.
 * O(n) parallel; activation_func MUST be a __device__ function pointer
 * (the non-device case is unhandled, as in the original).
 */
__global__ void activate(float* matrix, unsigned int size, float(*activation_func)(float))
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return; // grid tail guard
matrix[idx] = activation_func(matrix[idx]);
}
/* PARALLEL FUNCTION
* Objective: Apply the derivative of the activation function using chain rule to the gradient of the layer
* Runtime: O(n) parallel
*
*/
/* PARALLEL FUNCTION:
 * Chain rule step: scales each gradient entry by the activation derivative
 * evaluated at the corresponding layer output. O(n) parallel.
 * activation_deriv MUST be a __device__ function pointer.
 */
__global__ void apply_activation_deriv(float* grads, float* outputs, unsigned int size, float(*activation_deriv)(float))
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return; // grid tail guard
grads[idx] *= activation_deriv(outputs[idx]);
}
// Constructs a fully-connected layer of shape (output_shape x input_shape):
// weights are randomly initialized by the 2-arg Matrix constructor, biases and
// all accumulators start at zero.
Dense::Dense(int input_shape, int output_shape, Activation_Function activation_func) : Layer(input_shape, output_shape)
{
this->activation_func = activation_func;
bias = Matrix(output_shape, 0.0f);               // per-output bias, zeroed
bias_delta = Matrix(output_shape, 0.0f);         // accumulated bias updates
weight = Matrix(output_shape, input_shape);      // weight matrix (no fill value — presumably randomized by Matrix; confirm)
weight_delta = Matrix(output_shape, input_shape, 0.0f); // accumulated weight updates
p_neurons = Matrix(input_shape, 0.0f);           // cached previous-layer activations (for backprop)
neurons = Matrix(output_shape, 0.0f);            // this layer's activations
stored_grad = Matrix(input_shape, 0.0f);         // gradient passed back to the previous layer
}
/* Objective: Update the neurons of this layer by feeding forward the neurons of the previous layer
* Runtime: O(n^1.5) average
* Arguments:
* prev_neurons (Matrix const reference), the matrix of the neuron of the previous layer
* Requirements: prev_neurons MUST be GPU enabled matrix
* The dimensions MUST match
* Exceptions:
* GPU is not enabled for previous neurons
* The dimensions of the matrices does not matched
*/
/* Stateful forward pass: caches the previous layer's activations (needed by
 * backpropogate), computes neurons = W * prev + b, then applies the activation
 * in place on the GPU when one is configured.
 * Throws (via 'yeet') if prev_neurons is not GPU-resident or its length is not
 * input_shape. */
void Dense::feedforward_update(const Matrix& prev_neurons)
{
if (!prev_neurons.is_gpu())
yeet "GPU is not enabled for previous neurons";
if (prev_neurons.get_dim1() != input_shape)
yeet "The dimensions does not match";
p_neurons = prev_neurons; // cached for the weight-gradient outer product in backprop
neurons = weight * prev_neurons + bias;
if (activation_func.func) // if the activation function exists, apply the activation function.
activate<<<BLOCKS(output_shape), BLOCK_SIZE>>>(neurons.get_matrix(), output_shape, activation_func.func);
}
/* Objective: Feedforward the neurons of the previous layer using the weights and biases of this layer and returning the output
* Runtime: O(n^1.5) average
* Arguments:
* prev_neurons (Matrix const reference), the matrix of the neuron of the previous layer
* Requirements: prev_neurons MUST be GPU enabled matrix
* The dimensions MUST match
* Exceptions:
* GPU is not enabled for previous neurons
* The dimensions of the matrices does not matched
*/
/* Stateless forward pass: returns activation(W * prev + b) without touching
 * layer state. Throws (via 'yeet') if prev_neurons is not GPU-resident or its
 * length is not input_shape. */
Matrix Dense::feedforward(const Matrix &prev_neurons) const
{
if (!prev_neurons.is_gpu())
yeet "GPU is not enabled for previous neurons";
if (prev_neurons.get_dim1() != input_shape)
yeet "The dimensions does not match";
// Affine transform into a local result.
Matrix ans (output_shape, 0.0f);
ans = weight * prev_neurons + bias;
// BUG FIX: the original launched activate() on this->neurons, which both
// returned an UN-activated 'ans' and mutated layer state from a const
// member function. Activate the local result instead.
if (activation_func.func)
activate<<<BLOCKS(output_shape), BLOCK_SIZE>>>(ans.get_matrix(), output_shape, activation_func.func);
return ans;
}
/* Objective: Using the previous gradient (from the subsequent layer), calculate the gradient of this layer and store it in stored_grad.
* Update the values of the weight and bias delta based on this gradient.
* Arguments:
* old_grad (Matrix const reference): the gradient of the subsequent layer
* Requirements:
* old_grad MUST be GPU enabled matrix
* The dimensions of the matrices must match old_grad should be (output_shape x 1)
*/
/* Backward pass: scales the incoming gradient by the activation derivative at
 * this layer's outputs (chain rule, done in place on a copy), accumulates the
 * bias delta (+= grad) and weight delta (+= grad * prev_activations^T), and
 * returns W^T * grad — the gradient for the previous layer.
 * Throws (via 'yeet') if old_grad is not GPU-resident or its length is not
 * output_shape (old_grad must be output_shape x 1). */
Matrix Dense::backpropogate(const Matrix &old_grad)
{
if (!old_grad.is_gpu())
yeet "GPU is not enabled for previous neurons";
if (old_grad.get_dim1() != output_shape)
yeet "The dimensions does not match";
// first use the derivative of the activation function
// NOTE(review): launched unconditionally — assumes activation_func.deriv is
// always a valid device function, unlike .func which is null-checked in the
// forward pass; confirm.
Matrix old_grad_cpy = old_grad;
apply_activation_deriv<<<BLOCKS(output_shape), BLOCK_SIZE>>>(old_grad_cpy.get_matrix(), neurons.get_matrix(), output_shape, activation_func.deriv);
// update the weight and bias deltas
bias_delta += old_grad_cpy;
weight_delta += old_grad_cpy * p_neurons.T();
return weight.T() * old_grad_cpy;
}
// Convenience wrapper: backpropagate and cache the gradient destined for the
// preceding layer in stored_grad.
void Dense::backpropogate_update(const Matrix &old_grad)
{
stored_grad = backpropogate(old_grad);
}
/* Objective: Update the weights and biases with the calculated weight and bias deltas and the given learning rate.
* Arguments:
* learning rate: the learning rate of the neural network
* momentum: the batch momentum after gradient descent. defaults to 0.0 meaning the deltas are rezeroed. Momentum must be between 0 and 1.
* Runtime: O(n) parallel
* Exceptions: None
*/
void Dense::gradient_update(float learning_rate, float momentum)
{
    if (momentum > 1.0f)
        yeet "Momentum cannot be greater than one";
    if (momentum < 0.0f)
        yeet "Momentum cannot be negative";
    // Apply the accumulated deltas to the parameters.
    weight += weight_delta * learning_rate;
    bias += bias_delta * learning_rate;
    // BUG FIX: the original scaled/zeroed `weight` and `bias` themselves
    // (destroying the trained parameters) and had the two branches swapped.
    // Per the documented contract, momentum == 0 rezeroes the DELTAS, while a
    // nonzero momentum scales the deltas so they carry over to the next batch.
    if (momentum == 0.0f)
    {
        weight_delta.rezero();
        bias_delta.rezero();
    }
    else
    {
        weight_delta *= momentum;
        bias_delta *= momentum;
    }
}
|
12,264 | #define d_vx(z,x) d_vx[(x)*(nz)+(z)]
#define d_vy(z,x) d_vy[(x)*(nz)+(z)]
#define d_vz(z,x) d_vz[(x)*(nz)+(z)]
#define d_szz(z,x) d_szz[(x)*(nz)+(z)] // Pressure
#define d_mem_dvz_dz(z,x) d_mem_dvz_dz[(x)*(nz)+(z)]
#define d_mem_dvx_dx(z,x) d_mem_dvx_dx[(x)*(nz)+(z)]
#define d_Lambda(z,x) d_Lambda[(x)*(nz)+(z)]
#define d_Den(z,x) d_Den[(x)*(nz)+(z)]
#define d_mat_dvz_dz(z,x) d_mat_dvz_dz[(x)*(nz)+(z)]
#define d_mat_dvx_dx(z,x) d_mat_dvx_dx[(x)*(nz)+(z)]
__global__ void ac_pressure(float *d_vz, float *d_vx, float *d_szz, \
float *d_mem_dvz_dz, float *d_mem_dvx_dx, float *d_Lambda, \
float *d_Den, float *d_K_z_half, float *d_a_z_half, float *d_b_z_half, \
float *d_K_x, float *d_a_x, float *d_b_x, \
int nz, int nx, float dt, float dz, float dx, int nPml, int nPad, bool isFor, \
float *d_mat_dvz_dz, float *d_mat_dvx_dx){
// 2-D acoustic pressure update (column-major storage, z is the fast axis --
// see the d_*(z,x) macros above). One thread per (z, x) grid point.
// Forward mode (isFor): 4th-order staggered-grid divergence of velocity with
// CPML memory-variable damping inside the absorbing layers, then
// d_szz += lambda * div(v) * dt.
// Backward mode: same stencil restricted to the interior (PML excluded); the
// raw derivatives are stored into d_mat_dvz_dz / d_mat_dvx_dx and the
// pressure update is reversed (-=) to step the wavefield back in time.
int gidz = blockIdx.x*blockDim.x + threadIdx.x;
int gidx = blockIdx.y*blockDim.y + threadIdx.y;
float dvz_dz = 0.0;
float dvx_dx = 0.0;
// 4th-order staggered-grid finite-difference coefficients.
float c1 = 9.0/8.0;
float c2 = 1.0/24.0;
if (isFor) {
// Skip the 2-point stencil halo; nPad rows at the bottom are padding.
if (gidz>=2 && gidz<=nz-nPad-3 && gidx>=2 && gidx<=nx-3) {
dvz_dz = (c1*(d_vz(gidz+1,gidx)-d_vz(gidz,gidx)) - c2*(d_vz(gidz+2,gidx)-d_vz(gidz-1,gidx)))/dz;
dvx_dx = (c1*(d_vx(gidz,gidx)-d_vx(gidz,gidx-1)) - c2*(d_vx(gidz,gidx+1)-d_vx(gidz,gidx-2)))/dx;
// CPML in z: update memory variable, damp derivative in top/bottom layers.
if(gidz<=nPml || (gidz>=nz-nPml-nPad-1)){
d_mem_dvz_dz(gidz,gidx) = d_b_z_half[gidz]*d_mem_dvz_dz(gidz,gidx) + d_a_z_half[gidz]*dvz_dz;
dvz_dz = dvz_dz / d_K_z_half[gidz] + d_mem_dvz_dz(gidz,gidx);
}
// CPML in x: same treatment in the left/right absorbing layers.
if(gidx<=nPml || gidx>=nx-nPml-1){
d_mem_dvx_dx(gidz,gidx) = d_b_x[gidx]*d_mem_dvx_dx(gidz,gidx) + d_a_x[gidx]*dvx_dx;
dvx_dx = dvx_dx / d_K_x[gidx] + d_mem_dvx_dx(gidz,gidx);
}
// Pressure update: szz += lambda * div(v) * dt.
d_szz(gidz,gidx) += d_Lambda(gidz,gidx) * (dvz_dz + dvx_dx) * dt;
}
else {
return;
}
}
else {
// extension for derivative at the boundaries
// Interior region only: the PML band (nPml) is excluded on every side.
if (gidz>=nPml+2 && gidz<=nz-nPad-3-nPml && gidx>=nPml+2 && gidx<=nx-3-nPml) {
// if (gidz>=nPml-2 && gidz<=nz-nPad+1-nPml && gidx>=nPml-2 && gidx<=nx+1-nPml) {
dvz_dz = (c1*(d_vz(gidz+1,gidx)-d_vz(gidz,gidx)) - c2*(d_vz(gidz+2,gidx)-d_vz(gidz-1,gidx)))/dz;
dvx_dx = (c1*(d_vx(gidz,gidx)-d_vx(gidz,gidx-1)) - c2*(d_vx(gidz,gidx+1)-d_vx(gidz,gidx-2)))/dx;
// Stash the raw derivatives for the adjoint/gradient computation.
d_mat_dvz_dz(gidz, gidx) = dvz_dz;
d_mat_dvx_dx(gidz, gidx) = dvx_dx;
// Reverse-time pressure update.
d_szz(gidz,gidx) -= d_Lambda(gidz,gidx) * (dvz_dz + dvx_dx) * dt;
// // compute the derivative at the boundaries
// if (gidz == nPml+2) {
// d_mat_dvz_dz(gidz-1, gidx) = 0.5 * (d_vz(gidz,gidx)-d_vz(gidz-1,gidx))/dz;
// d_mat_dvz_dz(gidz-2, gidx) = 0.5 * (d_vz(gidz-1,gidx)-d_vz(gidz-2,gidx))/dz;
// }
// if (gidz == nz-nPad-3-nPml) {
// d_mat_dvz_dz(gidz+1, gidx) = 0.5 * (d_vz(gidz+2,gidx)-d_vz(gidz+1,gidx))/dz;
// d_mat_dvz_dz(gidz+2, gidx) = 0.5 * (d_vz(gidz+2,gidx)-d_vz(gidz+1,gidx))/dz;
// }
// if (gidx == nPml+2) {
// d_mat_dvx_dx(gidz, gidx-1) = 0.5 * (d_vx(gidz,gidx-1)-d_vx(gidz,gidx-2))/dx;
// d_mat_dvx_dx(gidz, gidx-2) = 0.5 * (d_vx(gidz,gidx-1)-d_vx(gidz,gidx-2))/dx;
// }
// if (gidx == nx-3-nPml) {
// d_mat_dvx_dx(gidz, gidx+1) = 0.5 * (d_vx(gidz,gidx+1)-d_vx(gidz,gidx))/dx;
// d_mat_dvx_dx(gidz, gidx+2) = 0.5 * (d_vx(gidz,gidx+2)-d_vx(gidz,gidx+1))/dx;
// }
}
else {
return;
}
}
}
|
12,265 | #include<stdio.h>
#include<cuda.h>
#include<cuda_runtime_api.h>
int main()
{
    // Emit the greeting (no trailing newline, matching the original output).
    fputs("hello", stdout);
    return 0;
}
|
12,266 | #include <iostream>
#include <stdio.h>
#include <iomanip>
#include <cuda_runtime.h>
using namespace std;
void MatrixRandBin(float *mat,int rows,int cols){
    // Fill the rows x cols row-major matrix with random +/-1 entries:
    // each cell is +1 when a uniform draw exceeds 0.5, otherwise -1.
    int total = rows * cols;
    for (int idx = 0; idx < total; idx++) {
        float draw = (float)rand()/RAND_MAX;
        mat[idx] = (draw > 0.5) ? 1.0f : -1.0f;
    }
}
void MatrixPrint(float *mat,int rows,int cols){
    // Print a rows x cols row-major float matrix, width-2 cells, one row per
    // line, followed by a blank line.
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++)
            cout << setw(2) << mat[r*cols+c] << " ";
        cout << endl;
    }
    cout << endl;
}
void MatrixPrintD(int *mat,int rows,int cols){
    // Integer twin of MatrixPrint: dump a rows x cols row-major int matrix,
    // width-2 cells, one row per line, followed by a blank line.
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++)
            cout << setw(2) << mat[r*cols+c] << " ";
        cout << endl;
    }
    cout << endl;
}
float MatrixCompare(float *a,float *b,int rows,int cols){
    // Total absolute element-wise difference between two rows x cols matrices
    // (0 means identical).
    float total = 0;
    int n = rows * cols;
    for (int idx = 0; idx < n; idx++)
        total += abs(a[idx]-b[idx]);
    return total;
}
void MatrixMul_host(float *a,int a_rows,int a_cols,float *b,int b_rows,int b_cols,float *c)
{
    // CPU reference: naive triple loop computing
    // c (a_rows x b_cols) = a (a_rows x a_cols) * b (b_rows x b_cols),
    // all matrices row-major.
    for (int row = 0; row < a_rows; row++) {
        for (int col = 0; col < b_cols; col++) {
            float acc = 0;
            for (int k = 0; k < b_rows; k++)
                acc += a[row * a_cols + k] * b[k * b_cols + col];
            c[row * b_cols + col] = acc;
        }
    }
}
//horizontal
// Pack each ROW of the +/-1 matrix `a` into integers: every run of BINSIZE
// consecutive entries becomes one int, a +1.0f entry mapping to a 1 bit with
// the earliest entry of the run in the most significant of the BINSIZE bits.
// Grid-stride over a_rows * MaxBS packing tasks; pitches are in elements.
__global__ void AMatrix2Bin(float *a,int *a_bin,int a_rows,int pitch_a,int pitch_a_bin,int MaxBS,int BINSIZE){
int tix=threadIdx.x;
// int tiy=threadIdx.y;
int bix=blockIdx.x;
// int biy=blockIdx.y;
int bdx=blockDim.x;
// int bdy=blockDim.y;
int gdx=gridDim.x;
// int gdy=gridDim.y;
// One logical task per packed int.
int maxThreads=MaxBS*a_rows;
for(int id = bix*bdx+tix; id < maxThreads; id+=gdx*bdx) {
int rid=id/MaxBS; // source row
int cid=id%MaxBS; // packed-int column within the row
int Integer=0;
int base=1;
// Walk the run from its last element back to its first so the first
// element lands in the highest-order bit.
for (int i=0;i<BINSIZE;i++){
if (a[rid*pitch_a+(cid+1)*BINSIZE-1-i]==1.f){
Integer+=base;
}
base=base<<1;
}
a_bin[rid*pitch_a_bin+cid]=Integer;
}
}
//vetical
// Column-wise twin of AMatrix2Bin: pack each COLUMN of the +/-1 matrix `b`
// into integers, BINSIZE vertical entries per int, earliest entry in the
// most significant bit. Grid-stride over b_cols * MaxBS packing tasks.
__global__ void BMatrix2Bin(float *b,int *b_bin,int b_cols,int pitch_b,int pitch_b_bin,int MaxBS,int BINSIZE){
int tix=threadIdx.x;
// int tiy=threadIdx.y;
int bix=blockIdx.x;
// int biy=blockIdx.y;
int bdx=blockDim.x;
// int bdy=blockDim.y;
int gdx=gridDim.x;
// int gdy=gridDim.y;
int maxThreads=MaxBS*b_cols;
for(int id = bix*bdx+tix; id < maxThreads; id+=gdx*bdx) {
int cid=id/MaxBS; // source column
int rid=id%MaxBS; // packed-int row within the column
int Integer=0;
int base=1;
// Walk the vertical run bottom-up so its first element becomes the MSB.
for (int i=0;i<BINSIZE;i++){
if (b[((rid+1)*BINSIZE-1-i)*pitch_b+cid]==1.f){
Integer+=base;
}
base=base<<1;
}
b_bin[rid*pitch_b_bin+cid]=Integer;
}
}
// 256-entry per-byte bit-count lookup table; filled from the host via
// cudaMemcpyToSymbol before any kernel using popcount() is launched.
__device__ unsigned char __popcount_tab_device[256];//__constant__ is slower than __device__
// Table-driven population count: sum the bit counts of the four bytes of x.
// NOTE(review): the hardware __popc(x) intrinsic would avoid the table and
// its memory traffic entirely -- worth benchmarking.
__device__ int popcount (int x) {
return __popcount_tab_device[(x >> 0) & 0xff]
+ __popcount_tab_device[(x >> 8) & 0xff]
+ __popcount_tab_device[(x >> 16) & 0xff]
+ __popcount_tab_device[(x >> 24) & 0xff];
}
// Binary (XNOR-style) matrix multiply on the packed representation.
// One block per output row (blockIdx.x), threads stride across the columns.
// XOR of two packed ints marks the positions where the +/-1 operands
// disagree; popcount of that word is the number of -1 contributions, and
// BINSIZE minus it the number of +1 contributions, so (posnum - negnum)
// accumulates the dot product BINSIZE entries at a time.
// NOTE(review): rows are covered only up to gridDim.x - 1; the launch must
// provide at least a_rows blocks -- confirm against the caller.
__global__ void MatrixMulXnor(int *a,int *b,int a_rows,int a_cols,
int b_cols,float *result,int pitch_a,int pitch_b,
int pitch_result,int BINSIZE,int RealMidSize){
int tix=threadIdx.x;
// int tiy=threadIdx.y;
int bix=blockIdx.x;
// int biy=blockIdx.y;
int bdx=blockDim.x;
// int bdy=blockDim.y;
int gdx=gridDim.x;
// int gdy=gridDim.y;
// Number of padding bits beyond the true inner dimension; every padding bit
// packs to 0 in both operands and therefore contributes a spurious +1.
int rest=(BINSIZE*a_cols-RealMidSize);
for(int j=tix;j<b_cols;j+=bdx){
// printf("i=%d ; j=%d\n",i,j);
int sum=0;
for(int k=0;k<a_cols;k++){
int bin=(a[bix*pitch_a+k]^b[k*pitch_b+j]);
int negnum=popcount(bin);
int posnum=BINSIZE-negnum;
//calculate ignores the rest of BINSIZE if the Matsize cant devided by BINSIZE ,it can cause err
//(10/00)'(01/00) should be 0000 but it is 0011,so 1+1 is trash in the result.and it can cause a_rows*b_cols times.
sum+=(posnum-negnum);
}
// Subtract the padding contribution to recover the exact dot product.
result[bix*pitch_result+j]=sum-rest;
}
}
// Full binary (XNOR/popcount) multiply pipeline. All of a, b and result are
// DEVICE pointers: pad the inputs into zeroed pitched buffers (so padding
// bits pack to 0), pack A row-wise and B column-wise into BINSIZE-bit ints,
// run MatrixMulXnor, and de-pitch the float result into `result`.
// NOTE(review): none of the CUDA API calls below are error-checked; consider
// a cudaError_t check macro around each.
void MatrixMul_device(float *a,float *b,int a_rows,int a_cols,int b_cols,float *result){
int BINSIZE=30;//size of bin2int, 32 means 0000 0000 0000 0000 0000 0000 0000 0000
int MaxBS=(a_cols-1)/BINSIZE+1; // packed ints per row: ceil(a_cols / BINSIZE)
int a_cols_Copysize=MaxBS*BINSIZE; // inner dimension rounded up to a BINSIZE multiple
dim3 BS_BIN(512,1,1);
dim3 GS_BIN(6,1,1);
float *a_device;//a_rows * a_cols_Copysize
float *b_device;//a_cols_Copysize * b_cols
size_t pitch_a_device, pitch_b_device;
// Pitched, zero-initialised staging copies of both operands.
cudaMallocPitch((void**)&a_device , &pitch_a_device , sizeof(float) *a_cols_Copysize , a_rows);
cudaMallocPitch((void**)&b_device , &pitch_b_device , sizeof(float) *b_cols , a_cols_Copysize);
cudaMemset(a_device, 0, pitch_a_device * a_rows);
cudaMemset(b_device, 0, pitch_b_device * a_cols_Copysize);
cudaMemcpy2D(a_device,pitch_a_device,a,sizeof(float) *a_cols ,sizeof(float) *a_cols, a_rows,cudaMemcpyDeviceToDevice);
cudaMemcpy2D(b_device,pitch_b_device,b,sizeof(float) *b_cols ,sizeof(float) *b_cols, a_cols,cudaMemcpyDeviceToDevice);
//check oringin
// float *a_host;
// float *b_host;
// a_host = (float*) malloc(sizeof(float) * a_cols_Copysize * a_rows);
// b_host = (float*) malloc(sizeof(float) * b_cols * a_cols_Copysize);
// cudaMemcpy2D(a_host,sizeof(float) *a_cols_Copysize, a_device,pitch_a_device,sizeof(float) *a_cols_Copysize , a_rows,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(b_host,sizeof(float) *b_cols, b_device,pitch_b_device,sizeof(float) *b_cols , a_cols_Copysize,cudaMemcpyDeviceToHost);
// MatrixPrint(a_host,a_rows,a_cols_Copysize);
// MatrixPrint(b_host,a_cols_Copysize,b_cols);
int *a_device_bin;
int *b_device_bin;
size_t pitch_a_device_bin, pitch_b_device_bin;
// Pack A by rows and B by columns (pitches passed in elements, not bytes).
cudaMallocPitch((void**)&a_device_bin , &pitch_a_device_bin , sizeof(int) *MaxBS , a_rows);
cudaMallocPitch((void**)&b_device_bin , &pitch_b_device_bin , sizeof(int) *b_cols , MaxBS);
AMatrix2Bin<<<GS_BIN,BS_BIN>>>(a_device , a_device_bin , a_rows ,
pitch_a_device/sizeof(float) , pitch_a_device_bin/sizeof(int) , MaxBS , BINSIZE);
BMatrix2Bin<<<GS_BIN,BS_BIN>>>(b_device , b_device_bin , b_cols ,
pitch_b_device/sizeof(float) , pitch_b_device_bin/sizeof(int) , MaxBS , BINSIZE);
//check bin
// int *a_host_bin;
// int *b_host_bin;
// a_host_bin = (int*) malloc(sizeof(int) *MaxBS * a_rows);
// b_host_bin = (int*) malloc(sizeof(int) *b_cols * MaxBS);
// cudaMemcpy2D(a_host_bin,sizeof(int) *MaxBS, a_device_bin,pitch_a_device_bin,sizeof(int) *MaxBS , a_rows ,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(b_host_bin,sizeof(int) *b_cols, b_device_bin,pitch_b_device_bin,sizeof(int) *b_cols , MaxBS ,cudaMemcpyDeviceToHost);
// MatrixPrintD(a_host_bin,a_rows,MaxBS);
// MatrixPrintD(b_host_bin,MaxBS,b_cols);
float *result_device;//a_rows * b_cols
size_t pitch_result_device;
cudaMallocPitch((void**)&result_device , &pitch_result_device , sizeof(float) *b_cols , a_rows);
// Byte-wise popcount lookup table uploaded to __popcount_tab_device.
const unsigned char __popcount_tab[] = {
0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8,
};
cudaMemcpyToSymbol(__popcount_tab_device, __popcount_tab, sizeof(__popcount_tab));
// Time the multiply kernel alone with CUDA events.
cudaEvent_t start_device, stop_device;
float time_device;
cudaEventCreate(&start_device);
cudaEventCreate(&stop_device);
cudaEventRecord( start_device, 0 );
// One block per output row -- see the layout note on MatrixMulXnor.
dim3 BS_MM(32,1,1);
dim3 GS_MM(1000,1,1);
MatrixMulXnor<<<GS_MM,BS_MM>>>(a_device_bin , b_device_bin , a_rows , MaxBS , b_cols ,
result_device , pitch_a_device_bin/sizeof(int) , pitch_b_device_bin/sizeof(int) ,
pitch_result_device/sizeof(float) , BINSIZE , a_cols);
cudaEventRecord( stop_device, 0 );
cudaEventSynchronize( stop_device );
cudaEventElapsedTime( &time_device, start_device, stop_device );
cudaEventDestroy( start_device );
cudaEventDestroy( stop_device );
cout<<"gputime="<<time_device<<"ms"<<endl;
// De-pitch the result into the caller's contiguous device buffer.
cudaMemcpy2D(result,sizeof(float) *b_cols, result_device,pitch_result_device,sizeof(float) *b_cols , a_rows ,cudaMemcpyDeviceToDevice);
cudaFree(a_device);
cudaFree(b_device);
cudaFree(a_device_bin);
cudaFree(b_device_bin);
cudaFree(result_device);
}
// Driver: multiply two random 1000x1000 +/-1 matrices on GPU (binary/XNOR
// path) and on CPU (naive reference), report both timings and the total
// absolute error between the two results.
int main(){
    //simulate pytorch param
    int Matrixsize=1000;
    float *a_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    float *b_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    float *result_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    // Deterministic +/-1 inputs.
    srand(0);
    MatrixRandBin(a_host,Matrixsize,Matrixsize);
    MatrixRandBin(b_host,Matrixsize,Matrixsize);
    float *a_device;
    float *b_device;
    float *result_device;
    cudaMalloc((void**)&a_device,sizeof(float) *Matrixsize * Matrixsize);
    cudaMalloc((void**)&b_device,sizeof(float) *Matrixsize * Matrixsize);
    cudaMalloc((void**)&result_device,sizeof(float) *Matrixsize * Matrixsize);
    cudaMemcpy(a_device,a_host,sizeof(float) *Matrixsize * Matrixsize,cudaMemcpyHostToDevice);
    cudaMemcpy(b_device,b_host,sizeof(float) *Matrixsize * Matrixsize,cudaMemcpyHostToDevice);
    //run in gpu warp in C code
    MatrixMul_device(a_device,b_device,Matrixsize,Matrixsize,Matrixsize,result_device);
    cudaMemcpy(result_host, result_device,sizeof(float) *Matrixsize * Matrixsize,cudaMemcpyDeviceToHost);
    cudaFree(a_device);
    cudaFree(b_device);
    cudaFree(result_device);
    //run in cpu
    float *result_cpu = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    clock_t start_host = clock();
    MatrixMul_host(a_host,Matrixsize,Matrixsize,b_host,Matrixsize,Matrixsize,result_cpu);
    // BUG FIX: clock() returns ticks, not microseconds; dividing by 1000 only
    // yields milliseconds when CLOCKS_PER_SEC happens to be 1e6. Convert via
    // CLOCKS_PER_SEC so the printed value is milliseconds on every platform.
    cout<<"cputime="<<(double)(clock() - start_host)*1000.0/CLOCKS_PER_SEC<<"ms"<<endl;
    //compare value of gpu and cpu
    float err=MatrixCompare(result_cpu,result_host,Matrixsize,Matrixsize);
    cout<<"err in gpu and cpu = "<<err<<endl;
    // Release host buffers (previously leaked).
    free(a_host);
    free(b_host);
    free(result_host);
    free(result_cpu);
    return 0;
}
12,267 | #include "includes.h"
// Element-wise absolute value over float4-packed data: one thread per float4,
// 2-D grid flattened to a linear element id, tail guarded by elem_count.
__global__ void absolute_kernel( float4 * __restrict output, const float4 * __restrict input, int elem_count)
{
    int idx = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    if (idx >= elem_count)
        return;
    float4 v = input[idx];
    v.x = fabsf(v.x);
    v.y = fabsf(v.y);
    v.z = fabsf(v.z);
    v.w = fabsf(v.w);
    output[idx] = v;
}
12,268 | /* Enunciado:
* Multiplicacion de Matrices MxN (16x16) por Bloques en CUDA
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
cudaError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width);
const int TILE_WIDTH = 4;//Se ha establecido un tamao de tesela de 4 hilos
// Each thread computes one element of c = a * b (square Width x Width
// matrices, row-major), tiled over TILE_WIDTH x TILE_WIDTH thread blocks.
__global__ void productoKernel(int *c, const int *a, const int *b, unsigned int Width)
{
    int fila    = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int columna = blockIdx.x * TILE_WIDTH + threadIdx.x;
    // Threads that fall outside the matrix do nothing.
    if ((fila < Width) && (columna < Width)) {
        int acumulado = 0;
        for (int k = 0; k < Width; k++)
            acumulado += a[fila * Width + k] * b[k * Width + columna];
        c[fila * Width + columna] = acumulado;
    }
}
//Hace uso de ceil para obtener el tamao del Grid de Bloques
// Number of TILE-sized blocks needed to cover Width elements (ceiling division).
int grid_calc(int Width, int Tile_Width) {
    // Pure integer ceil-div: identical results to the original
    // (int)ceil((double)Width / Tile_Width) for positive sizes, without the
    // double round-trip.
    return (Width + Tile_Width - 1) / Tile_Width;
}
void imprimeMatriz(int *v, int m, int n) {//( m * n )
    // Print an m x n row-major matrix, each entry left-justified in a
    // 5-character cell (one slot reserved for the sign column).
    int i, j, x;
    int ws;// number of character slots per cell
    printf("\n");
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            ws = 5;
            // BUG FIX: a row of an m x n row-major matrix has stride n, not m;
            // the original indexed v[i*m + j] and printed the wrong elements
            // whenever m != n.
            x = v[i*n + j];
            if (x < 0) {// negative: the "-" sign occupies one slot
                ws--;
                x = -1 * x;
            }
            else {// nonnegative: print a space where the sign would sit
                ws--;
                printf(" ");
            }
            do {// one slot per digit of the number
                ws--;
                x = x / 10;
            } while (x > 0);
            printf("%d", v[i*n + j]);// print the value itself
            while (ws > 0) {// fill the remaining slots with blanks
                printf(" ");
                ws--;
            }
        }
        printf("\n");
    }
}
void imprimeMatriz(int *v, int m) {//Para matrices cuadradas ( m * m )
    // Print an m x m matrix; each entry is left-justified in a 5-character
    // cell with a leading space reserved for the sign ("% -5d" reproduces the
    // original manual sign/digit/padding loop exactly).
    printf("\n");
    for (int fila = 0; fila < m; fila++) {
        for (int col = 0; col < m; col++)
            printf("% -5d", v[fila * m + col]);
        printf("\n");
    }
}
void generaMatriz(int *v, int m, int n, int max, int min) {//( m * n )
    // Fill the m x n row-major matrix with pseudo-random integers in [min, max).
    int total = m * n;
    for (int idx = 0; idx < total; idx++)
        v[idx] = min + (rand() % (max - min));
}
void generaMatriz(int *v, int m, int max, int min) {//Para matrices cuadradas ( m * m )
    // Square-matrix overload: fill m x m with pseudo-random ints in [min, max).
    int total = m * m;
    for (int idx = 0; idx < total; idx++)
        v[idx] = min + (rand() % (max - min));
}
// Driver: build two random 16x16 matrices in [-10, 10), multiply them on the
// GPU via prodMatricesCuda, and print A, B and the product.
int main()
{
/*const int Width = 6;//Pruebas
int a[6 * 6] = { 8, 7, 3, -4, -2, -3,
8, 9, -6, 3, -1, -4,
-6, -1, -10, -7, 8, 6,
4, -6, -6, -3, 8, 7,
-1, -1, -7, -8, -1, 9,
7, 8, 3, 7, 2, 3 };
int b[6 * 6] = { 0, 9, 9, 5, -10, 6,
-2, 3, 8, 4, 0, -9,
7, 1, -8, -9, -10, -9,
-3, -5, 7, -2, 6, 4,
7, 9, -3, -9, -9, 6,
6, 6, 4, -8, 8, -5 };
//Resultado de a * b =
// -13 80 70 91 -140 -55
// -100 45 200 165 -25 47
// 45 76 -31 -50 94 53
// 77 141 19 -72 -14 133
// 24 66 22 7 113 -17
// 16 91 158 -16 -52 -32*/
// Seed the RNG and generate the random operands.
srand((unsigned int)time(0));
const int max = 10;
const int min = -10;
const int Width = 16;
int a[Width * Width] = { 0 };
generaMatriz(a, Width, max, min);
int b[Width * Width] = { 0 };
generaMatriz(b, Width, max, min);
int c[Width * Width] = { 0 };
// Compute c = a * b on the GPU.
cudaError_t cudaStatus = prodMatricesCuda(c, a, b, Width);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("\n\tMatriz A\n");
imprimeMatriz(a, Width);
printf("\n\tMatriz B\n");
imprimeMatriz(b, Width);
printf("\n\tResultado del producto:\n");
imprimeMatriz(c, Width);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// GPU wrapper for the Width x Width integer matrix product: allocates device
// buffers, copies a and b in, launches productoKernel over a TILE_WIDTH-tiled
// grid, and copies the product back into c. Every CUDA call is checked; on
// any failure control jumps to the shared cleanup label so the device
// buffers are always released. Returns the last cudaError_t observed.
cudaError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
// Enough TILE_WIDTH x TILE_WIDTH blocks to cover the whole matrix.
dim3 DimGrid(grid_calc(Width, TILE_WIDTH), grid_calc(Width, TILE_WIDTH));
dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, Width * Width * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, Width * Width * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, Width * Width * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, Width * Width * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, Width * Width * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
productoKernel<<<DimGrid, DimBlock>>>(dev_c, dev_a, dev_b, Width);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, Width * Width * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
12,269 | #include<iostream>
#include<cuda_runtime.h>
#include<cuda.h>
#include<algorithm>
#include"sha1.cuh"
#include<cmath>
#include<ctime>
/*
* sha1.c
*
* Copyright (C) 1998, 2009
* Paul E. Jones <paulej@packetizer.com>
* All Rights Reserved
*
*****************************************************************************
* $Id: sha1.c 12 2009-06-22 19:34:25Z paulej $
*****************************************************************************
*
* Description:
* This file implements the Secure Hashing Standard as defined
* in FIPS PUB 180-1 published April 17, 1995.
*
* The Secure Hashing Standard, which uses the Secure Hashing
* Algorithm (SHA), produces a 160-bit message digest for a
* given data stream. In theory, it is highly improbable that
* two messages will produce the same message digest. Therefore,
* this algorithm can serve as a means of providing a "fingerprint"
* for a message.
*
* Portability Issues:
* SHA-1 is defined in terms of 32-bit "words". This code was
* written with the expectation that the processor has at least
* a 32-bit machine word size. If the machine word size is larger,
* the code should still function properly. One caveat to that
* is that the input functions taking characters and character
* arrays assume that only 8 bits of information are stored in each
* character.
*
* Caveats:
* SHA-1 is designed to work with messages less than 2^64 bits
* long. Although SHA-1 allows a message digest to be generated for
* messages of any number of bits less than 2^64, this
* implementation only works with messages with a length that is a
* multiple of the size of an 8-bit character.
*
*/
#include "sha1.cuh"
/*
* Define the circular shift macro
*/
#define SHA1CircularShift(bits,word) \
((((word) << (bits)) & 0xFFFFFFFF) | \
((word) >> (32-(bits))))
/* Function prototypes */
inline void SHA1ProcessMessageBlock(SHA1Context *);
inline void SHA1PadMessage(SHA1Context *);
/*
* SHA1Reset
*
* Description:
* This function will initialize the SHA1Context in preparation
* for computing a new message digest.
*
* Parameters:
* context: [in/out]
* The context to reset.
*
* Returns:
* Nothing.
*
* Comments:
*
*/
// Reset the context for a new digest: zero the message length and block
// index, load the five standard SHA-1 initial hash values (FIPS 180-1), and
// clear the Computed/Corrupted status flags.
__device__
inline void SHA1Reset(SHA1Context *context)
{
context->Length_Low = 0;
context->Length_High = 0;
context->Message_Block_Index = 0;
context->Message_Digest[0] = 0x67452301;
context->Message_Digest[1] = 0xEFCDAB89;
context->Message_Digest[2] = 0x98BADCFE;
context->Message_Digest[3] = 0x10325476;
context->Message_Digest[4] = 0xC3D2E1F0;
context->Computed = 0;
context->Corrupted = 0;
}
/*
* SHA1Result
*
* Description:
* This function will return the 160-bit message digest into the
* Message_Digest array within the SHA1Context provided
*
* Parameters:
* context: [in/out]
* The context to use to calculate the SHA-1 hash.
*
* Returns:
* 1 if successful, 0 if it failed.
*
* Comments:
*
*/
/* Finalize the digest: pad the message (if not already done) so the 160-bit
 * result is available in context->Message_Digest. No-op on a corrupted
 * context.
 * BUG FIX: this function is called from the cudasha kernel and itself calls
 * the __device__ function SHA1PadMessage, so it must carry the __device__
 * qualifier; the original declaration was host-only and cannot compile into
 * the kernel's call chain. */
inline __device__
void SHA1Result(SHA1Context *context)
{
    if (context->Corrupted)
    {
        return;
    }
    // Lazily pad and run the final compression on first request.
    if (!context->Computed)
    {
        SHA1PadMessage(context);
        context->Computed = 1;
    }
}
/*
* SHA1Input
*
* Description:
* This function accepts an array of octets as the next portion of
* the message.
*
* Parameters:
* context: [in/out]
* The SHA-1 context to update
* message_array: [in]
* An array of characters representing the next portion of the
* message.
* length: [in]
* The length of the message in message_array
*
* Returns:
* Nothing.
*
* Comments:
*
*/
/* Feed `length` octets of message data into the hash state, running the
 * compression function each time a full 64-byte block accumulates. Marks the
 * context corrupted if data arrives after finalization or the 2^64-bit
 * length limit overflows.
 * BUG FIX: called from the cudasha kernel and calls the __device__ function
 * SHA1ProcessMessageBlock, so it must be __device__; the original was
 * host-only. */
inline __device__
void SHA1Input( SHA1Context *context,
                const unsigned char *message_array,
                unsigned length)
{
    if (!length)
    {
        return;
    }
    // Feeding data after the digest is computed (or after corruption) is an error.
    if (context->Computed || context->Corrupted)
    {
        context->Corrupted = 1;
        return;
    }
    while(length-- && !context->Corrupted)
    {
        // Append the next octet to the current 512-bit block.
        context->Message_Block[context->Message_Block_Index++] =
            (*message_array & 0xFF);
        context->Length_Low += 8;
        /* Force it to 32 bits */
        context->Length_Low &= 0xFFFFFFFF;
        if (context->Length_Low == 0)
        {
            context->Length_High++;
            /* Force it to 32 bits */
            context->Length_High &= 0xFFFFFFFF;
            if (context->Length_High == 0)
            {
                /* Message is too long (>= 2^64 bits) */
                context->Corrupted = 1;
            }
        }
        // A full 64-byte block triggers one compression round.
        if (context->Message_Block_Index == 64)
        {
            SHA1ProcessMessageBlock(context);
        }
        message_array++;
    }
}
/*
* SHA1ProcessMessageBlock
*
* Description:
* This function will process the next 512 bits of the message
* stored in the Message_Block array.
*
* Parameters:
* None.
*
* Returns:
* Nothing.
*
* Comments:
* Many of the variable names in the SHAContext, especially the
* single character names, were used because those were the names
* used in the publication.
*
*
*/
// SHA-1 compression function: consume the 64-byte Message_Block, update the
// five 32-bit digest words, and reset the block index (FIPS 180-1).
inline __device__
void SHA1ProcessMessageBlock(SHA1Context *context)
{
const unsigned K[] = /* Constants defined in SHA-1 */
{
0x5A827999,
0x6ED9EBA1,
0x8F1BBCDC,
0xCA62C1D6
};
int t; /* Loop counter */
unsigned temp; /* Temporary word value */
unsigned W[80]; /* Word sequence */
unsigned A, B, C, D, E; /* Word buffers */
/*
* Initialize the first 16 words in the array W
*/
for(t = 0; t < 16; t++)
{
W[t] = ((unsigned) context->Message_Block[t * 4]) << 24;
W[t] |= ((unsigned) context->Message_Block[t * 4 + 1]) << 16;
W[t] |= ((unsigned) context->Message_Block[t * 4 + 2]) << 8;
W[t] |= ((unsigned) context->Message_Block[t * 4 + 3]);
}
/* Message-schedule expansion for words 16..79 (with SHA-1's 1-bit rotate). */
for(t = 16; t < 80; t++)
{
W[t] = SHA1CircularShift(1,W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]);
}
A = context->Message_Digest[0];
B = context->Message_Digest[1];
C = context->Message_Digest[2];
D = context->Message_Digest[3];
E = context->Message_Digest[4];
/* Rounds 0-19: Ch(B,C,D) = (B & C) | (~B & D). */
for(t = 0; t < 20; t++)
{
temp = SHA1CircularShift(5,A) +
((B & C) | ((~B) & D)) + E + W[t] + K[0];
temp &= 0xFFFFFFFF;
E = D;
D = C;
C = SHA1CircularShift(30,B);
B = A;
A = temp;
}
/* Rounds 20-39: Parity(B,C,D) = B ^ C ^ D. */
for(t = 20; t < 40; t++)
{
temp = SHA1CircularShift(5,A) + (B ^ C ^ D) + E + W[t] + K[1];
temp &= 0xFFFFFFFF;
E = D;
D = C;
C = SHA1CircularShift(30,B);
B = A;
A = temp;
}
/* Rounds 40-59: Maj(B,C,D) = (B & C) | (B & D) | (C & D). */
for(t = 40; t < 60; t++)
{
temp = SHA1CircularShift(5,A) +
((B & C) | (B & D) | (C & D)) + E + W[t] + K[2];
temp &= 0xFFFFFFFF;
E = D;
D = C;
C = SHA1CircularShift(30,B);
B = A;
A = temp;
}
/* Rounds 60-79: Parity again with the last round constant. */
for(t = 60; t < 80; t++)
{
temp = SHA1CircularShift(5,A) + (B ^ C ^ D) + E + W[t] + K[3];
temp &= 0xFFFFFFFF;
E = D;
D = C;
C = SHA1CircularShift(30,B);
B = A;
A = temp;
}
/* Add this block's result into the running digest (mod 2^32). */
context->Message_Digest[0] =
(context->Message_Digest[0] + A) & 0xFFFFFFFF;
context->Message_Digest[1] =
(context->Message_Digest[1] + B) & 0xFFFFFFFF;
context->Message_Digest[2] =
(context->Message_Digest[2] + C) & 0xFFFFFFFF;
context->Message_Digest[3] =
(context->Message_Digest[3] + D) & 0xFFFFFFFF;
context->Message_Digest[4] =
(context->Message_Digest[4] + E) & 0xFFFFFFFF;
context->Message_Block_Index = 0;
}
/*
* SHA1PadMessage
*
* Description:
* According to the standard, the message must be padded to an even
* 512 bits. The first padding bit must be a '1'. The last 64
* bits represent the length of the original message. All bits in
* between should be 0. This function will pad the message
* according to those rules by filling the Message_Block array
* accordingly. It will also call SHA1ProcessMessageBlock()
* appropriately. When it returns, it can be assumed that the
* message digest has been computed.
*
* Parameters:
* context: [in/out]
* The context to pad
*
* Returns:
* Nothing.
*
* Comments:
*
*/
// Pad the message per FIPS 180-1: append the 0x80 marker bit, zero-fill to
// byte 56 (spilling into an extra block if the marker lands past byte 55),
// then store the 64-bit original bit length big-endian in bytes 56..63 and
// run the final compression.
inline __device__
void SHA1PadMessage(SHA1Context *context)
{
/*
* Check to see if the current message block is too small to hold
* the initial padding bits and length. If so, we will pad the
* block, process it, and then continue padding into a second
* block.
*/
if (context->Message_Block_Index > 55)
{
context->Message_Block[context->Message_Block_Index++] = 0x80;
while(context->Message_Block_Index < 64)
{
context->Message_Block[context->Message_Block_Index++] = 0;
}
SHA1ProcessMessageBlock(context);
while(context->Message_Block_Index < 56)
{
context->Message_Block[context->Message_Block_Index++] = 0;
}
}
else
{
context->Message_Block[context->Message_Block_Index++] = 0x80;
while(context->Message_Block_Index < 56)
{
context->Message_Block[context->Message_Block_Index++] = 0;
}
}
/*
* Store the message length as the last 8 octets
*/
context->Message_Block[56] = (context->Length_High >> 24) & 0xFF;
context->Message_Block[57] = (context->Length_High >> 16) & 0xFF;
context->Message_Block[58] = (context->Length_High >> 8) & 0xFF;
context->Message_Block[59] = (context->Length_High) & 0xFF;
context->Message_Block[60] = (context->Length_Low >> 24) & 0xFF;
context->Message_Block[61] = (context->Length_Low >> 16) & 0xFF;
context->Message_Block[62] = (context->Length_Low >> 8) & 0xFF;
context->Message_Block[63] = (context->Length_Low) & 0xFF;
SHA1ProcessMessageBlock(context);
}
// globals
const int NUM_HASHES = 20;
const int STRING_LEN = 26;
const int HASH_LEN = 5;
const int NUM_BLOCKS = 100;
__global__ void cudasha(const unsigned char*, unsigned int*);
// Host function
// Host driver: generate NUM_HASHES random 26-char uppercase strings, hash
// them all on the GPU with one thread per string, and fetch the digests.
int
main(int argc, char** argv)
{
    unsigned char strings[NUM_HASHES*STRING_LEN];
    srand (time(0));
    for(int i = 0; i < NUM_HASHES; i++)
    {
        for(int j = 0; j < STRING_LEN; j++)
        {
            *(strings + (i * STRING_LEN) + j) = rand()%26+65; // random 'A'..'Z'
        }
    }
    unsigned hashes[HASH_LEN*NUM_HASHES];
    //////////////////////////////////////////////
    printf("Now let's try it on the GPU:\n");
    unsigned char* cudaString;
    unsigned int* results;
    size_t sizestr = sizeof(strings);
    size_t sizeres = sizeof(hashes);
    cudaMalloc((void**)&cudaString, sizestr);
    cudaMalloc((void**)&results, sizeres);
    cudaMemcpy(cudaString, strings, sizestr, cudaMemcpyHostToDevice);
    // BUG FIX: NUM_HASHES/NUM_BLOCKS is 20/100 == 0 in integer math, so the
    // original launch requested zero threads per block and failed. Launch
    // exactly one thread per hash instead.
    dim3 dimGrid(NUM_HASHES);
    dim3 dimBlock(1);
    // invoke the kernel
    cudasha<<< dimGrid, dimBlock >>>(cudaString, results);
    // BUG FIX: retrieving the digests is a device-to-host copy; the original
    // passed cudaMemcpyHostToDevice and never got the results back.
    cudaMemcpy(hashes, results, sizeres, cudaMemcpyDeviceToHost);
    cudaFree(cudaString);
    cudaFree(results);
    return 0;
}
// Device kernel
// One thread per input string: SHA-1 hash STRING_LEN bytes starting at
// str[idx*STRING_LEN] and write the five 32-bit digest words into
// results[idx*HASH_LEN .. idx*HASH_LEN+4].
__global__ void
cudasha(const unsigned char* str, unsigned int* results)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Robustness fix: guard against launches with more threads than strings
    // so surplus threads cannot read or write out of bounds.
    if (idx >= NUM_HASHES)
        return;
    SHA1Context sha;
    SHA1Reset(&sha);
    SHA1Input(&sha, str + (idx * STRING_LEN), STRING_LEN);
    SHA1Result(&sha);
    results[idx*HASH_LEN] = sha.Message_Digest[0];
    results[idx*HASH_LEN+1] = sha.Message_Digest[1];
    results[idx*HASH_LEN+2] = sha.Message_Digest[2];
    results[idx*HASH_LEN+3] = sha.Message_Digest[3];
    results[idx*HASH_LEN+4] = sha.Message_Digest[4];
}
12,270 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
// Element-wise sum of two n x n row-major matrices: one thread per element,
// with a guard for the partial blocks at the grid edge.
__global__
void matrixSum(float * A, float * B, float * C, int n){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= n || y >= n)
        return;
    int idx = y * n + x;
    C[idx] = A[idx] + B[idx];
}
// Host wrapper: add two n x n matrices on the GPU (allocate, copy in, launch
// matrixSum over 16x16 blocks, copy the sum back into C).
void hostMatrixSum(float * A, float * B, float * C, int n){
    int size = sizeof(float) * n * n;
    float * d_A;
    float * d_B;
    float * d_C;
    cudaMalloc((void **) &d_A, size);
    cudaMalloc((void **) &d_B, size);
    cudaMalloc((void **) &d_C, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    // Enough 16x16 blocks to cover the n x n grid.
    dim3 dimGrid(ceil(n/16.0),ceil(n/16.0),1);
    dim3 dimBlock(16,16,1);
    matrixSum<<<dimGrid,dimBlock>>>(d_A,d_B,d_C,n);
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    // BUG FIX: the original only freed d_A, leaking d_B and d_C on every call.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Entry point: read the matrix size from argv[1], build two all-ones n x n
// matrices, and sum them on the GPU.
// NOTE: the parameter names are swapped relative to convention (argv holds
// the count, argc the vector); kept as-is since main's interface is fixed.
int main(int argv, char ** argc){
    if(argv != 2){
        printf("Faltan Argumentos <tamMatrix>\n");
        return 0;
    }
    int n = atoi(argc[1]);
    float * A = (float *) malloc(sizeof(float) * n * n);
    for(int i = 0; i < n * n; i++) A[i] = 1;
    float * B = (float *) malloc(sizeof(float) * n * n);
    for(int i = 0; i < n * n; i++) B[i] = 1;
    float * C = (float *) malloc(sizeof(float) * n * n);
    hostMatrixSum(A,B,C,n);
    //for(int i = 0; i < n * n; i++) printf("%f ", C[i]);
    // BUG FIX: the original leaked all three host buffers and ended without
    // an explicit return; release them and return success.
    free(A);
    free(B);
    free(C);
    return 0;
}
|
12,271 | #include "includes.h"
// Scale `vec` by `alpha` in place: one thread per element, with a tail guard
// for grids that overshoot numElements.
__global__ void kernel_mult_vector_by_number(double *vec, double alpha, int numElements)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= numElements)
        return;
    vec[gid] = vec[gid] * alpha;
}
12,272 | #include<stdio.h>
#include<iostream>
#include<malloc.h>
#include<ctime>
#include<cuda_runtime.h>
#include<assert.h>
using namespace std;
//__constant__ int* device_array;
// Busy-loop kernel used to probe GPU clock() timing. `delay` and `add` are
// C++ references; at launch they must be bound to device memory (the host
// passes d_ptr[0], which only forms an address -- no host dereference).
__global__ void test_clock(int &delay, int &add){
int threadID = (blockIdx.x * blockDim.x) + threadIdx.x;
clock_t start=0;
// Only thread 0 timestamps, so `delay` reflects a single thread's view.
if(threadID == 0) start = clock();
for(int k=0;k<100;k++){
for(int j =0;j<10;j++){
for(int i=0;i<100;i++){
// Thread 0 does extra accumulation; every thread adds k so the loop
// body is not trivially dead code for the compiler.
if(threadID==0){add+=i;}
//add+=j;
//if(threadID<11){add+=k;}
add+=k;
}
}
}
if(threadID==0){
clock_t end = clock();
// Elapsed GPU clock ticks for the triple loop (narrowed to int).
delay = (int)(end - start);
}
}
// Pointer-chase latency probe: stages the chain into dynamic shared memory,
// then follows `access_number` links, reporting elapsed clock64() ticks via
// the `latency` reference.
// NOTE(review): the chain values are device-global addresses built by the
// host, so chasing them from the *shared* copy jumps back into global
// memory after the first hop -- confirm this is the intended pattern.
__global__ void sequence_read(long long int &latency, int* device_array, int n, int access_number){
extern __shared__ int shared_array[];
for(int i=0;i<n;i++){shared_array[i]=device_array[i];}
int* j = &shared_array[0];
//for(int i=0;i<access_number;i++){j=*(int **)j;}
//j = &shared_array[0];
long long int temp = clock64();
// Each step reinterprets the current element as the address of the next.
for(int i=0;i<access_number;i++){j=*(int **)j;}
latency = clock64() - temp;
}
// Pointer-chase latency probe over a device-resident chain of 64-bit
// addresses. Thread 0 first touches every element (warming the cache),
// then follows `access_number` links; elapsed clock() ticks come back via
// `latency`, and the final pointer's value via `last_access_value` so the
// chase cannot be optimized away.
__global__ void static_sequence_read(int &latency, long long unsigned* device_array, int access_number, long long unsigned &last_access_value, int array_size){
int threadID = (blockIdx.x * blockDim.x) + threadIdx.x;
long long unsigned *j;
// Warm pass: read the whole array once.
if(threadID == 0){
for(int i=0;i<array_size;i++){
last_access_value = device_array[i];
}
}
j = &device_array[0];
__syncthreads();//finish intializing the array
clock_t temp=0;
if(threadID == 0){temp = clock();}//start clocking
// Each step reinterprets the current element as the address of the next.
for(int i=0;i<access_number;i++){if(threadID == 0) j=*(long long unsigned **)j;}//access the data array
if(threadID == 0){
latency = (int)(clock() - temp);
last_access_value = j[0];
}
}
// Multi-block variant of the pointer-chase probe: each block (intended to
// land on a distinct SM -- not guaranteed by CUDA) chases its own
// array_size-element chain at offset blockIdx.x * array_size, writing its
// latency and final value into per-block output slots. Launched <<<sm_max,1>>>.
__global__ void static_sequence_read_multism(int* latency, long long unsigned* device_array, int access_number, long long unsigned* last_access_value, int array_size){
//int threadID = (blockIdx.x * blockDim.x) + threadIdx.x;
int threadx =threadIdx.x;
int smid = blockIdx.x;
clock_t start, end;
long long unsigned temp_value=0;
long long unsigned *j;
// Warm pass: sum this block's slice so every element is touched once.
if(threadx == 0){
for(int i=0;i<array_size;i++){
temp_value += device_array[i+array_size*smid];
}
}
last_access_value[smid]=temp_value;
j = &(device_array[array_size*smid]);
__syncthreads();//finish intializing the array
//start=0;
if(threadx == 0){start = clock();}//start clocking
// Each step reinterprets the current element as the address of the next.
for(int i=0;i<access_number;i++){if(threadx == 0) j=*(long long unsigned **)j;}//access the data array
if(threadx == 0){
end = clock();
latency[smid] = (int)(end - start);
last_access_value[smid] += j[0];
}
}
/*
__global__ void static_sequence_read_noinitialize(int* latency, long long unsigned* device_array, int access_number, long long unsigned* last_access_value, int array_size){
int threadID = (blockIdx.x * blockDim.x) + threadIdx.x;
long long unsigned *j;
if(threadID == 0){
for(int i=0;i<array_size;i++){
last_access_value = device_array[i];
}
}
j = &device_array[0];
__syncthreads();//finish intializing the array
clock_t temp=0;
if(threadID == 0){temp = clock();}//start clocking
for(int i=0;i<access_number;i++){if(threadID == 0) j=*(long long unsigned **)j;}//access the data array
if(threadID == 0){
latency = (int)(clock() - temp);
last_access_value = j[0];
}
}
*/
/*
 * Driver for the pointer-chase cache-latency microbenchmark.
 * Builds, on the host, an array whose element i holds the DEVICE address of
 * element (i + stride) (wrapping), uploads it, runs
 * static_sequence_read_multism, and writes "<array_size>\t<avg clocks>"
 * to ./programout.txt.
 *
 * Fixes vs. original: fprintf used %d for a long long unsigned (undefined
 * behavior) -- now %llu; host_array was allocated with a pointer-sized
 * element type; all host and device buffers are now released; fclose moved
 * out of the loop (the original closed the file inside the loop, harmless
 * only because the loop runs once); large dead commented-out experiments
 * removed.
 */
int main(void){
    FILE* fp = fopen("./programout.txt","w");
    assert(fp!=NULL);
    // With these bounds the loop executes exactly once (array_size == 256).
    for(long long unsigned array_size = 256; array_size < 257; array_size += 4){
        int sm_max = 1;
        fprintf(fp,"%llu\t",array_size);
        long long unsigned device_size = sizeof(long long unsigned)*array_size*sm_max;
        long long unsigned* device_array;
        long long unsigned* host_array = (long long unsigned*)malloc(array_size*sizeof(long long unsigned)*sm_max);
        assert(host_array != NULL);
        printf("Strat mallocing\n");
        assert(cudaSuccess == cudaMalloc((void**)&device_array,device_size));
        int stride = 16;//set the access stride = cache_line_size / sizeof(long long unsigned) = 128/8=16
        for(int sm_id =0;sm_id<sm_max;sm_id++){
            for(int i = 0; i < array_size; i++){
                int t = i + stride;
                if(t >= array_size) t %= stride;
                // Element i stores the device address of element t, forming the chase chain.
                host_array[i+array_size*sm_id] = (long long unsigned)(&(device_array[sm_id*array_size])) + sizeof(long long unsigned)*t;
            }
        }
        int* timing = (int*)malloc(sizeof(int)*sm_max);
        int* timing_d;
        assert(cudaSuccess == cudaMalloc((void**)&timing_d, sizeof(int)*sm_max));
        long long unsigned* last_access_value = (long long unsigned*)malloc(sizeof(long long unsigned)*sm_max);
        long long unsigned* d_last_access_value;
        assert(cudaSuccess == cudaMalloc((void**)&d_last_access_value, sizeof(long long unsigned)*sm_max));
        assert(cudaSuccess == cudaMemcpy(device_array,host_array,device_size,cudaMemcpyHostToDevice));
        double access_time;
        cudaDeviceSynchronize();
        // One single-thread block per "SM", 4 chained accesses each.
        static_sequence_read_multism<<<sm_max,1>>>(timing_d, device_array, 4, d_last_access_value, array_size);
        cudaDeviceSynchronize();
        assert(cudaSuccess == cudaMemcpy(timing,timing_d,sizeof(int)*sm_max,cudaMemcpyDeviceToHost));
        assert(cudaSuccess == cudaMemcpy(last_access_value,d_last_access_value,sizeof(long long unsigned)*sm_max,cudaMemcpyDeviceToHost));
        cudaDeviceSynchronize();
        access_time = 0;
        for(int i=0;i<sm_max;i++){
            //printf ("It took me %d clicks, last_access value: %llu.\n",timing[i], last_access_value[i]);
            access_time+=timing[i];
            printf("%llu\n",last_access_value[i]);
        }
        fprintf(fp,"%lf\n",access_time/sm_max);
        // Release per-iteration resources (the original leaked all of these).
        free(host_array);
        free(timing);
        free(last_access_value);
        cudaFree(device_array);
        cudaFree(timing_d);
        cudaFree(d_last_access_value);
    }
    fclose(fp);
    return 0;
}
|
12,273 | /*For convenience, threadIdx is a 3-component vector, so that threads can be identified using a one-dimensional,
two-dimensional, or three-dimensional thread index, forming a one-dimensional, two-dimensional, or
three-dimensional block of threads, called a thread block. This provides a natural way to invoke computation
across the elements in a domain such as a vector, matrix, or volume.
The index of a thread and its thread ID relate to each other in a straightforward way: For a one-dimensional block,
they are the same; for a two-dimensional block of size (Dx, Dy),the thread ID of a thread of index (x, y) is (x + y Dx);
for a three-dimensional block of size (Dx, Dy, Dz), the thread ID of a thread of index (x, y, z) is (x + y Dx + z Dx Dy).
As an example, the following code adds two matrices A and B of size NxN and stores the result into matrix C:
Read more at: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#ixzz47mgjEZkJ
*/
#include <stdio.h>
#define N 1024
// Kernel definition
__device__ int A[N][N];
__device__ int B[N][N];
__device__ int C[N][N];
// Kernel: C = A + B element-wise over the N x N __device__ arrays.
// Uses the global thread coordinates with a bounds guard so the launch may
// span multiple blocks (the original indexed with threadIdx only, which is
// wrong for any grid with more than one block).
__global__ void MatAdd()
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < N && j < N)
        C[i][j] = A[i][j] + B[i][j];
}
int main()
{
    // A block may hold at most 1024 threads; the original requested
    // N*N = 1024*1024 threads in a single block, so the launch always
    // failed (silently, since the error was never checked).
    // Use 16x16 blocks and a grid covering the full N x N matrix.
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks((N + 15) / 16, (N + 15) / 16);
    MatAdd<<<numBlocks, threadsPerBlock>>>();
    // Wait for the kernel so the program does not exit mid-launch.
    cudaDeviceSynchronize();
}
/*There is a limit to the number of threads per block, since all threads of a block are expected to reside on the same processor
core and must share the limited memory resources of that core. On current GPUs, a thread block may contain up to 1024 threads.
However, a kernel can be executed by multiple equally-shaped thread blocks, so that the total number of threads is equal to the
number of threads per block times the number of blocks.
Blocks are organized into a one-dimensional, two-dimensional, or three-dimensional grid of thread blocks as illustrated by Figure
6. The number of thread blocks in a grid is usually dictated by the size of the data being processed or the number of processors
in the system, which it can greatly exceed.
Read more at: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#ixzz47rZpJHPt
Follow us: @GPUComputing on Twitter | NVIDIA on Facebook
*/ |
12,274 | // One thread per keypoint: extracts the clamped (2*patchSize)-sided patch
// around keypoint `index`, evaluates `fernNum` random ferns (pairwise pixel
// comparisons from allIndexList) to build a histogram in allProbablities,
// then normalizes it with additive (Laplace-style) regularization.
// NOTE(review): `index` is not bounds-checked against the keypoint count,
// and patch[1024] overflows if the clamped patch exceeds 1024 pixels
// (patchSize > ~15) -- confirm launch config and patchSize at call sites.
__global__ void calculateCount(int *keypoints ,const unsigned char *in, float *allProbablities, int *allIndexList, int patchSize, int width, int height, int fernNum, int fernSize, int lenght, int REGULARIZATION_TERM){
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Keypoints are packed (y, x) pairs.
int y = keypoints[index*2];
int x = keypoints[index*2+1];
// Clamp the patch window to the image bounds.
int startX = x - patchSize;
int endX = x + patchSize;
int startY = y - patchSize;
int endY = y + patchSize;
if(startX < 0 ){
startX = 0;
}
if (endX >= width ){
endX = width -1;
}
if(startY < 0 ){
startY = 0;
}
if (endY >= height){
endY = height -1;
}
int patchHeight = endX - startX;
int patchLenght = patchHeight * (endY - startY);
int patch[1024];
int count = 0;
// Copy the patch into thread-local storage; in[startX*height+i] implies a
// column-major image layout -- presumably intentional, verify caller.
for(int j= 0; j < patchHeight; j++){
for(int i = startY ; i < endY; i++){
patch[count] = in[startX*height+i];
count++;
}
startX = startX +1;
}
int I1, I2,num, decimalNum, index2;
// Each fern turns fernSize pixel comparisons into a bit pattern
// (decimalNum in [0, lenght)) and bumps that histogram bin.
for(int i = 0; i< fernNum ; i++){
decimalNum = 0;
num = lenght/2;
for(int j = 0; j < fernSize; j++){
index2 = (fernSize*i*2)+(j*2);
I1 = allIndexList[index2];
I2 = allIndexList[index2+1];
if(I1 < patchLenght && I2 < patchLenght){
if(patch[I1] < patch[I2]){
decimalNum = decimalNum +num;
}
num = num /2;
}
}
allProbablities[index*lenght+decimalNum] = allProbablities[index*lenght+decimalNum]+ 1;
}
// Normalize the histogram with additive smoothing.
for(int i = 0; i< lenght; i++){
float num2 = allProbablities[index*lenght+i];
float value = (num2 + REGULARIZATION_TERM) / (fernNum + lenght*REGULARIZATION_TERM);
allProbablities[index*lenght+i] = value;
}
} |
12,275 | #include "includes.h"
__global__ void square_array(float *ad, int N)
{
    // Square each of the N elements of ad in place, one thread per element.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N)
        return;
    float v = ad[i];
    ad[i] = v * v;
} |
12,276 | #include "includes.h"
// Single-block integer sum reduction: *single = sum(idata[0..ncols)).
// Requirements: one block, blockDim.x a power of two, ncols divisible by
// blockDim.x, and blockDim.x * sizeof(int) bytes of dynamic shared memory.
//
// Fix: the original started every thread at blockDim.x + threadIdx.x, so
// all threads summed the same overlapping slice and most of idata was
// never read. Each thread now accumulates the block-strided elements
// idata[tid], idata[tid + blockDim.x], ... (coalesced reads).
__global__ void iReduceSingle(int *idata, int *single, int ncols) {
int i;
unsigned int tid = threadIdx.x;
extern __shared__ int sdata[];
int colsPerThread = ncols/blockDim.x;
int myPart = 0;
for(i=0;i<colsPerThread;i++) {
myPart += idata[tid + i*blockDim.x];
}
sdata[tid]=myPart;
__syncthreads();
// Tree reduction with sequential addressing (replaces the divergent
// modulo test of the original; same result, fewer idle warps).
unsigned int s;
for(s=blockDim.x/2;s>0;s>>=1){
if(tid < s){
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
}
if(tid==0)*single=sdata[0];
} |
12,277 | #include <stdio.h>
// Kernel that does no work; exists to exercise launch machinery.
__global__ void
emptyKernel(){
}
// C-linkage host entry point: launches emptyKernel on one block of 256
// threads, waits for completion, and prints a confirmation line.
extern "C" void
call(){
    dim3 grid( 1, 1, 1);
    dim3 threads( 256, 1, 1);
    emptyKernel <<<grid, threads >>> ();
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the documented equivalent.
    cudaDeviceSynchronize();
    printf("Called\n");
}
|
12,278 | #include "includes.h"
// Naive dense matrix product over global memory:
// result = left * right, size x size, row-major, one thread per output cell.
__global__ void multiplyGlobal(unsigned const* left, unsigned const* right, unsigned* result, size_t size)
{
    auto r = blockIdx.y * blockDim.y + threadIdx.y;
    auto c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= size || c >= size)
        return;   // grid-tail guard
    auto acc = 0u;
    for (size_t k = 0; k < size; ++k)
        acc += left[r * size + k] * right[k * size + c];
    result[r * size + c] = acc;
} |
12,279 | // Gaussian-elimination step: for pivot row t, compute the column-t
// multipliers of every row below the pivot and store them in m_cuda.
// One thread per row below the pivot; Size x Size row-major matrices.
__global__ void Fan1(float *m_cuda, float *a_cuda, int Size, int t)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= Size - 1 - t) return;          // only Size-1-t rows remain
    int row = tid + t + 1;                    // first row below the pivot
    m_cuda[Size * row + t] = a_cuda[Size * row + t] / a_cuda[Size * t + t];
}
|
12,280 | // Designed by: Amir Yazdanbakhsh
// Date: March 26th - 2015
// Alternative Computing Technologies Lab.
// Georgia Institute of Technology
#include "stdlib.h"
#include <fstream>
#include <iostream>
#include <cstddef>
// Cuda Libraries
#include <cuda_runtime_api.h>
#include <cuda.h>
#define EPSILON 1e-12 // EPSILON represents the error buffer used to denote a hit
using namespace std;
/*
 * Part of the Moller-style interval-overlap triangle test: based on the
 * signs of the plane distances d0/d1/d2 (and their precomputed products
 * d0d1, d0d2), select which vertex lies alone on one side of the other
 * triangle's plane, and emit the interval parameters abc[] / x0x1[] used
 * later to build the scalar intersection interval.
 * Returns true only when all three distances are zero, i.e. the triangles
 * are coplanar and must be handled by coplanarTriTri instead.
 */
__device__ bool newComputeIntervals(float vv0, float vv1, float vv2, float d0, float d1, float d2, float d0d1, float d0d2, float abc[3], float x0x1[2])
{
if (d0d1 > 0.0f) {
// d0d2 <= 0 --> i.e. d0, d1 are on the same side, d2 on the other or on the plane
abc[0] = vv2;
abc[1] = (vv0 - vv2) * d2;
abc[2] = (vv1 - vv2) * d2;
x0x1[0] = d2 - d0;
x0x1[1] = d2 - d1;
} else if (d0d2 > 0.0f) {
// d0d1 <= 0
abc[0] = vv1;
abc[1] = (vv0 - vv1) * d1;
abc[2] = (vv2 - vv1) * d1;
x0x1[0] = d1 - d0;
x0x1[1] = d1 - d2;
} else if (d1 * d2 > 0.0f || d0 != 0.0f) {
// d0d1 <= 0 or d0 != 0
abc[0] = vv0;
abc[1] = (vv1 - vv0) * d0;
abc[2] = (vv2 - vv0) * d0;
x0x1[0] = d0 - d1;
x0x1[1] = d0 - d2;
} else if (d1 != 0.0f) {
abc[0] = vv1;
abc[1] = (vv0 - vv1) * d1;
abc[2] = (vv2 - vv1) * d1;
x0x1[0] = d1 - d0;
x0x1[1] = d1 - d2;
} else if (d2 != 0.0f) {
abc[0] = vv2;
abc[1] = (vv0 - vv2) * d2;
abc[2] = (vv1 - vv2) * d2;
x0x1[0] = d2 - d0;
x0x1[1] = d2 - d1;
} else {
// Triangles are coplanar
return true;
}
return false;
}
// 2D edge/edge intersection test in the projection plane selected by axes
// (i0, i1). Edge A starts at v0 with direction (Ax, Ay); edge B runs from
// u0 to u1. Returns true when the two segments intersect.
__device__ bool edgeEdgeTest(float v0[3], float u0[3], float u1[3], int i0, int i1, float Ax, float Ay)
{
    float Bx = u0[i0] - u1[i0];
    float By = u0[i1] - u1[i1];
    float Cx = v0[i0] - u0[i0];
    float Cy = v0[i1] - u0[i1];
    float f = Ay * Bx - Ax * By;
    float d = By * Cx - Bx * Cy;
    // First parameter must lie within [0, f] (sign-aware); f == 0 means
    // the segments are parallel and we report no intersection.
    bool dInRange = (f > 0 && d >= 0 && d <= f) || (f < 0 && d <= 0 && d >= f);
    if (!dInRange)
        return false;
    float e = Ax * Cy - Ay * Cx;
    // Second parameter must lie within the same sign-aware range.
    return (f > 0) ? (e >= 0 && e <= f) : (e <= 0 && e >= f);
}
// Point-in-triangle test in the projection plane (i0, i1): evaluate V0
// against the line through each edge of triangle (U0, U1, U2) and report
// containment when all three signed distances share a sign.
__device__ bool pointInTri(float V0[3], float U0[3], float U1[3], float U2[3], int i0, int i1)
{
    float d0, d1, d2;
    {
        // Edge U0 -> U1
        float a = U1[i1] - U0[i1];
        float b = -(U1[i0] - U0[i0]);
        float c = -a * U0[i0] - b * U0[i1];
        d0 = a * V0[i0] + b * V0[i1] + c;
    }
    {
        // Edge U1 -> U2
        float a = U2[i1] - U1[i1];
        float b = -(U2[i0] - U1[i0]);
        float c = -a * U1[i0] - b * U1[i1];
        d1 = a * V0[i0] + b * V0[i1] + c;
    }
    {
        // Edge U2 -> U0
        float a = U0[i1] - U2[i1];
        float b = -(U0[i0] - U2[i0]);
        float c = -a * U2[i0] - b * U2[i1];
        d2 = a * V0[i0] + b * V0[i1] + c;
    }
    return (d0 * d1) > 0.0 && (d0 * d2) > 0.0;
}
/*
 * 2D fallback for coplanar triangles: project both triangles onto the
 * axis-aligned plane where the normal component |n| is largest, test every
 * edge of (v0,v1,v2) against every edge of (u0,u1,u2), and finally check
 * whether either triangle is fully contained in the other.
 */
__device__ bool coplanarTriTri(float n[3], float v0[3], float v1[3], float v2[3], float u0[3], float u1[3], float u2[3])
{
float a[3];
short i0, i1;
a[0] = abs(n[0]);
a[1] = abs(n[1]);
a[2] = abs(n[2]);
// Select projection axes i0,i1 by dropping the component where |n| is
// largest (all four cases below reduce to exactly that).
if (a[0] > a[1]) {
if (a[0] > a[2]) {
i0 = 1;
i1 = 2;
} else {
i0 = 0;
i1 = 1;
}
} else {
if (a[2] > a[1]) {
i0 = 0;
i1 = 1;
} else {
i0 = 0;
i1 = 2;
}
}
// Test all edges of triangle 1 against edges of triangle 2
float aX = v1[i0] - v0[i0];
float aY = v1[i1] - v0[i1];
float bX = v2[i0] - v1[i0];
float bY = v2[i1] - v1[i1];
float cX = v0[i0] - v2[i0];
float cY = v0[i1] - v2[i1];
if ( edgeEdgeTest(v0, u0, u1, i0, i1, aX, aY) || edgeEdgeTest(v0, u1, u2, i0, i1, aX, aY) || edgeEdgeTest(v0, u2, u0, i0, i1, aX, aY) ||
edgeEdgeTest(v1, u0, u1, i0, i1, bX, bY) || edgeEdgeTest(v1, u1, u2, i0, i1, bX, bY) || edgeEdgeTest(v1, u2, u0, i0, i1, bX, bY) ||
edgeEdgeTest(v2, u0, u1, i0, i1, cX, cY) || edgeEdgeTest(v2, u1, u2, i0, i1, cX, cY) || edgeEdgeTest(v2, u2, u0, i0, i1, cX, cY) )
return true;
// Finally, test if either triangle is totally contained in the other
if (pointInTri(v0, u0, u1, u2, i0, i1) || pointInTri(u0, v0, v1, v2, i0, i1))
return true;
return false;
}
/*
 * Moller-style triangle-triangle intersection test.
 * Returns true when triangle (v0,v1,v2) intersects triangle (u0,u1,u2);
 * coplanar pairs are delegated to coplanarTriTri.
 *
 * Fix: the coplanarity robustness checks previously tested
 * (dX < 0 && dX > EPSILON), a condition that can never hold, so small
 * NEGATIVE distances were never snapped to zero. They now clamp any
 * distance with |d| < EPSILON, matching the intent of the comments.
 */
__device__ bool jmeint_kernel_impl(float v0[3], float v1[3], float v2[3], float u0[3], float u1[3], float u2[3])
{
    float e1[3], e2[3], n1[3], n2[3], d[3];
    float d1, d2;
    float du0, du1, du2, dv0, dv1, dv2;
    float du0du1, du0du2, dv0dv1, dv0dv2;
    float isect1[2];
    float isect2[2];
    short index;
    float vp0, vp1, vp2;
    float up0, up1, up2;
    float bb, cc, max;
    float xx, yy, xxyy, tmp;
    // Compute plane equation of triangle (v0,v1,v2)
    e1[0] = v1[0] - v0[0];
    e1[1] = v1[1] - v0[1];
    e1[2] = v1[2] - v0[2];
    e2[0] = v2[0] - v0[0];
    e2[1] = v2[1] - v0[1];
    e2[2] = v2[2] - v0[2];
    // Cross product: n1 = e1 x e2
    n1[0] = (e1[1] * e2[2]) - (e1[2] * e2[1]);
    n1[1] = (e1[2] * e2[0]) - (e1[0] * e2[2]);
    n1[2] = (e1[0] * e2[1]) - (e1[1] * e2[0]);
    // Plane equation 1: n1.X + d1 = 0
    d1 = -(n1[0] * v0[0] + n1[1] * v0[1] + n1[2] * v0[2]);
    // Signed distances of u0,u1,u2 to plane 1
    du0 = (n1[0] * u0[0] + n1[1] * u0[1] + n1[2] * u0[2]) + d1;
    du1 = (n1[0] * u1[0] + n1[1] * u1[1] + n1[2] * u1[2]) + d1;
    du2 = (n1[0] * u2[0] + n1[1] * u2[1] + n1[2] * u2[2]) + d1;
    // Coplanarity robustness check: snap near-zero distances to exactly 0
    if (fabsf(du0) < EPSILON)
        du0 = 0.0f;
    if (fabsf(du1) < EPSILON)
        du1 = 0.0f;
    if (fabsf(du2) < EPSILON)
        du2 = 0.0f;
    du0du1 = du0 * du1;
    du0du2 = du0 * du2;
    if (du0du1 > 0.0f && du0du2 > 0.0f) {
        // All 3 have same sign and their values are not equal to 0 --> no intersection
        return false;
    }
    // Compute plane equation of triangle (u0,u1,u2)
    e1[0] = u1[0] - u0[0];
    e1[1] = u1[1] - u0[1];
    e1[2] = u1[2] - u0[2];
    e2[0] = u2[0] - u0[0];
    e2[1] = u2[1] - u0[1];
    e2[2] = u2[2] - u0[2];
    // Cross product: n2 = e1 x e2
    n2[0] = (e1[1] * e2[2]) - (e1[2] * e2[1]);
    n2[1] = (e1[2] * e2[0]) - (e1[0] * e2[2]);
    n2[2] = (e1[0] * e2[1]) - (e1[1] * e2[0]);
    // Plane equation 2: n2.X + d2 = 0
    d2 = -(n2[0] * u0[0] + n2[1] * u0[1] + n2[2] * u0[2]);
    // Signed distances of v0,v1,v2 to plane 2
    dv0 = (n2[0] * v0[0] + n2[1] * v0[1] + n2[2] * v0[2]) + d2;
    dv1 = (n2[0] * v1[0] + n2[1] * v1[1] + n2[2] * v1[2]) + d2;
    dv2 = (n2[0] * v2[0] + n2[1] * v2[1] + n2[2] * v2[2]) + d2;
    // Coplanarity robustness check: snap near-zero distances to exactly 0
    if (fabsf(dv0) < EPSILON)
        dv0 = 0.0f;
    if (fabsf(dv1) < EPSILON)
        dv1 = 0.0f;
    if (fabsf(dv2) < EPSILON)
        dv2 = 0.0f;
    dv0dv1 = dv0 * dv1;
    dv0dv2 = dv0 * dv2;
    if (dv0dv1 > 0.0f && dv0dv2 > 0.0f) {
        // All 3 have same sign and their values are not equal to 0 --> no intersection
        return false;
    }
    // Compute direction of intersection line --> cross product: d = n1 x n2
    d[0] = (n1[1] * n2[2]) - (n1[2] * n2[1]);
    d[1] = (n1[2] * n2[0]) - (n1[0] * n2[2]);
    d[2] = (n1[0] * n2[1]) - (n1[1] * n2[0]);
    // Project onto the largest component of d (simplified projection)
    index = 0;
    max = fabsf(d[0]);
    bb = fabsf(d[1]);
    cc = fabsf(d[2]);
    if (bb > max) {
        max = bb;
        index = 1;
    }
    if (cc > max) {
        max = cc;
        vp0 = v0[2];
        vp1 = v1[2];
        vp2 = v2[2];
        up0 = u0[2];
        up1 = u1[2];
        up2 = u2[2];
    } else if (index == 1) {
        vp0 = v0[1];
        vp1 = v1[1];
        vp2 = v2[1];
        up0 = u0[1];
        up1 = u1[1];
        up2 = u2[1];
    } else {
        vp0 = v0[0];
        vp1 = v1[0];
        vp2 = v2[0];
        up0 = u0[0];
        up1 = u1[0];
        up2 = u2[0];
    }
    // Compute interval for triangle 1 (true --> coplanar fallback)
    float abc[3];
    float x0x1[2];
    if (newComputeIntervals(vp0, vp1, vp2, dv0, dv1, dv2, dv0dv1, dv0dv2, abc, x0x1)) {
        return coplanarTriTri(n1, v0, v1, v2, u0, u1, u2);
    }
    // Compute interval for triangle 2 (true --> coplanar fallback)
    float def[3];
    float y0y1[2];
    if (newComputeIntervals(up0, up1, up2, du0, du1, du2, du0du1, du0du2, def, y0y1)) {
        return coplanarTriTri(n1, v0, v1, v2, u0, u1, u2);
    }
    // Build both scalar intervals on the intersection line (division-free form)
    xx = x0x1[0] * x0x1[1];
    yy = y0y1[0] * y0y1[1];
    xxyy = xx * yy;
    tmp = abc[0] * xxyy;
    isect1[0] = tmp + abc[1] * x0x1[1] * yy;
    isect1[1] = tmp + abc[2] * x0x1[0] * yy;
    tmp = def[0] * xxyy;
    isect2[0] = tmp + def[1] * xx * y0y1[1];
    isect2[1] = tmp + def[2] * xx * y0y1[0];
    // Sort isect1 and isect2
    if (isect1[0] > isect1[1]) {
        float f = isect1[0];
        isect1[0] = isect1[1];
        isect1[1] = f;
    }
    if (isect2[0] > isect2[1]) {
        float f = isect2[0];
        isect2[0] = isect2[1];
        isect2[1] = f;
    }
    // Triangles intersect iff the two intervals overlap
    if (isect1[1] < isect2[0] || isect2[1] < isect1[0])
    {
        return false;
    }
    return true;
}
// One thread per triangle pair: loads the six vertices of pair `idx` from
// the flat device arrays and writes the intersection flag.
// NOTE: the actual test runs only when `orig_code` is defined at compile
// time (Parrot approximate-computing harness); otherwise intersect_d[idx]
// is left unwritten.
__global__ void jmeint_kernel(float *v0_d, float *v1_d, float *v2_d, float *u0_d, float*u1_d, float*u2_d, bool* intersect_d, int size)
{
// Flatten a (possibly 2D) grid/block launch into a linear pair index.
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int idx = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
float v0[3];
float v1[3];
float v2[3];
float u0[3];
float u1[3];
float u2[3];
if(idx < size)
{
v0[0] = v0_d[idx * 3 + 0];
v0[1] = v0_d[idx * 3 + 1];
v0[2] = v0_d[idx * 3 + 2];
v1[0] = v1_d[idx * 3 + 0];
v1[1] = v1_d[idx * 3 + 1];
v1[2] = v1_d[idx * 3 + 2];
v2[0] = v2_d[idx * 3 + 0];
v2[1] = v2_d[idx * 3 + 1];
v2[2] = v2_d[idx * 3 + 2];
u0[0] = u0_d[idx * 3 + 0];
u0[1] = u0_d[idx * 3 + 1];
u0[2] = u0_d[idx * 3 + 2];
u1[0] = u1_d[idx * 3 + 0];
u1[1] = u1_d[idx * 3 + 1];
u1[2] = u1_d[idx * 3 + 2];
u2[0] = u2_d[idx * 3 + 0];
u2[1] = u2_d[idx * 3 + 1];
u2[2] = u2_d[idx * 3 + 2];
#if defined(orig_code)
intersect_d[idx] = jmeint_kernel_impl(v0, v1, v2, u0, u1, u2);
#endif
}
}
/*
 * Host driver: reads triangle-pair coordinates from argv[1], runs
 * jmeint_kernel over every pair, and writes one 0/1 intersection flag per
 * line to argv[2].
 *
 * Fixes vs. original: the grid size now rounds up (data_size/512 silently
 * dropped the tail pairs and produced an EMPTY grid for data_size < 512 --
 * the kernel's idx < size guard absorbs the overshoot); failure to open
 * the input file aborts instead of running on data_size == 0; all nothrow
 * allocations, including `intersect`, are now checked.
 */
int main(int argc, char* argv[])
{
    if(argc != 3)
    {
        std::cerr << "Usage: ./jmeint.out <input file locations> <output file>" << std::endl;
        exit(EXIT_FAILURE);
    }
    float (*v0)[3];
    float (*v1)[3];
    float (*v2)[3];
    float (*u0)[3];
    float (*u1)[3];
    float (*u2)[3];
    bool *intersect;
    cudaError_t cudaStatus;
    int data_size = 0;
    // process the files
    ifstream locations_in_file (argv[1]);
    ofstream intersect_out_file (argv[2]);
    if(!locations_in_file.is_open())
    {
        std::cerr << "Cannot open input file: " << argv[1] << std::endl;
        exit(EXIT_FAILURE);
    }
    locations_in_file >> data_size;
    std::cout << "# Data Size = " << data_size << std::endl;
    // allocate host memory (nothrow -> check every pointer explicitly)
    intersect = new (nothrow) bool[data_size];
    v0 = new (nothrow) float[data_size][3];
    v1 = new (nothrow) float[data_size][3];
    v2 = new (nothrow) float[data_size][3];
    u0 = new (nothrow) float[data_size][3];
    u1 = new (nothrow) float[data_size][3];
    u2 = new (nothrow) float[data_size][3];
    if(intersect == NULL || v0 == NULL || v1 == NULL || v2 == NULL ||
       u0 == NULL || u1 == NULL || u2 == NULL)
    {
        std::cerr << "Memory allocation fails!!!" << std::endl;
        exit(EXIT_FAILURE);
    }
    // Prepare GPU timing events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // add data to the arrays: 6 vertices (18 floats) per triangle pair
    int loc_index = 0;
    while(loc_index < data_size)
    {
        locations_in_file >> v0[loc_index][0] >> v0[loc_index][1] >> v0[loc_index][2]
                          >> v1[loc_index][0] >> v1[loc_index][1] >> v1[loc_index][2]
                          >> v2[loc_index][0] >> v2[loc_index][1] >> v2[loc_index][2]
                          >> u0[loc_index][0] >> u0[loc_index][1] >> u0[loc_index][2]
                          >> u1[loc_index][0] >> u1[loc_index][1] >> u1[loc_index][2]
                          >> u2[loc_index][0] >> u2[loc_index][1] >> u2[loc_index][2];
        loc_index++;
    }
    std::cout << "# Coordinates are read from file..." << std::endl;
    // memory allocations on the device
    float *v0_d;
    float *v1_d;
    float *v2_d;
    float *u0_d;
    float *u1_d;
    float *u2_d;
    bool *intersect_d;
    cudaMalloc((void**) &v0_d, data_size * 3 * sizeof(float));
    cudaMalloc((void**) &v1_d, data_size * 3 * sizeof(float));
    cudaMalloc((void**) &v2_d, data_size * 3 * sizeof(float));
    cudaMalloc((void**) &u0_d, data_size * 3 * sizeof(float));
    cudaMalloc((void**) &u1_d, data_size * 3 * sizeof(float));
    cudaMalloc((void**) &u2_d, data_size * 3 * sizeof(float));
    cudaMalloc((void**) &intersect_d, data_size * sizeof(bool));
    std::cout << "# Memory allocation on GPU is done..." << std::endl;
    cudaMemcpy(v0_d, v0, data_size * 3 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(v1_d, v1, data_size * 3 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(v2_d, v2, data_size * 3 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(u0_d, u0, data_size * 3 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(u1_d, u1, data_size * 3 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(u2_d, u2, data_size * 3 * sizeof(float), cudaMemcpyHostToDevice);
    std::cout << "# Data are transfered to GPU..." << std::endl;
    dim3 dimBlock ( 512, 1 );
    // Ceil-divide so the tail (and data_size < 512) is still covered.
    dim3 dimGrid ( (data_size + 511) / 512, 1 );
    cudaEventRecord(start, 0);
#pragma parrot.start("jmeint_kernel")
    jmeint_kernel<<<dimGrid, dimBlock>>>(v0_d, v1_d, v2_d, u0_d, u1_d, u2_d, intersect_d, data_size);
#pragma parrot.end("jmeint_kernel")
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        std::cout << "Something was wrong! Error code: " << cudaStatus << std::endl;
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    std::cout << "# Elapsed Time in `jmeint` kernel = " << elapsedTime << std::endl;
    std::cout << "# GPU computation is done ..." << std::endl;
    cudaMemcpy(intersect, intersect_d, data_size * sizeof(bool), cudaMemcpyDeviceToHost);
    for(int i = 0; i < data_size; i++)
    {
        intersect_out_file << intersect[i];
        intersect_out_file << std::endl;
    }
    // close files
    locations_in_file.close();
    intersect_out_file.close();
    // de-allocate the memory
    delete[] v0;
    delete[] v1;
    delete[] v2;
    delete[] u0;
    delete[] u1;
    delete[] u2;
    delete[] intersect;
    // de-allocate cuda memory
    cudaFree(v0_d);
    cudaFree(v1_d);
    cudaFree(v2_d);
    cudaFree(u0_d);
    cudaFree(u1_d);
    cudaFree(u2_d);
    cudaFree(intersect_d);
    std::cout << "Thank you..." << std::endl;
} |
12,281 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define TILE_DIM 32
#define BLOCK_ROWS 32
#define CHECK(call) \
{ \
cudaError_t err = call; \
if (err != cudaSuccess) \
{ \
fprintf(stderr, "Failed with error code %s\n", cudaGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
}
// Tiled transpose (odata = transpose of width x height idata) that remaps
// block indices along diagonals before use -- a classic trick to spread
// tile accesses across DRAM partitions ("partition camping" avoidance).
// Assumes blockDim == (TILE_DIM, BLOCK_ROWS) and dimensions that are
// multiples of TILE_DIM.
__global__
void transposeDiagonalBlocks(float *odata, float *idata, int width, int height)
{
__shared__ float tile[TILE_DIM][TILE_DIM + 1];   // +1 pad avoids shared-memory bank conflicts
int blockIdx_x, blockIdx_y;
// Diagonal reordering of the (x, y) block coordinates.
if (width == height)
{
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
}
else
{
int bid = blockIdx.y * gridDim.x + blockIdx.x;
blockIdx_y = bid % gridDim.y;
blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
}
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = yIndex * width + xIndex;
int i;
// Stage the tile into shared memory (coalesced reads from idata).
for (i = 0; i < TILE_DIM; i += BLOCK_ROWS)
{
tile[threadIdx.y + i][threadIdx.x] = idata[index_in + i * width];
}
__syncthreads();
// Write the transposed tile to the mirrored block position.
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = yIndex * height + xIndex;
for (i = 0; i < TILE_DIM; i += BLOCK_ROWS)
{
odata[index_out + i * height] = tile[threadIdx.x][threadIdx.y + i];
}
}
__global__
void transposeNoBankConflicts(float *odata, float *idata, const int nx, const int ny)
{
    // Tiled matrix transpose staged through padded shared memory; the +1
    // column keeps transposed reads on distinct banks. Assumes
    // blockDim == (TILE_DIM, BLOCK_ROWS) and a grid covering the matrix.
    // (nx/ny are unused; the effective width comes from the grid.)
    __shared__ float tile[TILE_DIM][TILE_DIM + 1];
    int xIn = blockIdx.x * TILE_DIM + threadIdx.x;
    int yIn = blockIdx.y * TILE_DIM + threadIdx.y;
    int width = gridDim.x * TILE_DIM;
    // Coalesced load of the input tile.
    for (int r = 0; r < TILE_DIM; r += BLOCK_ROWS)
        tile[threadIdx.y + r][threadIdx.x] = idata[(yIn + r) * width + xIn];
    __syncthreads();
    // Coalesced store of the transposed tile at the mirrored block position.
    int xOut = blockIdx.y * TILE_DIM + threadIdx.x;
    int yOut = blockIdx.x * TILE_DIM + threadIdx.y;
    for (int r = 0; r < TILE_DIM; r += BLOCK_ROWS)
        odata[(yOut + r) * width + xOut] = tile[threadIdx.x][threadIdx.y + r];
}
// "Fine-grained" transpose: each tile is transposed internally but written
// back to the same block position, so the output is only a per-tile
// transpose of the input. Expects blockDim == (TILE_DIM, BLOCK_ROWS).
__global__
void transposeFineGrained(float *odata, float *idata, int width, int height)
{
    __shared__ float staging[TILE_DIM][TILE_DIM + 1];

    const int col = blockIdx.x * TILE_DIM + threadIdx.x;
    const int row = blockIdx.y * TILE_DIM + threadIdx.y;
    const int base = row * width + col;

    // Load the tile (a single pass when TILE_DIM == BLOCK_ROWS).
    for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS)
    {
        staging[threadIdx.y + i][threadIdx.x] = idata[base + i * width];
    }

    __syncthreads();

    // Store back to the same block location with x/y swapped in the tile.
    for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS)
    {
        odata[base + i * height] = staging[threadIdx.x][threadIdx.y + i];
    }
}
// "Coarse-grained" transpose: tiles are moved to their transposed block
// position but elements inside each tile keep their layout, so the output
// is only a block-level transpose. Expects blockDim == (TILE_DIM, BLOCK_ROWS).
__global__
void transposeCoarseGrained(float *odata, float *idata, int width, int height)
{
    __shared__ float staging[TILE_DIM][TILE_DIM + 1];

    int col = blockIdx.x * TILE_DIM + threadIdx.x;
    int row = blockIdx.y * TILE_DIM + threadIdx.y;
    const int index_in = row * width + col;

    // Stage the tile (one pass when TILE_DIM == BLOCK_ROWS).
    for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS)
    {
        staging[threadIdx.y + i][threadIdx.x] = idata[index_in + i * width];
    }

    __syncthreads();

    // Only the block coordinates are swapped for the write; the intra-tile
    // layout is preserved.
    col = blockIdx.y * TILE_DIM + threadIdx.x;
    row = blockIdx.x * TILE_DIM + threadIdx.y;
    const int index_out = row * height + col;

    for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS)
    {
        odata[index_out + i * height] = staging[threadIdx.y + i][threadIdx.x];
    }
}
// Tiled transpose with coalesced global reads and writes. The shared tile
// is deliberately unpadded, so the transposed shared-memory reads incur
// bank conflicts (see transposeNoBankConflicts for the padded variant).
// Expects blockDim == (TILE_DIM, BLOCK_ROWS).
__global__
void transposeCoalesced(float *odata, float *idata, const int nx, const int ny)
{
    __shared__ float tile[TILE_DIM][TILE_DIM];

    int col = blockIdx.x * TILE_DIM + threadIdx.x;
    int row = blockIdx.y * TILE_DIM + threadIdx.y;
    const int width = gridDim.x * TILE_DIM;

    // Stage one tile of the input in shared memory.
    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
    {
        tile[threadIdx.y + j][threadIdx.x] = idata[(row + j) * width + col];
    }

    __syncthreads();

    // Write to the transposed block position, exchanging thread x/y when
    // reading the tile back.
    col = blockIdx.y * TILE_DIM + threadIdx.x;
    row = blockIdx.x * TILE_DIM + threadIdx.y;
    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
    {
        odata[(row + j) * width + col] = tile[threadIdx.x][threadIdx.y + j];
    }
}
// Driver: fills an 8192x8192 float matrix, runs the transpose kernel
// selected by argv[1] (0..3, default: diagonal-blocks), and verifies the
// result on the host for the fully-transposing variants.
int main(int argc, char **argv)
{
    // Report the device we will run on.
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting transpose at ", argv[0]);
    printf("device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    // Square matrix; block = (TILE_DIM, BLOCK_ROWS).
    int nx = 1 << 13;
    int ny = 1 << 13;
    int blockx = TILE_DIM;
    int blocky = BLOCK_ROWS;
    size_t nBytes = nx * ny * sizeof(float);
    dim3 block(blockx, blocky, 1);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y, 1);

    float *h_A = (float *) malloc(nBytes);
    float *gpuRef = (float *) malloc(nBytes);
    if (h_A == NULL || gpuRef == NULL)
    {
        // fix: malloc results were used unchecked (256 MB each).
        fprintf(stderr, "Host allocation failed.\n");
        exit(EXIT_FAILURE);
    }
    int i;
    srand(time(0));
    for (i = 0; i < nx * ny; ++i)
    {
        h_A[i] = rand() % 10000;
    }
    float *d_A, *d_C;
    CHECK(cudaMalloc((float **) &d_A, nBytes));
    CHECK(cudaMalloc((float **) &d_C, nBytes));
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));

    // Select the kernel variant from the optional first CLI argument.
    void (*kernel)(float *, float *, int, int);
    int iKernel = 4;
    const char *kernelName;   // fix: string literals need const char *
    if (argc > 1)             // fix: was `argc > 0` (always true), which
    {                         // dereferenced a missing argv[1]
        iKernel = atoi(argv[1]);
    }
    switch (iKernel)
    {
    case 0:
        kernel = &transposeCoalesced;
        kernelName = "Coalesced_transpose";
        break;
    case 1:
        kernel = &transposeCoarseGrained;
        kernelName = "Coarse_Grained_Transpose";
        break;
    case 2:
        kernel = &transposeFineGrained;
        kernelName = "Fine_Grained_Transpose";
        break;
    case 3:
        kernel = &transposeNoBankConflicts;
        kernelName = "Transpose_without_Bank_Conflicts";
        break;
    default:
        kernel = &transposeDiagonalBlocks;
        kernelName = "Transpose_with_Diagonal_Blocks";
    }
    kernel<<<grid, block>>>(d_C, d_A, nx, ny);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));

    // Verify the full transposition. The coarse/fine-grained variants
    // (1 and 2) only transpose partially, so they are skipped.
    int j;
    if (iKernel != 1 && iKernel != 2)
    {
        for (i = 0; i < nx; ++i)
        {
            for (j = 0; j < ny; ++j)
            {
                if (fabs(gpuRef[i * nx + j] - h_A[j * ny + i]) > 1e-5)
                {
                    fprintf(stderr, "Error in the matrix transposition kernel %s.\n", kernelName);
                    exit(EXIT_FAILURE);
                }
            }
        }
    }
    printf("TEST PASSED with kernel %s!\n", kernelName);

    // fix: release host and device buffers (previously leaked).
    free(h_A);
    free(gpuRef);
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_C));
    return 0;
}
12,282 |
#include <iostream>
#include "cuda.h"
using Real = double;
// Test wrapper to run a function multiple times.
// Runs perf_func n_burn times as warm-up, then n_perf timed iterations,
// and returns the elapsed wall-clock time of the timed part in seconds.
// Timing uses CUDA events, so asynchronous kernel launches inside
// perf_func are measured correctly.
template<typename PerfFunc>
float kernel_timer_wrapper(const int n_burn, const int n_perf, PerfFunc perf_func){
  //Initialize the timer and test
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  for( int i_run = 0; i_run < n_burn + n_perf; i_run++){
    if(i_run == n_burn){
      //Burn in time is over, start timing
      cudaEventRecord(start);
    }
    //Run the function timing performance
    perf_func();
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  float milliseconds = 0;
  cudaEventElapsedTime(&milliseconds, start, stop);
  // fix: destroy the events — the original leaked a pair per call.
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return milliseconds/1000.;
}
// Doubles every element of a flat 2D (n_var x n_grid) array stored
// row-major: threadIdx/blockIdx.x cover the grid dimension, blockIdx.y
// selects the variable row.
__global__ void k_array2d(Real* array2d_in, Real* array2d_out){
  const int cell = threadIdx.x + blockDim.x*blockIdx.x;
  const int var = blockIdx.y;
  const int row_len = blockDim.x*gridDim.x;
  array2d_out[var*row_len + cell] = 2.*array2d_in[var*row_len + cell];
}
// Doubles every element, addressing the data through a device-side table
// of per-variable array pointers; blockIdx.y selects the variable.
__global__ void k_array_of_array1d(Real** array_of_array1d_in, Real** array_of_array1d_out){
  const int cell = threadIdx.x + blockDim.x*blockIdx.x;
  const int var = blockIdx.y;
  array_of_array1d_out[var][cell] = 2.*array_of_array1d_in[var][cell];
}
// Benchmark: compares a flat 2D layout against an array-of-arrays layout
// for a simple "double every element" kernel.
// CLI: n_var n_grid n_run (n_grid is divided by 64 threads per block, so
// it is assumed to be a multiple of 64 — TODO confirm with callers).
int main(int argc, char* argv[]) {
  // fix: argv[1..3] were dereferenced without checking argc.
  if (argc < 4) {
    std::cout << "usage: " << argv[0] << " n_var n_grid n_run" << std::endl;
    return 1;
  }
  std::size_t pos;
  const int n_var = std::stoi(argv[1],&pos);
  const int n_grid = std::stoi(argv[2],&pos);
  const int n_run = std::stoi(argv[3],&pos);
  const int threads_per_block = 64;
  const dim3 cuda_grid(n_grid/threads_per_block,n_var,1);
  const dim3 cuda_block(threads_per_block,1,1);

  //Setup a raw 2D view (one contiguous n_var x n_grid buffer)
  Real* d_array2d_in;
  cudaMalloc(&d_array2d_in, sizeof(Real)*n_var*n_grid);
  Real* d_array2d_out;
  cudaMalloc(&d_array2d_out, sizeof(Real)*n_var*n_grid);
  float time_array2d = kernel_timer_wrapper( n_run, n_run,
    [&] () {
      k_array2d<<< cuda_grid, cuda_block >>>
        (d_array2d_in, d_array2d_out);
  });

  //Setup an array of arrays
  //Array of arrays on device: a table of per-variable device pointers
  CUdeviceptr* d_array_of_array1d_in;
  cudaMalloc(&d_array_of_array1d_in, sizeof(CUdeviceptr)*n_var);
  CUdeviceptr* d_array_of_array1d_out;
  cudaMalloc(&d_array_of_array1d_out, sizeof(CUdeviceptr)*n_var);
  //Host-side staging tables holding the same device pointers
  CUdeviceptr* h_array_of_array1d_in = (CUdeviceptr*) malloc(sizeof(CUdeviceptr)*n_var);
  CUdeviceptr* h_array_of_array1d_out = (CUdeviceptr*) malloc(sizeof(CUdeviceptr)*n_var);
  //Malloc each 1d array
  for(int i = 0; i < n_var; i++) {
    cudaMalloc((void**)(h_array_of_array1d_in+i ), n_grid * sizeof(Real));
    cudaMalloc((void**)(h_array_of_array1d_out+i), n_grid * sizeof(Real));
  }
  //Publish the pointer tables to the device
  cudaMemcpy(d_array_of_array1d_in, h_array_of_array1d_in, sizeof(CUdeviceptr) * n_var, cudaMemcpyHostToDevice);
  cudaMemcpy(d_array_of_array1d_out, h_array_of_array1d_out, sizeof(CUdeviceptr) * n_var, cudaMemcpyHostToDevice);
  double time_array_of_array1d = kernel_timer_wrapper( n_run, n_run,
    [&] () {
      k_array_of_array1d<<< cuda_grid, cuda_block >>>
        ( (Real**) d_array_of_array1d_in, (Real**) d_array_of_array1d_out);
  });

  double cell_cycles_per_second_array2d = static_cast<double>(n_grid)*static_cast<double>(n_run)/time_array2d;
  double cell_cycles_per_second_array_of_array1d = static_cast<double>(n_grid)*static_cast<double>(n_run)/time_array_of_array1d;
  std::cout<< n_var << " " << n_grid << " " << n_run << " " << time_array2d << " " << time_array_of_array1d << " "
    << cell_cycles_per_second_array2d << " " << cell_cycles_per_second_array_of_array1d << std::endl;

  //free each 1d array
  //fix: free the device buffers the tables point to — the original passed
  //the host table addresses (h_... + i) to cudaFree, which is invalid.
  for(int i = 0; i < n_var; i++) {
    cudaFree((void*)h_array_of_array1d_in[i]);
    cudaFree((void*)h_array_of_array1d_out[i]);
  }
  free(h_array_of_array1d_in);
  free(h_array_of_array1d_out);
  cudaFree(d_array_of_array1d_in);
  cudaFree(d_array_of_array1d_out);
  cudaFree(d_array2d_in);
  cudaFree(d_array2d_out);
  return 0;
}
|
12,283 | // The boilerplatte code is taken from tuwel
#include <cuda_runtime.h>
#include <sys/time.h>
#include <cuda_profiler_api.h>
#include "main.cuh"
#include "helper.cuh"
#include "GPUStream.cuh"
#include "StreamFunctions.cuh"
#include "StreamFunctionsThrust.cuh"
// Placeholder for timing experiments; intentionally empty.
void experimental_time(){
}
// Exercises count() on two small unit streams and prints both results.
// NOTE(review): the result buffers are sized to 6 entries by hand —
// presumably count() produces size+1 outputs; confirm against count().
void test_count(){
    printf("count() test\n");

    // Stream A has events at t=0..4, stream B at t=1..5.
    int *events_a = (int*)malloc(5*sizeof(int));
    int *events_b = (int*)malloc(5*sizeof(int));
    for (int i = 0; i < 5; i++) {
        events_a[i] = i;
        events_b[i] = i + 1;
    }

    std::shared_ptr<GPUUnitStream> stream_a(new GPUUnitStream(events_a, 5));
    std::shared_ptr<GPUUnitStream> stream_b(new GPUUnitStream(events_b, 5));
    printf("made inputs\n");
    stream_a->copy_to_device();
    stream_b->copy_to_device();

    std::shared_ptr<GPUIntStream> counted_a = count(stream_a);
    std::shared_ptr<GPUIntStream> counted_b = count(stream_b);

    // Provide host-side buffers before copying the results back.
    counted_a->host_offset = (int*)malloc(sizeof(int));
    counted_a->host_timestamp = (int*)malloc(6*sizeof(int));
    counted_a->host_values = (int*)malloc(6*sizeof(int));
    counted_a->size = 6;
    counted_b->host_offset = (int*)malloc(sizeof(int));
    counted_b->host_timestamp = (int*)malloc(6*sizeof(int));
    counted_b->host_values = (int*)malloc(6*sizeof(int));
    counted_b->size = 6;
    counted_a->copy_to_host();
    counted_b->copy_to_host();

    printf("RESULT 1:\n");
    counted_a->print();
    printf("RESULT 2:\n");
    counted_b->print();
}
// Exercises slift() (merge variant) on two int streams and prints the
// merged result.
void test_slift(){
    printf("slift test\n");
    /*
    Combined event trace being modelled:
    0: y = 1
    1: z = 2
    1: y = 2
    2: y = 2
    3: z = 2
    3: y = 2
    50: y = 2
    51: z = 10
    */
    const int nx = 5;
    const int ny = 3;
    int *x_values = (int*)malloc(nx*sizeof(int));
    int *y_values = (int*)malloc(ny*sizeof(int));
    int *x_times = (int*)malloc(nx*sizeof(int));
    int *y_times = (int*)malloc(ny*sizeof(int));

    const int xt[] = {0, 1, 2, 3, 50};
    const int xv[] = {1, 2, 2, 2, 2};
    const int yt[] = {1, 3, 51};
    const int yv[] = {2, 2, 10};
    for (int i = 0; i < nx; i++) {
        x_times[i] = xt[i];
        x_values[i] = xv[i];
    }
    for (int i = 0; i < ny; i++) {
        y_times[i] = yt[i];
        y_values[i] = yv[i];
    }

    std::shared_ptr<GPUIntStream> x(new GPUIntStream(x_times, x_values, nx));
    std::shared_ptr<GPUIntStream> y(new GPUIntStream(y_times, y_values, ny));
    x->copy_to_device();
    y->copy_to_device();

    std::shared_ptr<GPUIntStream> res = slift(x, y, MRG);

    // Allocate host buffers sized by the result before copying it back.
    res->host_offset = (int*)malloc(sizeof(int));
    res->host_timestamp = (int*)malloc(res->size*sizeof(int));
    res->host_values = (int*)malloc(res->size*sizeof(int));
    res->copy_to_host();
    res->print();
}
// Entry point: binds to device 0, announces it, and runs the slift test.
int main(int argc, char **argv) {
    printf("%s Starting...\n", argv[0]);

    // set up device
    const int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);

    test_slift();
    //test_count();
    return(0);
}
|
12,284 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
// A single (x, y) observation used by the least-squares line fit.
typedef struct point_t {
double x;
double y;
} point_t;
// Number of observations in the global `data` table.
int n_data = 1000;
// Device-side copy of the dataset size (not referenced by d_rms_error,
// which relies on the launch configuration instead).
__device__ int d_n_data = 1000;
point_t data[] = {
{70.49,130.80},{69.15,120.96},{73.09,128.52},{86.47,121.82},
{79.33,135.78},{82.97,146.94},{78.52,137.85},{73.39,136.96},
{75.61,114.34},{79.52,146.23},{76.70,133.10},{65.33,122.13},
{79.50,129.38},{85.38,157.06},{72.33,133.95},{35.91,75.40},
{80.77,143.67},{52.68,89.78},{36.13,77.22},{83.90,142.29},
{59.04,106.05},{71.12,130.59},{32.06,71.46},{40.77,93.83},
{46.91,81.58},{94.85,156.72},{12.06,43.28},{ 0.71,30.59},
{21.39,61.08},{88.48,157.99},{50.25,118.75},{31.08,96.31},
{58.31,100.58},{27.50,76.70},{10.69,38.32},{95.66,150.32},
{15.97,68.61},{80.36,139.70},{71.97,130.69},{ 7.44,39.10},
{74.91,138.56},{ 5.87,38.76},{38.96,94.86},{ 8.20,38.92},
{57.23,103.56},{29.79,82.30},{64.80,112.26},{20.84,60.04},
{82.50,136.68},{12.69,56.76},{ 7.73,41.58},{79.38,150.25},
{99.73,182.45},{64.77,107.66},{38.09,78.58},{75.72,143.27},
{93.61,171.88},{46.59,116.39},{31.18,76.09},{89.36,167.09},
{57.97,100.04},{30.65,59.97},{57.40,107.18},{78.58,141.45},
{28.35,66.02},{72.36,132.13},{12.30,50.43},{42.29,86.56},
{42.23,74.52},{91.38,147.51},{35.52,95.32},{75.56,144.53},
{92.61,164.53},{21.57,81.55},{81.52,146.53},{62.55,108.13},
{45.86,94.30},{27.38,56.80},{98.47,164.28},{79.60,132.17},
{49.49,101.96},{99.59,171.38},{91.11,158.91},{52.61,99.92},
{51.00,105.16},{46.34,97.15},{60.72,90.68},{53.70,116.41},
{16.92,71.76},{75.14,129.96},{56.44,114.93},{63.62,122.81},
{87.88,145.18},{93.38,150.65},{24.15,70.67},{70.89,125.06},
{57.82,120.58},{27.41,77.74},{35.24,76.88},{75.73,146.45},
{72.88,135.76},{56.38,118.97},{ 9.35,53.94},{ 6.65,43.07},
{92.78,145.83},{60.23,103.33},{82.59,138.65},{82.59,147.20},
{ 5.38,47.28},{ 9.85,58.44},{45.16,104.55},{20.85,57.53},
{93.27,159.89},{56.15,94.30},{34.84,84.45},{49.78,105.58},
{24.81,73.02},{12.36,45.98},{51.76,116.24},{14.80,50.91},
{58.76,114.94},{ 4.65,34.34},{74.99,126.14},{45.12,99.17},
{75.16,120.98},{20.96,59.97},{62.03,120.94},{29.02,57.61},
{50.10,109.93},{25.70,67.21},{53.37,121.01},{69.83,106.86},
{98.40,167.39},{62.79,103.27},{73.46,129.78},{94.55,146.92},
{71.17,128.64},{ 9.89,61.96},{71.64,99.54},{ 4.08,39.81},
{86.30,139.84},{57.12,119.67},{23.03,69.35},{13.05,50.76},
{87.04,165.95},{ 6.30,42.04},{46.82,111.49},{29.52,74.86},
{57.05,102.30},{44.34,107.69},{85.27,135.83},{10.74,47.09},
{45.03,90.95},{35.25,98.93},{49.98,101.17},{62.33,110.82},
{83.44,128.79},{49.31,80.08},{98.90,162.01},{73.81,113.52},
{17.62,50.47},{63.13,116.08},{13.46,51.40},{27.67,70.81},
{ 5.54,38.33},{83.67,145.25},{49.77,93.82},{44.96,117.71},
{90.09,165.72},{29.28,76.51},{ 2.22,36.30},{ 6.36,32.50},
{96.08,156.79},{62.72,119.35},{ 2.24,32.55},{85.64,154.80},
{76.93,148.22},{ 3.74,32.51},{80.07,129.92},{ 7.57,47.93},
{ 9.00,52.97},{72.69,123.67},{72.76,149.85},{45.74,103.78},
{66.00,121.69},{59.09,124.34},{ 4.60,47.04},{69.41,133.60},
{ 9.09,63.43},{45.74,103.93},{56.63,111.44},{76.34,136.06},
{97.77,165.84},{80.20,147.33},{16.71,43.05},{13.95,65.55},
{96.06,172.59},{ 5.44,48.49},{12.06,58.95},{36.61,73.82},
{29.29,68.38},{37.42,90.24},{50.49,95.83},{47.45,110.52},
{40.96,92.52},{38.24,78.03},{55.77,105.52},{42.17,93.48},
{40.53,82.97},{22.55,73.32},{20.23,65.30},{ 1.88,54.37},
{ 6.84,45.82},{73.05,130.63},{83.94,158.91},{90.96,161.00},
{10.94,41.62},{78.06,146.69},{45.97,112.26},{79.55,133.24},
{ 0.25,28.96},{98.80,171.38},{95.77,176.06},{ 0.82,31.52},
{40.03,96.99},{22.06,81.54},{21.25,57.42},{26.64,60.34},
{ 3.87,29.19},{79.36,135.16},{88.96,145.04},{90.13,146.75},
{42.63,85.20},{68.11,135.21},{94.64,163.26},{31.01,76.30},
{78.95,132.19},{89.73,153.83},{24.83,65.97},{18.69,50.48},
{45.29,74.50},{ 2.68,37.48},{75.57,134.16},{37.04,97.58},
{53.59,88.16},{66.96,141.13},{ 8.31,61.03},{ 4.53,57.53},
{41.66,97.85},{72.11,132.50},{71.86,138.16},{72.87,121.14},
{87.34,143.48},{95.03,141.95},{85.67,167.74},{83.99,148.48},
{84.18,136.06},{59.05,110.64},{75.45,148.87},{10.60,48.52},
{81.85,144.43},{29.44,76.63},{10.76,52.81},{46.80,103.01},
{10.39,47.90},{35.43,82.97},{11.96,52.36},{41.33,74.35},
{34.32,100.25},{90.90,160.50},{89.02,144.90},{32.94,76.30},
{35.15,67.46},{49.81,105.50},{58.97,114.68},{61.15,85.18},
{53.52,105.59},{80.53,136.80},{ 8.25,57.08},{88.87,142.03},
{63.05,116.95},{50.19,110.48},{32.94,72.63},{75.91,128.73},
{98.59,162.18},{ 6.66,53.32},{66.68,116.98},{46.53,95.66},
{80.56,157.54},{ 4.04,39.16},{84.05,142.18},{ 8.82,51.97},
{94.05,154.96},{14.54,71.12},{51.22,96.16},{53.00,102.82},
{65.99,122.14},{80.88,146.18},{ 2.80,28.08},{62.66,127.25},
{81.08,135.03},{11.56,58.03},{46.56,85.84},{48.72,108.46},
{34.11,77.80},{25.72,69.80},{91.39,149.30},{54.37,99.88},
{46.36,107.12},{35.15,75.41},{57.05,105.71},{41.58,83.45},
{22.83,62.83},{23.79,76.76},{53.68,97.62},{40.51,85.35},
{50.73,98.67},{64.43,125.08},{92.16,155.77},{56.76,110.60},
{20.14,61.83},{72.47,131.58},{78.09,141.29},{ 3.64,32.93},
{ 6.73,65.31},{30.47,75.97},{44.28,85.69},{96.01,159.51},
{10.44,53.52},{45.94,100.93},{35.94,92.95},{79.84,148.83},
{42.10,83.36},{48.75,107.21},{66.31,129.78},{ 8.26,45.71},
{19.01,57.81},{12.89,52.33},{34.53,79.61},{57.75,104.38},
{47.06,97.03},{41.79,96.89},{21.96,67.98},{29.73,75.70},
{ 6.13,26.46},{13.22,76.73},{66.13,103.21},{18.58,62.21},
{30.37,42.98},{20.71,54.73},{63.10,130.17},{52.73,105.14},
{38.51,89.64},{10.37,52.75},{13.14,53.41},{23.17,57.08},
{96.43,158.01},{71.44,124.79},{38.76,93.08},{50.23,112.39},
{84.90,144.26},{37.25,88.01},{ 6.49,42.67},{64.16,100.96},
{ 1.50,44.62},{29.76,60.69},{67.03,111.56},{31.42,70.82},
{85.35,142.18},{59.23,107.16},{64.07,104.07},{90.84,151.87},
{75.77,135.87},{59.10,99.71},{88.05,157.91},{25.65,70.84},
{95.05,154.88},{65.83,127.48},{90.27,149.50},{15.15,43.70},
{83.34,130.41},{53.43,118.60},{68.00,121.65},{95.21,160.78},
{74.74,150.54},{42.66,79.34},{25.30,53.27},{25.99,85.53},
{43.21,90.96},{13.26,49.37},{41.67,77.87},{47.19,88.81},
{90.32,166.44},{19.76,60.65},{81.70,130.94},{88.04,138.64},
{32.64,87.28},{52.97,94.55},{ 6.54,44.90},{12.62,62.43},
{ 1.99,60.37},{24.10,66.02},{39.88,97.35},{85.36,149.25},
{89.75,149.66},{85.48,135.83},{64.70,115.71},{99.39,155.02},
{66.68,111.85},{88.72,146.10},{24.89,49.71},{97.13,163.31},
{ 8.04,42.95},{72.37,123.91},{ 6.28,47.33},{ 9.04,36.47},
{27.92,52.38},{76.89,125.98},{86.88,149.43},{62.49,113.11},
{75.93,136.43},{39.81,88.85},{37.76,78.30},{48.62,105.76},
{11.06,47.21},{21.55,64.30},{18.34,65.05},{60.29,94.85},
{97.17,144.12},{55.76,94.79},{74.14,112.63},{50.67,102.12},
{78.33,147.34},{87.66,139.50},{95.28,142.74},{15.79,60.86},
{51.55,86.75},{66.70,117.50},{16.42,74.91},{96.96,173.36},
{72.45,117.10},{82.60,150.17},{67.67,130.28},{ 1.46,40.37},
{65.45,118.92},{80.27,139.24},{88.31,144.56},{77.83,139.24},
{16.50,61.86},{ 3.68,43.30},{86.42,146.72},{82.20,144.05},
{60.26,122.63},{35.91,79.84},{ 6.38,36.90},{61.15,133.48},
{75.59,130.90},{66.25,122.05},{39.81,65.67},{22.03,72.67},
{49.81,97.68},{42.75,72.90},{79.72,135.65},{14.02,43.73},
{50.97,113.88},{25.92,75.34},{71.34,131.50},{90.16,159.00},
{90.00,173.48},{ 5.93,32.51},{93.47,164.66},{80.15,137.14},
{96.50,161.72},{ 5.22,36.44},{59.09,127.71},{67.61,142.50},
{37.95,72.89},{36.28,80.51},{ 1.75,37.11},{32.50,81.37},
{68.29,110.92},{ 3.19,38.92},{10.42,47.43},{23.38,68.25},
{ 1.24,39.85},{95.36,147.53},{14.70,39.25},{16.27,49.69},
{78.54,121.12},{20.68,61.07},{89.24,153.20},{37.41,92.99},
{31.54,72.54},{ 9.04,36.12},{71.16,157.70},{40.54,101.05},
{87.40,146.76},{40.03,64.89},{65.93,106.88},{51.99,91.49},
{30.11,69.89},{ 4.20,29.63},{72.94,121.91},{84.03,140.15},
{18.21,65.22},{22.75,72.70},{ 5.03,62.94},{84.19,121.12},
{49.73,109.18},{50.97,96.52},{17.84,61.19},{22.23,63.79},
{98.64,161.96},{47.67,98.78},{95.89,164.82},{17.60,40.57},
{19.55,60.32},{39.65,100.65},{78.04,145.50},{21.25,57.30},
{75.44,132.79},{20.74,51.29},{99.76,167.69},{24.02,68.15},
{83.83,144.62},{28.83,79.70},{81.39,140.50},{54.20,114.42},
{65.66,114.08},{38.43,82.74},{45.69,81.81},{30.16,71.89},
{ 5.60,54.27},{83.32,146.93},{11.91,37.69},{72.86,145.12},
{94.26,157.64},{77.50,145.38},{28.53,70.20},{62.64,144.67},
{46.98,87.65},{17.94,66.43},{94.83,154.61},{70.00,115.57},
{81.49,146.60},{53.42,112.37},{73.41,122.83},{28.85,77.99},
{61.51,103.53},{ 9.43,45.86},{61.79,112.81},{22.91,62.04},
{18.97,73.47},{71.89,125.20},{21.33,49.19},{60.95,107.95},
{50.48,100.19},{44.09,102.06},{90.72,162.02},{54.67,95.87},
{80.13,146.92},{19.49,64.20},{22.27,51.03},{65.80,125.90},
{84.97,142.32},{61.33,129.41},{81.98,151.09},{41.93,94.51},
{69.72,122.51},{20.44,59.72},{52.94,92.47},{53.87,108.83},
{66.10,131.38},{53.89,118.39},{90.61,141.08},{ 1.48,43.49},
{55.65,104.78},{15.90,60.60},{46.88,105.80},{64.44,112.85},
{52.33,117.11},{85.09,153.46},{73.22,115.56},{ 3.81,49.12},
{10.66,30.84},{23.27,55.05},{48.66,109.58},{23.29,50.31},
{88.92,150.73},{26.52,73.72},{65.10,115.43},{17.14,69.33},
{90.44,164.86},{40.92,92.54},{29.13,54.22},{36.01,92.08},
{62.79,95.56},{21.66,69.26},{41.24,83.40},{22.49,75.67},
{60.91,120.06},{94.45,165.13},{13.20,56.43},{59.92,90.43},
{39.19,80.79},{76.59,139.35},{36.67,81.34},{11.06,32.61},
{88.81,151.04},{44.19,86.43},{98.74,170.51},{14.20,57.27},
{ 0.12,34.46},{80.95,146.47},{80.91,137.87},{41.60,89.96},
{74.73,146.95},{10.15,34.76},{99.40,156.65},{ 2.58,40.48},
{97.86,172.37},{78.82,139.27},{58.57,109.60},{96.57,169.35},
{79.00,152.23},{39.99,94.14},{66.95,126.50},{59.33,105.83},
{13.71,60.63},{45.88,100.72},{ 5.73,42.26},{73.24,138.38},
{18.70,59.33},{44.16,103.88},{18.93,63.40},{ 8.89,56.46},
{64.87,119.64},{59.27,128.50},{65.70,125.98},{31.45,76.90},
{47.62,106.65},{55.24,102.65},{66.98,129.90},{67.20,120.15},
{82.89,160.45},{87.63,156.09},{86.84,154.94},{49.71,106.31},
{81.13,141.18},{83.95,148.70},{24.82,68.16},{ 6.29,36.96},
{45.53,100.22},{54.86,118.40},{20.11,73.23},{36.27,77.63},
{34.99,87.72},{82.93,147.98},{15.79,47.57},{16.52,38.24},
{41.72,91.70},{88.28,162.99},{41.99,86.34},{19.14,71.88},
{46.82,92.30},{63.26,119.18},{95.62,168.26},{16.65,53.28},
{37.05,97.31},{23.12,52.65},{94.77,164.76},{92.08,141.33},
{73.24,117.79},{26.84,57.89},{79.50,144.53},{ 4.19,28.60},
{72.43,135.74},{53.96,102.81},{34.51,71.36},{ 8.26,36.34},
{70.16,133.65},{58.46,96.95},{95.49,147.11},{61.54,129.50},
{53.80,99.09},{20.07,70.56},{92.32,161.17},{77.15,131.94},
{13.48,47.35},{98.88,169.61},{54.80,84.26},{29.52,77.65},
{46.78,81.14},{50.98,100.42},{34.22,71.59},{92.79,162.00},
{41.44,107.55},{65.00,105.16},{25.10,75.73},{ 5.68,47.49},
{55.63,122.32},{59.70,105.98},{ 0.83,18.38},{93.49,170.66},
{74.24,125.12},{21.73,56.04},{69.00,129.79},{74.33,131.77},
{87.29,162.96},{49.45,108.64},{39.85,95.13},{65.94,128.56},
{96.42,164.68},{75.72,135.47},{74.00,128.64},{22.69,79.03},
{16.49,49.83},{51.75,105.92},{18.35,39.89},{12.11,47.51},
{11.40,53.07},{42.69,75.97},{34.09,90.98},{89.58,138.92},
{61.38,116.03},{15.99,53.12},{51.36,98.31},{ 8.43,41.23},
{99.48,160.38},{28.02,72.25},{18.56,67.65},{20.40,75.66},
{ 9.16,51.61},{16.99,69.22},{16.63,62.37},{98.07,172.62},
{ 2.13,37.96},{34.13,90.66},{46.26,90.77},{91.73,155.07},
{38.47,84.49},{62.03,123.28},{22.39,52.12},{32.11,73.83},
{90.83,141.57},{55.57,125.48},{31.37,72.59},{74.83,150.41},
{84.81,158.26},{68.49,137.47},{28.18,66.69},{30.45,95.28},
{35.25,85.16},{68.88,111.04},{69.27,138.32},{99.21,173.21},
{12.99,44.58},{33.35,93.76},{51.33,90.40},{61.72,112.41},
{59.57,115.05},{68.79,118.10},{43.68,103.93},{28.34,72.36},
{65.11,117.06},{80.55,143.77},{19.12,65.14},{19.35,73.32},
{ 5.25,43.43},{61.76,111.72},{72.75,138.36},{57.36,101.49},
{49.69,113.93},{86.72,139.78},{87.23,144.77},{82.63,140.81},
{86.35,146.08},{85.91,147.89},{98.85,174.96},{92.35,159.01},
{25.75,70.99},{39.70,81.86},{ 3.86,33.06},{61.49,112.99},
{55.07,136.48},{70.31,120.27},{74.20,122.49},{76.62,139.04},
{59.92,107.95},{67.72,150.15},{90.39,159.74},{56.12,99.84},
{25.27,65.76},{47.30,88.16},{87.88,125.22},{66.52,121.60},
{56.18,105.57},{23.84,65.30},{47.42,103.93},{14.72,54.60},
{55.42,93.79},{72.59,123.12},{97.52,153.87},{57.87,111.74},
{16.32,67.04},{61.16,108.39},{10.41,59.99},{21.46,50.16},
{88.81,161.65},{87.42,146.69},{58.95,125.92},{76.51,138.78},
{ 9.07,60.01},{23.03,70.96},{ 0.74,43.37},{94.22,142.83},
{39.50,74.27},{ 9.36,54.88},{39.38,108.91},{47.11,97.19},
{ 8.02,27.25},{ 2.14,30.21},{ 2.24,47.15},{28.53,75.91},
{53.16,116.06},{67.95,131.53},{39.90,96.56},{ 4.89,46.30},
{96.71,151.69},{52.71,86.57},{72.33,127.71},{57.81,113.64},
{20.66,50.51},{60.82,122.96},{52.86,93.88},{14.65,64.75},
{74.36,132.70},{46.84,81.11},{ 3.79,32.15},{39.85,87.00},
{42.20,88.52},{78.22,130.13},{93.58,152.52},{57.03,92.98},
{26.96,71.50},{ 3.42,36.65},{ 2.61,34.84},{88.96,150.75},
{92.04,157.28},{51.04,108.19},{59.44,120.82},{55.34,95.53},
{41.00,96.36},{59.79,131.01},{30.89,63.48},{43.47,90.16},
{18.84,72.90},{42.70,78.42},{44.85,90.97},{41.23,99.03},
{16.14,52.82},{10.22,69.66},{86.11,150.33},{43.47,96.18},
{97.45,180.39},{31.67,77.81},{75.57,130.89},{16.87,45.23},
{ 6.68,42.93},{11.99,46.31},{93.15,165.13},{25.97,61.79},
{ 1.98,52.17},{50.93,91.84},{19.96,38.01},{51.04,110.55},
{ 2.94,44.35},{38.64,78.52},{87.43,142.52},{67.31,141.90},
{97.56,162.61},{23.24,58.72},{88.40,126.06},{97.41,152.38},
{ 8.99,60.09},{62.95,121.42},{39.19,78.97},{68.34,124.26},
{67.92,126.91},{18.55,59.65},{ 0.52,42.03},{63.22,127.39},
{61.12,108.44},{38.83,76.44},{75.92,123.50},{24.70,61.13},
{34.53,63.04},{30.55,69.85},{93.81,158.14},{17.02,58.94},
{39.86,86.69},{13.91,43.15},{43.07,80.31},{14.22,52.39},
{28.01,64.04},{17.66,51.30},{64.87,127.50},{68.69,129.09},
{ 3.99,46.66},{27.77,79.85},{82.46,133.97},{11.77,51.57},
{ 3.29,42.13},{28.30,80.83},{56.98,102.61},{41.17,97.33},
{50.10,94.36},{89.95,144.63},{13.52,43.10},{38.27,106.86},
{29.52,59.80},{78.72,146.92},{34.18,96.12},{85.06,152.50},
{79.77,122.94},{36.97,84.69},{16.15,48.64},{80.74,110.63},
{73.75,133.85},{98.49,171.85},{22.60,60.53},{49.58,112.72},
{35.70,75.85},{55.94,117.99},{21.88,51.47},{14.56,45.53},
{12.98,48.68},{61.74,108.64},{84.13,156.45},{10.53,63.37},
{67.73,117.73},{28.39,78.10},{83.21,138.03},{76.86,135.79},
{67.45,121.59},{54.79,102.89},{87.09,145.27},{78.89,141.41},
{93.95,154.16},{82.44,149.57},{46.98,99.33},{52.73,110.86},
{74.92,127.56},{18.70,67.47},{28.05,67.85},{17.31,50.26},
{51.58,107.92},{ 6.23,51.92},{ 3.91,30.74},{69.02,125.15},
{80.46,138.83},{35.14,80.49},{92.95,163.01},{ 8.26,53.66},
{39.88,96.76},{55.01,105.77},{55.70,105.97},{ 7.84,49.25},
{ 7.46,32.19},{ 6.66,43.31},{82.11,133.48},{87.68,144.55},
{ 9.06,45.72},{50.11,90.64},{85.47,162.49},{53.97,96.09},
{ 3.95,43.61},{70.93,114.76},{63.70,121.28},{12.35,41.48},
{61.28,108.55},{36.19,71.01},{ 5.82,46.69},{31.71,88.30},
{70.95,121.80},{28.23,69.52},{ 7.46,38.60},{85.07,137.40},
{38.88,85.77},{41.81,81.44},{ 9.77,46.36},{84.85,146.87},
{49.52,113.65},{58.38,108.35},{19.87,65.23},{71.50,130.83},
{71.13,127.30},{80.05,139.42},{27.85,76.60},{37.16,76.01}
};
// Computes finish - start in nanoseconds and stores it in *difference.
// Returns 0 when the interval is positive, 1 otherwise.
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
    long long int sec = finish->tv_sec - start->tv_sec;
    long long int nsec = finish->tv_nsec - start->tv_nsec;
    // Borrow one second when the nanosecond part underflows.
    if (nsec < 0)
    {
        sec--;
        nsec += 1000000000;
    }
    *difference = sec * 1000000000 + nsec;
    return !(*difference > 0);
}
// Squared residual of the line y = m*x + c at the observation (x, y).
double residual_error(double x, double y, double m, double c) {
    const double diff = m * x + c - y;
    return diff * diff;
}
// Root-mean-square residual of the line y = m*x + c over the full global
// dataset (`data`, `n_data`).
double rms_error(double m, double c) {
    double total = 0;
    for (int i = 0; i < n_data; i++) {
        total += residual_error(data[i].x, data[i].y, m, c);
    }
    return sqrt(total / n_data);
}
// Device-side squared residual of the line y = m*x + c at (x, y);
// mirrors the host residual_error().
__device__ double d_residual_error(double x, double y, double m, double c) {
    const double diff = m * x + c - y;
    return diff * diff;
}
// One thread per data point: writes the point's squared residual into
// error_sum_arr for a later host-side reduction. Launch with
// grid*block == number of data points (no bounds guard).
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    error_sum_arr[idx] = d_residual_error(d_data[idx].x, d_data[idx].y, *m, *c);
}
// Hill-climbing line fit: starting from (bm, bc), repeatedly evaluates the
// RMS error of 8 neighbouring (m, c) candidates on the GPU and moves to
// the best one until no neighbour improves.
int main() {
  int i;
  // Current best line y = bm*x + bc and its error.
  double bm = 1.3;
  double bc = 10;
  double be;
  // Candidate parameters and errors for the 8 search directions.
  double dm[8];
  double dc[8];
  double e[8];
  double step = 0.01;
  double best_error = 999999999;
  int best_error_i;
  int minimum_found = 0;
  // Offsets defining the 8 compass directions around (bm, bc).
  double om[] = {0,1,1, 1, 0,-1,-1,-1};
  double oc[] = {1,1,0,-1,-1,-1, 0, 1};
  struct timespec start, finish;
  long long int time_elapsed;
  clock_gettime(CLOCK_MONOTONIC, &start);
  cudaError_t error;
  double *d_dm;
  double *d_dc;
  double *d_error_sum_arr;
  point_t *d_data;
  be = rms_error(bm, bc);
  // Device buffers: candidate slopes/intercepts, one squared residual per
  // data point, and the dataset itself.
  error = cudaMalloc(&d_dm, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_dc, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
  if(error){
    fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_data, sizeof(data));
  if(error){
    fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  while(!minimum_found) {
    // Build the 8 neighbouring candidates one `step` away.
    for(i=0;i<8;i++) {
      dm[i] = bm + (om[i] * step);
      dc[i] = bc + (oc[i] * step);
    }
    error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    for(i=0;i<8;i++) {
      double h_error_sum_arr[1000];
      double error_sum_total = 0;   // fix: was read (+=) uninitialized
      double error_sum_mean;
      // 100 blocks x 10 threads == 1000 threads, one per data point.
      d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
      cudaDeviceSynchronize();      // fix: cudaThreadSynchronize() is deprecated
      error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
      if(error){
        fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
          cudaGetErrorString(error));
      }
      //Add each error sum to the error sum total.
      for(int j=0; j<n_data; j++) {
        error_sum_total += h_error_sum_arr[j];
      }
      error_sum_mean = error_sum_total / n_data;
      e[i] = sqrt(error_sum_mean);
      // NOTE(review): best_error is never reset between while-iterations,
      // so best_error_i can go stale when no direction improves — confirm
      // this matches the intended search behaviour.
      if(e[i] < best_error) {
        best_error = e[i];
        best_error_i = i;
      }
    }
    printf("best m,c is %lf,%lf with error %lf in direction %d\n",
      dm[best_error_i], dc[best_error_i], best_error, best_error_i);
    if(best_error < be) {
      be = best_error;
      bm = dm[best_error_i];
      bc = dc[best_error_i];
    } else {
      minimum_found = 1;
    }
  }
  //Free memory for d_dm
  error = cudaFree(d_dm);
  if(error){
    fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  //Free memory for d_dc
  error = cudaFree(d_dc);
  if(error){
    fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_data);
  if(error){
    fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_error_sum_arr);
  if(error){
    fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
    (time_elapsed/1.0e9));
  return 0;
}
|
12,285 | #include <stdio.h>
#include <math.h>
// Reads a cellular-automaton grid from `filename`.
// File format: "<rows> <cols>" followed by rows*cols values for each of
// 4 bit-planes (all of plane 0, then plane 1, ...). The planes are
// packed into one 4-bit state per cell (plane k contributes 2^k) and the
// packed array is returned in *Aos (caller owns the malloc'd memory),
// with dimensions in *rows / *cols.
// On open failure, *Aos is set to NULL and *rows/*cols to 0.
void readInput(const char *filename, int **Aos, int *rows, int *cols) {
    FILE *file = fopen(filename, "r");
    if (file == NULL) {                      // fix: fopen was unchecked
        fprintf(stderr, "readInput: cannot open %s\n", filename);
        *rows = 0;
        *cols = 0;
        *Aos = NULL;
        return;
    }
    fscanf(file, "%d %d", rows, cols);
    const int n = (*rows) * (*cols);
    int *planes = (int *) malloc(n * 4 * sizeof(int));
    int *packed = (int *) malloc(n * sizeof(int));
    // Plane j occupies every 4th slot of `planes`, starting at offset j.
    for (int j = 0; j < 4; j++) {
        int counter = 0;
        for (int i = 0; i < n; i++) {
            fscanf(file, "%d ", &planes[counter + j]);
            counter = counter + 4;
        }
    }
    fclose(file);                            // fix: file handle was leaked
    // Pack the 4 plane bits of each cell into a single state value.
    int counter = 0;
    for (int j = 0; j < n; j++) {
        packed[j] = planes[counter] + planes[counter+1]*2
                  + planes[counter+2]*4 + planes[counter+3]*8;
        counter = counter + 4;
    }
    free(planes);                            // fix: scratch buffer was leaked
    *Aos = packed;
}
// Prints all rows*cols entries of A on a single space-separated line.
void printMatrix(int *A, int rows, int cols) {
    const int total = rows * cols;
    for (int idx = 0; idx < total; idx++) {
        printf("%i ", A[idx]);
    }
    printf("\n");
}
// One update step of a lattice automaton with periodic wrap in x and y.
// Reads the previous state from `buffer` and writes the new state into
// `array` (one thread per cell). Each neighbor's packed 4-bit value is
// matched against a set of states and, on a match, a direction weight
// (1/4/2) is added to `total`.
//
// NOTE(review): `y = tId/rows` divides by rows although x uses cols; for
// non-square grids this looks transposed — confirm against a CPU reference.
// NOTE(review): each nested ternary chain requires buffer[..] to equal
// several different constants simultaneously, so it can only yield the
// innermost branch or 0; an OR over a membership set (e.g. value in
// {1,3,10,9,7,11,13,15}) was likely intended — confirm before relying on
// the computed totals.
// NOTE(review): `y+1 == cols` on the row-wrap test compares against cols
// where rows looks intended — confirm.
__global__ void step_periodic(int * array,int *buffer,int rows, int cols){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId < rows*cols){
int c_aux;
int reject = 1;
// Cell coordinates from the flat thread id.
int x = tId%(cols);
int y = (int) tId/rows;
int total = 0;
// Left neighbor (wraps to cols-1 at the left edge).
c_aux = (x-1 < 0 ? cols-1 : x-1);
reject = (x-1 < 0 ? 0:1);
total = (reject ==1? (buffer[(y*cols + c_aux)]==1?(buffer[(y*cols + c_aux)] == 3?(buffer[(y*cols + c_aux)] == 10? (buffer[(y*cols + c_aux)] == 9?(buffer[(y*cols + c_aux)]==7?(buffer[(y*cols + c_aux)]==11?(buffer[(y*cols + c_aux)]==13?(buffer[(y*cols + c_aux)] == 15? total+1:0):0):0):0):0):0):0):0):0);
total = (c_aux==0? (buffer[(y*cols + c_aux)]==4?(buffer[(y*cols + c_aux)] == 10?(buffer[(y*cols + c_aux)] == 6? (buffer[(y*cols + c_aux)] == 12?(buffer[(y*cols + c_aux)]==7?(buffer[(y*cols + c_aux)]==13?(buffer[(y*cols + c_aux)]==14?(buffer[(y*cols + c_aux)] == 15? total+1:0):0):0):0):0):0):0):0):0);
// Right neighbor (wraps to 0 at the right edge).
c_aux = (x+1 == cols ? 0: x+1);
reject = (x+1 == cols ? 0:1);
total = (reject ==1? (buffer[(y*cols + c_aux)]==4?(buffer[(y*cols + c_aux)] == 10?(buffer[(y*cols + c_aux)] == 6? (buffer[(y*cols + c_aux)] == 12?(buffer[(y*cols + c_aux)]==7?(buffer[(y*cols + c_aux)]==13?(buffer[(y*cols + c_aux)]==14?(buffer[(y*cols + c_aux)] == 15? total+4:0):0):0):0):0):0):0):0):0);
total = (c_aux==0? (buffer[(y*cols + c_aux)]==1?(buffer[(y*cols + c_aux)] == 3?(buffer[(y*cols + c_aux)] == 10? (buffer[(y*cols + c_aux)] == 9?(buffer[(y*cols + c_aux)]==7?(buffer[(y*cols + c_aux)]==11?(buffer[(y*cols + c_aux)]==13?(buffer[(y*cols + c_aux)] == 15? total+4:0):0):0):0):0):0):0):0):0);
// Row below (periodic via % rows).
c_aux = (((y+1)%rows)*cols);
reject = (y+1 == cols ? 0:1);
total = (reject ==1? (buffer[(c_aux + x)]==2?(buffer[(c_aux + x)] == 3?(buffer[(c_aux + x)] == 6? (buffer[(c_aux + x)] == 5?(buffer[(c_aux + x)]==7?(buffer[(c_aux + x)]==11?(buffer[(c_aux + x)]==14?(buffer[(c_aux + x)] == 15? total+2:0):0):0):0):0):0):0):0):0);
total = (c_aux==0? (buffer[(c_aux + x)]==8?(buffer[(c_aux + x)] == 12?(buffer[(c_aux + x)] == 5? (buffer[(c_aux + x)] == 9?(buffer[(c_aux + x)]==14?(buffer[(c_aux + x)]==13?(buffer[(c_aux + x)]==11?(buffer[(c_aux + x)] == 15? total+2:0):0):0):0):0):0):0):0):0);
// Row above (euclidean modulo keeps the index non-negative).
c_aux = (((y-1)%rows)+rows)%rows*cols;
reject = (y-1 < 0 ? 0:1);
total = (c_aux==0? (buffer[(c_aux + x)]==2?(buffer[(c_aux + x)] == 3?(buffer[(c_aux + x)] == 6? (buffer[(c_aux + x)] == 5?(buffer[(c_aux + x)]==7?(buffer[(c_aux + x)]==11?(buffer[(c_aux + x)]==14?(buffer[(c_aux + x)] == 15? total+2:0):0):0):0):0):0):0):0):0);
total = (reject ==1? (buffer[(c_aux + x)]==8?(buffer[(c_aux + x)] == 12?(buffer[(c_aux + x)] == 5? (buffer[(c_aux + x)] == 9?(buffer[(c_aux + x)]==14?(buffer[(c_aux + x)]==13?(buffer[(c_aux + x)]==11?(buffer[(c_aux + x)] == 15? total+2:0):0):0):0):0):0):0):0):0);
array[tId] = total;
}
}
/*
 * Drive 1000 steps of the step_periodic automaton: read the initial grid
 * from ../initial.txt, ping-pong between d_array (output) and d_buffer
 * (input) on the device, then copy the final state back.
 *
 * Fixes vs. original: host `array` was leaked, kernel/alloc errors were
 * never checked, and the float-based ceil-div is now done in integers.
 */
int main(int argc, char const *argv[])
{
    int rows, cols;
    int *array;    // host grid, allocated by readInput (caller frees)
    int *d_array;  // device: output of each step
    int *d_buffer; // device: input of each step
    readInput("../initial.txt", &array, &rows, &cols);
    int n = rows * cols;
    int block_size = 256;
    int grid_size = (n + block_size - 1) / block_size;  // integer ceil-div
    size_t bytes = (size_t)n * sizeof(int);
    if (cudaMalloc(&d_array, bytes) != cudaSuccess ||
        cudaMalloc(&d_buffer, bytes) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }
    cudaMemcpy(d_array, array, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_buffer, array, bytes, cudaMemcpyHostToDevice);
    for (int k = 0; k < 1000; k++) {
        step_periodic<<<grid_size, block_size>>>(d_array, d_buffer, rows, cols);
        // Feed this step's output back as the next step's input.
        cudaMemcpy(d_buffer, d_array, bytes, cudaMemcpyDeviceToDevice);
    }
    cudaError_t err = cudaGetLastError();  // surface launch/config errors
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaMemcpy(array, d_array, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_array);
    cudaFree(d_buffer);
    free(array);  // was leaked in the original
    return 0;
}
|
12,286 | //Usage: diff3dp-cuda4 natom box gpu_id k_min delta_k k_max
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <math.h>
#include <unistd.h>
#define _USE_MATH_DEFINES
#define BS 512
#define CHECK(x)\
{cudaError_t err = (x);\
if (err != cudaSuccess) {\
printf("API error failed %s:%d Returned:%d\n", __FILE__, __LINE__, err);\
exit(1); \
}\
}
void diffcore(double *coords, int natom, float box, double k_step, int ik_min, int ik_max, int iphi_max, int itheta_max);
int compare_doubles (const void *a, const void *b);
__global__ void diff_atom_loop ( double *d_coords, int natom, double k_x, double k_y, double k_z, double *d_sk_re_array, double *d_sk_im_array);
template <unsigned int blockSize> __global__ void reduce6 (double *g_idata, double *g_odata, unsigned int n);
void calculateAnisotropy(double *sk_array, int i_phi_theta, double *sk_ave, double *anisotropy);
/*
 * Entry point: parse arguments, read atom coordinates (argv[1], binary-mode
 * text file of 3*natom doubles), select the GPU, and run diffcore.
 *
 * Fixes vs. original: missing argc validation, no fopen NULL check,
 * infile never fclosed, and a failed calloc silently fell through into
 * diffcore with a NULL coords pointer.
 */
int main (int argc, char *argv[]){
	FILE *infile;
	int natom, i, j, device_number;
	double *coords;
	int ik_min, ik_max, iphi_max = 36, itheta_max = 18;
	double k_resolution = 4;
	float box;
	double k_step;
	double k_min, k_max;
	/* argv[1]=coords file, argv[2]=natom, argv[3]=box, argv[4]=gpu id.
	 * NOTE(review): the usage comment at the top of the file omits the
	 * file argument; the code has always read it from argv[1]. */
	if (argc < 5) {
		fprintf(stderr, "Usage: %s file natom box gpu_id [k_min delta_k k_max]\n", argv[0]);
		return 1;
	}
	natom = atoi(argv[2]);
	printf("#Natom = %i\n", natom);
	box = atof(argv[3]);
	printf("#Box = %f\n", box);
	device_number = atoi(argv[4]);
	printf("#Device ID = %i\n", device_number);
	cudaSetDevice(device_number);
	printf("#k sk_mod_avg sk_mod_max sk_mod_top1 sk_mod_top5 sk_mod_top10 sk_sq_avg sk_sq_max sk_sq_top1 sk_sq_top5 sk_sq_top10\n");
	if(argc > 5)
	{
		if (argc < 8) {
			fprintf(stderr, "Expected k_min delta_k k_max after gpu_id\n");
			return 1;
		}
		k_min = (double) atof(argv[5]);
		k_max = (double) atof(argv[7]);
		k_step = (double) atof(argv[6]);
		ik_min = (int) (k_min / k_step);
		ik_max = (int) (k_max / k_step);
	}
	else
	{
		/* Default scan: k_step chosen from the box size and resolution. */
		ik_min = k_resolution;
		ik_max = 100;
		k_step = (double) 1. / k_resolution / box;
	}
	infile = fopen(argv[1], "rb");
	if (infile == NULL) {
		fprintf(stderr, "Cannot open %s: %s\n", argv[1], strerror(errno));
		return 1;
	}
	coords = (double *)calloc(3 * natom, sizeof(double));
	if (coords == NULL) {
		fprintf(stderr, "Out of memory for %d atoms\n", natom);
		fclose(infile);
		return 1;
	}
	for(i=0;i<natom;i++)
	{
		for(j=0;j<3;j++)
		{
			fscanf(infile, "%lf", &coords[i*3+j]);
		}
	}
	fclose(infile);  /* was leaked in the original */
	setvbuf (stdout, NULL, _IONBF, 0);  /* unbuffered stdout for live output */
	diffcore( coords, natom, box, k_step, ik_min, ik_max, iphi_max, itheta_max);
	free(coords);
	return 0;
}
/*
 * Scan |k| from ik_min*k_step to ik_max*k_step; for each k magnitude sample
 * iphi_max*itheta_max directions on the sphere, compute the structure
 * factor S(k) on the GPU (per-atom partial sums + block reduction), and
 * print per-k anisotropy statistics.
 *
 * Fix vs. original: every cudaMalloc/cudaFree and host calloc/free was
 * inside the innermost direction loop although all sizes are loop
 * invariant — allocate once up front, reuse, free once at the end. Also
 * drops the redundant pre-launch cudaDeviceSynchronize calls.
 */
void diffcore(double *coords, int natom, float box, double k_step, int ik_min, int ik_max, int iphi_max, int itheta_max)
{
	int i, i1, i5, i10;
	int ik, iphi, itheta;
	double k, phi, theta;
	double cos_phi, sin_phi, k_cos_theta;
	double k_x, k_y, k_z;
	double *sk_re_list, *sk_im_list;
	double sk_re, sk_im, sk_sq, sk_mod, sk_sq_ave, sk_mod_ave;
	double *sk_sq_array, *sk_mod_array;
	double *sk_sq_anisotropy, *sk_mod_anisotropy;
	int i_phi_theta = iphi_max * itheta_max;
	int nblocks = natom / BS + 1;          /* launch/reduction block count */
	size_t shm_size = BS * sizeof(double); /* dynamic shared mem for reduce6 */
	double *d_coords, *d_sk_re_array, *d_sk_im_array;
	double *d_sk_re_list, *d_sk_im_list;
	CHECK(cudaMalloc(&d_coords, 3 * natom * sizeof(double)));
	CHECK(cudaMemcpy(d_coords, coords, natom * 3 * sizeof(double), cudaMemcpyHostToDevice));
	/* All scratch buffers have loop-invariant sizes: allocate once. */
	CHECK(cudaMalloc((void **) &d_sk_re_array, natom * sizeof(double)));
	CHECK(cudaMalloc((void **) &d_sk_im_array, natom * sizeof(double)));
	CHECK(cudaMalloc((void **) &d_sk_re_list, nblocks * sizeof(double)));
	CHECK(cudaMalloc((void **) &d_sk_im_list, nblocks * sizeof(double)));
	sk_re_list = (double *)calloc(nblocks, sizeof(double));
	sk_im_list = (double *)calloc(nblocks, sizeof(double));
	sk_sq_array = (double *)calloc(i_phi_theta, sizeof(double));
	sk_mod_array = (double *)calloc(i_phi_theta, sizeof(double));
	sk_sq_anisotropy = (double *)calloc(i_phi_theta, sizeof(double));
	sk_mod_anisotropy = (double *)calloc(i_phi_theta, sizeof(double));
	for(ik = ik_min; ik <= ik_max; ik++)
	{
		k = (double)ik * (double)k_step;
		for(iphi = 0; iphi < iphi_max; iphi++)
		{
			phi = M_PI * 2 * (double)iphi / (double)iphi_max;
			cos_phi = (double)cos(phi);
			sin_phi = (double)sin(phi);
			for(itheta = 0; itheta < itheta_max; itheta++)
			{
				/* Direction (phi, theta) scaled by |k|. */
				theta = M_PI * ( (double)itheta / (double)itheta_max - 0.5);
				k_cos_theta = k * (double)cos(theta);
				k_x = k_cos_theta * cos_phi;
				k_y = k_cos_theta * sin_phi;
				k_z = k * (double)sin(theta);
				diff_atom_loop<<<nblocks, BS>>>(d_coords, natom, k_x, k_y, k_z, d_sk_re_array, d_sk_im_array);
				reduce6<(unsigned int)BS><<<nblocks, BS, shm_size>>>(d_sk_re_array, d_sk_re_list, natom);
				reduce6<(unsigned int)BS><<<nblocks, BS, shm_size>>>(d_sk_im_array, d_sk_im_list, natom);
				CHECK(cudaDeviceSynchronize());  /* surface kernel errors */
				CHECK(cudaMemcpy(sk_re_list, d_sk_re_list, nblocks * sizeof(double), cudaMemcpyDeviceToHost));
				CHECK(cudaMemcpy(sk_im_list, d_sk_im_list, nblocks * sizeof(double), cudaMemcpyDeviceToHost));
				/* Final reduction over per-block partials on the host. */
				sk_re = 0;
				sk_im = 0;
				for(i = 0; i < nblocks; i++)
				{
					sk_re = sk_re + sk_re_list[i];
					sk_im = sk_im + sk_im_list[i];
				}
				sk_sq = ( sk_re * sk_re + sk_im * sk_im ) / (double)natom / (double)natom;
				sk_mod = sqrt( sk_sq );
				sk_sq_array[iphi * itheta_max + itheta] = sk_sq;
				sk_mod_array[iphi * itheta_max + itheta] = sk_mod;
			}
		}
		/* Arrays are fully overwritten each ik, so reuse is safe. */
		calculateAnisotropy(sk_sq_array, i_phi_theta, &sk_sq_ave, sk_sq_anisotropy);
		calculateAnisotropy(sk_mod_array, i_phi_theta, &sk_mod_ave, sk_mod_anisotropy);
		if ((i1 = i_phi_theta / 100 - 1) < 0) i1 = 0;
		if ((i5 = i_phi_theta / 20 - 1) < 0) i5 = 0;
		if ((i10 = i_phi_theta / 10 - 1) < 0) i10 = 0;
		printf("%f %f %f %f %f %f %f %f %f %f %f\n", k, sk_mod_ave, sk_mod_anisotropy[0], sk_mod_anisotropy[i1], sk_mod_anisotropy[i5], sk_mod_anisotropy[i10], sk_sq_ave, sk_sq_anisotropy[0], sk_sq_anisotropy[i1], sk_sq_anisotropy[i5], sk_sq_anisotropy[i10]);
	}
	free(sk_sq_anisotropy);
	free(sk_mod_anisotropy);
	free(sk_sq_array);
	free(sk_mod_array);
	free(sk_re_list);
	free(sk_im_list);
	CHECK(cudaFree(d_sk_re_array));
	CHECK(cudaFree(d_sk_im_array));
	CHECK(cudaFree(d_sk_re_list));
	CHECK(cudaFree(d_sk_im_list));
	CHECK(cudaFree(d_coords));
}
/* qsort comparator for doubles, ascending: returns -1, 0 or +1
 * (comparison-based, so no overflow risk from subtraction). */
int compare_doubles (const void *a, const void *b)
{
	double lhs = *(const double *) a;
	double rhs = *(const double *) b;
	if (lhs < rhs) return -1;
	if (lhs > rhs) return 1;
	return 0;
}
// Per-atom partial structure-factor sums, grid-stride over atoms.
// For atom ia, accumulates cos/sin of 2*pi*(k . (r_a - r_b)) over all
// earlier atoms ib < ia (each unordered pair contributes exactly once),
// and writes the partials to d_sk_re_array/d_sk_im_array[ia] for the
// subsequent block reduction.
// d_coords is laid out as natom consecutive (x, y, z) double triples.
// NOTE: O(natom) work per atom, so total work is O(natom^2) per k vector.
__global__ void diff_atom_loop ( double *d_coords, int natom, double k_x, double k_y, double k_z, double *d_sk_re_array, double *d_sk_im_array)
{
double power;
int ib;
double sk_re, sk_im;
double x_a, y_a, z_a;
double x_b, y_b, z_b;
// Grid-stride loop: correct for any launch configuration.
unsigned int ia = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int gridSize = blockDim.x * gridDim.x;
while (ia < natom)
{
sk_re = 0;
sk_im = 0;
x_a = d_coords[ia * 3];
y_a = d_coords[ia * 3 + 1];
z_a = d_coords[ia * 3 + 2];
for(ib = 0; ib < ia; ib++)
{
x_b = d_coords[ib * 3];
y_b = d_coords[ib * 3 + 1];
z_b = d_coords[ib * 3 + 2];
// Phase of the pair (a, b) for this k vector.
power = 2 * M_PI * ( k_x * (x_a - x_b) + k_y * (y_a - y_b) + k_z * (z_a - z_b));
sk_re += cos(power);
sk_im += sin(power);
}
d_sk_re_array[ia] = sk_re;
d_sk_im_array[ia] = sk_im;
ia += gridSize;
}
}
/*
 * Block-wide sum reduction: each block grid-stride-accumulates g_idata
 * into dynamic shared memory (blockSize doubles, passed as the third
 * launch argument), reduces, and writes its partial sum to
 * g_odata[blockIdx.x]. blockSize must be a power of two.
 *
 * Fix vs. original: the final warp stage issued __syncthreads() inside
 * the divergent `if (tid < 32)` region, which is undefined behavior
 * (a barrier must be reached by every thread in the block). Replaced
 * with a uniform tree reduction whose barrier sits outside the
 * conditional; the compile-time loop fully unrolls for a fixed
 * blockSize template argument.
 */
template <unsigned int blockSize> __global__ void reduce6 (double *g_idata, double *g_odata, unsigned int n)
{
	extern __shared__ double sdata[];
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x * blockSize + tid;
	unsigned int gridSize = blockSize * gridDim.x;
	// Grid-stride accumulation into shared memory.
	sdata[tid] = 0;
	while (i < n)
	{
		sdata[tid] += g_idata[i];
		i += gridSize;
	}
	__syncthreads();
	// Tree reduction; every thread reaches the barrier on every pass.
	for (unsigned int s = blockSize / 2; s > 0; s >>= 1)
	{
		if (tid < s)
		{
			sdata[tid] += sdata[tid + s];
		}
		__syncthreads();
	}
	if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
 * Sort sk_array ascending IN PLACE, then fill anisotropy[i] with the mean
 * of the largest (i+1) values normalized by the overall mean, which is
 * also returned through *sk_ave.
 * NOTE: sk_array is reordered by this call.
 */
void calculateAnisotropy(double *sk_array, int i_phi_theta, double *sk_ave, double *anisotropy)
{
	double running = 0.0;
	qsort(sk_array, i_phi_theta, sizeof(double), compare_doubles);
	/* Walk from the largest value downward, recording running averages. */
	for (int rank = 1; rank <= i_phi_theta; rank++)
	{
		running += sk_array[i_phi_theta - rank];
		anisotropy[rank - 1] = running / rank;
	}
	*sk_ave = running / i_phi_theta;
	/* Express each running average relative to the global mean. */
	for (int rank = 0; rank < i_phi_theta; rank++)
	{
		anisotropy[rank] /= *sk_ave;
	}
}
|
12,287 | //
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//#include "AddVector.cuh"
//#include "box2d.h"
//#include <stdio.h>
//#include <iostream>
//
//const double MapWidth = 2;
//const double MapHeight = 3;
//
////typedef float float32;
//
//using namespace std;
//
//
//// Helper function for using CUDA to add vectors in parallel.
////cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
////{
//// int *dev_a = 0;
//// int *dev_b = 0;
//// int *dev_c = 0;
//// cudaError_t cudaStatus;
////
//// // Choose which GPU to run on, change this on a multi-GPU system.
//// cudaStatus = cudaSetDevice(0);
////
//// // Allocate GPU buffers for three vectors (two input, one output) .
//// cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
//// cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
//// cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
//// // Copy input vectors from host memory to GPU buffers.
//// cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
//// cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
//// // Launch a kernel on the GPU with one thread for each element.
//// addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
////
//// // cudaDeviceSynchronize waits for the kernel to finish, and returns
//// // any errors encountered during the launch.
//// cudaStatus = cudaDeviceSynchronize();
//// // Copy output vector from GPU buffer to host memory.
//// cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
////
//// cudaFree(dev_c);
//// cudaFree(dev_a);
//// cudaFree(dev_b);
////
//// return cudaStatus;
////}
//
////__global__ void addKernel(int *c, int *a, int *b)
////{
//// int i = threadIdx.x;
//// c[i] = a[i] + b[i];
////}
//
//int main()
//{
// /*const int arraySize = 5;
// const int a[arraySize] = { 1, 2, 3, 4, 5 };
// const int b[arraySize] = { 10, 20, 30, 40, 50 };
// int c[arraySize] = { 0 };
//
// cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);*/
//
// int c = MapWidth * MapHeight;
//
// float32 *re = new float32[c];
// float32 *ha = new float32[c];
// float32 *hb = new float32[c];
//
// for (int i = 0; i < c; i++)
// {
// ha[i] = 3.0;
// hb[i] = 4.0;
// }
//
// addVectorI(re, ha, hb, c);
// for (int i = 0; i < c; i++)
// {
// cout << re[i] << " ";
// }
//
// system("pause");
// return 0;
//}
//
|
12,288 | #include <stdio.h>
#include <stdlib.h>
#include <cufft.h>
#include <iostream>
#include <math.h>
#define NX 3200000
#define NY 4
//const int NX = 4;
//const int NY = 4;
const int DEFAULT_FFT_TRIALS = 1000;
const int DEFAULT_META_TRIALS = 10;
// Permute a complex array from structure-of-arrays layout (fid_count
// planes of elem_count values each) back to array-of-structures layout
// (groups of c_sz interleaved fields) — effectively the inverse transpose
// of bp_aos_to_soa. Each (flattened 2-D) thread handles chunks of c_sz
// destination elements, striding by the total thread count.
// NOTE(review): the unrolled body hard-codes exactly 4 source planes
// (src_base0..3), so it assumes fid_count == 4 — confirm callers never
// pass another value.
__global__ void bp_soa_to_aos(cufftComplex *d_A, cufftComplex *d_B,
int elem_count, int fid_count, int c_sz) {
// Flatten the (possibly 2-D) grid/block into one linear thread id.
int real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x);
int dst_base = 0;
// Start offsets of the four source planes in the SoA buffer.
int src_base0 = 0;
int src_base1 = elem_count;
int src_base2 = elem_count*2;
int src_base3 = elem_count*3;
int loop_term = (elem_count*fid_count)/c_sz;
int inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y;
int lt = c_sz / fid_count;  // elements per chunk handled by one iteration
for (int t_id = real_tid; t_id < loop_term; t_id += inc){
dst_base = t_id*c_sz;
#pragma unroll
for (int i = 0; i < lt; ++i){
d_B[dst_base + 0 + i*fid_count].x = d_A[src_base0 + t_id*lt + i].x;
d_B[dst_base + 0 + i*fid_count].y = d_A[src_base0 + t_id*lt + i].y;
d_B[dst_base + 1 + i*fid_count].x = d_A[src_base1 + t_id*lt + i].x;
d_B[dst_base + 1 + i*fid_count].y = d_A[src_base1 + t_id*lt + i].y;
d_B[dst_base + 2 + i*fid_count].x = d_A[src_base2 + t_id*lt + i].x;
d_B[dst_base + 2 + i*fid_count].y = d_A[src_base2 + t_id*lt + i].y;
d_B[dst_base + 3 + i*fid_count].x = d_A[src_base3 + t_id*lt + i].x;
d_B[dst_base + 3 + i*fid_count].y = d_A[src_base3 + t_id*lt + i].y;
}
}
}
// Permute a complex array from array-of-structures layout (groups of c_sz
// interleaved fields) to structure-of-arrays layout (fid_count contiguous
// planes of elem_count values) — a strided transpose. Each (flattened
// 2-D) thread handles chunks of c_sz source elements, striding by the
// total thread count.
// NOTE(review): the unrolled body hard-codes exactly 4 destination planes
// (dst_base0..3), so it assumes fid_count == 4 — confirm callers never
// pass another value.
__global__ void bp_aos_to_soa(cufftComplex *d_A, cufftComplex *d_B,
int elem_count, int fid_count, int c_sz) {
// Flatten the (possibly 2-D) grid/block into one linear thread id.
int real_tid = ((blockIdx.x + blockIdx.y*gridDim.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x);
// Start offsets of the four destination planes in the SoA buffer.
int dst_base0 = 0;
int dst_base1 = elem_count;
int dst_base2 = elem_count*2;
int dst_base3 = elem_count*3;
int lt = c_sz / fid_count;  // elements per chunk handled by one iteration
int loop_term = elem_count/lt;
int inc = gridDim.x*gridDim.y*blockDim.x*blockDim.y;
int src_base = 0;
for (int t_id = real_tid; t_id < loop_term; t_id += inc){
src_base = t_id*c_sz;
#pragma unroll
for (int i = 0; i < lt; ++i){
d_B[dst_base0 + t_id*lt + i].x = d_A[src_base + 0 + i*fid_count].x;
d_B[dst_base0 + t_id*lt + i].y = d_A[src_base + 0 + i*fid_count].y;
d_B[dst_base1 + t_id*lt + i].x = d_A[src_base + 1 + i*fid_count].x;
d_B[dst_base1 + t_id*lt + i].y = d_A[src_base + 1 + i*fid_count].y;
d_B[dst_base2 + t_id*lt + i].x = d_A[src_base + 2 + i*fid_count].x;
d_B[dst_base2 + t_id*lt + i].y = d_A[src_base + 2 + i*fid_count].y;
d_B[dst_base3 + t_id*lt + i].x = d_A[src_base + 3 + i*fid_count].x;
d_B[dst_base3 + t_id*lt + i].y = d_A[src_base + 3 + i*fid_count].y;
}
}
}
/*
 * Benchmark: compare a strided 1-D cuFFT down the "columns" of an NX x NY
 * complex matrix against the transpose / contiguous-1-D-FFT / transpose-back
 * alternative, and verify both produce the same result.
 *
 * Fixes vs. original: every resource was leaked (no cufftDestroy, no
 * cudaFree / cudaFreeHost, no delete[] of the plan-size arrays) and main
 * had no return statement. Behavior of the measured loop is unchanged.
 */
int main(int argc, char **argv) {
    int fft_trials = DEFAULT_FFT_TRIALS;
    int meta_trials = DEFAULT_META_TRIALS;
    printf("[INFO] META trials: %d\n", meta_trials);
    printf("[INFO] FFT trials: %d\n", fft_trials);
    int nx = NX;
    int ny = NY;
    printf("[INFO] NX Length: %d\n", nx);
    printf("[INFO] NY Length: %d\n", ny);
    // Pinned host buffers (async-copy capable).
    cufftComplex *h_M, *h_M2, *h_M3;
    cudaMallocHost((void **) &h_M, sizeof(cufftComplex) * NX * NY);
    cudaMallocHost((void **) &h_M2, sizeof(cufftComplex) * NX * NY);
    cudaMallocHost((void **) &h_M3, sizeof(cufftComplex) * NX * NY);
    cufftComplex *d_M, *d_M1, *d_M2, *d_M3, *d_M4, *d_M5;
    cudaMalloc((void **) &d_M, sizeof(cufftComplex) * NX * NY);
    cudaMalloc((void **) &d_M1, sizeof(cufftComplex) * NX * NY);
    cudaMalloc((void **) &d_M2, sizeof(cufftComplex) * NX * NY);
    cudaMalloc((void **) &d_M3, sizeof(cufftComplex) * NX * NY);
    cudaMalloc((void **) &d_M4, sizeof(cufftComplex) * NX * NY);
    cudaMalloc((void **) &d_M5, sizeof(cufftComplex) * NX * NY);
    // Random integer signal in [0, 10) as the real part.
    srand(0); // fixed seed for reproducibility
    for (int i = 0; i < NX*NY; i++) {
        h_M[i].x = (int)((float)rand()) % 10;
        h_M[i].y = 0.0;
    }
    cudaMemcpy(d_M, h_M, sizeof(cufftComplex) * NX * NY, cudaMemcpyHostToDevice);
    cudaMemcpy(d_M1, h_M, sizeof(cufftComplex) * NX * NY, cudaMemcpyHostToDevice);
    cufftHandle fft_plan2D;
    cufftPlan2d(&fft_plan2D, nx, ny, CUFFT_C2C);
    // Plan A: ny strided length-nx transforms (stride ny, dist 1).
    cufftHandle fft_plan1D1D;
    int *n = new int[1];
    n[0] = nx;
    int inembed_pre = nx;
    int *inembed = &inembed_pre;
    int istride = ny;
    int idist = 1;
    cufftPlanMany(&fft_plan1D1D, 1, n, inembed, istride, idist, inembed, istride, idist, CUFFT_C2C, ny);
    // Plan B: nx contiguous length-ny transforms.
    cufftHandle fft_plan1D1DR;
    int *nR = new int[1];
    nR[0] = ny;
    int inembed_preR = ny;
    int *inembedR = &inembed_preR;
    int istrideR = 1;
    int idistR = ny;
    cufftPlanMany(&fft_plan1D1DR, 1, nR, inembedR, istrideR, idistR, inembedR, istrideR, idistR, CUFFT_C2C, nx);
    // Plan C: ny contiguous length-nx transforms (used after the transpose).
    cufftHandle fft_plan1D1DR2;
    int *nR2 = new int[1];
    nR2[0] = nx;
    int inembed_preR2 = nx;
    int *inembedR2 = &inembed_preR2;
    int istrideR2 = 1;
    int idistR2 = nx;
    cufftPlanMany(&fft_plan1D1DR2, 1, nR2, inembedR2, istrideR2, idistR2, inembedR2, istrideR2, idistR2, CUFFT_C2C, ny);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float sum_of_elapsed_times2D = 0.0;
    float sum_of_elapsed_times1D1D = 0.0;
    printf("[INFO] Run benchmark 1D1D\n");
    for (int i = 0; i < meta_trials; i++) {
        cudaEventRecord(start, 0);
        for (int j = 0; j < fft_trials; j++) {
            // Reference: strided 1-D FFT directly on the AoS layout.
            cufftExecC2C(fft_plan1D1D, d_M, d_M5, CUFFT_FORWARD);
            // Candidate: transpose to SoA, contiguous FFT, transpose back.
            bp_aos_to_soa<<<500,32>>>((cufftComplex*)d_M1, (cufftComplex*)d_M2, NX, NY, NY);
            cufftExecC2C(fft_plan1D1DR2, d_M2, d_M3, CUFFT_FORWARD);
            bp_soa_to_aos<<<500,32>>>((cufftComplex*)d_M3, (cufftComplex*)d_M4, NX, NY, NY);
        }
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        float elapsed_time_ms;
        cudaEventElapsedTime(&elapsed_time_ms, start, stop);
        float elapsed_time_sec = elapsed_time_ms / 1000.0;
        sum_of_elapsed_times1D1D += elapsed_time_sec;
        printf("%f sec\n", elapsed_time_sec);
    }
    // h_M3: direct strided result; h_M2: transpose-FFT-transpose result.
    cudaMemcpy(h_M3, d_M5, sizeof(cufftComplex) * NX * NY, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_M2, d_M4, sizeof(cufftComplex) * NX * NY, cudaMemcpyDeviceToHost);
    bool correct = true;
    for (int i = 0; i < NX; ++i){
        for (int j = 0; j < NY; ++j){
            // Loose float tolerance: large-N single-precision FFTs differ in low bits.
            if (fabs(h_M3[i*NY +j].x - h_M2[i*NY +j].x) > 1e-1 || fabs(h_M3[i*NY +j].y - h_M2[i*NY +j].y) > 1e-1){
                correct = false;
            }
        }
    }
    std::cout << "correct result? " << (correct ? "yes" : "no") << std::endl;
    printf("[INFO] Finished!\n");
    printf("[INFO] Average 2D: %lf sec\n", sum_of_elapsed_times2D / meta_trials);
    printf("[INFO] Average 1D1D: %lf sec\n", sum_of_elapsed_times1D1D / meta_trials);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Cleanup that the original leaked.
    cufftDestroy(fft_plan2D);
    cufftDestroy(fft_plan1D1D);
    cufftDestroy(fft_plan1D1DR);
    cufftDestroy(fft_plan1D1DR2);
    delete[] n;
    delete[] nR;
    delete[] nR2;
    cudaFree(d_M);
    cudaFree(d_M1);
    cudaFree(d_M2);
    cudaFree(d_M3);
    cudaFree(d_M4);
    cudaFree(d_M5);
    cudaFreeHost(h_M);
    cudaFreeHost(h_M2);
    cudaFreeHost(h_M3);
    return 0;
}
|
12,289 | //==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <cuda.h>
#include <iostream>
#include <vector>
#define N 16
//# kernel code to perform VectorAdd on GPU
//# Element-wise vector addition: one thread per element.
//# Expects a single-block launch with exactly as many threads as elements.
__global__ void VectorAddKernel(float* A, float* B, float* C)
{
    const int idx = threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
/*
 * Add two length-N vectors on the GPU and print the result.
 * Fix vs. original: no CUDA call was error-checked, so a failed
 * allocation or launch silently printed the untouched zeros in C.
 */
int main()
{
    //# Initialize vectors on host
    float A[N] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
    float B[N] = {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
    float C[N] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    //# Allocate memory on device
    float *d_A, *d_B, *d_C;
    if (cudaMalloc(&d_A, N*sizeof(float)) != cudaSuccess ||
        cudaMalloc(&d_B, N*sizeof(float)) != cudaSuccess ||
        cudaMalloc(&d_C, N*sizeof(float)) != cudaSuccess) {
        std::cerr << "cudaMalloc failed\n";
        return 1;
    }
    //# copy vector data from host to device
    cudaMemcpy(d_A, A, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, N*sizeof(float), cudaMemcpyHostToDevice);
    //# submit task to compute VectorAdd on device (one thread per element)
    VectorAddKernel<<<1, N>>>(d_A, d_B, d_C);
    cudaError_t err = cudaGetLastError();  //# surface launch errors
    if (err != cudaSuccess) {
        std::cerr << "kernel launch failed: " << cudaGetErrorString(err) << "\n";
        return 1;
    }
    //# copy result of vector data from device to host (blocking, so no extra sync)
    err = cudaMemcpy(C, d_C, N*sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        std::cerr << "cudaMemcpy failed: " << cudaGetErrorString(err) << "\n";
        return 1;
    }
    //# print result on host
    for (int i = 0; i < N; i++) std::cout<< C[i] << " ";
    std::cout << "\n";
    //# free allocation on device
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
12,290 | #include <stdio.h>
#include <cuda.h>
/* Read a square matrix (N*N) and count
* the no. of prime numbers on its border.
* Use 2,2 grid and a 2D block.
*/
// Trial-division primality test for one device thread.
// Returns false for num <= 1; otherwise tests divisors up to sqrt(num).
// Fix vs. original: the loop bound used sqrtf(num) — single-precision
// rounding can mis-bound the search for large ints. Use the exact
// integer condition i*i <= num, computed in 64-bit to avoid overflow.
__device__ bool checkPrime(int num){
    if (num <= 1)
        return false;
    for (long long i = 2; i * i <= (long long)num; i++){
        if (num % (int)i == 0)
            return false;
    }
    return true;
}
// Each thread determines if it's on a prime valued border element
// If it is, it increments primeCount atomically
// Count prime-valued border elements of a row-major N x N matrix.
// The flattened thread id covers 0..N*N-1 once per element (launch must
// supply exactly N*N threads across the grid); element mat[id] is border
// iff the row-major index id lies on the first/last row or column.
// Matching primes are counted with a single atomic increment each.
__global__ void borderPrimeCount(int *mat, int *primeCount, int N){
    int blockId = gridDim.x * blockIdx.y + blockIdx.x;
    int tid = blockId * (blockDim.x * blockDim.y)
            + threadIdx.y * blockDim.x
            + threadIdx.x;
    int value = mat[tid];
    bool topOrBottom = (tid < N) || (tid >= N * (N - 1));
    bool leftEdge = (tid % N == 0);
    bool rightEdge = (tid % N == N - 1);
    if (topOrBottom || leftEdge || rightEdge){
        if (checkPrime(value))
            atomicAdd(primeCount, 1);
    }
}
/*
 * Count primes on the border of a fixed 6x6 matrix using a (2,2) grid of
 * (3,3) blocks (one thread per element).
 * Fix vs. original: only the final D2H memcpy was error-checked; alloc,
 * H2D copies, and the kernel launch now surface their errors too.
 */
int main()
{
    const int N = 6;
    // host copy of matrix (row-major)
    int mat[N * N] = {
        0, 1, 2, 3, 4, 5,
        1, 2, 3, 4, 5, 6,
        2, 3, 4, 5, 6, 7,
        3, 4, 5, 6, 7, 8,
        4, 5, 6, 7, 8, 9,
        5, 6, 7, 8, 9, 10
    };
    int primeCount = 0;
    // device copies of variables
    int *d_mat, *d_primeCount;
    int sizeMat = N * N * sizeof(int);
    cudaError_t err;
    // Allocate space for device copies
    if (cudaMalloc((void **)&d_mat, sizeMat) != cudaSuccess ||
        cudaMalloc((void **)&d_primeCount, sizeof(int)) != cudaSuccess) {
        printf("CUDA error: device allocation failed\n");
        return 1;
    }
    // Copy inputs to device
    err = cudaMemcpy(d_mat, mat, sizeMat, cudaMemcpyHostToDevice);
    if (err == cudaSuccess)
        err = cudaMemcpy(d_primeCount, &primeCount, sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        printf("CUDA error copying to Device: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Use a (2,2) grid and 2D block: 4 * (3*3) = 36 threads, one per element
    dim3 dimGrid(2,2,1);
    dim3 dimBlock(N/2, N/2, 1);
    // Launch kernel on GPU
    borderPrimeCount<<<dimGrid, dimBlock>>>(d_mat, d_primeCount, N);
    err = cudaGetLastError();  // catch bad launch configuration
    if (err != cudaSuccess)
        printf("CUDA launch error: %s\n", cudaGetErrorString(err));
    err = cudaMemcpy(&primeCount, d_primeCount, sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
        printf("CUDA error copying to Host: %s\n", cudaGetErrorString(err));
    printf("Found %d primes on border elements\n", primeCount);
    // Cleanup
    cudaFree(d_mat);
    cudaFree(d_primeCount);
    return 0;
}
|
12,291 | extern "C" {
typedef struct {
int e0;
char* e1;
} struct_Buffer_6987;
typedef struct {
struct_Buffer_6987 e0;
int e1;
int e2;
} struct_filter_6986;
typedef struct {
struct_Buffer_6987 e0;
struct_Buffer_6987 e1;
int e2;
int e3;
} struct_image_6992;
__device__ inline int threadIdx_x() { return threadIdx.x; }
__device__ inline int threadIdx_y() { return threadIdx.y; }
__device__ inline int threadIdx_z() { return threadIdx.z; }
__device__ inline int blockIdx_x() { return blockIdx.x; }
__device__ inline int blockIdx_y() { return blockIdx.y; }
__device__ inline int blockIdx_z() { return blockIdx.z; }
__device__ inline int blockDim_x() { return blockDim.x; }
__device__ inline int blockDim_y() { return blockDim.y; }
__device__ inline int blockDim_z() { return blockDim.z; }
__device__ inline int gridDim_x() { return gridDim.x; }
__device__ inline int gridDim_y() { return gridDim.y; }
__device__ inline int gridDim_z() { return gridDim.z; }
__global__ void lambda_28285(struct_filter_6986, double*, struct_image_6992, double*, struct_Buffer_6987);
__global__ void lambda_28609(struct_Buffer_6987, struct_image_6992, struct_filter_6986, double*, double*, double*);
__global__ __launch_bounds__ (128 * 1 * 1) void lambda_28285(struct_filter_6986 _28288_34329, double* _28289_34330, struct_image_6992 _28290_34331, double* _28291_34332, struct_Buffer_6987 _28292_34333) {
int bdimx_34339;
int pbdimx_34339;
int bdimy_34345;
int pbdimy_34345;
int bidx_34351;
int pbidx_34351;
int bidy_34357;
int pbidy_34357;
int tidx_34363;
int ptidx_34363;
int tidy_34369;
int ptidy_34369;
double* reserve_shared_34377;
double* preserve_shared_34377;
double* reserve_shared_34385;
double* preserve_shared_34385;
int _34396;
int p_34396;
int _34452;
int p_34452;
int _34506;
int p_34506;
int _34531;
int p_34531;
int _34580;
int p_34580;
double sum_34582;
double psum_34582;
int _34536;
int p_34536;
int _34457;
int p_34457;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
bdimx_34339 = blockDim_x();
pbdimx_34339 = bdimx_34339;
l34337: ;
bdimx_34339 = pbdimx_34339;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
bdimy_34345 = blockDim_y();
pbdimy_34345 = bdimy_34345;
l34343: ;
bdimy_34345 = pbdimy_34345;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
bidx_34351 = blockIdx_x();
pbidx_34351 = bidx_34351;
l34349: ;
bidx_34351 = pbidx_34351;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
bidy_34357 = blockIdx_y();
pbidy_34357 = bidy_34357;
l34355: ;
bidy_34357 = pbidy_34357;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
tidx_34363 = threadIdx_x();
ptidx_34363 = tidx_34363;
l34361: ;
tidx_34363 = ptidx_34363;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
tidy_34369 = threadIdx_y();
ptidy_34369 = tidy_34369;
l34367: ;
tidy_34369 = ptidy_34369;
#line 215 "impala/gpu_device_shm.impala"
__shared__ double reserver_reserve_shared_34377[938];
preserve_shared_34377 = reserver_reserve_shared_34377;
l34375: ;
reserve_shared_34377 = preserve_shared_34377;
#line 223 "impala/gpu_device_shm.impala"
__shared__ double reserver_reserve_shared_34385[49];
preserve_shared_34385 = reserver_reserve_shared_34385;
l34383: ;
reserve_shared_34385 = preserve_shared_34385;
#line 203 "impala/gpu_device_shm.impala"
int _34419;
_34419 = bidy_34357 * bdimy_34345;
#line 201 "impala/gpu_device_shm.impala"
int _34410;
_34410 = bidx_34351 * bdimx_34339;
#line 201 "impala/gpu_device_shm.impala"
int gid_x_34411;
gid_x_34411 = _34410 + tidx_34363;
#line 11 "impala/main.impala"
int _34425;
_34425 = _28290_34331.e3;
#line 11 "impala/main.impala"
int _34416;
_34416 = _28290_34331.e2;
#line 4 "impala/gaussian.impala"
int _34399;
_34399 = _28288_34329.e1;
#line 203 "impala/gpu_device_shm.impala"
int gid_y_34420;
gid_y_34420 = _34419 + tidy_34369;
#line 207 "impala/gpu_device_shm.impala"
int _34389;
_34389 = _28288_34329.e2;
#line 4 "impala/gaussian.impala"
int h_anchor_34400;
h_anchor_34400 = _34399 / 2;
#line 207 "impala/gpu_device_shm.impala"
int extend_height_34390;
extend_height_34390 = _34389 / 2;
#line 209 "impala/gpu_device_shm.impala"
int _34401;
_34401 = 2 * h_anchor_34400;
#line 211 "impala/gpu_device_shm.impala"
int _34391;
_34391 = 2 * extend_height_34390;
#line 209 "impala/gpu_device_shm.impala"
int shm_dimx_34402;
shm_dimx_34402 = bdimx_34339 + _34401;
#line 211 "impala/gpu_device_shm.impala"
int shm_dimy_34392;
shm_dimy_34392 = bdimy_34345 + _34391;
#line 52 "impala/gpu_device_shm.impala"
bool _34393;
_34393 = 0 < shm_dimy_34392;
#line 52 "impala/gpu_device_shm.impala"
if (_34393) goto l34394; else goto l34629;
l34629: ;
#line 253 "impala/gpu_device_shm.impala"
goto l34501;
l34394: ;
#line 241 "impala/gpu_device_shm.impala"
int _34412;
_34412 = gid_x_34411 - h_anchor_34400;
#line 239 "impala/gpu_device_shm.impala"
bool _34408;
_34408 = tidy_34369 < shm_dimy_34392;
#line 243 "impala/gpu_device_shm.impala"
int _34421;
_34421 = gid_y_34420 - extend_height_34390;
#line 246 "impala/gpu_device_shm.impala"
bool _34422;
_34422 = 0 <= _34421;
#line 248 "impala/gpu_device_shm.impala"
int _34437;
_34437 = tidy_34369 * shm_dimx_34402;
#line 246 "impala/gpu_device_shm.impala"
bool _34426;
_34426 = _34421 < _34425;
#line 249 "impala/gpu_device_shm.impala"
int _34431;
_34431 = _34421 * _34416;
#line 52 "impala/gpu_device_shm.impala"
p_34396 = 0;
goto l34395;
l34395: ;
_34396 = p_34396;
#line 52 "impala/gpu_device_shm.impala"
bool _34403;
_34403 = _34396 < shm_dimx_34402;
#line 52 "impala/gpu_device_shm.impala"
if (_34403) goto l34404; else goto l34450;
l34450: ;
#line 52 "impala/gpu_device_shm.impala"
p_34452 = bdimy_34345;
goto l34451;
l34451: ;
_34452 = p_34452;
#line 52 "impala/gpu_device_shm.impala"
bool _34454;
_34454 = _34452 < shm_dimy_34392;
#line 52 "impala/gpu_device_shm.impala"
if (_34454) goto l34455; else goto l34500;
l34500: ;
#line 253 "impala/gpu_device_shm.impala"
goto l34501;
l34501: ;
#line 52 "impala/gpu_device_shm.impala"
bool _34503;
_34503 = 0 < _34389;
#line 52 "impala/gpu_device_shm.impala"
if (_34503) goto l34504; else goto l34628;
l34628: ;
#line 271 "impala/gpu_device_shm.impala"
goto l34563;
l34504: ;
#line 265 "impala/gpu_device_shm.impala"
bool _34513;
_34513 = tidy_34369 < _34389;
#line 267 "impala/gpu_device_shm.impala"
int _34518;
_34518 = tidy_34369 * _34399;
#line 52 "impala/gpu_device_shm.impala"
p_34506 = 0;
goto l34505;
l34505: ;
_34506 = p_34506;
#line 52 "impala/gpu_device_shm.impala"
bool _34508;
_34508 = _34506 < _34399;
#line 52 "impala/gpu_device_shm.impala"
if (_34508) goto l34509; else goto l34529;
l34529: ;
#line 52 "impala/gpu_device_shm.impala"
p_34531 = bdimy_34345;
goto l34530;
l34530: ;
_34531 = p_34531;
#line 52 "impala/gpu_device_shm.impala"
bool _34533;
_34533 = _34531 < _34389;
#line 52 "impala/gpu_device_shm.impala"
if (_34533) goto l34534; else goto l34562;
l34562: ;
#line 271 "impala/gpu_device_shm.impala"
goto l34563;
l34563: ;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
__syncthreads();
l34568: ;
#line 277 "impala/gpu_device_shm.impala"
bool _34570;
_34570 = gid_x_34411 < _34416;
#line 277 "impala/gpu_device_shm.impala"
if (_34570) goto l34571; else goto l34627;
l34627: ;
#line 280 "impala/gpu_device_shm.impala"
goto l34626;
l34571: ;
#line 277 "impala/gpu_device_shm.impala"
bool _34572;
_34572 = gid_y_34420 < _34425;
#line 277 "impala/gpu_device_shm.impala"
if (_34572) goto l34573; else goto l34625;
l34625: ;
#line 280 "impala/gpu_device_shm.impala"
goto l34626;
l34626: ;
return ;
l34573: ;
#line 78 "impala/gpu_device_shm.impala"
char* _34607;
_34607 = _28292_34333.e1;
#line 78 "impala/gpu_device_shm.impala"
double* _34608;
union { double* dst; char* src; } u_34608;
u_34608.src = _34607;
_34608 = u_34608.dst;
#line 217 "impala/gpu_device_shm.impala"
int _34597;
_34597 = h_anchor_34400 - _34410;
#line 78 "impala/gpu_device_shm.impala"
int _34609;
_34609 = gid_y_34420 * _34416;
#line 218 "impala/gpu_device_shm.impala"
int _34592;
_34592 = extend_height_34390 - _34419;
#line 86 "impala/gpu_device_shm.impala"
int _34593;
_34593 = gid_y_34420 + _34592;
#line 78 "impala/gpu_device_shm.impala"
int _34610;
_34610 = _34609 + gid_x_34411;
#line 17 "impala/gaussian.impala"
bool _34574;
_34574 = h_anchor_34400 <= gid_x_34411;
#line 78 "impala/gpu_device_shm.impala"
double* _34611;
_34611 = _34608 + _34610;
#line 86 "impala/gpu_device_shm.impala"
int _34594;
_34594 = _34593 * shm_dimx_34402;
#line 17 "impala/gaussian.impala"
if (_34574) goto l34575; else goto l34624;
l34624: ;
#line 27 "impala/gaussian.impala"
goto l34616;
l34575: ;
#line 17 "impala/gaussian.impala"
int _34576;
_34576 = _34416 - h_anchor_34400;
#line 17 "impala/gaussian.impala"
bool _34577;
_34577 = gid_x_34411 < _34576;
#line 17 "impala/gaussian.impala"
if (_34577) goto l34578; else goto l34615;
l34615: ;
#line 27 "impala/gaussian.impala"
goto l34616;
l34616: ;
#line 86 "impala/gpu_device_shm.impala"
int _34617;
_34617 = _34594 + gid_x_34411;
#line 86 "impala/gpu_device_shm.impala"
int _34618;
_34618 = _34617 + _34597;
#line 86 "impala/gpu_device_shm.impala"
double* _34619;
_34619 = reserve_shared_34377 + _34618;
#line 86 "impala/gpu_device_shm.impala"
double _34620;
_34620 = *_34619;
#line 86 "impala/gpu_device_shm.impala"
double _34622;
_34622 = _34620;
#line 78 "impala/gpu_device_shm.impala"
*_34611 = _34622;
return ;
l34578: ;
#line 19 "impala/gaussian.impala"
int _34584;
_34584 = 1 + h_anchor_34400;
#line 19 "impala/gaussian.impala"
int _34613;
_34613 = 0 - h_anchor_34400;
#line 27 "impala/gpu_device_shm.impala"
p_34580 = _34613;
psum_34582 = 0.000000e+00;
goto l34579;
l34579: ;
_34580 = p_34580;
sum_34582 = psum_34582;
#line 27 "impala/gpu_device_shm.impala"
bool _34585;
_34585 = _34580 < _34584;
#line 27 "impala/gpu_device_shm.impala"
if (_34585) goto l34586; else goto l34606;
l34606: ;
#line 78 "impala/gpu_device_shm.impala"
*_34611 = sum_34582;
return ;
l34586: ;
#line 31 "impala/gpu_device_shm.impala"
int _34587;
_34587 = 1 + _34580;
#line 21 "impala/gaussian.impala"
int _34595;
_34595 = gid_x_34411 + _34580;
#line 21 "impala/gaussian.impala"
int _34588;
_34588 = _34580 + h_anchor_34400;
#line 86 "impala/gpu_device_shm.impala"
int _34596;
_34596 = _34594 + _34595;
#line 90 "impala/gpu_device_shm.impala"
double* i_34589;
i_34589 = reserve_shared_34385 + _34588;
#line 86 "impala/gpu_device_shm.impala"
int _34598;
_34598 = _34596 + _34597;
#line 91 "impala/gpu_device_shm.impala"
double _34590;
_34590 = *i_34589;
#line 86 "impala/gpu_device_shm.impala"
double* _34599;
_34599 = reserve_shared_34377 + _34598;
#line 91 "impala/gpu_device_shm.impala"
double _34602;
_34602 = _34590;
#line 86 "impala/gpu_device_shm.impala"
double _34600;
_34600 = *_34599;
#line 86 "impala/gpu_device_shm.impala"
double _34603;
_34603 = _34600;
#line 21 "impala/gaussian.impala"
double _34604;
_34604 = _34602 * _34603;
#line 21 "impala/gaussian.impala"
double _34605;
_34605 = sum_34582 + _34604;
#line 27 "impala/gpu_device_shm.impala"
p_34580 = _34587;
psum_34582 = _34605;
goto l34579;
l34534: ;
#line 262 "impala/gpu_device_shm.impala"
int shm_index_y_34543;
shm_index_y_34543 = tidy_34369 + _34531;
#line 265 "impala/gpu_device_shm.impala"
bool _34544;
_34544 = shm_index_y_34543 < _34389;
#line 267 "impala/gpu_device_shm.impala"
int _34549;
_34549 = shm_index_y_34543 * _34399;
#line 52 "impala/gpu_device_shm.impala"
p_34536 = 0;
goto l34535;
l34535: ;
_34536 = p_34536;
#line 52 "impala/gpu_device_shm.impala"
bool _34538;
_34538 = _34536 < _34399;
#line 52 "impala/gpu_device_shm.impala"
if (_34538) goto l34539; else goto l34560;
l34560: ;
#line 56 "impala/gpu_device_shm.impala"
int _34561;
_34561 = _34531 + bdimy_34345;
#line 52 "impala/gpu_device_shm.impala"
p_34531 = _34561;
goto l34530;
l34539: ;
#line 260 "impala/gpu_device_shm.impala"
int shm_index_x_34540;
shm_index_x_34540 = tidx_34363 + _34536;
#line 265 "impala/gpu_device_shm.impala"
bool _34541;
_34541 = shm_index_x_34540 < _34399;
#line 265 "impala/gpu_device_shm.impala"
if (_34541) goto l34542; else goto l34559;
l34559: ;
#line 269 "impala/gpu_device_shm.impala"
goto l34558;
l34542: ;
#line 265 "impala/gpu_device_shm.impala"
if (_34544) goto l34545; else goto l34557;
l34557: ;
#line 269 "impala/gpu_device_shm.impala"
goto l34558;
l34558: ;
#line 54 "impala/gpu_device_shm.impala"
goto l34546;
l34545: ;
#line 267 "impala/gpu_device_shm.impala"
int _34550;
_34550 = _34549 + shm_index_x_34540;
#line 267 "impala/gpu_device_shm.impala"
double* _34551;
_34551 = _28291_34332 + _34550;
#line 267 "impala/gpu_device_shm.impala"
double* _34554;
_34554 = reserve_shared_34385 + _34550;
#line 268 "impala/gpu_device_shm.impala"
double _34552;
_34552 = *_34551;
#line 268 "impala/gpu_device_shm.impala"
double _34555;
_34555 = _34552;
#line 267 "impala/gpu_device_shm.impala"
*_34554 = _34555;
#line 54 "impala/gpu_device_shm.impala"
goto l34546;
l34546: ;
#line 56 "impala/gpu_device_shm.impala"
int _34548;
_34548 = _34536 + bdimx_34339;
#line 52 "impala/gpu_device_shm.impala"
p_34536 = _34548;
goto l34535;
l34509: ;
#line 260 "impala/gpu_device_shm.impala"
int shm_index_x_34510;
shm_index_x_34510 = tidx_34363 + _34506;
#line 265 "impala/gpu_device_shm.impala"
bool _34511;
_34511 = shm_index_x_34510 < _34399;
#line 265 "impala/gpu_device_shm.impala"
if (_34511) goto l34512; else goto l34528;
l34528: ;
#line 269 "impala/gpu_device_shm.impala"
goto l34527;
l34512: ;
#line 265 "impala/gpu_device_shm.impala"
if (_34513) goto l34514; else goto l34526;
l34526: ;
#line 269 "impala/gpu_device_shm.impala"
goto l34527;
l34527: ;
#line 54 "impala/gpu_device_shm.impala"
goto l34515;
l34514: ;
#line 267 "impala/gpu_device_shm.impala"
int _34519;
_34519 = _34518 + shm_index_x_34510;
#line 267 "impala/gpu_device_shm.impala"
double* _34523;
_34523 = reserve_shared_34385 + _34519;
#line 267 "impala/gpu_device_shm.impala"
double* _34520;
_34520 = _28291_34332 + _34519;
#line 268 "impala/gpu_device_shm.impala"
double _34521;
_34521 = *_34520;
#line 268 "impala/gpu_device_shm.impala"
double _34524;
_34524 = _34521;
#line 267 "impala/gpu_device_shm.impala"
*_34523 = _34524;
#line 54 "impala/gpu_device_shm.impala"
goto l34515;
l34515: ;
#line 56 "impala/gpu_device_shm.impala"
int _34517;
_34517 = _34506 + bdimx_34339;
#line 52 "impala/gpu_device_shm.impala"
p_34506 = _34517;
goto l34505;
l34455: ;
#line 243 "impala/gpu_device_shm.impala"
int img_index_y_34472;
img_index_y_34472 = _34421 + _34452;
#line 249 "impala/gpu_device_shm.impala"
int _34480;
_34480 = img_index_y_34472 * _34416;
#line 246 "impala/gpu_device_shm.impala"
bool _34475;
_34475 = img_index_y_34472 < _34425;
#line 236 "impala/gpu_device_shm.impala"
int shm_index_y_34464;
shm_index_y_34464 = tidy_34369 + _34452;
#line 246 "impala/gpu_device_shm.impala"
bool _34473;
_34473 = 0 <= img_index_y_34472;
#line 239 "impala/gpu_device_shm.impala"
bool _34465;
_34465 = shm_index_y_34464 < shm_dimy_34392;
#line 248 "impala/gpu_device_shm.impala"
int _34485;
_34485 = shm_index_y_34464 * shm_dimx_34402;
#line 52 "impala/gpu_device_shm.impala"
p_34457 = 0;
goto l34456;
l34456: ;
_34457 = p_34457;
#line 52 "impala/gpu_device_shm.impala"
bool _34459;
_34459 = _34457 < shm_dimx_34402;
#line 52 "impala/gpu_device_shm.impala"
if (_34459) goto l34460; else goto l34498;
l34498: ;
#line 56 "impala/gpu_device_shm.impala"
int _34499;
_34499 = _34452 + bdimy_34345;
#line 52 "impala/gpu_device_shm.impala"
p_34452 = _34499;
goto l34451;
l34460: ;
#line 234 "impala/gpu_device_shm.impala"
int shm_index_x_34461;
shm_index_x_34461 = tidx_34363 + _34457;
#line 239 "impala/gpu_device_shm.impala"
bool _34462;
_34462 = shm_index_x_34461 < shm_dimx_34402;
#line 239 "impala/gpu_device_shm.impala"
if (_34462) goto l34463; else goto l34497;
l34497: ;
#line 251 "impala/gpu_device_shm.impala"
goto l34496;
l34463: ;
#line 239 "impala/gpu_device_shm.impala"
if (_34465) goto l34466; else goto l34495;
l34495: ;
#line 251 "impala/gpu_device_shm.impala"
goto l34496;
l34496: ;
#line 54 "impala/gpu_device_shm.impala"
goto l34477;
l34466: ;
#line 241 "impala/gpu_device_shm.impala"
int img_index_x_34467;
img_index_x_34467 = _34412 + _34457;
#line 246 "impala/gpu_device_shm.impala"
bool _34468;
_34468 = 0 <= img_index_x_34467;
#line 246 "impala/gpu_device_shm.impala"
if (_34468) goto l34469; else goto l34494;
l34494: ;
#line 250 "impala/gpu_device_shm.impala"
goto l34491;
l34469: ;
#line 246 "impala/gpu_device_shm.impala"
bool _34470;
_34470 = img_index_x_34467 < _34416;
#line 246 "impala/gpu_device_shm.impala"
if (_34470) goto l34471; else goto l34493;
l34493: ;
#line 250 "impala/gpu_device_shm.impala"
goto l34491;
l34471: ;
#line 246 "impala/gpu_device_shm.impala"
if (_34473) goto l34474; else goto l34492;
l34492: ;
#line 250 "impala/gpu_device_shm.impala"
goto l34491;
l34474: ;
#line 246 "impala/gpu_device_shm.impala"
if (_34475) goto l34476; else goto l34490;
l34490: ;
#line 250 "impala/gpu_device_shm.impala"
goto l34491;
l34491: ;
#line 54 "impala/gpu_device_shm.impala"
goto l34477;
l34476: ;
#line 249 "impala/gpu_device_shm.impala"
int _34481;
_34481 = _34480 + img_index_x_34467;
#line 248 "impala/gpu_device_shm.impala"
int _34486;
_34486 = _34485 + shm_index_x_34461;
#line 249 "impala/gpu_device_shm.impala"
double* _34482;
_34482 = _28289_34330 + _34481;
#line 248 "impala/gpu_device_shm.impala"
double* _34487;
_34487 = reserve_shared_34377 + _34486;
#line 249 "impala/gpu_device_shm.impala"
double _34483;
_34483 = *_34482;
#line 249 "impala/gpu_device_shm.impala"
double _34488;
_34488 = _34483;
#line 248 "impala/gpu_device_shm.impala"
*_34487 = _34488;
#line 54 "impala/gpu_device_shm.impala"
goto l34477;
l34477: ;
#line 56 "impala/gpu_device_shm.impala"
int _34479;
_34479 = _34457 + bdimx_34339;
#line 52 "impala/gpu_device_shm.impala"
p_34457 = _34479;
goto l34456;
l34404: ;
#line 234 "impala/gpu_device_shm.impala"
int shm_index_x_34405;
shm_index_x_34405 = tidx_34363 + _34396;
#line 239 "impala/gpu_device_shm.impala"
bool _34406;
_34406 = shm_index_x_34405 < shm_dimx_34402;
#line 239 "impala/gpu_device_shm.impala"
if (_34406) goto l34407; else goto l34449;
l34449: ;
#line 251 "impala/gpu_device_shm.impala"
goto l34448;
l34407: ;
#line 239 "impala/gpu_device_shm.impala"
if (_34408) goto l34409; else goto l34447;
l34447: ;
#line 251 "impala/gpu_device_shm.impala"
goto l34448;
l34448: ;
#line 54 "impala/gpu_device_shm.impala"
goto l34428;
l34409: ;
#line 241 "impala/gpu_device_shm.impala"
int img_index_x_34413;
img_index_x_34413 = _34412 + _34396;
#line 246 "impala/gpu_device_shm.impala"
bool _34414;
_34414 = 0 <= img_index_x_34413;
#line 246 "impala/gpu_device_shm.impala"
if (_34414) goto l34415; else goto l34446;
l34446: ;
#line 250 "impala/gpu_device_shm.impala"
goto l34443;
l34415: ;
#line 246 "impala/gpu_device_shm.impala"
bool _34417;
_34417 = img_index_x_34413 < _34416;
#line 246 "impala/gpu_device_shm.impala"
if (_34417) goto l34418; else goto l34445;
l34445: ;
#line 250 "impala/gpu_device_shm.impala"
goto l34443;
l34418: ;
#line 246 "impala/gpu_device_shm.impala"
if (_34422) goto l34423; else goto l34444;
l34444: ;
#line 250 "impala/gpu_device_shm.impala"
goto l34443;
l34423: ;
#line 246 "impala/gpu_device_shm.impala"
if (_34426) goto l34427; else goto l34442;
l34442: ;
#line 250 "impala/gpu_device_shm.impala"
goto l34443;
l34443: ;
#line 54 "impala/gpu_device_shm.impala"
goto l34428;
l34427: ;
#line 249 "impala/gpu_device_shm.impala"
int _34432;
_34432 = _34431 + img_index_x_34413;
#line 248 "impala/gpu_device_shm.impala"
int _34438;
_34438 = _34437 + shm_index_x_34405;
#line 248 "impala/gpu_device_shm.impala"
double* _34439;
_34439 = reserve_shared_34377 + _34438;
#line 249 "impala/gpu_device_shm.impala"
double* _34433;
_34433 = _28289_34330 + _34432;
#line 249 "impala/gpu_device_shm.impala"
double _34434;
_34434 = *_34433;
#line 249 "impala/gpu_device_shm.impala"
double _34440;
_34440 = _34434;
#line 248 "impala/gpu_device_shm.impala"
*_34439 = _34440;
#line 54 "impala/gpu_device_shm.impala"
goto l34428;
l34428: ;
#line 56 "impala/gpu_device_shm.impala"
int _34430;
_34430 = _34396 + bdimx_34339;
#line 52 "impala/gpu_device_shm.impala"
p_34396 = _34430;
goto l34395;
}
// Machine-generated CUDA kernel (AnyDSL/Impala compiler output; see the
// "#line" directives referencing impala/gaussian.impala and
// impala/gpu_device_shm.impala). Control flow is an explicit goto/label CFG
// in SSA style: every loop-carried variable X has a shadow p_X that is
// written before jumping back to the loop-header label, where X = p_X is
// re-read. Statement order and label placement must be preserved exactly.
//
// What the visible loads/stores establish:
//  1. An image tile is staged from global memory (_28617_34638) into the
//     __shared__ buffer reserve_shared_34659; the tile is extended by a halo
//     (shm_dimx = bdimx + 2*extend_width, shm_dimy = bdimy + 2*v_anchor),
//     and out-of-range source texels are skipped (left unwritten).
//  2. Filter coefficients (_28615_34636) are copied into the __shared__
//     buffer reserve_shared_34662.
//  3. After a single block-wide __syncthreads(), each in-bounds thread
//     accumulates sum += coeff * tile_value over an offset k in
//     [-v_anchor, +v_anchor] and stores the result into the output image
//     carried inside _28612_34633.e1 (reinterpreted char* -> double*).
// NOTE(review): the gaussian.impala line refs and the row-wise offset loop
// suggest this is the vertical pass of a separable Gaussian convolution,
// with a pass-through path (l34882) for pixels inside the top/bottom
// border — inferred from index arithmetic, confirm against the Impala source.
__global__ __launch_bounds__ (128 * 1 * 1) void lambda_28609(struct_Buffer_6987 _28612_34633, struct_image_6992 _28613_34634, struct_filter_6986 _28614_34635, double* _28615_34636, double* _28616_34637, double* _28617_34638) {
    // SSA locals: each `p_*` is the "next iteration" shadow of its partner.
    int bdimx_34641;
    int pbdimx_34641;
    int bdimy_34644;
    int pbdimy_34644;
    int bidx_34647;
    int pbidx_34647;
    int bidy_34650;
    int pbidy_34650;
    int tidx_34653;
    int ptidx_34653;
    int tidy_34656;
    int ptidy_34656;
    double* reserve_shared_34659;
    double* preserve_shared_34659;
    double* reserve_shared_34662;
    double* preserve_shared_34662;
    int _34670;
    int p_34670;
    int _34723;
    int p_34723;
    int _34777;
    int p_34777;
    int _34802;
    int p_34802;
    int _34848;
    int p_34848;
    double sum_34850;
    double psum_34850;
    int _34807;
    int p_34807;
    int _34728;
    int p_34728;
    // Read launch geometry via the runtime's intrinsic wrappers; each read
    // goes through the compiler's p_/label idiom even though none of these
    // labels is ever jumped back to.
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
    bdimx_34641 = blockDim_x();
    pbdimx_34641 = bdimx_34641;
    l34639: ;
    bdimx_34641 = pbdimx_34641;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
    bdimy_34644 = blockDim_y();
    pbdimy_34644 = bdimy_34644;
    l34642: ;
    bdimy_34644 = pbdimy_34644;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
    bidx_34647 = blockIdx_x();
    pbidx_34647 = bidx_34647;
    l34645: ;
    bidx_34647 = pbidx_34647;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
    bidy_34650 = blockIdx_y();
    pbidy_34650 = bidy_34650;
    l34648: ;
    bidy_34650 = pbidy_34650;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
    tidx_34653 = threadIdx_x();
    ptidx_34653 = tidx_34653;
    l34651: ;
    tidx_34653 = ptidx_34653;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
    tidy_34656 = threadIdx_y();
    ptidy_34656 = tidy_34656;
    l34654: ;
    tidy_34656 = ptidy_34656;
    // Statically-sized shared buffers: 938 doubles for the image tile plus
    // halo, 49 doubles for the filter coefficients (sizes fixed by the
    // Impala compiler for this specialization).
#line 215 "impala/gpu_device_shm.impala"
    __shared__ double reserver_reserve_shared_34659[938];
    preserve_shared_34659 = reserver_reserve_shared_34659;
    l34657: ;
    reserve_shared_34659 = preserve_shared_34659;
#line 223 "impala/gpu_device_shm.impala"
    __shared__ double reserver_reserve_shared_34662[49];
    preserve_shared_34662 = reserver_reserve_shared_34662;
    l34660: ;
    reserve_shared_34662 = preserve_shared_34662;
    // _34697 = image height (e3), _34689 = image width (e2);
    // _34663/_34672 = filter extents (e2/e1) — field meanings inferred from
    // how they bound gid_y/gid_x below.
#line 11 "impala/main.impala"
    int _34697;
    _34697 = _28613_34634.e3;
#line 6 "impala/gaussian.impala"
    int _34663;
    _34663 = _28614_34635.e2;
#line 201 "impala/gpu_device_shm.impala"
    int _34683;
    _34683 = bidx_34647 * bdimx_34641;
#line 201 "impala/gpu_device_shm.impala"
    int gid_x_34684;
    gid_x_34684 = _34683 + tidx_34653;
#line 203 "impala/gpu_device_shm.impala"
    int _34692;
    _34692 = bidy_34650 * bdimy_34644;
#line 11 "impala/main.impala"
    int _34689;
    _34689 = _28613_34634.e2;
#line 205 "impala/gpu_device_shm.impala"
    int _34672;
    _34672 = _28614_34635.e1;
#line 6 "impala/gaussian.impala"
    int v_anchor_34664;
    v_anchor_34664 = _34663 / 2;
#line 203 "impala/gpu_device_shm.impala"
    int gid_y_34693;
    gid_y_34693 = _34692 + tidy_34656;
#line 205 "impala/gpu_device_shm.impala"
    int extend_width_34673;
    extend_width_34673 = _34672 / 2;
#line 211 "impala/gpu_device_shm.impala"
    int _34665;
    _34665 = 2 * v_anchor_34664;
#line 209 "impala/gpu_device_shm.impala"
    int _34674;
    _34674 = 2 * extend_width_34673;
    // Shared-tile dimensions: block size plus halo on both sides.
#line 211 "impala/gpu_device_shm.impala"
    int shm_dimy_34666;
    shm_dimy_34666 = bdimy_34644 + _34665;
#line 209 "impala/gpu_device_shm.impala"
    int shm_dimx_34675;
    shm_dimx_34675 = bdimx_34641 + _34674;
    // Phase 1: load the image tile (incl. halo) into shared memory,
    // strip-mined over x (step bdimx) and y (step bdimy).
#line 52 "impala/gpu_device_shm.impala"
    bool _34667;
    _34667 = 0 < shm_dimy_34666;
#line 52 "impala/gpu_device_shm.impala"
    if (_34667) goto l34668; else goto l34897;
    l34897: ;
#line 253 "impala/gpu_device_shm.impala"
    goto l34772;
    l34668: ;
#line 248 "impala/gpu_device_shm.impala"
    int _34708;
    _34708 = tidy_34656 * shm_dimx_34675;
#line 239 "impala/gpu_device_shm.impala"
    bool _34681;
    _34681 = tidy_34656 < shm_dimy_34666;
#line 243 "impala/gpu_device_shm.impala"
    int _34694;
    _34694 = gid_y_34693 - v_anchor_34664;
#line 246 "impala/gpu_device_shm.impala"
    bool _34695;
    _34695 = 0 <= _34694;
#line 241 "impala/gpu_device_shm.impala"
    int _34685;
    _34685 = gid_x_34684 - extend_width_34673;
#line 246 "impala/gpu_device_shm.impala"
    bool _34698;
    _34698 = _34694 < _34697;
#line 249 "impala/gpu_device_shm.impala"
    int _34703;
    _34703 = _34694 * _34689;
    // x-loop over the first row-strip (shm row = tidy).
#line 52 "impala/gpu_device_shm.impala"
    p_34670 = 0;
    goto l34669;
    l34669: ;
    _34670 = p_34670;
#line 52 "impala/gpu_device_shm.impala"
    bool _34676;
    _34676 = _34670 < shm_dimx_34675;
#line 52 "impala/gpu_device_shm.impala"
    if (_34676) goto l34677; else goto l34721;
    l34721: ;
    // y-loop over the remaining row-strips (shm rows tidy+bdimy, ...).
#line 52 "impala/gpu_device_shm.impala"
    p_34723 = bdimy_34644;
    goto l34722;
    l34722: ;
    _34723 = p_34723;
#line 52 "impala/gpu_device_shm.impala"
    bool _34725;
    _34725 = _34723 < shm_dimy_34666;
#line 52 "impala/gpu_device_shm.impala"
    if (_34725) goto l34726; else goto l34771;
    l34771: ;
#line 253 "impala/gpu_device_shm.impala"
    goto l34772;
    l34772: ;
    // Phase 2: copy filter coefficients into shared memory, same
    // strip-mined x/y loop structure.
#line 52 "impala/gpu_device_shm.impala"
    bool _34774;
    _34774 = 0 < _34663;
#line 52 "impala/gpu_device_shm.impala"
    if (_34774) goto l34775; else goto l34896;
    l34896: ;
#line 271 "impala/gpu_device_shm.impala"
    goto l34834;
    l34775: ;
#line 265 "impala/gpu_device_shm.impala"
    bool _34784;
    _34784 = tidy_34656 < _34663;
#line 267 "impala/gpu_device_shm.impala"
    int _34789;
    _34789 = tidy_34656 * _34672;
#line 52 "impala/gpu_device_shm.impala"
    p_34777 = 0;
    goto l34776;
    l34776: ;
    _34777 = p_34777;
#line 52 "impala/gpu_device_shm.impala"
    bool _34779;
    _34779 = _34777 < _34672;
#line 52 "impala/gpu_device_shm.impala"
    if (_34779) goto l34780; else goto l34800;
    l34800: ;
#line 52 "impala/gpu_device_shm.impala"
    p_34802 = bdimy_34644;
    goto l34801;
    l34801: ;
    _34802 = p_34802;
#line 52 "impala/gpu_device_shm.impala"
    bool _34804;
    _34804 = _34802 < _34663;
#line 52 "impala/gpu_device_shm.impala"
    if (_34804) goto l34805; else goto l34833;
    l34833: ;
#line 271 "impala/gpu_device_shm.impala"
    goto l34834;
    l34834: ;
    // Barrier between the shared-memory fill (phases 1-2) and the compute
    // phase; reached by every thread regardless of the guards above.
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
    __syncthreads();
    l34836: ;
    // Phase 3: per-pixel output. Threads outside the image return.
#line 277 "impala/gpu_device_shm.impala"
    bool _34838;
    _34838 = gid_x_34684 < _34689;
#line 277 "impala/gpu_device_shm.impala"
    if (_34838) goto l34839; else goto l34895;
    l34895: ;
#line 280 "impala/gpu_device_shm.impala"
    goto l34894;
    l34839: ;
#line 277 "impala/gpu_device_shm.impala"
    bool _34840;
    _34840 = gid_y_34693 < _34697;
#line 277 "impala/gpu_device_shm.impala"
    if (_34840) goto l34841; else goto l34893;
    l34893: ;
#line 280 "impala/gpu_device_shm.impala"
    goto l34894;
    l34894: ;
    return ;
    l34841: ;
    // _34878 = &output[gid_y * width + gid_x], obtained by reinterpreting
    // the Buffer's char* payload as double* via a union type-pun.
#line 39 "impala/gaussian.impala"
    bool _34842;
    _34842 = v_anchor_34664 <= gid_y_34693;
#line 217 "impala/gpu_device_shm.impala"
    int _34864;
    _34864 = extend_width_34673 - _34683;
#line 218 "impala/gpu_device_shm.impala"
    int _34860;
    _34860 = v_anchor_34664 - _34692;
#line 78 "impala/gpu_device_shm.impala"
    char* _34874;
    _34874 = _28612_34633.e1;
#line 78 "impala/gpu_device_shm.impala"
    double* _34875;
    union { double* dst; char* src; } u_34875;
    u_34875.src = _34874;
    _34875 = u_34875.dst;
#line 78 "impala/gpu_device_shm.impala"
    int _34876;
    _34876 = gid_y_34693 * _34689;
#line 78 "impala/gpu_device_shm.impala"
    int _34877;
    _34877 = _34876 + gid_x_34684;
#line 78 "impala/gpu_device_shm.impala"
    double* _34878;
    _34878 = _34875 + _34877;
    // Interior test: v_anchor <= gid_y < height - v_anchor selects the
    // convolution path (l34846); border rows take the pass-through path
    // (l34882) that copies the staged tile value unchanged.
#line 39 "impala/gaussian.impala"
    if (_34842) goto l34843; else goto l34892;
    l34892: ;
#line 49 "impala/gaussian.impala"
    goto l34882;
    l34843: ;
#line 39 "impala/gaussian.impala"
    int _34844;
    _34844 = _34697 - v_anchor_34664;
#line 39 "impala/gaussian.impala"
    bool _34845;
    _34845 = gid_y_34693 < _34844;
#line 39 "impala/gaussian.impala"
    if (_34845) goto l34846; else goto l34881;
    l34881: ;
#line 49 "impala/gaussian.impala"
    goto l34882;
    l34882: ;
    // Border path: out[pixel] = tile[pixel] (no filtering).
#line 86 "impala/gpu_device_shm.impala"
    int _34883;
    _34883 = gid_y_34693 + _34860;
#line 86 "impala/gpu_device_shm.impala"
    int _34884;
    _34884 = _34883 * shm_dimx_34675;
#line 86 "impala/gpu_device_shm.impala"
    int _34885;
    _34885 = _34884 + gid_x_34684;
#line 86 "impala/gpu_device_shm.impala"
    int _34886;
    _34886 = _34885 + _34864;
#line 86 "impala/gpu_device_shm.impala"
    double* _34887;
    _34887 = reserve_shared_34659 + _34886;
#line 86 "impala/gpu_device_shm.impala"
    double _34888;
    _34888 = *_34887;
#line 86 "impala/gpu_device_shm.impala"
    double _34890;
    _34890 = _34888;
#line 78 "impala/gpu_device_shm.impala"
    *_34878 = _34890;
    return ;
    l34846: ;
    // Convolution loop: k (_34848) runs from -v_anchor to +v_anchor
    // inclusive, accumulating sum (carried via psum_34850).
#line 41 "impala/gaussian.impala"
    int _34851;
    _34851 = 1 + v_anchor_34664;
#line 41 "impala/gaussian.impala"
    int _34880;
    _34880 = 0 - v_anchor_34664;
#line 27 "impala/gpu_device_shm.impala"
    p_34848 = _34880;
    psum_34850 = 0.000000e+00;
    goto l34847;
    l34847: ;
    _34848 = p_34848;
    sum_34850 = psum_34850;
#line 27 "impala/gpu_device_shm.impala"
    bool _34852;
    _34852 = _34848 < _34851;
#line 27 "impala/gpu_device_shm.impala"
    if (_34852) goto l34853; else goto l34873;
    l34873: ;
    // Loop done: store the accumulated sum to the output pixel.
#line 78 "impala/gpu_device_shm.impala"
    *_34878 = sum_34850;
    return ;
    l34853: ;
    // Loop body: sum += filter_shm[k + v_anchor] * tile_shm[row(gid_y+k)][col(gid_x)].
#line 43 "impala/gaussian.impala"
    int _34859;
    _34859 = gid_y_34693 + _34848;
#line 31 "impala/gpu_device_shm.impala"
    int _34854;
    _34854 = 1 + _34848;
#line 43 "impala/gaussian.impala"
    int _34855;
    _34855 = _34848 + v_anchor_34664;
#line 90 "impala/gpu_device_shm.impala"
    double* i_34856;
    i_34856 = reserve_shared_34662 + _34855;
#line 86 "impala/gpu_device_shm.impala"
    int _34861;
    _34861 = _34859 + _34860;
#line 91 "impala/gpu_device_shm.impala"
    double _34857;
    _34857 = *i_34856;
#line 86 "impala/gpu_device_shm.impala"
    int _34862;
    _34862 = _34861 * shm_dimx_34675;
#line 91 "impala/gpu_device_shm.impala"
    double _34869;
    _34869 = _34857;
#line 86 "impala/gpu_device_shm.impala"
    int _34863;
    _34863 = _34862 + gid_x_34684;
#line 86 "impala/gpu_device_shm.impala"
    int _34865;
    _34865 = _34863 + _34864;
#line 86 "impala/gpu_device_shm.impala"
    double* _34866;
    _34866 = reserve_shared_34659 + _34865;
#line 86 "impala/gpu_device_shm.impala"
    double _34867;
    _34867 = *_34866;
#line 86 "impala/gpu_device_shm.impala"
    double _34870;
    _34870 = _34867;
#line 43 "impala/gaussian.impala"
    double _34871;
    _34871 = _34869 * _34870;
#line 43 "impala/gaussian.impala"
    double _34872;
    _34872 = sum_34850 + _34871;
#line 27 "impala/gpu_device_shm.impala"
    p_34848 = _34854;
    psum_34850 = _34872;
    goto l34847;
    l34805: ;
    // Filter-copy, later y-strips (shm_index_y = tidy + y-offset).
#line 262 "impala/gpu_device_shm.impala"
    int shm_index_y_34814;
    shm_index_y_34814 = tidy_34656 + _34802;
#line 265 "impala/gpu_device_shm.impala"
    bool _34815;
    _34815 = shm_index_y_34814 < _34663;
#line 267 "impala/gpu_device_shm.impala"
    int _34820;
    _34820 = shm_index_y_34814 * _34672;
#line 52 "impala/gpu_device_shm.impala"
    p_34807 = 0;
    goto l34806;
    l34806: ;
    _34807 = p_34807;
#line 52 "impala/gpu_device_shm.impala"
    bool _34809;
    _34809 = _34807 < _34672;
#line 52 "impala/gpu_device_shm.impala"
    if (_34809) goto l34810; else goto l34831;
    l34831: ;
#line 56 "impala/gpu_device_shm.impala"
    int _34832;
    _34832 = _34802 + bdimy_34644;
#line 52 "impala/gpu_device_shm.impala"
    p_34802 = _34832;
    goto l34801;
    l34810: ;
#line 260 "impala/gpu_device_shm.impala"
    int shm_index_x_34811;
    shm_index_x_34811 = tidx_34653 + _34807;
#line 265 "impala/gpu_device_shm.impala"
    bool _34812;
    _34812 = shm_index_x_34811 < _34672;
#line 265 "impala/gpu_device_shm.impala"
    if (_34812) goto l34813; else goto l34830;
    l34830: ;
#line 269 "impala/gpu_device_shm.impala"
    goto l34829;
    l34813: ;
#line 265 "impala/gpu_device_shm.impala"
    if (_34815) goto l34816; else goto l34828;
    l34828: ;
#line 269 "impala/gpu_device_shm.impala"
    goto l34829;
    l34829: ;
#line 54 "impala/gpu_device_shm.impala"
    goto l34817;
    l34816: ;
    // filter_shm[idx] = filter_global[idx].
#line 267 "impala/gpu_device_shm.impala"
    int _34821;
    _34821 = _34820 + shm_index_x_34811;
#line 267 "impala/gpu_device_shm.impala"
    double* _34825;
    _34825 = reserve_shared_34662 + _34821;
#line 267 "impala/gpu_device_shm.impala"
    double* _34822;
    _34822 = _28615_34636 + _34821;
#line 268 "impala/gpu_device_shm.impala"
    double _34823;
    _34823 = *_34822;
#line 268 "impala/gpu_device_shm.impala"
    double _34826;
    _34826 = _34823;
#line 267 "impala/gpu_device_shm.impala"
    *_34825 = _34826;
#line 54 "impala/gpu_device_shm.impala"
    goto l34817;
    l34817: ;
#line 56 "impala/gpu_device_shm.impala"
    int _34819;
    _34819 = _34807 + bdimx_34641;
#line 52 "impala/gpu_device_shm.impala"
    p_34807 = _34819;
    goto l34806;
    l34780: ;
    // Filter-copy, first y-strip (shm row = tidy).
#line 260 "impala/gpu_device_shm.impala"
    int shm_index_x_34781;
    shm_index_x_34781 = tidx_34653 + _34777;
#line 265 "impala/gpu_device_shm.impala"
    bool _34782;
    _34782 = shm_index_x_34781 < _34672;
#line 265 "impala/gpu_device_shm.impala"
    if (_34782) goto l34783; else goto l34799;
    l34799: ;
#line 269 "impala/gpu_device_shm.impala"
    goto l34798;
    l34783: ;
#line 265 "impala/gpu_device_shm.impala"
    if (_34784) goto l34785; else goto l34797;
    l34797: ;
#line 269 "impala/gpu_device_shm.impala"
    goto l34798;
    l34798: ;
#line 54 "impala/gpu_device_shm.impala"
    goto l34786;
    l34785: ;
#line 267 "impala/gpu_device_shm.impala"
    int _34790;
    _34790 = _34789 + shm_index_x_34781;
#line 267 "impala/gpu_device_shm.impala"
    double* _34794;
    _34794 = reserve_shared_34662 + _34790;
#line 267 "impala/gpu_device_shm.impala"
    double* _34791;
    _34791 = _28615_34636 + _34790;
#line 268 "impala/gpu_device_shm.impala"
    double _34792;
    _34792 = *_34791;
#line 268 "impala/gpu_device_shm.impala"
    double _34795;
    _34795 = _34792;
#line 267 "impala/gpu_device_shm.impala"
    *_34794 = _34795;
#line 54 "impala/gpu_device_shm.impala"
    goto l34786;
    l34786: ;
#line 56 "impala/gpu_device_shm.impala"
    int _34788;
    _34788 = _34777 + bdimx_34641;
#line 52 "impala/gpu_device_shm.impala"
    p_34777 = _34788;
    goto l34776;
    l34726: ;
    // Tile-load, later y-strips: compute image row and shm row for this
    // strip, then run the x-loop; out-of-image texels are skipped.
#line 243 "impala/gpu_device_shm.impala"
    int img_index_y_34743;
    img_index_y_34743 = _34694 + _34723;
#line 249 "impala/gpu_device_shm.impala"
    int _34751;
    _34751 = img_index_y_34743 * _34689;
#line 236 "impala/gpu_device_shm.impala"
    int shm_index_y_34735;
    shm_index_y_34735 = tidy_34656 + _34723;
#line 246 "impala/gpu_device_shm.impala"
    bool _34746;
    _34746 = img_index_y_34743 < _34697;
#line 246 "impala/gpu_device_shm.impala"
    bool _34744;
    _34744 = 0 <= img_index_y_34743;
#line 248 "impala/gpu_device_shm.impala"
    int _34756;
    _34756 = shm_index_y_34735 * shm_dimx_34675;
#line 239 "impala/gpu_device_shm.impala"
    bool _34736;
    _34736 = shm_index_y_34735 < shm_dimy_34666;
#line 52 "impala/gpu_device_shm.impala"
    p_34728 = 0;
    goto l34727;
    l34727: ;
    _34728 = p_34728;
#line 52 "impala/gpu_device_shm.impala"
    bool _34730;
    _34730 = _34728 < shm_dimx_34675;
#line 52 "impala/gpu_device_shm.impala"
    if (_34730) goto l34731; else goto l34769;
    l34769: ;
#line 56 "impala/gpu_device_shm.impala"
    int _34770;
    _34770 = _34723 + bdimy_34644;
#line 52 "impala/gpu_device_shm.impala"
    p_34723 = _34770;
    goto l34722;
    l34731: ;
#line 234 "impala/gpu_device_shm.impala"
    int shm_index_x_34732;
    shm_index_x_34732 = tidx_34653 + _34728;
#line 239 "impala/gpu_device_shm.impala"
    bool _34733;
    _34733 = shm_index_x_34732 < shm_dimx_34675;
#line 239 "impala/gpu_device_shm.impala"
    if (_34733) goto l34734; else goto l34768;
    l34768: ;
#line 251 "impala/gpu_device_shm.impala"
    goto l34767;
    l34734: ;
#line 239 "impala/gpu_device_shm.impala"
    if (_34736) goto l34737; else goto l34766;
    l34766: ;
#line 251 "impala/gpu_device_shm.impala"
    goto l34767;
    l34767: ;
#line 54 "impala/gpu_device_shm.impala"
    goto l34748;
    l34737: ;
#line 241 "impala/gpu_device_shm.impala"
    int img_index_x_34738;
    img_index_x_34738 = _34685 + _34728;
#line 246 "impala/gpu_device_shm.impala"
    bool _34739;
    _34739 = 0 <= img_index_x_34738;
#line 246 "impala/gpu_device_shm.impala"
    if (_34739) goto l34740; else goto l34765;
    l34765: ;
#line 250 "impala/gpu_device_shm.impala"
    goto l34762;
    l34740: ;
#line 246 "impala/gpu_device_shm.impala"
    bool _34741;
    _34741 = img_index_x_34738 < _34689;
#line 246 "impala/gpu_device_shm.impala"
    if (_34741) goto l34742; else goto l34764;
    l34764: ;
#line 250 "impala/gpu_device_shm.impala"
    goto l34762;
    l34742: ;
#line 246 "impala/gpu_device_shm.impala"
    if (_34744) goto l34745; else goto l34763;
    l34763: ;
#line 250 "impala/gpu_device_shm.impala"
    goto l34762;
    l34745: ;
#line 246 "impala/gpu_device_shm.impala"
    if (_34746) goto l34747; else goto l34761;
    l34761: ;
#line 250 "impala/gpu_device_shm.impala"
    goto l34762;
    l34762: ;
#line 54 "impala/gpu_device_shm.impala"
    goto l34748;
    l34747: ;
    // tile_shm[shm_y][shm_x] = image[img_y][img_x].
#line 249 "impala/gpu_device_shm.impala"
    int _34752;
    _34752 = _34751 + img_index_x_34738;
#line 249 "impala/gpu_device_shm.impala"
    double* _34753;
    _34753 = _28617_34638 + _34752;
#line 248 "impala/gpu_device_shm.impala"
    int _34757;
    _34757 = _34756 + shm_index_x_34732;
#line 249 "impala/gpu_device_shm.impala"
    double _34754;
    _34754 = *_34753;
#line 248 "impala/gpu_device_shm.impala"
    double* _34758;
    _34758 = reserve_shared_34659 + _34757;
#line 249 "impala/gpu_device_shm.impala"
    double _34759;
    _34759 = _34754;
#line 248 "impala/gpu_device_shm.impala"
    *_34758 = _34759;
#line 54 "impala/gpu_device_shm.impala"
    goto l34748;
    l34748: ;
#line 56 "impala/gpu_device_shm.impala"
    int _34750;
    _34750 = _34728 + bdimx_34641;
#line 52 "impala/gpu_device_shm.impala"
    p_34728 = _34750;
    goto l34727;
    l34677: ;
    // Tile-load, first y-strip x-loop body (same guards as above, with
    // precomputed row offsets _34703/_34708).
#line 234 "impala/gpu_device_shm.impala"
    int shm_index_x_34678;
    shm_index_x_34678 = tidx_34653 + _34670;
#line 239 "impala/gpu_device_shm.impala"
    bool _34679;
    _34679 = shm_index_x_34678 < shm_dimx_34675;
#line 239 "impala/gpu_device_shm.impala"
    if (_34679) goto l34680; else goto l34720;
    l34720: ;
#line 251 "impala/gpu_device_shm.impala"
    goto l34719;
    l34680: ;
#line 239 "impala/gpu_device_shm.impala"
    if (_34681) goto l34682; else goto l34718;
    l34718: ;
#line 251 "impala/gpu_device_shm.impala"
    goto l34719;
    l34719: ;
#line 54 "impala/gpu_device_shm.impala"
    goto l34700;
    l34682: ;
#line 241 "impala/gpu_device_shm.impala"
    int img_index_x_34686;
    img_index_x_34686 = _34685 + _34670;
#line 246 "impala/gpu_device_shm.impala"
    bool _34687;
    _34687 = 0 <= img_index_x_34686;
#line 246 "impala/gpu_device_shm.impala"
    if (_34687) goto l34688; else goto l34717;
    l34717: ;
#line 250 "impala/gpu_device_shm.impala"
    goto l34714;
    l34688: ;
#line 246 "impala/gpu_device_shm.impala"
    bool _34690;
    _34690 = img_index_x_34686 < _34689;
#line 246 "impala/gpu_device_shm.impala"
    if (_34690) goto l34691; else goto l34716;
    l34716: ;
#line 250 "impala/gpu_device_shm.impala"
    goto l34714;
    l34691: ;
#line 246 "impala/gpu_device_shm.impala"
    if (_34695) goto l34696; else goto l34715;
    l34715: ;
#line 250 "impala/gpu_device_shm.impala"
    goto l34714;
    l34696: ;
#line 246 "impala/gpu_device_shm.impala"
    if (_34698) goto l34699; else goto l34713;
    l34713: ;
#line 250 "impala/gpu_device_shm.impala"
    goto l34714;
    l34714: ;
#line 54 "impala/gpu_device_shm.impala"
    goto l34700;
    l34699: ;
#line 249 "impala/gpu_device_shm.impala"
    int _34704;
    _34704 = _34703 + img_index_x_34686;
#line 248 "impala/gpu_device_shm.impala"
    int _34709;
    _34709 = _34708 + shm_index_x_34678;
#line 249 "impala/gpu_device_shm.impala"
    double* _34705;
    _34705 = _28617_34638 + _34704;
#line 248 "impala/gpu_device_shm.impala"
    double* _34710;
    _34710 = reserve_shared_34659 + _34709;
#line 249 "impala/gpu_device_shm.impala"
    double _34706;
    _34706 = *_34705;
#line 249 "impala/gpu_device_shm.impala"
    double _34711;
    _34711 = _34706;
#line 248 "impala/gpu_device_shm.impala"
    *_34710 = _34711;
#line 54 "impala/gpu_device_shm.impala"
    goto l34700;
    l34700: ;
#line 56 "impala/gpu_device_shm.impala"
    int _34702;
    _34702 = _34670 + bdimx_34641;
#line 52 "impala/gpu_device_shm.impala"
    p_34670 = _34702;
    goto l34669;
}
} |
12,292 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <vector>
#include <string>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Erosion/dilation selector: returns max(a, b) when is_dilatation is true,
// otherwise min(a, b).
__device__ size_t compare(size_t a, size_t b, bool is_dilatation) {
    if (is_dilatation) {
        return (b > a) ? b : a;
    }
    return (b < a) ? b : a;
}
// Debug helper: prints every element of a height x width device buffer.
// Launch with a 2-D grid covering the image; out-of-range threads exit.
__global__ void print_cuda(size_t* data, int height, int width) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= width || y >= height) {
        return;
    }
    // BUG FIX: %lu is wrong for size_t on LLP64 platforms; cast to a type
    // with a guaranteed device-printf specifier.
    printf("%i, %i --> %llu\n", x, y, (unsigned long long)data[x+y*width]);
}
// van Herk / Gil-Werman 1-D morphological erosion/dilation along each row
// with structuring-element length k. One thread processes one whole row.
// g/h are scratch buffers of the same size as the image; data_write receives
// the filtered result.
__global__ void compute_vHGW(size_t* data_read, size_t* data_write, int height, int width, size_t* g, size_t* h, size_t k, bool is_dilatation) {
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    auto m = width;
    // padding needed to complete the last (partial) segment of length k
    auto psa = (k - (m - 1) % k) - 1;
    // BUG FIX: `index` selects a row, so the guard must compare against
    // height, not width (the original only worked for square images).
    if (index >= height)
        return;
    size_t* curr_line = data_read+index*width;
    size_t* g_line = g+index*width;
    size_t* h_line = h+index*width;
    size_t* v_line = data_write+index*width;
    // Compute G: running prefix extrema, restarted at each segment boundary
    for (int x = 0; x < m; x++) {
        g_line[x] = (x % k) == 0 ? curr_line[x] : compare(g_line[x - 1], curr_line[x], is_dilatation);
    }
    // Compute H: running suffix extrema, restarted at each segment boundary
    h_line[m - 1] = curr_line[m - 1];
    for (size_t y = 1; y < m; y++)
    {
        size_t x = m - 1 - y;
        // BUG FIX: the suffix scan must combine with the input row
        // (curr_line); the original read the uninitialized output buffer
        // (v_line), producing garbage.
        h_line[x] = (x + 1) % k == 0 ? curr_line[x] : compare(h_line[x + 1], curr_line[x], is_dilatation);
    }
    // Merge: result at x is the extremum of g/h half-windows around x
    for (size_t x = 0; x < m; x++)
    {
        auto div2 = k / 2;
        if (x < div2)
            v_line[x] = g_line[x + div2];
        else if (x + div2 >= m)
            v_line[x] = x + div2 < m + psa ? compare(g_line[m - 1], h_line[x - (div2)], is_dilatation) : h_line[x - (div2)];
        else
            v_line[x] = compare(g_line[x + div2], h_line[x - div2], is_dilatation);
    }
}
// Host wrapper: allocates device buffers, runs the vHGW kernel (one thread
// per image row) and copies the filtered result back into data_host.
// Launch/execution errors are reported to stderr (the original checked none).
void cuda_vHGW(size_t* data_host, int height, int width, size_t k, bool is_dilatation) {
    size_t* data_read;
    size_t* data_write;
    size_t* h;
    size_t* g;
    const size_t nbytes = sizeof(size_t) * height * width;
    // Allocate device memory
    cudaMalloc(&data_read, nbytes);
    cudaMalloc(&data_write, nbytes);
    cudaMalloc(&g, nbytes);
    cudaMalloc(&h, nbytes);
    // Transfer data from host to device memory
    cudaMemcpy(data_read, data_host, nbytes, cudaMemcpyHostToDevice);
    // one thread handles a whole row; `height` blocks of 1 thread cover them
    int bsize = 1;
    compute_vHGW<<<height, bsize>>>(data_read, data_write, height, width, g, h, k, is_dilatation);
    // kernel launches do not return a status: query it explicitly
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "cuda_vHGW launch failed: %s\n", cudaGetErrorString(err));
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        fprintf(stderr, "cuda_vHGW execution failed: %s\n", cudaGetErrorString(err));
    // Transfer data back to host memory
    cudaMemcpy(data_host, data_write, nbytes, cudaMemcpyDeviceToHost);
    // Deallocate device memory
    cudaFree(data_read);
    cudaFree(data_write);
    cudaFree(h);
    cudaFree(g);
}
|
12,293 | #include <iostream>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <stdlib.h>
#include <locale>
#include <string>
#include <limits>
#include <time.h>
#include <stdio.h>
#include <iomanip>
#include <sys/time.h>
using namespace std;
//------------ Kernel de Processamento
// Classifies one OHLC candle per thread.
// d_dados layout: colsIn ints per record (index, open, high, low, close);
// d_class layout: colsOut ints per record (index, class id).
// The class id encodes open/close ordering plus low/high relations as a
// sum of distinct bit-flag values.
__global__ void Classif(int* d_dados, int* d_class, long dsize, int colsIn, int colsOut)
{
    int i=(threadIdx.x * colsIn) + (blockIdx.x * blockDim.x * colsIn);
    int o=(threadIdx.x * colsOut) + (blockIdx.x * blockDim.x * colsOut);
    int VlOpen,VlHigh,VlLow,VlClose,classe;
    // BUG FIX: the original guard `i <= dsize` let the tail thread read
    // d_dados[dsize..dsize+4] out of bounds; require the whole record
    // (i .. i+colsIn-1) to lie inside the array.
    if (i + colsIn <= dsize) {
        VlOpen = d_dados[i+1];
        VlHigh = d_dados[i+2];
        VlLow = d_dados[i+3];
        VlClose = d_dados[i+4];
        classe=(VlOpen==VlClose ? 512: VlOpen>VlClose ? 256:1024)+(VlLow<VlOpen ? 1:4)+(VlLow<VlClose ? 2:8)+(VlHigh>VlOpen ? 16:64)+(VlHigh>VlClose ? 32:128);
        d_class[o]=d_dados[i];
        d_class[o+1]=classe;
    }
}
//--------------------- Funcoes de tempo --------------------------------
// Returns the current local date/time formatted as "YYYY-MM-DD HH-MM-SS"
// (19 characters), suitable for use inside file names.
std::string DataHora()
{
    char stamp[20];
    time_t now = time(NULL);
    struct tm * local = localtime(&now);
    strftime(stamp, 20, "%F %H-%M-%S", local);
    return stamp;
}
/* funcao de tempo */
// Converts a clock-tick interval [ini, fim] to seconds.
// When fim < ini (tick counter wrapped), the unsigned expression below
// applies the same wrap-around correction as the original implementation.
double calcula_tempo(const unsigned long int ini, const unsigned long int fim)
{
    if (fim >= ini)
        return ((double)(fim - ini)) / CLOCKS_PER_SEC;
    return ((double)((fim + (unsigned long int)-1) - ini)) / CLOCKS_PER_SEC;
}
//------- Classif_paralela:: / std::string ---------------------------
// Reads `plins` OHLC candle records from the CSV file `nome`, classifies each
// candle on the GPU (`nthd` threads per block) and writes a classification CSV
// plus a timing log. `sthd` is the thread count as text, used only to build
// the output file names.
void Classif_GPU(const char * nome, long plins, int nthd, const char * sthd){
    char arq[256];
    int colsIn=5, colsOut=2;
    long lins,i, c, dsize, csize;
    int v_blocos,v_threads;
    std::string sIndice,sVlOpen,sVlHigh,sVlLow,sVlClose;
    unsigned long int t_ini;
    unsigned long int t_fin;
    unsigned long int t_tmp;
    std::string dateStr,fn,fnl,s_threads;
    // timing variables (CPU clock and wall clock)
    timeval start, end;
    double delta;
    dateStr=DataHora();
    std::cout<<" <DataHora > = "<<dateStr<<std::endl;
    // initial CPU time. BUG FIX: the original cast the function pointer
    // `clock` to an integer instead of calling clock().
    t_ini = (unsigned long int) clock();
    gettimeofday(&start, NULL); // wall-clock start marker
    // flattened vector sizes (1-D vectors used in place of matrices)
    dsize=plins*colsIn;   // input data: plins rows x 5 columns
    csize=plins*colsOut;  // classification: plins rows x 2 columns
    // vectors holding the file data and the resulting classification
    int *h_dados;
    int *h_class;
    size_t d_nbytes=dsize * sizeof(int);
    size_t c_nbytes=csize * sizeof(int);
    cudaMallocManaged ((void**)&h_dados, d_nbytes);
    cudaMallocManaged ((void**)&h_class, c_nbytes);
    lins=plins-0;
    std::cout<<" <inicializou lns> = "<<lins<<std::endl;
    // open the CSV file and start loading the vectors
    strcpy(arq,nome);
    ifstream fin(arq);
    if (fin.is_open())
    {
        t_tmp=(unsigned long int) clock();
        // load the file into h_dados (float prices scaled to ints) and zero
        // h_class. ROBUSTNESS FIX: the extra bound keeps a file longer than
        // plins rows from overrunning the vectors.
        i=0;
        c=0;
        while (fin.good() && i + colsIn <= dsize)
        {
            getline(fin,sIndice,',');
            getline(fin,sVlOpen,',');
            getline(fin,sVlHigh,',');
            getline(fin,sVlLow,',');
            getline(fin,sVlClose,'\n');
            h_dados[i]=std::atoi(sIndice.c_str());
            h_dados[i+1]=static_cast<int>(std::atof(sVlOpen.c_str())*100);
            h_dados[i+2]=static_cast<int>(std::atof(sVlHigh.c_str())*100);
            h_dados[i+3]=static_cast<int>(std::atof(sVlLow.c_str())*100);
            h_dados[i+4]=static_cast<int>(std::atof(sVlClose.c_str())*100);
            h_class[c]=0;
            h_class[c+1]=0;
            i+=colsIn;
            c+=colsOut;
        }
        // launch configuration: one thread per record, nthd threads per block
        v_threads=nthd;
        s_threads=std::string(sthd);
        v_blocos=(int)ceil((float)lins/v_threads);
        // run the classification kernel
        Classif<<<v_blocos,v_threads>>>(h_dados, h_class, dsize, colsIn, colsOut);
        // wait for the GPU before touching the managed buffers again
        cudaDeviceSynchronize();
        fnl="log/Classif_KernelT"+ s_threads +dateStr+".log.txt";
        std::ofstream mylog (fnl.c_str());
        mylog<<"Processado em "<< dateStr <<std::endl;
        mylog<<"Processado em "<< v_blocos <<" blocos com "<< v_threads <<" threads"<<std::endl;
        mylog<<"Tempo total de classificaçao (ler CSV e classificar via kernel)= "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
        // close the input file
        fin.close();
        // build the output CSV name and export the classification
        fn="csv/Classif_KernelT"+ s_threads +dateStr+".csv";
        t_tmp=(unsigned long int) clock();
        std::ofstream myfile (fn.c_str());
        myfile<<"Indice,IdClasse"<<std::endl;
        // export the contents of h_class (record index, class id)
        for (i=0; i<csize; i+=colsOut)
        {
            myfile<<h_class[i]<<','<<h_class[i+1]<<"\n";
        }
        myfile.close();
        mylog<<"Tempo para exportar classificaçao para CSV= "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
        // final timing
        t_fin = (unsigned long int) clock();
        mylog<<"Total de registros classificados= "<< lins <<std::endl;
        mylog<<"Tempo total de processamento= "<< setprecision(6) << calcula_tempo(t_ini, t_fin) <<std::endl;
        gettimeofday(&end, NULL);
        delta = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
        mylog<<"Tempo total de processamento 2 = "<< delta <<std::endl;
        mylog.close();
        std::cout<<std::endl<<"Tempo total de processamento= "<< calcula_tempo(t_ini, t_fin) <<std::endl;
        std::cout<<"Tempo total de processamento 2 = "<< delta <<std::endl;
    }
    else
    {
        std::cout<<std::endl<<"Erro na abertura do arquivo "<< nome <<std::endl;
    }
    // BUG FIX: release the managed buffers (they leaked in the original)
    cudaFree(h_dados);
    cudaFree(h_class);
}
//---------------------------------------------------------------------------
int main(int argc, char * argv[])
{
    // Usage: prog <input csv> <record count> <threads per block>
    if (argc < 4){
        std::cout<<"Digite o nome do arquivo de entrada e a quantidade de registros e quantas threads"<<std::endl;
        abort();
    }
    std::cout<<" <Arquivo de entrada> = "<<argv[1]<<std::endl;
    long nlin = std::atol(argv[2]);   // number of records to classify
    int nthd = std::atoi(argv[3]);    // threads per block
    std::cout<<" <Qtd Registros> = "<<nlin<<std::endl;
    // run the GPU classification pass
    Classif_GPU(argv[1], nlin, nthd, argv[3]);
}
|
12,294 | // counting Hamilton cycle, CUDA acceleration
#include<stdio.h>
#include<stdlib.h>
#define MAX_BLOCK_SIZE 256
#define MAX_ARRAY_SIZE (1024*8)
typedef unsigned long long u64;
// any 2 <= mod <= 2^31 should work
// Modular addition: (a + b) mod `mod`, assuming a, b < mod and
// 2 <= mod <= 2^31 so the intermediate sum cannot overflow.
__host__ __device__ unsigned mod_sum(unsigned a, unsigned b, unsigned mod) {
    unsigned s = a + b;
    if (s >= mod)
        s -= mod;
    return s;
}
// 64-bit modular addition: (a + b) mod `mod`, assuming a, b < mod.
// Note: mod == 0 degenerates to plain wrap-around addition (c >= 0 always).
__host__ __device__ u64 mod_sum64(u64 a, u64 b, u64 mod) {
    u64 s = a + b;
    if (s >= mod)
        s -= mod;
    return s;
}
// Per-subset walk counting for the Hamilton-cycle count (see file header).
// Launch shape: blockDim = (k, blockSize); each y-row of a block works on
// k-element vertex subsets (bitmasks over vertices 0..n-2), seeded from
// part[bid] and advanced in steps of gridSize per iteration.
// Shared memory: qc holds per-lane walk counts (k entries per y-row),
// ai holds the adjacency rows packed as 64-bit bitsets (so n <= 64).
// All arithmetic is done modulo `mod` via mod_sum64.
template<int k>
__launch_bounds__(MAX_BLOCK_SIZE)
__global__ void ha2(int n, int work, unsigned *part, int *adj, unsigned long long *ret, unsigned long long mod) {
__shared__ unsigned long long qc[1024]; // transition count
__shared__ unsigned long long ai[64]; // adjacency matrix rows as bitsets
//const int k = blockDim.x;
const int tid = threadIdx.x;
const int bid = threadIdx.y + blockIdx.x * blockDim.y;
const int sha = threadIdx.y * k; // this y-row's base offset into qc
const int gridSize = blockDim.y * gridDim.x;
unsigned long long s = part[bid]; // current vertex-subset bitmask
unsigned long long mask = (1ull<<k) - 1;
unsigned long long total = 0;
// fetch adjacency matrix into shared memory, one bitset row per thread
for (int i = tid+sha; i < n; i += blockDim.y * k) {
unsigned long long aa = 0;
for (int j = 0; j < n; j++) {
aa = aa | static_cast<unsigned long long>(adj[i * n + j]) << j;
}
ai[i] = aa;
}
__syncthreads();
for (int runs = 0; runs < work; runs += gridSize) {
// at = vertex index of this lane's (tid-th) set bit in subset s
unsigned at;
{
unsigned long long row = s;
for (int i = 0; i < tid; i++) {
row = row & (row-1); // clear the lowest set bit
}
at = __ffsll(row)-1;
}
// making row "long long" would make program 3x slow, so I use 2 unsigned int
unsigned row = 0, row2 = 0;
{
// build transition table: bit j of (row2:row) = edge from `at` to the
// j-th member of subset s
unsigned long long me = ai[at];
for (int i = n-2; i >= 0; i--) {
if (s>>i & 1) {
row2 = row2 << 1 | row >> 31;
row = row + row + (me>>i & 1);
}
}
// initial state: one walk if `at` is adjacent to vertex n-1
qc[tid+sha] = (me >> (n-1)) & 1;
__syncthreads();
}
// calculate each transition, uses GPU SIMD feature
for (int t = 1; t < n-1; t++) {
unsigned long long sum = 0;
unsigned rr = row;
for (int i = 0; i < min(k, 32); i++) {
//sum = mod_sum(sum, qc[i+sha] * (row>>i & 1), mod);
//sum = mod_sum64(sum, qc[i+sha] * (rr & 1), mod);
//sum = mod_sum64(sum, qc[i+sha] * dd[i], mod);
// 0-(bit) is an all-ones mask when the low bit of rr is set
sum = mod_sum64(sum, qc[i+sha] & 0LL-(rr & 1), mod);
rr >>= 1;
}
if (k > 32) {
// upper half of the transition row for k > 32 lanes
rr = row2;
for (int i = 0; i < k-32; i++) {
sum = mod_sum64(sum, qc[i+32+sha] & 0ULL-(rr & 1), mod);
rr >>= 1;
}
}
__syncthreads();
qc[tid+sha] = sum;
__syncthreads();
}
// last transition: walk must return to vertex n-1
{
if (!(ai[n-1] >> at & 1)) qc[tid+sha] = 0;
__syncthreads();
unsigned long long count = 0;
for (int i = 0; i < k; i++) {
count = mod_sum64(count, qc[i+sha], mod);
}
//if (tid==0) printf("[%d:%d],", s, count);
// guard: the last grid pass may overshoot `work` subsets
if (runs + bid < work) {
total = mod_sum64(count, total, mod);
}
}
// advance s to the next subset with the same popcount (Gosper's hack)
unsigned bit = s & (-s);
s += bit;
s |= mask >> __popcll(s);
__syncthreads();
}
if (tid == 0) {
// output total for this block
ret[bid] = total;
}
}
int n;
int adj[64*64];
unsigned part[MAX_ARRAY_SIZE];
unsigned long long ret[MAX_ARRAY_SIZE];
long long nCr[65][65];
// Combinatorial unranking: returns the idx-th r-element subset of
// {0, ..., n-2} as a bitmask, using the precomputed global nCr table.
// NOTE(review): assumes 0 <= idx < C(n-1, r); an out-of-range idx would
// drive n negative and index nCr out of bounds — confirm callers respect this.
u64 getComb(long long idx, int n, int r) {
u64 ans = 0;
n -= 1; // highest candidate bit position
while (r > 0) {
// the first nCr[n][r] ranks exclude bit n; otherwise take it
if (idx < nCr[n][r]) n -= 1;
else {
ans |= u64(1)<<(n);
idx -= nCr[n][r];
n -= 1;
r -= 1;
}
}
return ans;
}
// Host dispatcher: instantiates ha2<k> for the runtime value of k (2..32)
// via the HA4_k macro and launches it with blockDim = (k, blockSize).
// k values outside 2..32 fall through the switch and launch nothing.
void ha4(int gridSize, int blockSize, int k, int n, int work, unsigned *part, int *adj, unsigned long long *ret, unsigned long long mod) {
dim3 bsz(k, blockSize);
switch (k) {
#define HA4_k(k) case k: ha2<k><<<gridSize, bsz>>>(n, work, part, adj, ret, mod); break;
HA4_k(2)
HA4_k(3)
HA4_k(4)
HA4_k(5)
HA4_k(6)HA4_k(7)HA4_k(8)HA4_k(9)HA4_k(10)
HA4_k(11)HA4_k(12)HA4_k(13)HA4_k(14)HA4_k(15)
HA4_k(16)HA4_k(17)HA4_k(18)HA4_k(19)HA4_k(20)
HA4_k(21)HA4_k(22)HA4_k(23)HA4_k(24)HA4_k(25)
HA4_k(26)HA4_k(27)HA4_k(28)HA4_k(29)HA4_k(30)
HA4_k(31)HA4_k(32)
#undef HA4_k
}
// kernel launches return no status directly; query the launch error here
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess) {
fprintf(stderr, "%s\n", cudaGetErrorString(status));
}
}
// Reads n and an n x n 0/1 adjacency matrix from stdin, then accumulates
// ha2's per-subset walk counts over all subset sizes k with alternating
// sign — an inclusion-exclusion count of Hamilton cycles (see file header).
// Note: `mod` stays 0, so mod_sum64 degenerates to plain 64-bit wrap-around
// addition and the answer is computed modulo 2^64.
int main() {
int *gpu_adj;
unsigned *gpu_part;
unsigned long long *gpu_ret;
scanf("%d", &n);
// pre-fill off-diagonal entries with random bits (overwritten below when
// the corresponding stdin character is read successfully)
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (i != j) adj[i*n+j] = rand()>>5&1;
}
}
for (int i = 0; i < n; i++) {
char op;
for (int j = 0; j < n; j++) {
if (scanf(" %c", &op) == 1 && i != j) {
adj[i*n+j] = op == '1';
}
}
}
// Pascal's triangle of binomial coefficients up to C(64, .)
for (int i = 0; i <= 64; i++) {
nCr[i][0] = nCr[i][i] = 1;
for (int j = 1; j < i; j++) nCr[i][j] = nCr[i-1][j-1] + nCr[i-1][j];
}
cudaMalloc(&gpu_part, sizeof part);
cudaMalloc(&gpu_adj, sizeof adj);
cudaMalloc(&gpu_ret, sizeof ret);
cudaMemcpy(gpu_adj, adj, sizeof adj, cudaMemcpyHostToDevice);
unsigned long long ans = 0;
unsigned long long mod = 0;
for (int k = 1; k <= n-1; k++) {
// NOTE(review): wo is an int but C(n-1, k) overflows int for larger n —
// confirm n stays small enough for this to be safe.
int wo = nCr[n-1][k];
int blockSize = wo;
if (blockSize > MAX_BLOCK_SIZE / k) blockSize = MAX_BLOCK_SIZE / k;
int gridSize = wo / blockSize;
if (blockSize * gridSize > MAX_ARRAY_SIZE) gridSize = MAX_ARRAY_SIZE / blockSize;
int totSize = blockSize * gridSize;
fprintf(stderr, "block size = (%d,%d,1) grid size = (%d,1,1)\n", k, blockSize, gridSize);
//for (int j = 0; j < wo; j++) printf("%d,", getComb(j, n-1, k));
// distribute the wo subsets across totSize workers: worker j starts at
// its share of the subset sequence (balanced remainder handling)
for (int j = 0; j < totSize; j++) {
int step = wo / totSize * j;
if (j < wo % totSize) step += j;
else step += wo % totSize;
//printf("step=%d\n", step);
part[j] = getComb(step, n-1, k);
}
cudaMemcpy(gpu_part, part, sizeof(int) * totSize, cudaMemcpyHostToDevice);
ha4(gridSize, blockSize, k, n, wo, gpu_part, gpu_adj, gpu_ret, mod);
cudaDeviceSynchronize();
cudaMemcpy(ret, gpu_ret, sizeof(long long) * totSize, cudaMemcpyDeviceToHost);
unsigned long long sum = 0;
for (int j = 0; j < totSize; j++) {
sum = mod_sum64(sum, ret[j], 0);
}
//printf("sum = %u\n", sum);
// alternate sign by parity of n-k; with mod == 0, mod-sum == -sum mod 2^64
if ((n-k)%2 == 1) ans = mod_sum64(ans, sum, mod);
else if (sum != 0) ans = mod_sum64(ans, mod-sum, mod);
}
printf("ans = %llu\n", ans);
cudaFree(gpu_ret);
cudaFree(gpu_adj);
cudaFree(gpu_part);
return 0;
}
|
12,295 | #include <device_launch_parameters.h>
#include <cuda_runtime.h>
#define TILE_WIDTH 16
// Naive global-memory matrix multiply C = A * B for square width x width
// matrices; each thread computes one output element.
__global__ void matMulGlobal(float* A, float* B, float* C, int width)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // guard against grid overshoot on the matrix edges
    if (row >= width || col >= width)
        return;
    float acc = 0;
    for (int k = 0; k < width; ++k)
        acc += A[row * width + k] * B[k * width + col];
    C[row * width + col] = acc;
}
// Tiled shared-memory matrix multiply C = A * B for square width x width
// matrices. Launch with TILE_WIDTH x TILE_WIDTH blocks covering C.
// FIX: the original had no bounds guards and only handled widths that are
// exact multiples of TILE_WIDTH; partial tiles are now zero-padded and the
// final store is guarded, so any width works.
__global__ void matMulShared(float* A, float* B, float* C, int width)
{
    __shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
    __shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = blockIdx.y * blockDim.y + ty;
    int col = blockIdx.x * blockDim.x + tx;
    float matmul = 0.0f;
    // ceil(width / TILE_WIDTH) tiles cover the shared dimension
    int numTiles = (width + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int p = 0; p < numTiles; ++p)
    {
        // load one tile of A and B; out-of-range cells contribute 0
        int aCol = p * TILE_WIDTH + tx;
        int bRow = p * TILE_WIDTH + ty;
        ds_A[ty][tx] = (row < width && aCol < width) ? A[row * width + aCol] : 0.0f;
        ds_B[ty][tx] = (bRow < width && col < width) ? B[bRow * width + col] : 0.0f;
        __syncthreads();
        // accumulate the partial dot product for this tile
        for (int i = 0; i < TILE_WIDTH; ++i)
        {
            matmul += ds_A[ty][i] * ds_B[i][tx];
        }
        __syncthreads();
    }
    // threads outside the matrix must not store (they still helped load tiles)
    if (row < width && col < width)
        C[row * width + col] = matmul;
}
|
12,296 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "cuda.h"
#include "cuda_runtime.h"
// nvcc -lcudart deviceQuery.cu
// must have '-lcudart' to avoid linker error
// Enumerates every CUDA device and prints its key properties.
// BUG FIX: size_t properties (totalConstMem, sharedMemPerBlock, memPitch,
// textureAlignment) were printed with %d, which is wrong on LP64 targets;
// they now use %zu.
int
main( int argc, char** argv)
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0)
        printf("There is no device supporting CUDA\n");
    int dev;
    for (dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);
        if (dev == 0) {
            // summary line, printed only once
            if (deviceProp.major < 1)
                printf("There is no device supporting CUDA.\n");
            else if (deviceCount == 1)
                printf("There is 1 device supporting CUDA\n");
            else
                printf("There are %d devices supporting CUDA\n", deviceCount);
        }
        printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
        printf(" Major revision number: %d\n",
               deviceProp.major);
        printf(" Minor revision number: %d\n",
               deviceProp.minor);
        printf(" Total amount of global memory: %.2f Gb\n",
               deviceProp.totalGlobalMem / 1e9);
        printf(" Total amount of constant memory: %zu bytes\n",
               deviceProp.totalConstMem);
        printf(" Total amount of shared memory per block: %zu bytes\n",
               deviceProp.sharedMemPerBlock);
        printf(" Total number of registers available per block: %d\n",
               deviceProp.regsPerBlock);
        printf(" Warp size: %d\n",
               deviceProp.warpSize);
        printf(" Maximum number of threads per block: %d\n",
               deviceProp.maxThreadsPerBlock);
        printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
               deviceProp.maxThreadsDim[0],
               deviceProp.maxThreadsDim[1],
               deviceProp.maxThreadsDim[2]);
        printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
               deviceProp.maxGridSize[0],
               deviceProp.maxGridSize[1],
               deviceProp.maxGridSize[2]);
        printf(" Maximum memory pitch: %zu bytes\n",
               deviceProp.memPitch);
        printf(" Texture alignment: %zu bytes\n",
               deviceProp.textureAlignment);
        printf(" Clock rate: %d kilohertz\n",
               deviceProp.clockRate);
    }
    printf("\nReport Done.\n");
    return 0;
}
|
12,297 | #include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/remove.h>
#include <thrust/count.h>
#include <iostream>
// Given a set of vectors (all of same size), remove the entries corresponding
// to indexes tagged as 'false' in a vector of flags.
int main(void)
{
const bool active[] = { false, true, false, true, false, false };
const float a[] = {0.3f, 0.5f, 0.4f, 0.1f, 0.2f, 0.7f};
const int b[] = {3, 5, 4, 1, 2, 7};
const double c[] = {13, 15, 14, 11, 12, 17 };
thrust::host_vector<bool> h_active(active, active + 6);
thrust::host_vector<float> h_a(a, a + 6);
thrust::host_vector<int> h_b(b, b + 6);
thrust::host_vector<double> h_c(c, c + 6);
// define a tuple of the three vector's iterators
typedef thrust::tuple<thrust::host_vector<float>::iterator, thrust::host_vector<int>::iterator, thrust::host_vector<double>::iterator> IteratorTuple;
// define a zip iterator
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
ZipIterator zip_begin = thrust::make_zip_iterator(thrust::make_tuple(h_a.begin(), h_b.begin(), h_c.begin()));
ZipIterator zip_end = zip_begin + 6;
// call remove if on the zipped ranges (logical_not predicate: remove entries tagged with 'false')
ZipIterator new_end = thrust::remove_if(zip_begin, zip_end, h_active.begin(), thrust::logical_not<bool>());
// call remove if on the zipped ranges (identity predicate: remove entries tagged with 'true')
//ZipIterator new_end = thrust::remove_if(zip_begin, zip_end, h_active.begin(), thrust::identity<bool>());
// Count number of flags set to 'true'
int num_flags = thrust::count_if(h_active.begin(), h_active.end(), thrust::identity<bool>());
std::cout << "Keep " << num_flags << " elements" << std::endl;
// erase the removed elements
//h_a.erase(thrust::get<0>(new_end.get_iterator_tuple()), h_a.end());
//h_b.erase(thrust::get<1>(new_end.get_iterator_tuple()), h_b.end());
//h_c.erase(thrust::get<2>(new_end.get_iterator_tuple()), h_c.end());
h_a.resize(num_flags);
h_b.resize(num_flags);
h_c.resize(num_flags);
// print out the contents of the vectors
std::cout << "New a: " << std::endl;
for(int i = 0; i < h_a.size(); ++i)
{
std::cout << h_a[i] << " ";
}
std::cout << std::endl;
std::cout << "New b: " << std::endl;
for(int i = 0; i < h_b.size(); ++i)
{
std::cout << h_b[i] << " ";
}
std::cout << std::endl;
std::cout << "New c: " << std::endl;
for (int i = 0; i < h_c.size(); ++i)
{
std::cout << h_c[i] << " ";
}
std::cout << std::endl;
return 0;
} |
12,298 | #include "includes.h"
namespace ann {
// CUDA2
}
// Computes the output-layer error signal for a sigmoid layer:
// gjl = f'(z) * (a - t), with f the logistic sigmoid.
// l[layer_id] = neuron count of the layer; s[layer_id] = offset of this
// layer inside the flattened z/a/g arrays; t_arr = target vector.
// NOTE(review): the guard uses neuron_count-1, so the layer's last neuron
// is skipped — presumably a bias unit; confirm against the caller.
__global__ void kernel_calc_gL( int layer_id, int *l, int *s, float *z_arr, float *a_arr, float *t_arr, float *gjl ){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
if(idx >= neuron_count-1) return;
// logistic sigmoid derivative: f'(z) = e^-z / (1 + e^-z)^2
float z = z_arr[s[layer_id] + idx];
float tmp = 1 + expf(-z);
float f_deriv=expf(-z) / (tmp*tmp);
// error signal: derivative times (activation - target)
gjl[s[layer_id] + idx] = f_deriv*(a_arr[s[layer_id] + idx] - t_arr[idx]);
}
12,299 | #include "includes.h"
static __device__ float E = 2.718281828;
// Element-wise product: dst[i] = src1[i] * src2[i].
// NOTE(review): there is no bounds guard, so the caller must launch exactly
// one thread per element with blockDim.x == block_size — confirm at call site.
__global__ void multiplyElementKernel(float *src1, float *src2, float *dst, int block_size)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    dst[idx] = src1[idx] * src2[idx];
}
12,300 | #include "includes.h"
// Column-wise matrix addition C = A + B for a width x width matrix allocated
// with a row pitch (in bytes, e.g. from cudaMallocPitch). One thread walks
// one whole column.
__global__ void matrixAdd_C_Kernel(float* A, float* B, float* C, size_t pitch, int width){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // pitch is in bytes; convert to a stride in float elements
    int rowWidthWithPad = pitch/sizeof(float);
    if(col < width){
        // FIX: removed the redundant inner `if (row < width)` — the loop
        // bound already guarantees it
        for (int row = 0; row < width; ++row) {
            C[row * rowWidthWithPad + col] = A[row * rowWidthWithPad + col] + B[row * rowWidthWithPad + col];
        }
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.