serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
12,901 | //pass
//--blockDim=32 --gridDim=2
#include <cuda.h>
// Verification test kernel (GPUVerify-style, see //pass annotations above):
// even global indices stage A[tid + 2] into a local, and indices divisible
// by 6 (a subset of the even ones) add that staged value back into A[tid].
// NOTE(review): A[tid + 2] assumes A has at least blockDim.x + 2 elements;
// the parameter N is never used — confirm against the harness.
__global__ void test_Prog(int *A, int N) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
int alpha = 0;
if (idx % 2 == 0)
{
alpha = A[tid + 2];
}
if (idx % 6 == 0)
{
A[tid] = A[tid] + alpha;
}
} |
12,902 | #include <cstdio>
#include <iostream>
#include <string>
#include <vector>
#include <time.h>
#include <cuda_runtime.h>
#include <cmath>
#include <cstdlib>
#define BLOCK_SIZE 12288 //number of floats processed per SM
#define MAX_THREADS_PER_BLOCK 1024 //Limit of GTX 1080
#define BLOCK_HEIGHT 32
#define BLOCK_WIDTH 32
#define ELM_PER_THREAD 12 // BLOCK_SIZE / MAX_THREADS
using namespace std;
timespec start_time;
timespec stop_time;
void start_clock();
void stop_clock();
double get_clock_result_seconds();
void print_time_seconds(double seconds);
// Print the top-left 5x5 corner of an n x n row-major matrix to stdout.
// Matrices smaller than 5x5 are rejected with a message instead.
void PrintPartialMatrix(size_t n, float* matrix)
{
    if(n < 5)
    {
        printf("Matrix is too small to print.\n");
        return;
    }
    for(size_t row = 0; row < 5; ++row)
    {
        for(size_t col = 0; col < 5; col++)
            printf("%0.2f\t", matrix[(row*n) + col]);
        printf("\n");
    }
}
// Naive one-element-per-thread transpose of an n x n row-major matrix.
// Each thread computes its flat input index; threads whose index falls
// past the end of the matrix exit early.
__global__ void transpose_one_to_one(size_t n, float* input, float* output)
{
    int gcol = (blockDim.x * blockIdx.x) + threadIdx.x;   // global column
    int grow = (blockDim.y * blockIdx.y) + threadIdx.y;   // global row
    int flat = (n * grow) + gcol;                         // row-major index
    if(flat >= n*n)
        return;
    // flat = row*n + col  ->  transposed slot is col*n + row
    output[((flat % n)*n) + (flat / n)] = input[flat];
}
// Staged transpose: each thread copies ELM_PER_THREAD consecutive input
// elements into shared memory, then scatters them to transposed positions.
__global__ void transpose_optimized(size_t n, float* input, float* output)
{
__shared__ float s_data[BLOCK_SIZE];
//unsigned int global_col = ((blockDim.x * blockIdx.x) * ELM_PER_THREAD) + (threadIdx.x * ELM_PER_THREAD);
//unsigned int global_row = (blockDim.y * blockIdx.y) + threadIdx.y;
//unsigned int i = (n * (global_row)) + (global_col);
// Flat row-major index of this thread's first element.
unsigned int i = (n * ((blockDim.y * blockIdx.y) + threadIdx.y)) + (((blockDim.x * blockIdx.x) * ELM_PER_THREAD) + (threadIdx.x * ELM_PER_THREAD));
// This thread's slot inside the shared staging buffer.
unsigned int block_level_index = ((threadIdx.y * blockDim.x) + threadIdx.x) * ELM_PER_THREAD;
// NOTE(review): start adds block_level_index on top of i, but i already
// contains the threadIdx.x * ELM_PER_THREAD column offset — the per-thread
// offset looks double-counted; confirm against the naive kernel's output.
unsigned int start = i + block_level_index;
unsigned int stop = start + ELM_PER_THREAD;
int s_idx = block_level_index;
// Stage ELM_PER_THREAD elements into shared memory (guarded at the tail).
for(unsigned int i = start; i < stop; i++, s_idx++)
{
if(i >= n*n)
break;
s_data[s_idx] = input[i];
}
__syncthreads();
s_idx = block_level_index;
// Scatter: flat index i = row*n + col goes to output[col*n + row].
// NOTE(review): this loop uses a signed int i while stop is unsigned,
// unlike the first loop — fine below INT_MAX, but verify intent.
for(int i = start; i < stop; i++, s_idx++)
{
if(i >= n*n)
break;
output[((i % n)*n) + (i / n)] = s_data[s_idx];
}
}
// Benchmark driver: transposes an N x N random matrix with the naive and
// the "optimized" kernel and compares the two results element-wise.
int main(int argc, char** argv)
{
if (argc < 2)
{
printf("Not enough arguments\n");
printf("Usage is ./a.out [matrix dim]\n");
return 1;
}
string dimension_arg = argv[1];
size_t N = 0;
// NOTE(review): strtoul does not throw, so this try/catch can never fire;
// invalid input silently yields N = 0.
try{
N = strtoul(dimension_arg.c_str(), NULL, 10);
}
catch(...){
printf("Matrix dimension argument %s is not valid\n", dimension_arg.c_str());
return 2;
}
size_t matrix_size = N*N*sizeof(float);
float* d_input_matrix;
float* d_resultant_matrix;
cudaMalloc((void **)&d_input_matrix, matrix_size);
cudaMalloc((void **)&d_resultant_matrix, matrix_size);
float* h_input_matrix = new float[N*N];
float* h_resultant_matrix_1 = new float[N*N];
float* h_resultant_matrix_2 = new float[N*N];
srand(time(NULL));
// Fill the input with random values in [0, 1].
for( int i = 0; i < N*N; i++)
h_input_matrix[i] = (float)rand() / (float)RAND_MAX;
//h_input_matrix[i] = 0.11;
cudaMemcpy(d_input_matrix, h_input_matrix, matrix_size, cudaMemcpyHostToDevice);
// Naive kernel: one thread per element, ceil-divided 2D grid.
dim3 block(BLOCK_WIDTH, BLOCK_HEIGHT);
size_t grid_width = N / BLOCK_WIDTH;
grid_width += N % BLOCK_WIDTH > 0 ? 1 : 0;
size_t grid_height = N / (BLOCK_HEIGHT);
grid_height += N % BLOCK_HEIGHT > 0 ? 1 : 0;
dim3 grid(grid_width, grid_height);
printf("grid(%lu, %lu)\n", grid_width, grid_height);
start_clock();
transpose_one_to_one<<<grid, block>>>(N, d_input_matrix, d_resultant_matrix);
cudaDeviceSynchronize();
stop_clock();
printf("naive time:\t");
print_time_seconds(get_clock_result_seconds());
printf("\n\n");
cudaMemcpy(h_resultant_matrix_1, d_resultant_matrix, matrix_size, cudaMemcpyDeviceToHost);
// Optimized kernel: ELM_PER_THREAD elements per thread, so the grid is
// narrower in x. NOTE(review): the remainder check still uses BLOCK_WIDTH
// alone rather than BLOCK_WIDTH * ELM_PER_THREAD — confirm coverage.
block = dim3(BLOCK_WIDTH, BLOCK_HEIGHT);
grid_width = N / (BLOCK_WIDTH * ELM_PER_THREAD);
grid_width += N % BLOCK_WIDTH > 0 ? 1 : 0;
grid_height = N / (BLOCK_HEIGHT);
grid_height += N % BLOCK_HEIGHT > 0 ? 1 : 0;
grid = dim3(grid_width, grid_height);
printf("grid(%lu, %lu)\n", grid_width, grid_height);
start_clock();
transpose_optimized<<<grid, block>>>(N, d_input_matrix, d_resultant_matrix);
cudaDeviceSynchronize();
stop_clock();
printf("optimized time:\t");
print_time_seconds(get_clock_result_seconds());
printf("\n");
cudaMemcpy(h_resultant_matrix_2, d_resultant_matrix, matrix_size, cudaMemcpyDeviceToHost);
// NOTE(review): memcmp is declared in <cstring>, which is not included
// here (it may arrive transitively). The stray '\' after the printf below
// is a line continuation — harmless but should be removed.
if (memcmp(h_resultant_matrix_1, h_resultant_matrix_2, matrix_size) != 0)
{
printf("Results DO NOT match!\n");\
for(size_t i = 0; i < matrix_size/sizeof(float); i++)
{
if(h_resultant_matrix_1[i] != h_resultant_matrix_2[i])
{
printf("index %lu doesn't match\n", i);
printf("--1: %0.2f\t2: %0.2f--\n", h_resultant_matrix_1[i], h_resultant_matrix_2[i]);
printf("Input:\n");
PrintPartialMatrix(N, h_input_matrix);
printf("\nOutput 1:\n");
PrintPartialMatrix(N, h_resultant_matrix_1);
printf("\nOutput 2:\n");
PrintPartialMatrix(N, h_resultant_matrix_2);
break;
}
}
}
else
{
printf("Results match\n");
}
cudaFree(d_input_matrix);
cudaFree(d_resultant_matrix);
delete[] h_input_matrix;
delete[] h_resultant_matrix_1;
delete[] h_resultant_matrix_2;
return 0;
}
// --- timing helpers over the global start_time/stop_time timespecs ---
// CLOCK_THREAD_CPUTIME_ID measures this thread's CPU time, not wall time;
// the kernels above are bracketed by cudaDeviceSynchronize(), so the
// synchronize's CPU spin is what gets measured.
void start_clock()
{
clock_gettime(CLOCK_THREAD_CPUTIME_ID, &start_time);
}
void stop_clock()
{
clock_gettime(CLOCK_THREAD_CPUTIME_ID, &stop_time);
}
// Elapsed seconds between the last start_clock()/stop_clock() pair.
double get_clock_result_seconds()
{
double result = stop_time.tv_sec - start_time.tv_sec;
result += (double)(stop_time.tv_nsec - start_time.tv_nsec) / 1000000000;
return result;
}
// Print seconds with platform-dependent precision (3 digits on Windows,
// 9 on Linux; nothing on other platforms).
void print_time_seconds(double seconds)
{
#ifdef _WIN32
printf("%0.3f seconds", seconds);
#elif __linux__
printf("%0.9f seconds", seconds);
#endif
}
|
12,903 | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
//Global Variable Declaration
float* p;
//Memory Allocation Function
/* Allocate an uninitialized dim_x * dim_y block of floats and return it
 * through data_ptr. Caller owns the memory.
 * Fix: the original requested sizeof(float *) per element (the size of a
 * pointer, not of a float), over-allocating on LP64 platforms. */
void memAlloc(float **data_ptr, int dim_x, int dim_y)
{
    float *data = (float *) malloc(sizeof(float) * dim_x * dim_y);
    *data_ptr = data;
}
//void cleanp()
//{
//if(p)
// free(p);
//}
/* ----------------------------------------------------
main method for Cholesky decomposition.
input n size of matrix
input/output a Symmetric positive def. matrix
output p vector of resulting diag of a
----------------------------------------------------- */
// In-place Cholesky decomposition of the symmetric positive-definite
// n x n matrix a (row-major). The lower triangle of a receives the
// off-diagonal factor entries; the diagonal of the factor is written to
// the global vector p (which the caller must have allocated, length >= n).
// Returns 1 on success, 0 (with a message) if a is not positive definite.
int choldc1(int n, float* a)
{
int i,j,k;
float sum;
for (i = 0; i < n; i++)
{
for (j = i; j < n; j++)
{
// Start from a[i][j] and subtract the already-computed factor terms.
sum = a[i * n + j];
for (k = i - 1; k >= 0; k--)
{
sum -= a[i * n + k] * a[j * n + k];
}
if (i == j)
{
// A non-positive pivot means the matrix is not positive definite.
if (sum <= 0)
{
printf(" S is not positive definite!\n");
return 0;
}
p[i] = sqrt(sum);
}
else
{
a[j * n + i] = sum / p[i];
}
}
}
return 1;
}
/* -----------------------------------------------------
Inverse of Cholesky decomposition.
input n size of matrix
input A Symmetric positive def. matrix
output a inverse of lower deomposed matrix
uses choldc1(int,MAT,VEC)
----------------------------------------------------- */
// Compute the inverse of the lower Cholesky factor of A into a.
// A is the n x n symmetric positive-definite input (row-major, unchanged);
// a receives L^-1 in its lower triangle. Uses choldc1 and the global
// diagonal vector p (caller-allocated). Returns 1 on success, 0 if A is
// not positive definite.
int choldcsl(int n, float* A, float* a)
{
int i,j,k; float sum;
int success;
// Work on a copy so A is preserved.
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
a[i * n + j] = A[i * n + j];
success = choldc1(n, a);
if (success == 0)
return 0;
// Forward-substitute to invert the lower-triangular factor in place.
for (i = 0; i < n; i++)
{
a[i * n + i] = 1 / p[i];
for (j = i + 1; j < n; j++)
{
sum = 0;
for (k = i; k < j; k++)
{
sum -= a[j * n + k] * a[k * n + i];
}
a[j * n + i] = sum / p[j];
}
}
return 1;
}
/* ---------------------------------------------------
Matrix inverse using Cholesky decomposition
input n size of matrix
input A Symmetric positive def. matrix
output a inverse of A
uses choldc1(MAT, VEC)
--------------------------------------------------- */
/* ---------------------------------------------------
   Matrix inverse using Cholesky decomposition
   input    n  size of matrix
   input    A  Symmetric positive def. matrix
   output   a  inverse of A
   Returns 1 on success, 0 if A is not positive definite.
   Uses the global scratch vector p — allocated here and released on every
   exit path (the original leaked it on each call: cleanp() was commented
   out).
   --------------------------------------------------- */
int inverse(int n, float* A, float* a)
{
    int i,j,k,success;
    /* temp memory allocation for the diagonal scratch vector p */
    memAlloc(&p,n,n);
    success = choldcsl(n,A,a);
    if( success == 0)
    {
        free(p);   /* fix: release the scratch vector on the failure path */
        p = NULL;
        return 0;
    }
    /* zero the strict upper triangle of the decomposed factor */
    for (i = 0; i < n; i++)
    {
        for (j = i + 1; j < n; j++)
        {
            a[i * n + j] = 0.0;
        }
    }
    /* accumulate a = L^-T * L^-1, the inverse of A */
    for (i = 0; i < n; i++)
    {
        a[i * n + i] *= a[i * n + i];
        for (k = i + 1; k < n; k++)
        {
            a[i * n + i] += a[k * n + i] * a[k * n + i];
        }
        for (j = i + 1; j < n; j++)
        {
            for (k = j; k < n; k++)
            {
                a[i * n + j] += a[k * n + i] * a[k * n + j];
            }
        }
    }
    /* mirror the upper triangle into the lower one (result is symmetric) */
    for (i = 0; i < n; i++)
    {
        for (j = 0; j < i; j++)
        {
            a[i * n + j] = a[j * n + i];
        }
    }
    free(p);       /* fix: release the scratch vector (was leaked each call) */
    p = NULL;
    return 1;
}
//Inversion Complete
//Other Matrix operations
//Addition
/* Element-wise matrix addition C = A + B for h-by-w row-major matrices. */
void add(float* C, float* A, float* B, int h, int w)
{
    int total = h * w;
    for (int idx = 0; idx < total; idx++)
    {
        C[idx] = A[idx] + B[idx];
    }
}
//subtraction
/* Element-wise matrix subtraction C = A - B for h-by-w row-major matrices. */
void sub(float* C, float* A, float* B, int h, int w)
{
    int total = h * w;
    for (int idx = 0; idx < total; idx++)
    {
        C[idx] = A[idx] - B[idx];
    }
}
//Multiplication
/* Matrix product C = A * B where A is ha x wa and B is hb x wb (row-major;
 * wa must equal hb). C receives the ha x wb result. */
void mult(float* C, float* A, float* B, int ha, int wa, int hb, int wb)
{
    for (int row = 0; row < ha; row++)
    {
        for (int col = 0; col < wb; col++)
        {
            float acc = 0;
            for (int t = 0; t < wa; t++)
            {
                acc += A[row * wa + t] * B[t * wb + col];
            }
            C[row * wb + col] = acc;
        }
    }
}
//Transpose
/* Transpose the h x w row-major matrix A into the w x h matrix B. */
void transpose(float* B, float* A, int h, int w)
{
    for (int r = 0; r < h; r++)
    {
        for (int c = 0; c < w; c++)
        {
            B[c * h + r] = A[r * w + c];
        }
    }
}
//print the matrix
/* Print the h x w row-major matrix A to stdout, one row per line. */
void matPrint(float *A, int h, int w)
{
    for (int r = 0; r < h; r++)
    {
        for (int c = 0; c < w; c++)
        {
            printf("%f ", A[r * w + c]);
        }
        printf("\n");
    }
}
//Matrix Copy
/* Copy all h*w elements of the row-major matrix A into B. */
void matcopy(float *B, float *A, int h, int w)
{
    int total = h * w;
    for (int idx = 0; idx < total; idx++)
        B[idx] = A[idx];
}
// generating L
/* Fill the n x n row-major matrix L with a random lower-triangular matrix:
 * entries on or below the diagonal are in [1, 10], the rest are zero.
 * Reseeds rand() with the fixed seed 1, so output is reproducible. */
void generateL(float *L, int n)
{
    srand(1);
    for (int r = 0; r < n; r++)
    {
        for (int c = 0; c < n; c++)
        {
            /* rand() is only consumed for on/below-diagonal entries,
             * preserving the original draw sequence. */
            L[r*n + c] = (c <= r) ? (float)((rand() % 10) + 1) : 0.0f;
        }
    }
}
//Random Initialize
/* Fill an n1 x n2 buffer with random integer values in [1, 10].
 * Reseeds rand() with the fixed seed 1, so output is reproducible. */
void RandomInit(float* data, int n1, int n2)
{
    srand(1);
    int count = n1 * n2;
    for (int idx = 0; idx < count; ++idx)
        data[idx] = (float)((rand() % 10) + 1);
}
//Ideintity Matrix Generation
/* Write the n x n identity matrix into data (row-major). */
void Identity(float *data, int n)
{
    for (int r = 0; r < n; r++)
    {
        for (int c = 0; c < n; c++)
        {
            /* 1 on the diagonal, 0 everywhere else */
            data[r * n + c] = (r == c) ? 1 : 0;
        }
    }
}
// Set up the Kalman-style working set: random state X (ns x 1), observation
// Z (no x 1) and observation matrix H (no x ns) plus its transpose Ht;
// P, F, E and s are built as L * L^T from random lower-triangular factors
// (symmetric by construction); I becomes the ns x ns identity.
// NOTE(review): generateL reseeds srand(1) on every call, so all four
// L-factors are identical sequences — confirm this is intended.
void Initialize(float *X,float *P,float *F,float *Z,float *H,float *E,float *I,float *Ht,float *Ft,float *s, int ns, int no)
{
RandomInit(X, ns, 1);
RandomInit(Z, no, 1);
RandomInit(H, no, ns);
transpose(Ht,H,no,ns);
//printf("\n Transpose of H successful\n");
// P = P1 * P1^T (symmetric)
float *P1;
float *P2;
memAlloc(&P1,ns,ns);
memAlloc(&P2,ns,ns);
generateL(P1,ns);
transpose(P2,P1,ns,ns);
mult(P,P1,P2,ns,ns,ns,ns);
if(P1)
free(P1);
if(P2)
free(P2);
// F = F1 * F1^T (symmetric), plus its transpose Ft
float *F1;
float *F2;
memAlloc(&F1,ns,ns);
memAlloc(&F2,ns,ns);
generateL(F1,ns);
transpose(F2,F1,ns,ns);
mult(F,F1,F2,ns,ns,ns,ns);
if(F1)
free(F1);
if(F2)
free(F2);
transpose(Ft,F,ns,ns);
//printf("\n Transpose of F successful\n");
// E = E1 * E1^T (symmetric, no x no)
float *E1;
float *E2;
memAlloc(&E1,no,no);
memAlloc(&E2,no,no);
generateL(E1,no);
transpose(E2,E1,no,no);
mult(E,E1,E2,no,no,no,no);
if(E1)
free(E1);
if(E2)
free(E2);
// s = s1 * s1^T (symmetric, no x no)
float *s1;
float *s2;
memAlloc(&s1,no,no);
memAlloc(&s2,no,no);
generateL(s1,no);
transpose(s2,s1,no,no);
mult(s,s1,s2,no,no,no,no);
if(s1)
free(s1);
if(s2)
free(s2);
Identity(I, ns);
}
|
12,904 | #include<cmath>
#include<cstdio>
#define M 2
#define N 2
#define K 2
#define n 4
// C += A * B for the fixed M x K by K x N int matrices (row-major).
// One thread per output element; C must be zeroed on entry.
// Fixes: the original guard read the uninitialized local `j` instead of
// the thread index J, and combined the bounds with || instead of &&, so
// out-of-range threads could index past the matrices.
__global__
void multiply(int*A,int*B,int*C)
{
    // debug dump of A (every thread prints; kept from the original)
    for (int i = 0; i < M*K; i++)
    { printf("%d\n", A[i]); }
    int I = blockIdx.x*blockDim.x + threadIdx.x;   // output row
    int J = blockIdx.y*blockDim.y + threadIdx.y;   // output column
    if (I < M && J < N)
    {
        for (int k = 0; k < K; k++) {
            C[I*N+J] += A[I*K+k] * B[k*N+J];
        }
    }
    // debug dump of C (every thread prints; kept from the original)
    for (int i = 0; i < M*N; i++)
    { printf("[%d] =%d\n", i, C[i]);
    }
}
// Host driver: multiplies two fixed 2x2 int matrices on the device.
// Fixes: the launch now covers M x N threads (the original launched a
// single thread, computing at most one element), and cudaFree is called on
// the device pointers — the original freed the host stack arrays A/B/C,
// which is invalid.
int main(){
    int A[M][K]={{1,2},{3,1}};
    int B[K][N]={{2,4},{5,2}};
    int C[M][N]={{0,0},{0,0}};
    int* d_A;int* d_B;int* d_C;
    cudaMalloc(&d_A,n* sizeof(int));  // n = M*K = K*N = M*N elements
    cudaMalloc(&d_B,n* sizeof(int));
    cudaMalloc(&d_C,n* sizeof(int));
    // copy A, B and the zeroed C from host to device
    cudaMemcpy(d_A, &A[0],n* sizeof(int) , cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, &B[0],n *sizeof(int) , cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, &C[0],n*sizeof(int) , cudaMemcpyHostToDevice);
    dim3 block(M, N);                  // one thread per output element
    multiply<<<1, block>>>(d_A,d_B,d_C);
    // copy result back to host (synchronizes with the kernel)
    cudaMemcpy(&C[0], d_C,n* sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_A);                     // fix: free the device allocations
    cudaFree(d_B);
    cudaFree(d_C);
}
|
12,905 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
// Heap-allocate a length-element arithmetic sequence:
// result[i] = first + step * i. Caller owns the memory.
int *IntArray(int length, int first = 0, int step = 0)
{
    int *seq = (int *)malloc(sizeof(int) * length);
    for (int i = 0; i < length; i++)
        seq[i] = first + step * i;
    return seq;
}
// True iff the first length elements of a and b are pairwise equal.
bool CompIntArrays(int *a, int *b, int length)
{
    int i = 0;
    while (i < length)
    {
        if (a[i] != b[i])
            return false;
        ++i;
    }
    return true;
}
// True iff the first length elements of a and b are bitwise-equal floats
// (exact == comparison, no tolerance).
bool CompFloatArrays(float *a, float *b, int length)
{
    int i = 0;
    while (i < length)
    {
        if (a[i] != b[i])
            return false;
        ++i;
    }
    return true;
}
// Print length floats ("%3.3f " each), breaking the line every width
// elements, then print a final newline.
void PrintFloatArray(float *aa, int width, int length)
{
    for (int i = 0; i < length; i++) {
        printf("%3.3f ", aa[i]);
        bool endOfRow = (i > 0) && ((i + 1) % width == 0);
        if (endOfRow)
            printf("\n");
    }
    printf("\n");
}
// Print length ints ("%d " each), breaking the line every width elements,
// then print a final newline.
void PrintIntArray(int *aa, int width, int length)
{
    for (int i = 0; i < length; i++) {
        printf("%d ", aa[i]);
        bool endOfRow = (i > 0) && ((i + 1) % width == 0);
        if (endOfRow)
            printf("\n");
    }
    printf("\n");
}
// Print length unsigned ints, breaking the line every width elements,
// then print a final newline.
// Fix: use %u for unsigned int — the original's %d has undefined behavior
// for values above INT_MAX.
void PrintUintArray(unsigned int *aa, int width, int length)
{
    for (int i = 0; i < length; i++) {
        printf("%u ", aa[i]);
        if ((i>0) && ((i + 1) % width == 0)) printf("\n");
    }
    printf("\n");
}
// Heap-allocate arraySize floats drawn uniformly from [0, 1] via rand().
// Does not seed the generator — the caller controls srand(). Caller owns
// the returned memory.
float *RndFloat0to1(int arraySize)
{
    float *vals = (float*)malloc(arraySize * sizeof(float));
    for (int idx = 0; idx < arraySize; idx++)
        vals[idx] = (float)rand() / (float)(RAND_MAX);
    return vals;
}
// Heap-allocate arraySize unsigned ints filled with raw rand() draws.
// Does not seed the generator; caller owns the returned memory.
unsigned int *RndInts(int arraySize)
{
    unsigned int *vals = (unsigned int*)malloc(arraySize * sizeof(int));
    for (int idx = 0; idx < arraySize; idx++)
        vals[idx] = rand();
    return vals;
}
// Heap-allocate arraySize ints, each 1 with probability ~fracOnes, else 0.
// Caller owns the memory.
// Fix: allocate sizeof(int) per element — the original used sizeof(float),
// which only happens to match on common ILP32/LP64 platforms.
int *Rnd0or1(int arraySize, float fracOnes)
{
    int *temp = (int*)malloc(arraySize * sizeof(int));
    for (int i = 0; i<arraySize; i++) {
        float fv = (float)rand() / (float)(RAND_MAX);
        temp[i] = fv < fracOnes ? 1 : 0;
    }
    return temp;
}
// Heap-allocate arraySize ints, each 1 with probability ~fracOnes, else -1.
// Caller owns the memory.
// Fix: allocate sizeof(int) per element — the original used sizeof(float),
// which only happens to match on common ILP32/LP64 platforms.
int *Rnd_m1or1(int arraySize, float fracOnes)
{
    int *temp = (int*)malloc(arraySize * sizeof(int));
    for (int i = 0; i<arraySize; i++) {
        float fv = (float)rand() / (float)(RAND_MAX);
        temp[i] = fv < fracOnes ? 1 : -1;
    }
    return temp;
}
// Largest power of two whose square does not exceed rhs
// (e.g. 16 -> 4, 15 -> 2, anything below 4 -> 1).
unsigned int SqrtPow2Lb(unsigned int rhs)
{
    unsigned int best = 1;
    for (;;)
    {
        unsigned int next = best * 2;
        if (next * next > rhs)
            return best;
        best = next;
    }
}
// Build a span x span row-major image whose every row falls linearly from
// high_val at the left edge down toward low_val at the middle column, then
// rises linearly back toward high_val at the right edge.
// Caller owns the returned memory.
float *LeftRightGradient(unsigned int span, float low_val, float high_val)
{
    float step = (high_val - low_val) / (span / 2.0f);
    unsigned int mid = span / 2;
    float *img = (float*)malloc(span * span * sizeof(float));
    for (unsigned int r = 0; r < span; r++)
    {
        // left half: descending from high_val
        for (unsigned int c = 0; c < mid; c++)
            img[r * span + c] = high_val - c * step;
        // right half: ascending from low_val
        for (unsigned int c = mid; c < span; c++)
            img[r * span + c] = low_val + (c - mid) * step;
    }
    return img;
}
|
12,906 | #include <iostream>
#include <math.h>
#include <chrono>
#include <ctime>
#include <limits.h>
using namespace std;
// Grid-stride vector add on the device: y[i] += x[i] for i in [0, n).
// Valid for any launch configuration.
__global__
void cadd(int n, float*x, float *y){
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step)
        y[i] = x[i] + y[i];
}
// CPU reference: y[i] += x[i] for i in [0, n); returns y for convenience.
float* add(int n, float *x, float *y){
    for (int idx = 0; idx < n; ++idx)
        y[idx] += x[idx];
    return y;
}
// Benchmark: vector add on the CPU, then on the GPU with managed memory.
// Fixes vs. original: the kernel was launched with the already-deleted
// host pointers x and y instead of the managed buffers cx and cy
// (undefined behavior on the device), and cudaFree was likewise called on
// the host pointers instead of the managed allocations.
int main(void){
    int N = 1<<20;//INT_MAX/10;
    float *x = new float[N];
    float *y = new float[N];
    for(int i =0; i < N; i++){
        x[i] = i+1.0f;
        y[i] = i + 2.0f;
    }
    //CPU
    auto start = chrono::system_clock::now();
    float *z = add(N,x,y);
    auto end = chrono::system_clock::now();
    chrono::duration<double> timevar = end-start;
    std::cout << "Time to add: " << timevar.count() << std::endl;
    (void)z; // result only used for timing
    delete [] x;
    delete [] y;
    //GPU
    auto cstart = chrono::system_clock::now();
    float *cx, *cy;
    cudaMallocManaged(&cx, N*sizeof(float));
    cudaMallocManaged(&cy, N*sizeof(float));
    for(int i =0; i <N;i++){
        cx[i] = i + 1.0f;
        cy[i] = i + 2.0f;
    }
    int blocksize = 256;
    int numBlocks = (N + blocksize -1)/blocksize; // ceil-divide
    // fix: operate on the managed buffers, not the deleted host arrays
    cadd<<<numBlocks, blocksize>>>(N,cx,cy);
    cudaDeviceSynchronize();
    auto cend = chrono::system_clock::now();
    chrono::duration<double> ctimevar = cend-cstart;
    cout << "time to complete: " << ctimevar.count() << " seconds" << endl;
    // fix: free the cudaMallocManaged allocations
    cudaFree(cx);
    cudaFree(cy);
    return 0;
}
|
12,907 | #include <stdio.h>
#include <math.h>
#include <stdlib.h> // drand48
#include <time.h>
//#define DUMP
// Advance nParticles point masses one step of size dt under mutual gravity
// (unit masses, unit G). Each thread owns particle i: it accumulates the
// O(N) force sum, updates the velocity, then integrates the position.
// Fix: added a bounds guard — the original had none, so with the host's
// ceil-divided grid the tail threads of the last block read and wrote
// past the arrays.
__global__ void MoveParticles(const int nParticles,float* x, float* y, float* z, float* vx ,
                              float* vy, float* vz, const float dt) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= nParticles)
    return; // tail threads of the last block have no particle
  // Components of the gravity force on particle i
  float Fx = 0, Fy = 0, Fz = 0;
  // Loop over positions that exert force
  for (int j = 0; j < nParticles; j++) {
    // No self interaction
    if (i != j) {
      // Avoid singularity at zero separation
      const float softening = 1e-20;
      // Newton's law of universal gravity
      const float dx = x[j] - x[i];
      const float dy = y[j] - y[i];
      const float dz = z[j] - z[i];
      const float drSquared = dx*dx + dy*dy + dz*dz + softening;
      const float drPower32 = powf(drSquared, 3.0f/2.0f);
      // Accumulate the net force
      Fx += dx / drPower32;
      Fy += dy / drPower32;
      Fz += dz / drPower32;
    }
  }
  // Accelerate particle i in response to the gravitational force
  vx[i] += dt*Fx;
  vy[i] += dt*Fy;
  vz[i] += dt*Fz;
  // Move particle i according to its updated velocity
  x[i] += vx[i]*dt;
  y[i] += vy[i]*dt;
  z[i] += vz[i]*dt;
}
// Write the particle positions for iteration iter to output_cuda_<iter>.txt,
// one "x y z" line per particle.
void dump(int iter, int nParticles, float* x, float* y ,float* z)
{
  char filename[64];
  snprintf(filename, 64, "output_cuda_%d.txt", iter);
  FILE *f = fopen(filename, "w+");
  for (int i = 0; i < nParticles; i++)
  {
    fprintf(f, "%e %e %e\n", x[i], y[i], z[i]);
  }
  fclose(f);
}
// Driver: runs nSteps of the CUDA N-body kernel and reports per-step
// throughput. Usage: ./a.out [nParticles] [nSteps].
// Fixes vs. original:
//  - the kernel launch is asynchronous, so the timed region now includes
//    cudaDeviceSynchronize(); the original timed only the launch call;
//  - elapsed time was computed as (tStart - tEnd) with integer division,
//    giving a nonsensical value — now (tEnd - tStart) in floating point;
//  - device buffers are allocated once instead of per iteration;
//  - cleanup freed vz twice and never freed vy.
int main(const int argc, const char** argv)
{
  // Problem size and other parameters
  const int nParticles = (argc > 1 ? atoi(argv[1]) : 16384);
  // Duration of test
  const int nSteps = (argc > 2)?atoi(argv[2]):10;
  // Particle propagation time step
  const float dt = 0.0005f;
  size_t size = nParticles*sizeof(float);
  float* x = (float*)malloc(size);
  float* y = (float*)malloc(size);
  float* z = (float*)malloc(size);
  float* vx = (float*)malloc(size);
  float* vy = (float*)malloc(size);
  float* vz = (float*)malloc(size);
  // Initialize random number generator and particles in [-1, 1]
  srand48(0x2020);
  int i;
  for (i = 0; i < nParticles; i++)
  {
    x[i] = 2.0*drand48() - 1.0;
    y[i] = 2.0*drand48() - 1.0;
    z[i] = 2.0*drand48() - 1.0;
    vx[i] = 2.0*drand48() - 1.0;
    vy[i] = 2.0*drand48() - 1.0;
    vz[i] = 2.0*drand48() - 1.0;
  }
  // Perform benchmark
  printf("\nPropagating %d particles using 1 thread...\n\n",
         nParticles
         );
  float rate = 0, dRate = 0; // Benchmarking data
  const int skipSteps = 3; // Skip first iterations (warm-up)
  printf("\033[1m%5s %10s %10s %8s\033[0m\n", "Step", "Time, s", "Interact/s", "GFLOP/s"); fflush(stdout);
  // Allocate device buffers once (the original re-allocated every step)
  float *d_x,*d_y,*d_z,*d_vx,*d_vy,*d_vz ;
  cudaMalloc(&d_x, size);cudaMalloc(&d_y, size); cudaMalloc(&d_z, size);
  cudaMalloc(&d_vx, size);cudaMalloc(&d_vy, size); cudaMalloc(&d_vz, size);
  const int threadPerBlocs = 256;
  /* Ceil */
  const int blocksPerGrid = (nParticles + threadPerBlocs - 1) / threadPerBlocs;
  for (int step = 1; step <= nSteps; step++) {
    cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_z, z, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_vx, vx, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_vy, vy, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_vz , vz, size, cudaMemcpyHostToDevice);
    clock_t tStart = clock(); // Start timing
    MoveParticles<<< blocksPerGrid, threadPerBlocs >>>(nParticles,d_x,d_y,d_z,d_vx,d_vy,d_vz, dt);
    cudaDeviceSynchronize(); // fix: wait for the kernel before stopping the clock
    clock_t tEnd = clock(); // End timing
    // fix: tEnd - tStart in floating point (original subtracted in the
    // wrong order and truncated with integer division)
    float time_spent = (float)(tEnd - tStart) / CLOCKS_PER_SEC;
    cudaMemcpy(x, d_x, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(y, d_y, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(z, d_z, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(vx, d_vx, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(vy, d_vy, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(vz , d_vz, size, cudaMemcpyDeviceToHost);
    const float HztoInts = ((float)nParticles)*((float)(nParticles-1)) ;
    const float HztoGFLOPs = 20.0*1e-9*((float)(nParticles))*((float)(nParticles-1));
    if (step > skipSteps) { // Collect statistics after warm-up
      rate += HztoGFLOPs/(time_spent);
      dRate += HztoGFLOPs*HztoGFLOPs/((time_spent)*(time_spent));
    }
    printf("%5d %10.3e %10.3e %8.1f %s\n",
           step, (time_spent), HztoInts/(time_spent), HztoGFLOPs/(time_spent), (step<=skipSteps?"*":""));
    fflush(stdout);
#ifdef DUMP
    dump(step, nParticles, x,y,z);
#endif
  }
  cudaFree(d_x); cudaFree(d_y);cudaFree(d_z);
  cudaFree(d_vx); cudaFree(d_vy);cudaFree(d_vz);
  rate/=(float)(nSteps-skipSteps);
  dRate=sqrt(dRate/(float)(nSteps-skipSteps)-rate*rate);
  printf("-----------------------------------------------------\n");
  printf("\033[1m%s %4s \033[42m%10.1f +- %.1f GFLOP/s\033[0m\n",
         "Average performance:", "", rate, dRate);
  printf("-----------------------------------------------------\n");
  printf("* - warm-up, not included in average\n\n");
  free(x);free(y);free(z);
  free(vx);free(vy);free(vz); // fix: free vy (vz was freed twice before)
  return 0;
}
|
12,908 | #ifdef FAST_NOISE
// Generate thermally scaled random noise and project it onto each normal
// mode: each block handles one mode (grid-stride over modes), threads
// accumulate partial dot products of the noise with the mode vector into
// shared memory, and thread 0 reduces them into modeWeights[mode].
extern "C" __global__ void kFastNoise1_kernel( int numAtoms, int paddedNumAtoms, int numModes, float kT, float4 *noiseVal, float4 *velm, float4 *modes, float *modeWeights, const float4 *__restrict__ random, unsigned int randomIndex, float maxEigenvalue, float stepSize ) {
extern __shared__ float dotBuffer[];
// val is computed but never used in this kernel.
const float val = stepSize / 0.002;
// thermal noise amplitude sqrt(2 kT / lambda_max)
const float noisescale = sqrt( 2 * kT * 1.0f / maxEigenvalue );
for( int mode = blockIdx.x; mode < numModes; mode += gridDim.x ) {
float dot = 0.0f;
// NOTE(review): seed is set but never used — looks like dead code.
unsigned int seed = 100;
for( int atom = threadIdx.x; atom < numAtoms; atom += blockDim.x ) {
// NOTE(review): the random index depends only on the thread, not on
// `atom`, so a thread reuses the same random value for every atom it
// touches — confirm this is intended.
const float4 n = random[randomIndex + blockIdx.x * blockDim.x + threadIdx.x];
const float4 randomNoise = make_float4( n.x * noisescale, n.y * noisescale, n.z * noisescale, n.w * noisescale );
noiseVal[atom] = randomNoise;
float4 m = modes[mode * numAtoms + atom];
dot += randomNoise.x * m.x + randomNoise.y * m.y + randomNoise.z * m.z;
}
dotBuffer[threadIdx.x] = dot;
__syncthreads();
if( threadIdx.x == 0 ) {
float sum = 0;
for( int i = 0; i < blockDim.x; i++ ) {
sum += dotBuffer[i];
}
modeWeights[mode] = sum;
}
// NOTE(review): no __syncthreads() at the end of this loop — when
// gridDim.x < numModes, other threads can start the next mode iteration
// and overwrite dotBuffer while thread 0 is still summing. Verify.
}
}
// Remove the normal-mode-subspace component from the stored noise, weight
// the remainder by sqrt(inverse mass), and advance the atom positions by
// it. Requires dynamic shared memory of at least numModes floats.
extern "C" __global__ void kFastNoise2_kernel( int numAtoms, int numModes, float4 *posq, float4 *noiseVal, float4 *velm, float4 *modes, float *modeWeights ) {
/* Load the weights into shared memory.*/
extern __shared__ float weightBuffer[];
for( int mode = threadIdx.x; mode < numModes; mode += blockDim.x ) {
weightBuffer[mode] = modeWeights[mode];
}
__syncthreads();
/* Compute the projected forces and update the atom positions.*/
for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) {
// velm.w carries the inverse mass; sqrt of it mass-weights the noise.
const float invMass = velm[atom].w, sqrtInvMass = sqrt( invMass );
// r = sum over modes of weight_m * mode_m for this atom.
float3 r = make_float3( 0.0f, 0.0f, 0.0f );
for( int mode = 0; mode < numModes; mode++ ) {
float4 m = modes[mode * numAtoms + atom];
float weight = weightBuffer[mode];
r.x += m.x * weight;
r.y += m.y * weight;
r.z += m.z * weight;
}
// Subtract the in-subspace component, then mass-weight the remainder.
noiseVal[atom] = make_float4( noiseVal[atom].x - r.x, noiseVal[atom].y - r.y, noiseVal[atom].z - r.z, 0.0f );
noiseVal[atom].x *= sqrtInvMass;
noiseVal[atom].y *= sqrtInvMass;
noiseVal[atom].z *= sqrtInvMass;
// Advance the position by the projected, mass-weighted noise.
float4 pos = posq[atom];
pos.x += noiseVal[atom].x;
pos.y += noiseVal[atom].y;
pos.z += noiseVal[atom].z;
posq[atom] = pos;
}
}
#endif
|
12,909 | #include <iostream>
#include <stdio.h>
#include <time.h>
#define LENGTH 1000
#define T_WIDTH 16
using namespace std;
// Tiled square matrix multiply c = a * b using T_WIDTH x T_WIDTH
// shared-memory tiles. Assumes blockDim == (T_WIDTH, T_WIDTH).
// NOTE(review): width/T_WIDTH truncates and there is no bounds guard, so
// correctness requires width to be a multiple of T_WIDTH and the grid to
// exactly cover the matrix. With LENGTH = 1000 and T_WIDTH = 16 (see main)
// the trailing rows/columns are never computed — confirm.
__global__ void matmul_shared(float *a, float *b, float *c, int width)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
__shared__ float s_a[T_WIDTH][T_WIDTH];
__shared__ float s_b[T_WIDTH][T_WIDTH];
int row = ty + by*blockDim.y;
int col = tx + bx*blockDim.x;
float result = 0;
// March the tile window across the shared dimension.
for(int k = 0; k < width/T_WIDTH; ++k)
{
s_a[ty][tx] = a[row*width + (k*T_WIDTH + tx)];
s_b[ty][tx] = b[(k*T_WIDTH + ty)*width + col];
__syncthreads(); // tile fully loaded before anyone reads it
for(int p = 0; p < T_WIDTH; ++p){
result += s_a[ty][p] * s_b[p][tx];
}
__syncthreads(); // everyone done reading before the next load overwrites
}
c[row*width + col] = result;
}
// Naive square matrix multiply c = a * b (width x width, row-major), one
// thread per output element.
// Fix: added a bounds guard so a grid that over-covers the matrix cannot
// read or write out of range (the original had no guard).
__global__ void matmul(float *a, float *b, float *c, int width)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= width || col >= width)
        return;
    float result = 0;
    for(int k=0; k<width; k++){
        result += a[row*width + k]*b[k*width + col];
    }
    c[row*width + col] = result;
}
// Benchmark: times the naive and shared-memory matmul kernels on a
// LENGTH x LENGTH all-ones matrix.
// NOTE(review): LENGTH = 1000 is not divisible by 16, so the LENGTH/16
// grids truncate and the last 8 rows/columns are never computed; the
// result is also never copied back (commented out), h_a/h_b/h_c are never
// freed, and the CUDA events are never destroyed. Confirm intent.
int main(){
static float a_vec[LENGTH][LENGTH];
static float b_vec[LENGTH][LENGTH];
/* static float h_c[LENGTH][LENGTH]; */
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
h_a = (float*)malloc(LENGTH*LENGTH*sizeof(float));
h_b = (float*)malloc(LENGTH*LENGTH*sizeof(float));
h_c = (float*)malloc(LENGTH*LENGTH*sizeof(float));
// Fill both inputs with ones.
for(int i=0 ; i< LENGTH; i++)
{
for(int j=0;j<LENGTH;j++){
a_vec[i][j] = 1;
b_vec[i][j] = 1;
}
}
// Flatten the 2D arrays into the row-major host buffers.
int k = 0;
for(int i=0;i<LENGTH;i++)
{
for(int j=0;j<LENGTH;j++)
{
h_a[k] = a_vec[i][j];
h_b[k] = b_vec[i][j];
k+=1;
}
}
cudaMalloc((void**)&d_a, LENGTH*LENGTH*sizeof(float));
cudaMalloc((void**)&d_b, LENGTH*LENGTH*sizeof(float));
cudaMalloc((void**)&d_c, LENGTH*LENGTH*sizeof(float));
cudaMemcpy(d_a, h_a, LENGTH*LENGTH*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, LENGTH*LENGTH*sizeof(float), cudaMemcpyHostToDevice);
// Time the naive kernel with CUDA events.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
dim3 dimBlock1(16, 16, 1);
dim3 dimGrid1(LENGTH/16, LENGTH/16, 1);
matmul<<<dimGrid1, dimBlock1>>>(d_a, d_b, d_c, LENGTH);
/* matmul<<<LENGTH*LENGTH/256, 256>>>(d_a, d_b, d_c, LENGTH); */
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
// free the memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << "Time taken (normal) in ms: " << milliseconds << std::endl;
// Re-allocate and re-upload for the shared-memory variant.
cudaMalloc((void**)&d_a, LENGTH*LENGTH*sizeof(float));
cudaMalloc((void**)&d_b, LENGTH*LENGTH*sizeof(float));
cudaMalloc((void**)&d_c, LENGTH*LENGTH*sizeof(float));
cudaMemcpy(d_a, h_a, LENGTH*LENGTH*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, LENGTH*LENGTH*sizeof(float), cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
dim3 threads(16, 16, 1);
dim3 blocks(LENGTH/16, LENGTH/16, 1);
matmul_shared<<<blocks, threads>>>(d_a, d_b, d_c, LENGTH);
/* matmul_shared<<<LENGTH*LENGTH/100, 100>>>(d_a, d_b, d_c, LENGTH); */
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
/* cudaMemcpy(h_c, d_c, LENGTH*LENGTH*sizeof(float), cudaMemcpyDeviceToHost); */
/* for(int i=11000;i<11100;i++){ */
/* std::cout<<h_c[i]<<std::endl; */
/* } */
// free the memory
/* cudaFree(d_a); */
/* cudaFree(d_b); */
/* cudaFree(d_c); */
/* free(milliseconds); */
float millisecond = 0;
cudaEventElapsedTime(&millisecond, start, stop);
std::cout << "Time taken (shared memory) in ms: " <<fixed<<millisecond << std::endl;
}
|
12,910 | #include "includes.h"
// Scatter phase of a bucket sort: element tid moves to its bucket's base
// offset (d_bucketoffsets indexed by the element's bucket id in d_sublist)
// plus its precomputed rank within that bucket (d_indices).
__global__ void d_bucketsort(unsigned int * d_in, unsigned int * d_indices, unsigned int * d_sublist, unsigned int * r_outputlist, unsigned int * d_bucketoffsets, int itemCount) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= itemCount)
        return;
    int dest = d_bucketoffsets[d_sublist[tid]] + d_indices[tid];
    r_outputlist[dest] = d_in[tid];
}
12,911 | #include "includes.h"
// In-place complex multiply by conjugate: for each interleaved (re, im)
// pair k, afft[k] *= conj(bfft[k]). One thread per complex pair; totaltc
// is the total float count, so the real-part index is bounds-checked.
// NOTE(review): MAX_THREADS is assumed to come from includes.h and to
// equal the launch blockDim.x — confirm.
__global__ void complexmult_conj_kernal(float *afft, const float *bfft, int totaltc)
{
// flat thread id over a 2D grid, times 2 for the interleaved layout
const uint ridx = 2*(threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*MAX_THREADS);
//maybe use float2 to improve coalessing....
if (ridx < totaltc){
const uint iidx = ridx + 1;
float afftr = afft[ridx];
float affti = afft[iidx];
float bfftr = bfft[ridx];
float bffti = bfft[iidx];
afft[ridx] = afftr*bfftr + affti*bffti; //real portion
afft[iidx] = affti*bfftr - afftr*bffti; //imaginary portion
}
} |
12,912 | #include <stdio.h>
// these are just for timing measurments
#include <time.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const int block_size = 4; // CUDA maximum is 1024 *total* threads in block
//const float A_val = 3.0f;
//const float B_val = 2.0f;
// matrix multiply (naive) kernel: C = A * B
// Matrix multiply (naive) kernel: C = A * B where A is r1 x c1 and B is
// r2 x c2 (row-major, c1 == r2 required). One thread per element of the
// r1 x c2 output.
// Fix: the row guard tested idy < c1 (columns of A) instead of idy < r1
// (rows of C); when c1 > r1 that allowed out-of-bounds writes to C.
__global__ void mmul(const float *A, const float *B, float *C, int r1,int c1, int r2, int c2) {
    int idx = threadIdx.x+blockDim.x*blockIdx.x; // output column index
    int idy = threadIdx.y+blockDim.y*blockIdx.y; // output row index
    if ((idx < c2) && (idy < r1)){
        float temp = 0;
        for (int i = 0; i < c1; i++)
            temp += A[idy*c1+i] * B[i*c2+idx]; // row of A dot column of B
        C[idy*c2+idx] = temp;
    }
}
// Driver: reads matrix dimensions from stdin, loads A.txt and B.txt,
// multiplies on the GPU and writes the result to C.txt.
// Fixes vs. original: the grid was sized from ((c1+r2)/2), which is not
// related to the output shape — it now covers the r1 x c2 output;
// a dimension mismatch now aborts instead of continuing; the C.txt error
// message named matrix A; host/device memory is now released.
int main(){
    float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    // these are just for timing
    clock_t t0, t1, t2;
    double t1sum=0.0;
    double t2sum=0.0;
    // start timing
    t0 = clock();
    // getting matrix dimensions from the user
    int r1,c1,r2,c2;
    printf("Enter the row of 1st Matrix: ");
    scanf("%d",&r1);
    printf("Enter the column of 1st Matrix: ");
    scanf("%d",&c1);
    printf("Enter the row of 2st Matrix: ");
    scanf("%d",&r2);
    printf("Enter the column of 2st Matrix: ");
    scanf("%d",&c2);
    if (c1!=r2){
        printf("Invalid Matrix");
        return 1; // fix: the original fell through and multiplied anyway
    }
    h_A = new float[r1*c1];
    h_B = new float[r2*c2];
    h_C = new float[r1*c2];
    FILE* matrixA;
    matrixA = fopen("A.txt","r");
    if(matrixA==NULL){
        printf("Matrix A did not open \n");
        return 0 ;
    }
    int i =0;
    while(fscanf(matrixA,"%f", &h_A[i] )!= EOF){
        i++;
    }
    FILE* matrixB;
    matrixB = fopen("B.txt","r");
    if(matrixB==NULL){
        printf("Matrix B did not open \n");
        return 0;
    }
    i =0;
    while(fscanf(matrixB,"%f",&h_B[i] )!= EOF){
        i++;
    }
    // Initialization timing
    t1 = clock();
    t1sum = ((double)(t1-t0))/CLOCKS_PER_SEC;
    printf("Init took %f seconds. Begin compute\n", t1sum);
    // Allocate device memory and copy input data over to GPU
    cudaMalloc(&d_A, r1*c1*sizeof(float));
    cudaMalloc(&d_B, r2*c2*sizeof(float));
    cudaMalloc(&d_C, r1*c2*sizeof(float));
    cudaCheckErrors("cudaMalloc failure");
    cudaMemcpy(d_A, h_A, r1*c1*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, r2*c2*sizeof(float), cudaMemcpyHostToDevice);
    cudaCheckErrors("cudaMemcpy H2D failure");
    // Launch kernel: one thread per element of the r1 x c2 output.
    dim3 block(block_size, block_size);
    // fix: ceil-divide the output dimensions (c2 columns, r1 rows)
    dim3 grid((c2+block.x-1)/block.x, (r1+block.y-1)/block.y);
    mmul<<<grid, block>>>(d_A, d_B, d_C, r1,c1,r2,c2 );
    cudaCheckErrors("kernel launch failure");
    // Copy results back to host
    cudaMemcpy(h_C, d_C, r1*c2*sizeof(float), cudaMemcpyDeviceToHost);
    // Echo the inputs and result for inspection
    for (int i = 0; i < r1*c1; i++){
        printf("%.1f ", h_A[i]);
    }
    printf("\n");
    for (int i = 0; i < r2*c2; i++){
        printf("%.1f ", h_B[i]);
    }
    printf("\n");
    for (int i = 0; i < r1*c2; i++){
        printf("%.1f ", h_C[i]);
    }
    FILE* matrixC;
    matrixC = fopen("C.txt","w");
    if(matrixC==NULL){
        printf("Matrix C did not open \n"); // fix: message named matrix A
        return 0 ;
    }
    for(int i =0; i < r1*c2; i++){
        fprintf(matrixC, "%.1f ",h_C[i]);
    }
    fclose(matrixA);
    fclose(matrixB);
    fclose(matrixC);
    // GPU timing
    t2 = clock();
    t2sum = ((double)(t2-t1))/CLOCKS_PER_SEC;
    printf ("Done. Compute took %f seconds\n", t2sum);
    // Verify results
    cudaCheckErrors("kernel execution failure or cudaMemcpy H2D failure");
    // fix: release host and device memory (the original leaked everything)
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    delete[] h_A; delete[] h_B; delete[] h_C;
    return 0;
}
|
12,913 | #include "includes.h"
// Symmetrizes the (3*natoms x 3*natoms) Hessian in place by averaging each
// element with its transpose. One thread per matrix element; threads strictly
// below the diagonal exit early so each (r,c)/(c,r) pair is written once.
__global__ void symmetrize2D( float *h, int natoms ) {
    const int elementNum = blockIdx.x * blockDim.x + threadIdx.x;
    const int dof = 3 * natoms;            // matrix dimension (degrees of freedom)
    if( elementNum >= dof * dof ) {
        return;                            // grid-tail guard
    }
    int r = elementNum / dof;
    int c = elementNum % dof;
    if( r > c ) {
        return;                            // mirror thread handles this pair
    } else {
        // 0.5f keeps the arithmetic in single precision (the previous 0.5
        // double literal promoted every average to double).
        const float avg = 0.5f * ( h[r * dof + c] + h[c * dof + r] );
        h[r * dof + c] = avg;
        h[c * dof + r] = avg;
    }
}
12,914 | #include "includes.h"
// CUDA runtime
// includes
extern "C"
{
}
#define MEMSIZE 30
/* Function computing the final string to print */
/* Element-wise sum of three char buffers: res[i] = a[i] + b[i] + c[i]
   for every index below `length`; out-of-range threads do nothing. */
__global__ void kern_compute_string(char *res, char *a, char *b, char *c, int length)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length)
        return;
    res[idx] = a[idx] + b[idx] + c[idx];
}
12,915 | #include "includes.h"
// Applies in-place gamma/sqrt normalization to an interleaved 3-channel
// float image: img[p] = sqrt(img[p] / 256).
// Thread block layout: (3, n, m) — x is the color channel, y/z map to image
// columns/rows together with the 2D block grid.
__global__ void gamma_norm_kernel(float* img, int image_height, int image_width, int image_step)
{
    // The thread block has size (3,n). The first dimension of the thread block
    // corresponds to color channels.
    int channel = threadIdx.x;
    // The columns of the image are mapped to the first dimension of the block
    // grid, but to the second dimension of the thread block, as the first
    // already corresponds to color channels.
    int pixel_x = blockIdx.x * blockDim.y + threadIdx.y;
    // If current position is outside the image, stop here
    if(pixel_x >= image_width)
    {
        return;
    }
    // The rows of the image are mapped to the second dimension of the block
    // grid, but to the third dimension of the thread block.
    int pixel_y = blockIdx.y * blockDim.z + threadIdx.z;
    // If current position is outside the image, stop here
    if(pixel_y >= image_height)
    {
        return;
    }
    // Each row has image_step pixels and each pixel has three channels
    int in_pixel_idx = pixel_y * image_step + pixel_x * 3 + channel;
    // sqrtf keeps the computation in single precision (sqrt promoted the
    // operand to double and back on every pixel).
    img[in_pixel_idx] = sqrtf(img[in_pixel_idx] / 256.0f);
}
12,916 | #include "includes.h"
// Maps a real value to a double2 complex number according to the requested
// evolution type: 0 = identity (in + 0i), 1 = imaginary-time evolution
// (exp(-in) + 0i), 2 = real-time evolution (cos(-in) + i*sin(-in)).
// Unknown types now yield 0 + 0i instead of an uninitialized value.
__device__ double2 make_complex(double in, int evolution_type){
    double2 result;
    switch(evolution_type){
    // No change
    case 0:
        result.x = in;
        result.y = 0;
        break;
    // Im. Time evolution
    case 1:
        result.x = exp(-in);
        result.y = 0;
        break;
    // Real Time evolution
    case 2:
        result.x = cos(-in);
        result.y = sin(-in);
        break;
    // Previously 'result' was returned uninitialized for any other value.
    default:
        result.x = 0;
        result.y = 0;
        break;
    }
    return result;
}
// Converts three real values into complex form via make_complex().
// NOTE(review): every launched thread redundantly writes the same three
// elements; the commented-out lines suggest a per-thread version was
// intended. The hard-coded count of 3 presumably matches the caller's
// buffer size -- TODO confirm.
__global__ void make_complex_kernel(double *in, int *evolution_type, double2 *out){
//int id = threadIdx.x + blockIdx.x*blockDim.x;
//out[id] = make_complex(in[id], evolution_type[id]);
for (int i = 0; i < 3; ++i){
out[i] = make_complex(in[i], evolution_type[i]);
}
} |
12,917 | #include "includes.h"
/* In-place ReLU: clamps negative entries of y[0..n) to zero.
   Grid-stride loop, so any launch configuration covers the whole array. */
__global__ void _reluforw(int n, float *y) {
    const int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < n; idx += stride) {
        if (y[idx] < 0) y[idx] = 0;
    }
}
12,918 | #include<stdio.h>
#include<iostream>
#include<cuda.h>
// Addition kernel: adds each element's global thread index to itself
// (a[i] += i). Assumes the launch covers the array exactly.
__global__ void add(int* a){
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    a[gid] += gid;
}
// Driver: allocates a managed array of nBlocks*bDim ints, fills it with its
// own indices, doubles each entry on the GPU via add<<<>>> and prints it.
// NOTE(review): atoi() results are not validated (non-numeric args become 0),
// and no CUDA API return codes are checked -- confirm that is acceptable.
int main(int argc, char* argv[]){
if(argc!=3){
std::cout<<"Usage: "<<argv[0]<<" Numblocks BlockDim\n";
return 0;
}
int nBlocks= atoi(argv[1]);
int bDim = atoi(argv[2]);
if(bDim>1024){
std::cout<<"BlockDim should be less than or equal to 1024\n";
return 0;
}
int* da;
cudaMallocManaged (&da, nBlocks*bDim*sizeof(int));// Allocate CPU/GPU Memory
for(int i=0;i<nBlocks*bDim;i++) // Initalize CPU array
da[i]=i;
add<<<nBlocks,bDim>>>(da); // Call addition kernel
// Managed memory must not be touched by the host until the kernel finishes.
cudaDeviceSynchronize();
for(int i=0;i<nBlocks*bDim;i++) // Print final results
std::cout<<da[i]<<"\n";
cudaFree(da);
da=NULL;
}
|
12,919 | /**
* cuda_delta_knn.cu
* block loading delta calculation.
* system('nvcc -ptx -m 64 -arch sm_35 jrc3_cuda_rho.cu')
* J. James Jun, Flatiron Institute, 2018 July 5
*/
#include <cuda_runtime.h>
// #include "cublas_v2.h"
#include <math.h>
#define ABS(my_val) ((my_val) < 0) ? (-1*(my_val)) : (my_val)
#define MIN(A,B) ((A)<(B)) ? (A) : (B)
#define MAX(A,B) ((A)>(B)) ? (A) : (B)
#define NTHREADS 128
#define NC (3*20) // number of Channels
#define CHUNK 16 //previously defined as CHUNK
#define SINGLE_INF (3.402E+38)
// For each point in A, finds the nearest point in B with a strictly larger
// rho value (R_B > R_A), writing the distance to D and the 1-based (Matlab)
// index to N. Launch: 2D grid of CHUNK-point tiles over A; blockDim.x
// threads stride over B. Assumes blockDim.x <= NTHREADS, CHUNK <= blockDim.x
// and nC <= NC (shared tiles are sized by compile-time constants).
__global__ void cuda_delta_knn(float *D, unsigned int *N, const float *B, const float *A, const float *R_B, const float *R_A, const int *vnConst){
int nB = vnConst[0];
int nA = vnConst[1];
int nC = vnConst[2];
int tx = threadIdx.x;
int iA = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNK + tx;
int nThreads = blockDim.x; // must be less than NTHREADS
__shared__ float sA[NC][CHUNK], sR_A[CHUNK]; // march through A
__shared__ float sD[NTHREADS][CHUNK]; // count then divide later
__shared__ unsigned int sN[NTHREADS][CHUNK];
// initialize
// Threads 0..CHUNK-1 stage this tile's A columns and rho values into shared
// memory and reset every thread's best-distance slots to +inf.
if (tx < CHUNK){
if (iA < nA){
int iA_ = tx;
for (int iC=0; iC<nC; ++iC) sA[iC][tx] = A[iC + iA*nC]; // copy A->sA
sR_A[iA_] = R_A[iA]; // copy R_A->sR_A
for (int iB_=0; iB_<nThreads; ++iB_) sD[iB_][iA_] = SINGLE_INF; // sD = inf
}
}
__syncthreads();
// Search min distance having a greater rho
// Each thread strides over B, keeping a private best candidate per tile
// column in sD[tx][*]/sN[tx][*] (no cross-thread writes, so no races).
for (int iB=tx; iB<nB; iB+=nThreads){
// compute distance
float dist_[CHUNK];
for (int iA_=0; iA_<CHUNK; ++iA_) dist_[iA_] = 0.0f;
for (int iC=0; iC<nC; ++iC){
float b_ = B[iC + iB * nC];
for (int iA_=0; iA_<CHUNK; ++iA_){
float d_ = b_ - sA[iC][iA_];
dist_[iA_] += (d_ * d_);
}
}
// Compare the index and distance
float rb_ = R_B[iB];
int iB_ = tx;
for (int iA_=0; iA_<CHUNK; ++iA_){
if (rb_ > sR_A[iA_] && dist_[iA_] < sD[iB_][iA_]){
sD[iB_][iA_] = dist_[iA_];
sN[iB_][iA_] = iB;
}
}
} // while
__syncthreads();
// final count
// Threads 0..CHUNK-1 serially reduce the per-thread candidates for their
// column. NOTE(review): columns with iA >= nA are compared against
// uninitialized sD/sR_A values in the loop above, but those results are
// never read here (same iA < nA guard) -- confirm this is intentional.
if (tx < CHUNK){
if (iA < nA){
int iA_ = tx;
float dmin_ = SINGLE_INF;
unsigned int imin_ = iA; // point to self initially
for (int iB_=0; iB_<nThreads; ++iB_){
if (sD[iB_][iA_] < dmin_){
dmin_ = sD[iB_][iA_];
imin_ = sN[iB_][iA_];
}
}
D[iA] = sqrtf(ABS(dmin_));
N[iA] = imin_ + 1; //Matlab index output
}
}
} // func |
12,920 | #include "includes.h"
using namespace std;
// https://stackoverflow.com/questions/26853363/dot-product-for-dummies-with-cuda-c
// Dot product: tmp[i] = t1[i]*t2[i], then a tree reduction sums the products
// into tmp[0]. NOTE: __syncthreads() only synchronizes within a block, so the
// final value in tmp[0] is only correct for single-block launches; `size`
// should be a power of two or the tail elements are dropped (as before).
// Fixes vs. the original: a bounds guard prevents writes past tmp[size], and
// the reduction no longer places __syncthreads() inside a per-thread `while`
// whose trip count differed between threads (undefined behavior).
__global__ void dotCuda(float* tmp, float* t1, float* t2, int size) {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        tmp[i] = t1[i] * t2[i];
    }
    __syncthreads();
    // Halve the active range each step; every thread reaches every barrier.
    for (int s = size / 2; s > 0; s /= 2) {
        if (i < s) {
            tmp[i] += tmp[i + s];
        }
        __syncthreads();
    }
}
12,921 | #include <stdio.h>
#include <stdlib.h>
#define N 10000
// Aborts the process with a readable message whenever a CUDA call fails;
// a cudaSuccess status passes straight through.
void handleError(cudaError_t error) {
    if(error == cudaSuccess) {
        return;
    }
    printf("Error: %s\n", cudaGetErrorString(error));
    exit(EXIT_FAILURE);
}
// One block per element: blockIdx.x selects which pair to add.
__global__ void add(int *a, int *b, int *c) {
    const int tid = blockIdx.x;
    if(tid >= N) {
        return;
    }
    c[tid] = a[tid] + b[tid];
}
// Fills a[] with -i and b[] with i*i, adds them on the GPU (one block per
// element) and prints every sum. All CUDA API calls are routed through
// handleError(); the kernel launch is now checked via cudaGetLastError().
int main(int argc, char *argv[]) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    handleError(cudaMalloc((void **) &dev_a, sizeof(int) * N));
    handleError(cudaMalloc((void **) &dev_b, sizeof(int) * N));
    handleError(cudaMalloc((void **) &dev_c, sizeof(int) * N));
    for(int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }
    handleError(cudaMemcpy(dev_a, a, sizeof(int) * N, cudaMemcpyHostToDevice));
    handleError(cudaMemcpy(dev_b, b, sizeof(int) * N, cudaMemcpyHostToDevice));
    add<<<N,1>>>(dev_a, dev_b, dev_c);
    // Launches return no status directly; surface any launch-configuration
    // error here (the following blocking memcpy also syncs with the kernel).
    handleError(cudaGetLastError());
    handleError(cudaMemcpy(c, dev_c, sizeof(int) * N, cudaMemcpyDeviceToHost));
    for(int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
12,922 | //--------------------------------------------//
//Histogram for bag of words CPU
//Authors: Hong Xu and Rajath Javali
//--------------------------------------------//
#include <stdio.h>
#include <iterator>
#include <sys/types.h>
#include <dirent.h>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <iterator>
#include <iostream>
#include <algorithm>
#include <typeinfo>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
using namespace std;
#define BLOCK_SIZE 1024
#define BLOCK_SIZE2 32
#define MAX_K 20000
typedef std::vector<std::string> stringvec;
//List of structs
// A set of SIFT descriptors loaded from one text file (see readSift()).
struct Sift{
float* sift;     // flat malloc'd buffer: numSift descriptors x elementSize floats
int numSift;     // number of descriptors (lines in the source file)
int totalNum;    // total float count = numSift * elementSize
int elementSize; // floats per descriptor (128 throughout this program)
};
// A normalized bag-of-words histogram for one image.
struct Histogram{
float* vals;     // malloc'd bin values (released via freeHisto())
int size;        // number of bins (= number of centroids)
string filename; // path of the image's sift file the histogram came from
};
//List of functions
int countlines(char *filename);
void readFromFile(int size,float *v, char *file);
Histogram createHist(Sift cent, Sift im);
Sift readSift(char *filename);
void printSift(Sift sift, char option);
void freeSift(Sift sift);
void printHisto(Histogram h, char option);
void freeHisto(Histogram h);
void printFiles(stringvec v);
void read_directory(const std::string& name, stringvec& v);
__global__ void createHistCuda (float* siftCentroids, float* siftImage, int linesCent, int linesIm,float*temp);
vector<Sift> readMultiSifts(stringvec v);
vector<Histogram> createHistogramCudaAux(Sift& siftCentroids, vector<Sift>& imSifts, stringvec& v);
vector<vector<int> > matchCudaAux(vector<Histogram>& query, vector<Histogram>& database);
__global__ void matchHistCuda(float*qSet, float*dbSet, size_t qSize, size_t dbSize, size_t hSize, float*out);
void histogram2array(float* array, vector<Histogram>& hists);
template <class T>
void arrayto2Dvec(T* array, vector<vector<T> >& vec, size_t x, size_t y);
template <class T>
void print2DVec(vector<vector<T> >& vec);
template<class T>
void print_array(T& array, int m, int n);
__global__ void rowMin(float* input, int* output, size_t rowS, size_t rowNum);
string extName(string s);
vector<vector<string> > comFinal(vector<Histogram>& query, vector<Histogram>& database, vector<vector<int> >& values, int num);
void analize(vector<vector<string> >& results);
string extRight(string s);
void freeHistograms(vector<Histogram> histos);
//---------------------------------------------//
//Main
//---------------------------------------------//
// Pipeline driver: reads query and database SIFT files, builds normalized
// bag-of-words histograms on the GPU, matches database images against the
// queries, and prints top-5 results plus overall accuracy.
// NOTE(review): argc/argv are accepted but never used; input directories and
// the centroid file are hard-coded below -- confirm that is intended.
int main(int argc, char *argv[])
{
//Header
printf("\t\t###########################################\n\t\t####### Histogram Program #############\n\t\t###########################################\n\n");
//Getting query file names
stringvec vquery;
string name = "../input/querySift/";
read_directory(name, vquery);
printFiles(vquery);
//Getting database file names
stringvec vdatabase;
name = "../input/databaseSift/";
read_directory(name, vdatabase);
printFiles(vdatabase);
//Readsing Centroid sifts
// The visual vocabulary ("centroids") is taken from a single query file.
Sift siftCentroids = readSift((char *)"../input/querySift/1.png.txt");
printSift(siftCentroids, 'i');
//Reads query sifts from file
std::vector<Sift> imSiftsQuery = readMultiSifts(vquery);
//Reads database sifts from file
std::vector<Sift> imSiftsDatabase = readMultiSifts(vdatabase);
//Creates query histograms CUDA
vector<Histogram> queryHistograms = createHistogramCudaAux(siftCentroids, imSiftsQuery,vquery);
//Creates database histograms CUDA
vector<Histogram> databaseHistograms = createHistogramCudaAux(siftCentroids, imSiftsDatabase,vdatabase);
//Freeing sift memory (raw descriptors are no longer needed once binned)
freeSift(siftCentroids);
for(int i = 0; i < imSiftsQuery.size(); i++){
freeSift(imSiftsQuery[i]);
}
for(int i = 0; i < imSiftsDatabase.size(); i++){
freeSift(imSiftsDatabase[i]);
}
//Matching, and sorting the histograms
vector<vector<int> > values = matchCudaAux(queryHistograms, databaseHistograms);
cout << "Matched" << endl;
//Computing results
vector<vector<string> > results = comFinal(queryHistograms, databaseHistograms, values, 5);
cout << "Results Computed" << endl;
//Final analysis and results
analize(results);
//Free histograms
freeHistograms(queryHistograms);
freeHistograms(databaseHistograms);
printf("Done!\n\n");
return 0;
}
//---------------------------------------------//
//Free vector of histograms
//---------------------------------------------//
// Releases the heap buffer owned by every histogram in the collection.
void freeHistograms(vector<Histogram> histos){
    for(size_t idx = 0; idx < histos.size(); ++idx){
        freeHisto(histos[idx]);
    }
}
//---------------------------------------------//
//Analyses and displays final results
//---------------------------------------------//
// Prints every database image's ranked matches, flagging query names that
// equal the database image's label (the text before its last '_'), and
// reports overall accuracy. A database image counts as correct when any of
// its listed matches hits the label.
void analize(vector<vector<string> >& results){
printf("\t\t###########################################\n\t\t######### Result Analysis #############\n\t\t###########################################\n\n");
cout << "\n-------------------\nMatches\n-------------------\n"<< endl;
int corrects = 0;
for(int i = 0; i < results.size(); i++){
string dbIm = results[i][0];
string rightAns = extRight(dbIm);
int right = 0;
cout << "Database image: " << dbIm << "\nRight Answer: "<< rightAns << endl;
// results[i][0] is the database image itself; matches start at index 1.
for(int j = 1; j < results[i].size(); j++){
if(rightAns == results[i][j]){
cout << "\t==" << results[i][j] << endl;
right = 1;
}
else{
cout << "\t" << results[i][j] << endl;
}
}
if(right == 1){
corrects++;
}
cout << endl;
}
float accuracy = ((float)corrects)/((float)results.size());
cout << "-------------------------------\n----Final Accuracy: " << accuracy << "-------\n-------------------------------\n" << endl;
}
//---------------------------------------------//
//Extracts right answer
//---------------------------------------------//
// Returns the prefix before the last '_' (the class label encoded in a file
// name, e.g. "cat_01" -> "cat"); a string with no '_' comes back unchanged.
string extRight(string s){
    size_t pos = s.find_last_of('_');
    return (pos == string::npos) ? s : s.substr(0, pos);
}
//---------------------------------------------//
//Computes final results
//---------------------------------------------//
// For every database histogram, builds a row holding its own cleaned file
// name followed by the cleaned names of its top-`num` query matches, using
// the ranked indices produced by matchCudaAux().
vector<vector<string> > comFinal(vector<Histogram>& query, vector<Histogram>& database, vector<vector<int> >& values, int num){
    vector<vector<string> > ans;
    for(size_t row = 0; row < values.size(); ++row){
        vector<string> entry;
        entry.push_back(extName(database[row].filename));
        for(int rank = 0; rank < num; ++rank){
            entry.push_back(extName(query[values[row][rank]].filename));
        }
        ans.push_back(entry);
    }
    return ans;
}
//---------------------------------------------//
//Name extraction
//---------------------------------------------//
// Pulls the bare file name out of a path: the text between the final '/'
// and the first '.' after it, e.g. "../querySift/1.png.txt" -> "1".
string extName(string s){
    int slashPos = s.length() - 1;
    while(slashPos >= 0 && s[slashPos] != '/'){
        slashPos--;
    }
    // Walk forward from the slash until the first dot, counting characters.
    // (The mixed signed/unsigned comparison intentionally mirrors the
    // original so a slash-less input behaves identically.)
    int span = 0;
    int cursor = slashPos;
    while(cursor < s.length() && s[cursor] != '.'){
        cursor++;
        span++;
    }
    return s.substr(slashPos + 1, span - 1);
}
//---------------------------------------------//
//Cuda Creates Histogram
//---------------------------------------------//
// For one image descriptor (blockIdx.y), each block scores up to BLOCK_SIZE
// centroids (blockIdx.x tile) by cosine similarity, max-reduces the scores in
// shared memory, and writes the block's best (score, centroid index) pair
// into temp[]; the host finishes the arg-max across blocks.
// Fixes vs. the original: the cosine denominator multiplied (not divided) the
// two norms; the shared array is seeded so partial blocks don't reduce
// uninitialized values; __syncthreads() is no longer inside a divergent
// branch (undefined behavior when some threads skipped it).
__global__ void createHistCuda (float* siftCentroids, float* siftImage, int linesCent, int linesIm, float* temp)
{
    __shared__ float cosines[BLOCK_SIZE][2];
    size_t idx = blockIdx.x*blockDim.x + threadIdx.x; // centroid index
    size_t idy = blockIdx.y;                          // image-descriptor index
    size_t tid = threadIdx.x;
    // Seed every slot: scores lie in [-1,1], so -2 never wins the reduction.
    cosines[tid][0] = -2.0f;
    cosines[tid][1] = 0.0f;
    if(idx < linesCent){
        int centin = idx * 128; // 128 floats per SIFT descriptor
        int imin = idy * 128;
        //Cosine similarity code ------------
        float sumab = 0;
        float suma2 = 0;
        float sumb2 = 0;
        for(int k = 0; k < 128; k++){
            sumab += siftCentroids[centin + k] * siftImage[imin + k];
            suma2 += siftImage[imin + k] * siftImage[imin + k];
            sumb2 += siftCentroids[centin + k] * siftCentroids[centin + k];
        }
        // cos(a,b) = a.b / (|a| * |b|)
        float cossim = sumab/(sqrtf(suma2)*sqrtf(sumb2));
        cosines[tid][0] = cossim;
        cosines[tid][1] = idx;
    }
    __syncthreads();
    // Max-reduction over the block's scores; every thread hits each barrier.
    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (tid < s){
            size_t tid2 = tid + s;
            if(cosines[tid2][0] > cosines[tid][0]){
                cosines[tid][0] = cosines[tid2][0];
                cosines[tid][1] = cosines[tid2][1];
            }
        }
        __syncthreads();
    }
    if (tid == 0){
        temp[(blockIdx.y*gridDim.x + blockIdx.x)*2] = cosines[0][0];
        temp[(blockIdx.y*gridDim.x + blockIdx.x)*2+1] = cosines[0][1];
    }
}
//---------------------------------------------//
//Histogram Matching CUDA
//---------------------------------------------//
// Computes the full qSize x dbSize cosine-similarity matrix between the
// flattened query histograms (qSet) and database histograms (dbSet), each
// hSize floats long. out is row-major with one row per database histogram.
// Fix vs. the original: the denominator divided the two norms
// (sqrt(a)/sqrt(b)) instead of multiplying them.
__global__ void matchHistCuda(float*qSet, float*dbSet, size_t qSize, size_t dbSize, size_t hSize, float*out){
    size_t idx = blockIdx.x*blockDim.x + threadIdx.x; // query index
    size_t idy = blockIdx.y*blockDim.y + threadIdx.y; // database index
    if(idx < qSize && idy < dbSize){
        size_t qi = idx*hSize;
        size_t dbi = idy*hSize;
        //Cosine similarity code ------------
        float sumab = 0;
        float suma2 = 0;
        float sumb2 = 0;
        for(int k = 0; k < hSize; k++){
            sumab += qSet[qi+k] * dbSet[dbi+k];
            suma2 += qSet[qi+k] * qSet[qi+k];
            sumb2 += dbSet[dbi+k] * dbSet[dbi+k];
        }
        // cos(a,b) = a.b / (|a| * |b|)
        float cossim = sumab/(sqrtf(suma2)*sqrtf(sumb2));
        out[idy*qSize + idx] = cossim;
    }
}
//---------------------------------------------//
//Sorting each row
//---------------------------------------------//
// One thread per row: orders the row's rowS similarity scores in DESCENDING
// order via selection sort and writes out the original column indices, so
// output[row*rowS] is the best match. NOTE(review): despite the name this
// selects maxima, which matches how the similarity scores are consumed.
__global__ void rowMin(float* input, int* output, size_t rowS, size_t rowNum){
size_t id = blockIdx.x*blockDim.x + threadIdx.x;
if(id < rowNum){
// NOTE(review): MAX_K/2 (score,index) float pairs = 80000 bytes of local
// memory per thread; rows longer than MAX_K/2 would overrun this buffer --
// confirm rowS stays below that bound at every call site.
float temp[MAX_K/2][2];
size_t inId = id * rowS;
// Stage the row together with each value's original column index.
for(int i = 0; i< rowS;i++){
temp[i][0] = input[inId + i];
temp[i][1] = (float)i;
}
// Selection sort: pull the largest remaining score into position i.
for(int i = 0; i< rowS; i++){
float best = temp[i][0];
int bestInd = i;
for(int j = i; j < rowS; j++){
if(temp[j][0] > best){
best = temp[j][0];
bestInd = j;
}
}
float iVal = temp[i][0];
float iInd = temp[i][1];
temp[i][0] = temp[bestInd][0];
temp[i][1] = temp[bestInd][1];
temp[bestInd][0] = iVal;
temp[bestInd][1] = iInd;
}
// Emit only the ranked original indices.
for(int i = 0; i< rowS; i++){
output[inId+i] = (int)temp[i][1];
}
}
}
//---------------------------------------------//
//Histogram Matching Auxiliary
//Input: Query histograms and database histograms
//---------------------------------------------//
// Scores every query histogram against every database histogram on the GPU
// (cosine similarity via matchHistCuda), then ranks each database row's
// matches with rowMin. Returns, per database histogram, the query indices
// ordered best-first. Fix vs. the original: d_results was never freed.
vector<vector<int> > matchCudaAux(vector<Histogram>& query, vector<Histogram>& database){
    const size_t hSize = query[0].size;
    const size_t querySize = query.size();
    const size_t databaseSize = database.size();
    // Flatten both histogram sets into contiguous row-major host buffers.
    float* h_qSet = (float*) malloc(querySize*hSize*sizeof(float));
    histogram2array(h_qSet, query);
    float* h_dbSet = (float*) malloc(databaseSize*hSize*sizeof(float));
    histogram2array(h_dbSet, database);
    float* d_qSet;
    cudaMalloc((void **) &d_qSet, querySize*hSize*sizeof(float));
    cudaMemcpy(d_qSet, h_qSet, querySize*hSize*sizeof(float),cudaMemcpyHostToDevice);
    float* d_dbSet;
    cudaMalloc((void **) &d_dbSet, databaseSize*hSize*sizeof(float));
    cudaMemcpy(d_dbSet, h_dbSet, databaseSize*hSize*sizeof(float),cudaMemcpyHostToDevice);
    // One similarity value per (query, database) pair.
    float * d_results;
    cudaMalloc((void **) &d_results, querySize*databaseSize*sizeof(float));
    dim3 grid((querySize + BLOCK_SIZE2 - 1)/BLOCK_SIZE2, (databaseSize+ BLOCK_SIZE2 - 1)/BLOCK_SIZE2);
    dim3 block(BLOCK_SIZE2,BLOCK_SIZE2);
    matchHistCuda<<<grid,block>>>(d_qSet, d_dbSet, querySize, databaseSize, hSize, d_results);
    float* out_results = (float*) malloc(querySize*databaseSize*sizeof(float));
    cudaMemcpy(out_results, d_results,querySize*databaseSize*sizeof(float),cudaMemcpyDeviceToHost);
    // Release the similarity matrix (previously leaked) and the inputs.
    cudaFree(d_results);
    free(h_qSet);
    free(h_dbSet);
    cudaFree(d_qSet);
    cudaFree(d_dbSet);
    vector<vector<int> > res;
    // Round-trip the similarities back to the device for the ranking kernel.
    float * d_input;
    cudaMalloc((void **) &d_input, querySize*databaseSize*sizeof(float));
    cudaMemcpy(d_input, out_results, databaseSize*querySize*sizeof(float),cudaMemcpyHostToDevice);
    int * d_output;
    cudaMalloc((void **) &d_output, querySize*databaseSize*sizeof(int));
    dim3 grid2((databaseSize + BLOCK_SIZE - 1)/BLOCK_SIZE);
    dim3 block2(BLOCK_SIZE);
    rowMin<<<grid2,block2>>>(d_input, d_output, querySize, databaseSize);
    int * out_output = (int*) malloc(querySize*databaseSize*sizeof(int));
    cudaMemcpy(out_output, d_output,querySize*databaseSize*sizeof(int),cudaMemcpyDeviceToHost);
    // One row per database histogram, querySize ranked indices per row.
    arrayto2Dvec<int>(out_output, res, querySize, databaseSize);
    free(out_results);
    cudaFree(d_input);
    cudaFree(d_output);
    free(out_output);
    return res;
}
//---------------------------------------------//
//Print_array
//---------------------------------------------//
//---------------------------------------------//
//Dumps an m x n row-major array to stdout, one row per line.
//---------------------------------------------//
template<class T>
void print_array(T& array, int m, int n) {
    for(int r = 0; r < m; ++r) {
        for(int c = 0; c < n; ++c) {
            std::cout << array[r * n + c] << " ";
        }
        std::cout << std::endl;
    }
}
//---------------------------------------------//
//1D array to 2D vector
//---------------------------------------------//
// Appends y rows of x elements each to `vec`, reading the flat buffer
// `array` in row-major order.
template <class T>
void arrayto2Dvec(T* array, vector<vector<T> >& vec, size_t x, size_t y){
    for(size_t row = 0; row < y; ++row){
        vec.push_back(vector<T>(array + row * x, array + (row + 1) * x));
    }
}
//---------------------------------------------//
//Print 2D Vector
//---------------------------------------------//
// Prints the vector's dimensions and element type, then every row of values.
template <class T>
void print2DVec(vector<vector<T> >& vec){
    cout << "Vec x size: " << vec.size() << "\nVec y size: " << vec[0].size() << "\nType: " << typeid(vec[0][0]).name() << endl;
    for(size_t r = 0; r < vec.size(); ++r){
        for(size_t c = 0; c < vec[0].size(); ++c){
            cout << vec[r][c] << " ";
        }
        cout << endl;
    }
}
//---------------------------------------------//
//Histograms to 1D array
//---------------------------------------------//
// Flattens the histogram bin values into one contiguous row-major buffer.
// The indexing presumes every histogram shares the same size -- which holds
// here since all are built from the same centroid set.
void histogram2array(float* array, vector<Histogram>& histos){
    for(size_t h = 0; h < histos.size(); ++h){
        const int len = histos[h].size;
        for(int k = 0; k < len; ++k){
            array[h * len + k] = histos[h].vals[k];
        }
    }
}
//---------------------------------------------//
//Reads in sifts
//---------------------------------------------//
// Loads a Sift record from every real file in the listing, skipping the
// directory entries "." / ".." and the aggregate file "all.txt".
vector<Sift> readMultiSifts(stringvec v){
    std::vector<Sift> imSifts;
    for(size_t n = 0; n < v.size(); ++n){
        const string& path = v[n];
        if(path == "." || path == ".." || path == "all.txt"){
            continue;
        }
        // readSift takes a mutable char*, so stage the path in a buffer.
        std::vector<char> fn(path.begin(), path.end());
        fn.push_back('\0');
        imSifts.push_back(readSift(&fn[0]));
    }
    return imSifts;
}
//---------------------------------------------//
//Auxiliary creates histogram cuda
//Inputs: siftCentroids, vector of sift images, vector of filenames
//Output: vector of histograms
//---------------------------------------------//
// Builds one normalized bag-of-words histogram per image: the kernel emits
// each block's best (similarity, centroid) pair per image descriptor, and
// the host finishes the arg-max over block partials before binning.
vector<Histogram> createHistogramCudaAux(Sift& siftCentroids, vector<Sift>& imSifts, stringvec& v){
vector<Histogram> histograms;
//Passed Vectors
float* d_cent;
//Memory allocation for centroids (uploaded once, reused for every image)
int centS = siftCentroids.numSift;
int centTotal = siftCentroids.totalNum;
cudaMalloc((void **) &d_cent, centTotal*sizeof(float));
cudaMemcpy(d_cent, siftCentroids.sift, centTotal*sizeof(float),cudaMemcpyHostToDevice);
for(int i = 0; i < imSifts.size(); i++){
//Memory allocation for image sifts
// NOTE(review): per-image cudaMalloc/cudaFree inside this loop is costly;
// reusing one max-sized buffer would avoid repeated allocations.
float* d_im;
int imS = imSifts[i].numSift;
int imTotal = imSifts[i].totalNum;
cudaMalloc((void **) &d_im, imTotal*sizeof(float));
cudaMemcpy(d_im, imSifts[i].sift, imTotal*sizeof(float),cudaMemcpyHostToDevice);
//Memory allocation for histogram (host-side, zeroed bins)
float* hist = (float*) malloc(centS*sizeof(float));
memset(hist,0,centS*sizeof(float));
dim3 grid((centS+BLOCK_SIZE-1)/BLOCK_SIZE, imS);
dim3 block(BLOCK_SIZE);
//Temporary array creation: one (score, centroid index) pair per
//(image descriptor, centroid block)
float* d_temp;
cudaMalloc((void **) &d_temp, imS*(centS+BLOCK_SIZE-1)/BLOCK_SIZE*2*sizeof(float));
cudaMemset(d_temp, 0, imS*(centS+BLOCK_SIZE-1)/BLOCK_SIZE*2*sizeof(float));
//Runs cuda code
createHistCuda<<<grid,block>>>(d_cent,d_im,centS,imS,d_temp);
//Copy temp back (this blocking copy also synchronizes with the kernel)
size_t tempS = imS*(centS+BLOCK_SIZE-1)/BLOCK_SIZE*2;
float* out_temp = (float*) malloc(tempS*sizeof(float));
cudaMemcpy(out_temp, d_temp,tempS*sizeof(float),cudaMemcpyDeviceToHost);
size_t tempSx = imS;
size_t tempSy = (centS+BLOCK_SIZE-1)/BLOCK_SIZE;
// Host-side arg-max across the per-block winners for each descriptor,
// then accumulate the descriptor's fractional weight into the winner's bin.
for(size_t j = 0; j < tempSx; j++){
float max = 0;
int maxInd = 0;
for(size_t k = 0; k < tempSy; k++){
size_t ind = (j*tempSy + k)*2;
if(out_temp[ind] > max){
max = out_temp[ind];
maxInd = (int)out_temp[ind+1];
}
}
hist[maxInd] += 1.0f/(float)imS;
}
Histogram th = {.vals = hist, .size = centS, .filename = v[i]};
histograms.push_back(th);
cudaFree(d_im);
cudaFree(d_temp);
free(out_temp);
}
cudaFree(d_cent);
return histograms;
}
//---------------------------------------------//
//Print directory files
//---------------------------------------------//
// Lists the collected directory entries. v.size() returns size_t, which is
// cast before the %d conversion (passing size_t through %d is undefined
// behavior on LP64 platforms).
void printFiles(stringvec v){
    printf("\nDirectory List\nSize: %d\n", (int)v.size());
    for(size_t i = 0; i < v.size(); i++){
        std::cout << "\tName: " << v[i] << "\n";
    }
    printf("\n\n");
}
//---------------------------------------------//
//Extracts Filenames in Directory
//---------------------------------------------//
// Collects full paths of the entries under `name`, skipping ".", ".." and
// "all.txt". Previously a failed opendir() (bad path, permissions) made the
// readdir loop dereference NULL; now the failure is reported and v is left
// untouched.
void read_directory(const std::string& name, stringvec& v)
{
    DIR* dirp = opendir(name.c_str());
    if (dirp == NULL) {
        printf("Could not open directory %s\n", name.c_str());
        return;
    }
    struct dirent * dp;
    while ((dp = readdir(dirp)) != NULL) {
        string temp = dp->d_name;
        string s = name + temp;
        if(temp.compare(".") != 0 && temp.compare("..") != 0 && temp.compare("all.txt") != 0)
            v.push_back(s);
    }
    closedir(dirp);
}
//---------------------------------------------//
//Free Histogram Memory
//---------------------------------------------//
// Releases the malloc'd bin buffer owned by a Histogram
// (free(NULL) is a harmless no-op, so no guard is needed).
void freeHisto(Histogram h){
    free(h.vals);
}
//---------------------------------------------//
//Prints Histo
//Options: 'i' for info, 's' for sifts, 'b' for both
//---------------------------------------------//
// Prints a histogram: 'i' = header only, 's' = bin values only, 'b' = both.
void printHisto(Histogram h, char option){
    const bool showInfo = (option == 'i' || option == 'b');
    const bool showVals = (option == 's' || option == 'b');
    if(showInfo){
        printf("----------------------\nSize: %d\n----------------------\n", h.size);
    }
    if(showVals){
        for(int bin = 0; bin < h.size; ++bin){
            printf("%f ", h.vals[bin]);
        }
    }
}
//---------------------------------------------//
//Free Sift Memory
//---------------------------------------------//
// Releases the malloc'd descriptor buffer owned by a Sift record.
void freeSift(Sift sift){
    free(sift.sift);
}
//---------------------------------------------//
//Prints Sift
//Options: 'i' for info, 's' for sifts, 'b' for both
//---------------------------------------------//
// Prints a Sift record: 'i' = summary header, 's' = raw descriptor values,
// 'b' = both.
void printSift(Sift sift, char option){
    if(option == 'i' || option == 'b'){
        printf("----------------------\nNum of Sifts: %d\nTotalSize: %d floats\n----------------------\n", sift.numSift, sift.totalNum);
    }
    if(option == 's' || option == 'b'){
        const int stride = sift.elementSize;
        for(int d = 0; d < sift.numSift; ++d){
            for(int k = 0; k < stride; ++k){
                printf("%f ", sift.sift[d * stride + k]);
            }
        }
    }
}
//---------------------------------------------//
//Reads sift from file
//---------------------------------------------//
// Reads `lines` descriptors of 128 floats each from a text file and wraps
// them in a heap-backed Sift record (released later via freeSift()).
Sift readSift(char *filename){
    const int lines = countlines(filename);
    const int totalFloats = lines * 128;
    float * buf = (float *)malloc(totalFloats * sizeof(float));
    readFromFile(totalFloats, buf, filename);
    printf("-----Reading-Sift-----\nFilename: %s\nSift Number: %d\n----------------------\n", filename, lines);
    Sift sift = {.sift = buf, .numSift = lines, .totalNum = totalFloats, .elementSize = 128};
    return sift;
}
//---------------------------------------------//
//Creates histogram for single centroid-image pair
//Description: Creates a histogram for an image siftImage based on centroids siftCentroids.
//Input: centroids, image sifts, histogram vector, number of centroids, number of sifts for image
//Output: Histogram of siftImage according to siftCentroids
//---------------------------------------------//
// CPU reference: builds a normalized bag-of-words histogram by assigning each
// image descriptor to its most cosine-similar centroid and counting the
// assignments as fractions of the descriptor total.
// Fix vs. the original: the cosine denominator divided the two vector norms
// (sqrt(a)/sqrt(b)) instead of multiplying them.
Histogram createHist(Sift cent, Sift im){
    float * siftCentroids = cent.sift;
    float * siftImage = im.sift;
    int linesCent = cent.numSift;
    int linesIm = im.numSift;
    float * histo = (float*) malloc(linesCent*sizeof(float));
    //Initializes histogram to 0
    for(int i = 0; i < linesCent; i++){
        histo[i] = 0;
    }
    //For descriptor in image
    for(int i = 0; i < linesIm; i++){
        float bestCos = 0;
        int bestJ = 0;
        //For centroid descriptors, find the one that best matches the image descriptor
        for(int j = 0; j < linesCent; j++){
            int centin = j*128;
            int imin = i*128;
            //Cosine similarity code ------------
            float sumab = 0;
            float suma2 = 0;
            float sumb2 = 0;
            for(int k = 0; k < 128; k++){
                sumab += siftCentroids[centin + k] * siftImage[imin + k];
                suma2 += siftImage[imin + k] * siftImage[imin + k];
                sumb2 += siftCentroids[centin + k] * siftCentroids[centin + k];
            }
            // cos(a,b) = a.b / (|a| * |b|)
            float cossim = sumab/(sqrtf(suma2)*sqrtf(sumb2));
            if(cossim > bestCos){
                bestCos = cossim;
                bestJ = j;
            }
            //Cosine similarity end --------------
        }
        histo[bestJ] += 1;
    }
    //Normalization: bins become fractions of descriptors, summing to 1
    for(int i = 0; i < linesCent; i++){
        histo[i] = histo[i]/linesIm;
    }
    Histogram h = {.vals = histo, .size = linesCent, .filename = ""};
    return h;
}
//---------------------------------------------//
//Reading file into array
//---------------------------------------------//
// Reads `size` whitespace-separated floats from `file` into v, exiting with
// a diagnostic on a missing file (previously an unchecked NULL FILE* was
// passed straight to fscanf) or on premature end of data.
void readFromFile(int size,float *v, char *file){
    FILE *fp = fopen(file,"r");
    if(fp == NULL){
        printf("Could not open file %s\n", file);
        exit(1);
    }
    int i=0;
    float t;
    while(i < size){
        if(fscanf(fp,"%f",&t)==EOF){
            printf("Error reading file\n");
            exit(1);
        }
        v[i++]=t;
    }
    fclose(fp);
}
//---------------------------------------------//
//Counting lines in file
//---------------------------------------------//
// Counts '\n' characters in the file; returns 0 (after a message) when the
// file cannot be opened.
int countlines(char*filename)
{
    FILE *fp = fopen(filename, "r");
    if (fp == NULL)
    {
        printf("Could not open file %s", filename);
        return 0;
    }
    int count = 0; // Line counter (result)
    // getc() returns int; the previous `char c` made the EOF comparison
    // unreliable (infinite loop where char is unsigned, early stop on a
    // 0xFF byte where it is signed).
    int c;
    for (c = getc(fp); c != EOF; c = getc(fp))
        if (c == '\n') // Increment count if this character is newline
            count = count + 1;
    fclose(fp);
    return count;
}
|
12,923 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define THREADS_POR_BLOCO 1024
#define DIM_BLOCO 32
#define DIM_GRID 1024
// Element-wise addition c = a + b over N ints.
__global__ void add(int *a, int *b, int *c, int N){
//int i = threadIdx.y + blockIdx.y*blockDim.y;
//int j = threadIdx.x + blockIdx.x*blockDim.x;
// Flattens the 2D grid and 2D block into one linear index. NOTE(review):
// this hard-codes gridDim.y == DIM_GRID and blockDim == (DIM_BLOCO,
// DIM_BLOCO), matching the launch in main(); other launch shapes would
// alias or skip elements -- confirm if the kernel is reused elsewhere.
int index =
DIM_BLOCO*DIM_BLOCO*(DIM_GRID*blockIdx.x+blockIdx.y)+blockDim.y*threadIdx.x+threadIdx.y;
if(index < N)
c[index] = a[index] + b[index];
}
// Reads matrix dimensions from stdin, fills two linhas x colunas matrices
// with i+j, adds them on the GPU and prints the sum of all result elements.
// NOTE(review): scanf results and CUDA return codes are unchecked, and the
// fixed DIM_GRID^2 x DIM_BLOCO^2 launch must cover N elements -- confirm
// input sizes stay within that bound.
int main()
{
int *A, *B, *C;
int *d_A, *d_B, *d_C;
int i, j;
//Input
int linhas, colunas;
scanf("%d", &linhas);
scanf("%d", &colunas);
//Define the size of the arrays that represent the matrices
int N = linhas*colunas;
int size = sizeof(int)*N;
//Allocate GPU memory
cudaMalloc((void **)&d_A,size);
cudaMalloc((void **)&d_B,size);
cudaMalloc((void **)&d_C,size);
//Allocate CPU memory
A = (int *)malloc(size);
B = (int *)malloc(size);
C = (int *)malloc(size);
//Initialize
for(i = 0; i < linhas; i++){
for(j = 0; j < colunas; j++){
A[i*colunas+j] = B[i*colunas+j] = i+j;
//printf("%d ",A[i*colunas+j]);
}
//printf("\n");
}
//Transfer to GPU memory
// NOTE(review): copying the still-uninitialized C to the device is
// unnecessary -- the kernel overwrites d_C entirely.
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_C, C, size, cudaMemcpyHostToDevice);
//Computation to be moved to the GPU
// Number of blocks = number of rows
// threads per block = number of columns
dim3 dimGrid(DIM_GRID,DIM_GRID);
dim3 dimBlock(DIM_BLOCO,DIM_BLOCO);
add<<<dimGrid,dimBlock>>>(d_A,d_B,d_C,N);
//add<<<(N+THREADS_POR_BLOCO-1)/THREADS_POR_BLOCO,THREADS_POR_BLOCO>>>(d_A,d_B,d_C,N);
// This blocking copy also synchronizes with the kernel.
cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
long long int somador=0;
//Keep this computation on the CPU
for(i = 0; i < linhas; i++){
for(j = 0; j < colunas; j++){
somador+=C[i*colunas+j];
//printf("%d ",C[i*colunas+j]);
}
//printf("\n");
}
printf("%lli\n", somador);
free(A); free(B); free(C);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
|
12,924 | #include <iostream>
#include <vector>
#include <random>
#include <cmath>
// #include <boost/timer/timer.hpp>
// Base class that places derived objects in CUDA unified memory, so a
// pointer to them is valid in both host and device code.
class Managed {
public:
// NOTE(review): never read inside this class, and derived classes below
// declare their own `size` member that shadows it -- confirm it is needed.
size_t size;
// Allocate the object itself in unified memory.
// NOTE(review): the cudaMallocManaged status is unchecked; on failure ptr
// is returned uninitialized -- consider checking before returning.
void *operator new(size_t len) {
void *ptr;
cudaMallocManaged(&ptr, len);
cudaDeviceSynchronize();
return ptr;
}
void operator delete(void *ptr) {
// Drain outstanding GPU work before releasing the memory.
cudaDeviceSynchronize();
cudaFree(ptr);
}
// Block the host until all previously issued GPU work has finished.
void sync() {
cudaDeviceSynchronize();
}
};
// Fixed-size array living in CUDA unified memory, constructible from a host
// std::vector. Fixes vs. the original: realloc_ allocated s * sizeof(float)
// regardless of T (undersized for double or struct elements), and
// reassignment leaked the previous buffer; data is now nullptr-initialized
// in every constructor so the release in realloc_ is always safe.
template <class T>
class CuArray : public Managed
{
public:
    int size;   // element count
    T * data;   // unified-memory buffer of `size` elements
    CuArray() : size(0), data(nullptr)
    {
    }
    // Deep-copies a host vector into unified memory.
    CuArray(std::vector<T> * a) : size(a->size()), data(nullptr)
    {
        // Allocate unified memory
        realloc_(a->size());
        // Copy C array from vector
        memcpy(data, a->data(), a->size() * sizeof(T));
    }
    // Deep copy; each CuArray owns its own buffer.
    CuArray(const CuArray<T> & a) : size(a.size), data(nullptr)
    {
        realloc_(a.size);
        memcpy(data, a.data, a.size * sizeof(T));
    }
    ~CuArray() { cudaFree(data); }
    // Reassignment from a host vector; the old buffer is released.
    CuArray& operator=(std::vector<T> * a)
    {
        size = a->size();
        realloc_(a->size());
        memcpy(data, a->data(), size * sizeof(T));
        return *this;
    }
    // Migrate the buffer (and the size field) toward the current device.
    void prefetch()
    {
        int device = -1;
        cudaGetDevice(&device);
        cudaMemPrefetchAsync(data, size * sizeof(T), device, NULL);
        cudaMemPrefetchAsync(&size, sizeof(int), device, NULL);
    }
    __host__ __device__
    T& operator[](int pos) const { return data[pos]; }
private:
    void realloc_(int s)
    {
        // Release any previous buffer (safe on nullptr) before allocating.
        cudaFree(data);
        // Was sizeof(float): undersized for any T wider than 4 bytes.
        cudaMallocManaged(&data, s * sizeof(T));
        cudaDeviceSynchronize();
    }
};
// Bundles all per-spline constant data (knots, knot gradients, and the
// precomputed knot-cell index of every evaluation point) in unified memory
// so a kernel can dereference a single SplineParams*.
class SplineParams : public Managed
{
public:
    CuArray<float> knotsX;   // knot x positions
    CuArray<float> knotsY;   // knot y values
    CuArray<float> dydxs;    // gradient at each knot
    CuArray<int> cells;      // per-point index of the knot interval to use
    SplineParams() {}
    SplineParams(CuArray<float> knotsX_, CuArray<float> knotsY_, CuArray<float> dydxs_, CuArray<int> cells_)
    : knotsX(knotsX_), knotsY(knotsY_), dydxs(dydxs_), cells(cells_) {}
    // Hint the driver to migrate all member arrays to the current device.
    void prefetch()
    {
        knotsX.prefetch();
        knotsY.prefetch();
        dydxs.prefetch();
        cells.prefetch();
    }
};
// Basically straight from the Laura++ implementation
// Only a loop with trip count nKnots, probably not worth putting it on the GPU
// Fit for y eventually, here just assume they are given.
// Compute the knot gradients of a cubic spline through (x, y) by building
// the standard tridiagonal system (a = sub-, b = main, c = super-diagonal,
// d = right-hand side) and solving it with the Thomas algorithm.
// Preconditions (unchecked): x.size() == y.size() >= 2 and x strictly
// increasing — TODO confirm callers guarantee this.
std::vector<float> calculateGrads(std::vector<float> &x, std::vector<float> &y)
{
    int nKnots = x.size();
    std::vector<float> a(nKnots);
    std::vector<float> b(nKnots);
    std::vector<float> c(nKnots);
    std::vector<float> d(nKnots);
    std::vector<float> grad(nKnots);
    a[0] = 0.;
    c[nKnots - 1] = 0.;
    // Left boundary row
    float xD10 = x[1] - x[0];
    b[0] = 2. / xD10;
    c[0] = 1. / xD10;
    d[0] = 3. * (y[1] - y[0]) / ( xD10 * xD10 );
    // Right boundary row
    float xk12 = x[nKnots - 1] - x[nKnots - 2];
    a[nKnots - 1] = 1. / xk12;
    b[nKnots - 1] = 2. / xk12;
    d[nKnots - 1] = 3. * (y[nKnots - 1] - y[nKnots - 2]) / ( xk12 * xk12 );
    // Interior rows
    for(uint i = 1; i < nKnots - 1; i++) {
        float xDi = x[i] - x[i - 1];
        float xD1i = x[i + 1] - x[i];
        a[i] = 1. / xDi;
        b[i] = 2. / xDi + 2. / xD1i;
        c[i] = 1./ xD1i;
        d[i] = 3. * (y[i] - y[i - 1]) / ( xDi * xDi ) + 3. * (y[i + 1] - y[i]) / ( xD1i * xD1i );
    }
    // Thomas algorithm: forward elimination ...
    c[0] /= b[0];
    d[0] /= b[0];
    for(uint i = 1; i < nKnots - 1; i++) {
        c[i] = c[i] / (b[i] - a[i] * c[i - 1]);
        d[i] = (d[i] - a[i] * d[i - 1]) / (b[i] - a[i] * c[i - 1]);
    }
    d[nKnots - 1] = (d[nKnots - 1] - a[nKnots - 1] * d[nKnots - 2]) / (b[nKnots - 1] - a[nKnots - 1] * c[nKnots - 2]);
    // ... then back substitution.
    grad[nKnots - 1] = d[nKnots - 1];
    for(int i = nKnots - 2; i >= 0; i--) {
        grad[i] = d[i] - c[i] * grad[i + 1];
    }
    return grad;
}
// Evaluate the cubic Hermite spline at each point xs[i] (grid-stride loop).
// params->cells[i] must hold the index of the knot interval containing
// xs[i]; out-of-range cell indices are not checked.
__global__
void evalSplineKern(const int n, const CuArray<float> * xs, const SplineParams * params, CuArray<float> * splineVals)
{
    // All threads handle blockDim.x * gridDim.x
    // consecutive elements (interleaved partitioning)
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride){
        int cell = params->cells[i];
        // // Try and avoid this on GPU if possible -> precompute and save?
        // while( (*xs)[i] > params->knotsX[cell+1] ) {
        //     ++cell;
        // }
        // Might be a slow memory access....
        float xLow = params->knotsX[cell];
        float xHigh = params->knotsX[cell+1];
        float yLow = params->knotsY[cell];
        float yHigh = params->knotsY[cell+1];
        // Normalized position inside the knot interval.
        float t = ((*xs)[i] - xLow) / (xHigh - xLow);
        // Hermite correction terms built from the knot gradients.
        float a = params->dydxs[cell] * (xHigh - xLow) - (yHigh - yLow);
        float b = -1. * params->dydxs[cell+1] * (xHigh - xLow) + (yHigh - yLow);
        (*splineVals)[i] = (1 - t) * yLow + t * yHigh + t * (1 - t) * ( a * (1 - t) + b * t );
    }
}
// Evaluate the spline at every entry of `masses` on the GPU and print the
// first result. cells[i] must be the precomputed knot interval of masses[i].
void calcSplineGPU(std::vector<float> * knotsX,
                   std::vector<float> * knotsY,
                   std::vector<float> * dydxs,
                   std::vector<float> * masses,
                   std::vector<int> * cells)
{
    int n = masses->size();
    // SplineParams lives in unified memory (Managed::operator new).
    SplineParams * splineParams = new SplineParams();
    splineParams->knotsX = knotsX;
    splineParams->knotsY = knotsY;
    splineParams->dydxs = dydxs;
    splineParams->cells = cells;
    CuArray<float> * xs = new CuArray<float>(masses);
    std::vector<float> * splineValsV = new std::vector<float>;
    splineValsV->resize(n);
    std::fill(splineValsV->begin(), splineValsV->end(), 0.0);
    CuArray<float> * splineVals = new CuArray<float>(splineValsV);
    // Migrate everything to the device before launching.
    splineParams->prefetch();
    xs->prefetch();
    splineVals->prefetch();
    int blockSize = 512;
    int numBlocks = (n + blockSize - 1) / blockSize;   // ceil-divide
    {
        // boost::timer::auto_cpu_timer t;
        evalSplineKern<<<numBlocks, blockSize>>>(n, xs, splineParams, splineVals);
        splineParams->sync();
        xs->sync();
        splineVals->sync();
    } // Timer
    std::cout << (*splineVals)[0] << std::endl;
    // FIX: the original leaked xs, splineVals and splineValsV on every call
    // (only splineParams was deleted); also dropped the unused nKnots local.
    delete splineParams;
    delete xs;
    delete splineVals;
    delete splineValsV;
}
// CPU reference implementation of the spline evaluation: returns the cubic
// Hermite interpolant at every mass, using cells[i] as the knot interval.
std::vector<float> calcSplineCPU(std::vector<float> & knotsX,
                                 std::vector<float> & knotsY,
                                 std::vector<float> & dydxs,
                                 std::vector<float> & masses,
                                 std::vector<int> & cells)
{
    std::vector<float> splineVals(masses.size());
    std::cout << splineVals.size() << std::endl;
    for (size_t idx = 0; idx < splineVals.size(); ++idx) {
        const int c = cells[idx];
        const float x0 = knotsX[c];
        const float x1 = knotsX[c + 1];
        const float y0 = knotsY[c];
        const float y1 = knotsY[c + 1];
        // Normalized position inside the knot interval.
        const float t = (masses[idx] - x0) / (x1 - x0);
        // Hermite correction terms from the knot gradients.
        const float a = dydxs[c] * (x1 - x0) - (y1 - y0);
        const float b = -1. * dydxs[c + 1] * (x1 - x0) + (y1 - y0);
        splineVals[idx] = (1 - t) * y0 + t * y1 + t * (1 - t) * (a * (1 - t) + b * t);
    }
    return splineVals;
}
// For each mass, find the index of the knot interval [knotsX[c], knotsX[c+1]]
// that contains it (linear scan; knotsX must be sorted ascending).
// FIX: the original `while (masses[i] > knotsX[cell+1])` scanned past the
// end of knotsX (out-of-bounds read) for any mass above the last knot; the
// cell index is now clamped to the last valid interval.
std::vector<int> calculateCells(std::vector<float> & masses, std::vector<float> & knotsX)
{
    std::vector<int> cells(masses.size());
    const int lastCell = (int)knotsX.size() - 2;   // highest valid interval index
    for (size_t i = 0; i < cells.size(); i++) {
        int cell = 0;
        while (cell < lastCell && masses[i] > knotsX[cell + 1]) {
            cell++;
        }
        cells[i] = cell;
    }
    return cells;
}
// Value of the normal(mu, sigma) probability density function at x.
float normalDist(float mu, float sigma, float x)
{
    const float z = (x - mu) / sigma;
    const float norm = 1. / (sigma * std::sqrt(2 * M_PI));
    return norm * std::exp(-0.5 * z * z);
}
// Build a Gaussian-shaped spline from nKnots knots on [-3, 3) and evaluate
// it on the GPU for n normally distributed samples.
int main(int argc, char const *argv[]) {
    std::default_random_engine generator;
    std::normal_distribution<float> normal(0.0, 1.0);
    int n = 100000;
    int nKnots = 30;
    std::vector<float> data(n);
    for (auto & d : data) d = normal(generator);
    std::vector<float> * knotsX = new std::vector<float>(nKnots);
    std::vector<float> * knotsY = new std::vector<float>(nKnots);
    // NOTE(review): `masses` is allocated but never filled — it stays all
    // zeros; confirm whether `data` was meant to be copied in.
    std::vector<float> * masses = new std::vector<float>(n);
    float startKnot = -3.0;
    float endKnot = 3.0;
    float stepSize = (endKnot - startKnot) / nKnots;
    for (int i = 0; i < nKnots; i++) {
        (*knotsX)[i] = startKnot + i * stepSize;
        (*knotsY)[i] = (n / nKnots) * normalDist(0.0, 1.0, (*knotsX)[i]);
    }
    // NOTE(review): cells are computed from the zero-filled `masses`, but
    // the GPU evaluation below runs on `data` — the cell indices therefore
    // do not correspond to the evaluation points; verify intent.
    std::vector<int> cells = calculateCells(*masses, *knotsX);
    std::vector<float> dydxs = calculateGrads(*knotsX, *knotsY);
    {
        // boost::timer::auto_cpu_timer t;
        calcSplineGPU(knotsX, knotsY, &dydxs, &data, &cells);
        // calcSplineCPU(*knotsX, *knotsY, dydxs, data, cells);
    }
    // NOTE(review): knotsX, knotsY and masses are leaked.
    return 0;
}
|
12,925 | #include "includes.h"
// Build a per-key offset table from a sorted key array:
//   offset[k+1]         = index of the first element whose key is k,
//   offset[0]           = lastKey + 1,
//   offset[lastKey + 2] = size (end sentinel).
// Assumes `key` is sorted ascending, starts at key 0 (thread 0 writes
// offset[1] unconditionally), and that key values stay small enough that
// offset[keyVal + 2] is in bounds — TODO confirm with the caller.
// NOTE(review): the `idx` parameter is never used.
__global__ void kernel_offset(int *key, int *idx, int *offset, int size) {
    int idxX = threadIdx.x + blockIdx.x*blockDim.x;
    if(idxX == 0) {
        offset[1] = 0;
    }
    else if(idxX < size) {
        int keyVal = key[idxX];
        int keyValPrev = key[idxX-1];
        // Segment boundary: this element starts a new key run.
        if(keyVal != keyValPrev) {
            offset[keyVal+1] = idxX;
        }
    }
    // The last element records the key count and the end sentinel.
    if(idxX == size-1) {
        int keyVal = key[idxX];
        offset[0] = keyVal+1;
        offset[keyVal+2] = size;
    }
}
12,926 | #include <stdio.h>
#include <stdlib.h>
#define CSC(call) do { \
cudaError_t e = call; \
if (e != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(0); \
} \
} while(0)
#define MAX_R 1024
// Replace every element of dVector with its absolute value.
// Grid-stride loop: correct for any launch configuration.
__global__ void kernel(double* dVector, int num)
{
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < num; i += stride)
    {
        if (dVector[i] < 0)
        {
            dVector[i] = -dVector[i];
        }
    }
}
// Owns a host buffer and its device mirror of n doubles.
class CUVector {
public:
    const int n;       // element count, fixed at construction
    int size;          // byte size of both buffers
    double* hVector;   // host copy
    double* dVector;   // device copy
    CUVector(int _n): n(_n) {
        size = sizeof(double) * n;
        hVector= (double*)malloc(size);
        // FIX: fail fast if the host allocation failed instead of crashing
        // later inside init().
        if (hVector == NULL) {
            fprintf(stderr, "host malloc of %d bytes failed\n", size);
            exit(0);
        }
        CSC(cudaMalloc(&dVector, size));
    }
    // Read n doubles from stdin and upload them to the device.
    void init() {
        for (int i = 0; i < n; ++i)
            scanf("%lf", &hVector[i]);
        CSC(cudaMemcpy(dVector, hVector, size, cudaMemcpyHostToDevice));
    }
    // Launch the abs-value kernel and copy the result back to the host.
    void run_kernel() {
        kernel<<<256, 256>>>(dVector, n);
        // FIX: surface launch-configuration errors explicitly; the original
        // only caught them indirectly through the following memcpy.
        CSC(cudaGetLastError());
        CSC(cudaMemcpy(hVector, dVector, size, cudaMemcpyDeviceToHost));
    }
    void print() {
        for (int i = 0; i < n; ++i)
            printf("%.10e ", hVector[i]);
        printf("\n");
    }
    ~CUVector(){
        CSC(cudaFree(dVector));
        free(hVector);
    }
};
__constant__ float a[MAX_R * 2 + 1];
// Read n, then n doubles; take absolute values on the GPU and print them.
int main(void)
{
    int n = 0;
    scanf("%d", &n);
    CUVector v(n);
    v.init();
    v.run_kernel();
    v.print();
    return 0;
}
|
12,927 | #include <stdio.h>
#include<stdlib.h>
#define N 1000
#define MAX_ERR 1e-6
// Store the sum of the two scalar arguments into *a.
__global__ void add(int *a, int b, int c) {
    const int sum = b + c;
    *a = sum;
}
// Query device info, then compute 3 + 5 on the GPU and print the result.
int main(){
    // NOTE(review): cudaGetDevice returns the CURRENT device ordinal, not a
    // device count — the variable name `count` is misleading.
    int count = 0;
    cudaGetDevice(&count);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop,count);
    // Choose the device best matching the properties just read back.
    int code = cudaChooseDevice(&count,&prop);
    printf("%d,%d\n",code,count);
    int a = 0;
    int *dev_a;
    // NOTE(review): no status checks on the allocation, launch or copy below.
    cudaMalloc((void **)&dev_a,sizeof(int));
    add<<<1,1>>>(dev_a,3,5);
    cudaDeviceSynchronize();
    cudaMemcpy(&a,dev_a, sizeof(int),cudaMemcpyDeviceToHost);
    printf("3 + 5 = %d\n",a);
    cudaFree(dev_a);
    return 0;
}
|
12,928 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
// Serially fill the pitched matrix with 0, 1, 2, ... in row-major order.
// NOTE(review): every launched thread executes this entire serial loop, so
// all threads redundantly write the same values to the same locations; a
// <<<1,1>>> launch would suffice (or the loops should be parallelized).
// NOTE(review): dev_matrix is declared float** but is used only as the base
// address of a pitched allocation — float* would be the honest type.
__global__ void MyKernel(float** dev_matrix, size_t pitch, int width, int height)
{
    int number = 0;
    for (int i = 0; i < height; ++i)
    {
        // Step to row i via the pitch (row stride in bytes).
        float* row = (float*)((char*)dev_matrix + i*pitch);
        for (int j = 0; j < width; ++j)
        {
            row[j] = number;
            number++;
        }
    }
}
// Zero a small host matrix, fill it on the GPU via a pitched allocation,
// and print it before and after.
int main (int argc , char * argv [])
{
    int width = 4, height = 2, i, j;
    // float matrix[width][height];
    float matrix[height][width];
    // NOTE(review): dev_matrix should be float*; &dev_matrix is float***
    // where cudaMallocPitch expects void** — the pitched base address ends
    // up stored in a float**, which MyKernel then casts back to char*.
    float **dev_matrix;
    size_t pitch;
    printf("\nMATRIX MANIPULATION\n");
    for (i = 0; i < height; i++)
        for (j = 0; j < width; j++)
            matrix[i][j] = 0.0;
    printf("Matrix in host memory\n");
    for (i = 0; i < height; i++)
    {
        for (j = 0; j < width; j++)
            printf("%f ", matrix[i][j]);
        printf("\n");
    }
    cudaMallocPitch(&dev_matrix, &pitch, width * sizeof(float), height);
    cudaMemcpy2D(dev_matrix, pitch, matrix, width * sizeof(float), width * sizeof(float), height, cudaMemcpyHostToDevice);
    // NOTE(review): MyKernel fills the whole matrix serially per thread, so
    // this 4-thread launch does the same work four times; no error checks.
    MyKernel<<<2, 2>>>(dev_matrix, pitch, width, height);
    cudaMemcpy2D(matrix, width * sizeof(float), dev_matrix, pitch, width * sizeof(float), height, cudaMemcpyDeviceToHost);
    printf("Matrix after calculate elements in the gpu\n");
    for (i = 0; i < height; i++)
    {
        for (j = 0; j < width; j++)
            printf("%f ", matrix[i][j]);
        printf("\n");
    }
    cudaFree(dev_matrix);
    return 0;
}
|
12,929 | #include <iostream>
using namespace std;
// Element-wise C[i] = A[i] + B[i].
// NOTE(review): indexes by threadIdx.x only (blockIdx is ignored), so the
// kernel touches at most blockDim.x elements regardless of grid size, and
// there is no bounds guard against the array length.
__global__ void VecAdd(float *A, float *B, float *C){
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}
// Fill two large host vectors with pseudo-random values, add them on the
// GPU and print element 100.
int main(){
    float *A, *B, *C, *devA, *devB, *devC;
    int n = 100000000;
    A = (float*)malloc(n * sizeof(float));
    B = (float*)malloc(n * sizeof(float));
    C = (float*)malloc(n * sizeof(float));
    cudaMalloc(&devA, n * sizeof(float));
    cudaMalloc(&devB, n * sizeof(float));
    cudaMalloc(&devC, n * sizeof(float));
    for(int i = 0;i < n;i ++){
        A[i] = float(rand()) + float(rand()%100);
        B[i] = float(rand()%20) + float(rand() + 32) + 93.23;
    }
    cudaMemcpy(devA, A, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(devB, B, n * sizeof(float), cudaMemcpyHostToDevice);
    // NOTE(review): VecAdd indexes by threadIdx.x only, so this single-block
    // launch computes just the first 1024 sums; C[100] below is among them,
    // but the rest of devC stays uninitialized.
    VecAdd<<<1, 1024>>>(devA, devB, devC);
    cudaMemcpy(C, devC, n * sizeof(float), cudaMemcpyDeviceToHost);
    cout << endl << C[100];
    // FIX: release the ~1.2 GB of host and device memory the original
    // leaked, and return an explicit status.
    cudaFree(devA); cudaFree(devB); cudaFree(devC);
    free(A); free(B); free(C);
    return 0;
}
12,930 | #include <stdio.h>
#include <cuda.h>
// Kernel that executes on the CUDA device
// Square each of the N elements of a in place; one thread per element,
// with a tail guard for the final partial block.
__global__ void square_array(double *a, int N)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= N) return;
    a[tid] *= a[tid];
}
// Launch square_array over the device buffer a_d (N elements) and report
// any launch error.
void run_square_array(double* a_d, int N)
{
    int block_size = 4;
    // Ceil-divide so a partial tail block covers the remainder.
    int n_blocks = (N + block_size - 1) / block_size;
    square_array<<<n_blocks,block_size>>>(a_d,N);
    cudaError_t code = cudaGetLastError();
    // FIX: only report failures — the original printed
    // "Poseidon_kernel error: no error" on every successful call.
    if (code != cudaSuccess)
        printf("Poseidon_kernel error: %s\n", cudaGetErrorString(code));
}
|
12,931 | #include "includes.h"
// Element-wise product y[i] = a[i] * b[i] over n elements (grid-stride loop).
__global__ void mul_kernel(const int n, const float *a, const float *b, float *y) {
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int idx = first; idx < n; idx += step) {
        y[idx] = a[idx] * b[idx];
    }
}
12,932 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
int error(int* device_a, int* device_b, int* device_c);
// Element-wise c[i] = a[i] + b[i]; one thread per element, no bounds guard
// (the launch must match the array size).
__global__ void add(int *c, const int *a, const int *b)
{
    // FIX: the labels and arguments were swapped — the original printed
    // threadIdx.x under "Block" and blockIdx.x under "Thread".
    printf("Block: %d, Thread: %d, Block Dim: %d\n", blockIdx.x, threadIdx.x, blockDim.x);
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Add two fixed 7-element arrays on the GPU and print the result.
int main()
{
    cudaError_t cudaStatus;
    const int arraySize = 7;
    const int a[arraySize] = { 1, 2, 3, 5, 7, 11, 13 };
    const int b[arraySize] = { 1, 2, 3, 5, 8, 13, 21 };
    int c[arraySize] = { 0 };
    int* device_a = 0;
    int* device_b = 0;
    int* device_c = 0;
    // Select the device.
    cudaStatus = cudaSetDevice(0);
    // Allocate GPU memory.
    // NOTE(review): every assignment below overwrites cudaStatus without
    // checking it; only the status of the LAST memcpy is inspected, so the
    // "cudaSetDevice falhou!" message can be misleading about which call
    // actually failed.
    cudaStatus = cudaMalloc((void**)&device_a, arraySize * sizeof(int));
    cudaStatus = cudaMalloc((void**)&device_b, arraySize * sizeof(int));
    cudaStatus = cudaMalloc((void**)&device_c, arraySize * sizeof(int));
    cudaStatus = cudaMemcpy(device_a, a, arraySize * sizeof(int), cudaMemcpyHostToDevice);
    cudaStatus = cudaMemcpy(device_b, b, arraySize * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        printf("cudaSetDevice falhou!");
        return error(device_a, device_b, device_c);
    }
    add<<<1, arraySize>>>(device_c, device_a, device_b);
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        printf("add function failed: %s\n", cudaGetErrorString(cudaStatus));
        return error(device_a, device_b, device_c);
    }
    // NOTE(review): the sync and device-to-host copy statuses are unchecked.
    cudaStatus = cudaDeviceSynchronize();
    cudaStatus = cudaMemcpy(c, device_c, arraySize * sizeof(int), cudaMemcpyDeviceToHost);
    printf("{ 1, 2, 3, 5, 7, 11, 13 } + { 1, 2, 3, 5, 8, 13, 21 } = {%d, %d, %d, %d, %d, %d, %d}\n",
           c[0], c[1], c[2], c[3], c[4], c[5], c[6]);
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    return 0;
}
// Free all device buffers and return the failure exit code.
int error(int* device_a, int* device_b, int* device_c) {
    cudaFree(device_c);
    cudaFree(device_b);
    cudaFree(device_a);
    return 1;
}
|
12,933 | /*
* Author: scps950707
* Email: scps950707@gmail.com
* Created: 2017-12-04 12:09
* Last Modified: 2017-12-04 18:42
* Filename: wave.cu
* description: Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
// Abort with a diagnostic if a CUDA API call returned an error.
static void HandleError( cudaError_t err, const char *file, int line )
{
    if ( err == cudaSuccess )
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
int nsteps;
/* number of time steps */
int tpoints;
/* total points along string */
float H_currVal[MAXPOINTS + 2];
/* values at time t */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
// Prompt on stdin until the file-scope globals tpoints and nsteps are
// within their allowed ranges.
void check_param( void )
{
    char tchar[20];
    /* check number of points, number of iterations */
    while ( ( tpoints < MINPOINTS ) || ( tpoints > MAXPOINTS ) )
    {
        printf( "Enter number of points along vibrating string [%d-%d]: "
                , MINPOINTS, MAXPOINTS );
        /* FIX: bound the read to 19 chars + NUL — a plain %s could overflow
         * the 20-byte buffer on long input. */
        scanf( "%19s", tchar );
        tpoints = atoi( tchar );
        if ( ( tpoints < MINPOINTS ) || ( tpoints > MAXPOINTS ) )
            printf( "Invalid. Please enter value between %d and %d\n",
                    MINPOINTS, MAXPOINTS );
    }
    while ( ( nsteps < 1 ) || ( nsteps > MAXSTEPS ) )
    {
        printf( "Enter number of time steps [1-%d]: ", MAXSTEPS );
        scanf( "%19s", tchar );
        nsteps = atoi( tchar );
        if ( ( nsteps < 1 ) || ( nsteps > MAXSTEPS ) )
        {
            printf( "Invalid. Please enter value between 1 and %d\n", MAXSTEPS );
        }
    }
    printf( "Using points = %d, steps = %d\n", tpoints, nsteps );
}
/**********************************************************************
* initialize points on line
* Update all values along line a specified number of times
*********************************************************************/
// One thread per string point (1-based index j in the value arrays):
// initialize the point from a sine curve, then advance it nsteps time
// steps. The update uses only this point's own current and previous values
// (hard-coded coefficient 0.09), so no inter-thread communication is
// needed; endpoints are pinned to zero.
__global__ void initAndUpdate( float *D_oldVal, float *D_currVal, int tpoints, int nsteps )
{
    int j = blockDim.x * blockIdx.x + threadIdx.x;
    if ( j < tpoints )
    {
        // Shift to the 1-based indexing used by the value arrays.
        j += 1;
        /* Calculate initial values based on sine curve */
        /* Initialize old values array */
        float x = ( float )( j - 1 ) / ( tpoints - 1 );
        D_oldVal[j] = D_currVal[j] = sin ( 6.2831853f * x );
        int i;
        /* global endpoints */
        if ( ( j == 1 ) || ( j == tpoints ) )
        {
            D_currVal[j] = 0.0;
        }
        else
        {
            /* Update values for each time step */
            for ( i = 1; i <= nsteps; i++ )
            {
                /* Update old values with new values */
                float newVal = ( 2.0 * D_currVal[j] ) - D_oldVal[j] + ( 0.09f * ( -2.0 ) * D_currVal[j] );
                D_oldVal[j] = D_currVal[j];
                D_currVal[j] = newVal;
            }
        }
    }
}
/**********************************************************************
* Print final results
*********************************************************************/
// Print the final wave values (globals H_currVal[1..tpoints]), ten per line.
void printfinal()
{
    for ( int i = 1; i <= tpoints; i++ )
    {
        printf( "%6.4f ", H_currVal[i] );
        if ( i % 10 == 0 )
            printf( "\n" );
    }
}
/**********************************************************************
* Main program
*********************************************************************/
// Parse tpoints/nsteps from the command line (or prompt for them), run the
// wave simulation on the GPU and print the final string values.
int main( int argc, char *argv[] )
{
    /* Error code to check return values for CUDA calls */
    /* FIX: guard argv access — the original dereferenced argv[1]/argv[2]
     * unconditionally and crashed when run without arguments. With missing
     * arguments the globals stay 0 and check_param() prompts instead. */
    if ( argc > 2 )
    {
        sscanf( argv[1], "%d", &tpoints );
        sscanf( argv[2], "%d", &nsteps );
    }
    check_param();
    int threadsPerBlock = 256;
    int blocksPerGrid = ( tpoints + threadsPerBlock - 1 ) / threadsPerBlock;
    float *D_currVal, *D_oldVal;
    HANDLE_ERROR( cudaMalloc( ( void ** )&D_currVal, sizeof( float ) * ( tpoints + 2 ) ) );
    HANDLE_ERROR( cudaMalloc( ( void ** )&D_oldVal, sizeof( float ) * ( tpoints + 2 ) ) );
    printf( "Initializing points on the line...\n" );
    printf( "Updating all points for all time steps...\n" );
#if __DEBUG__
    clock_t t = clock();
#endif
    initAndUpdate <<<blocksPerGrid, threadsPerBlock>>>( D_oldVal, D_currVal, tpoints, nsteps );
    HANDLE_ERROR( cudaMemcpy( H_currVal, D_currVal, sizeof( float ) * ( tpoints + 2 ), cudaMemcpyDeviceToHost ) );
#if __DEBUG__
    t = clock() - t;
#endif
    printf( "Printing final results...\n" );
    printfinal();
    printf( "\nDone.\n\n" );
#if __DEBUG__
    printf( "time:%f\n", ( float )t / CLOCKS_PER_SEC );
#endif
    HANDLE_ERROR( cudaFree( D_currVal ) );
    HANDLE_ERROR( cudaFree( D_oldVal ) );
    return EXIT_SUCCESS;
}
|
12,934 | #include "BasicTool.cuh"
//Ƕת
// Convert an angle from degrees to radians.
double AngleToRad(double angle)
{
    return angle * PI / 180;
}
//
// Euclidean distance between two 3-D points.
float CalculateDistanceOfBipartite(Point a,Point b)
{
    const float dx = a.x - b.x;
    const float dy = a.y - b.y;
    const float dz = a.z - b.z;
    return sqrt(dx*dx + dy*dy + dz*dz);
}
// Normalize the 3-vector (*a, *b, *c) to unit length in place.
// NOTE(review): divides by zero for a zero-length input — confirm callers
// never pass the null vector.
void UitizeVector(float *a,float *b,float *c)
{
    const float len = sqrt((*a) * (*a) + (*b) * (*b) + (*c) * (*c));
    *a /= len;
    *b /= len;
    *c /= len;
}
// Reflect direction d about the unit normal n: r = d - 2*dot(d, n)*n.
Point GetReflectedVector(Point d,Point n)
{
    const float twoDot = 2*(d.x*n.x + d.y*n.y + d.z*n.z);
    Point r;
    r.x = d.x - twoDot*n.x;
    r.y = d.y - twoDot*n.y;
    r.z = d.z - twoDot*n.z;
    return r;
}
// Intersect the incident ray with every face (Cramer's-rule ray/triangle
// test), keep the nearest hit with parameter t in (1, 50000), and return
// the reflected ray at that hit point. On a hit: *flag = 1 and
// *reflectedFace = hit face index. Returns an all-zero ray when no face is
// hit (callers must check *flag).
// NOTE(review): the `remain < 8` filter skips faces whose index mod 12 is
// 8..11 — presumably excludes particular faces of 12-triangle primitives;
// confirm against the geometry builder.
Ray CalculateReflectedRayOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag)
{
    Ray reflectedRay;
    Point defaultPoint;
    defaultPoint.x=0;defaultPoint.y=0;defaultPoint.z=0;
    reflectedRay.originalPoint=defaultPoint;
    reflectedRay.direction=defaultPoint;
    float t=50000;   // nearest hit distance so far; doubles as the max range
    for (int i=0;i<faceCount;i++)
    {
        // Solve origin + t*dir = A + u*(B-A) + v*(C-A) via 3x3 Cramer's rule.
        float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
        float b1=face[i]. B.x-face[i].A.x,b2=face[i].B.y-face[i].A.y,b3=face[i].B.z-face[i].A.z;
        float c1=face[i]. C.x-face[i].A.x,c2=face[i].C.y-face[i].A.y,c3=face[i].C.z-face[i].A.z;
        float x1=incidentRay.originalPoint.x-face[i].A.x,x2=incidentRay.originalPoint.y-face[i].A.y,x3=incidentRay.originalPoint.z-face[i].A.z;
        float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
        float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
        float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
        float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
        // Skip faces nearly parallel to the ray (degenerate determinant).
        if (abs(denominator)>0.000001)
        {
            float u=u_numerator/denominator;
            float v=v_numerator/denominator;
            int remain=i%12;
            // Nearer hit, barycentric coords inside the triangle, t > 1,
            // and face not excluded by the mod-12 filter.
            if(t_numerator/denominator<t&&t_numerator/denominator>1&&u>=0&&u<=1&&v>0&&(u+v)<1&&v<=1&&remain<8)
            {
                *flag=1;
                *reflectedFace=i;
                t=t_numerator/denominator;
                reflectedRay.originalPoint.x=u*b1+v*c1+face[i].A.x;
                reflectedRay.originalPoint.y=u*b2+v*c2+face[i].A.y;
                reflectedRay.originalPoint.z=u*b3+v*c3+face[i].A.z;
                // Face normal = (B-A) x (C-A), normalized before reflecting.
                Point n;
                n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
                UitizeVector(&n.x,&n.y,&n.z);
                reflectedRay.direction=GetReflectedVector(incidentRay.direction,n);
            }
        }
    }
    return reflectedRay;
}
// Reflect the incident ray off the supporting plane of a single face,
// WITHOUT requiring the hit point to lie inside the triangle (u and v are
// unconstrained) — a "virtual" reflection. Only t > 1 and a non-degenerate
// determinant are required; returns an all-zero ray otherwise.
Ray CalculateVirtualReflectedRayOnCPU(Ray incidentRay,Face face)
{
    Ray reflectedRay;
    Point defaultPoint;
    defaultPoint.x=0;defaultPoint.y=0;defaultPoint.z=0;
    reflectedRay.originalPoint=defaultPoint;
    reflectedRay.direction=defaultPoint;
    float t=50000;   // max ray range
    // Same Cramer's-rule ray/plane setup as the triangle test.
    float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
    float b1=face.B.x-face.A.x,b2=face.B.y-face.A.y,b3=face.B.z-face.A.z;
    float c1=face.C.x-face.A.x,c2=face.C.y-face.A.y,c3=face.C.z-face.A.z;
    float x1=incidentRay.originalPoint.x-face.A.x,x2=incidentRay.originalPoint.y-face.A.y,x3=incidentRay.originalPoint.z-face.A.z;
    float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
    float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
    float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
    float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
    if (abs(denominator)>0.000001)
    {
        float u=u_numerator/denominator;
        float v=v_numerator/denominator;
        if(t_numerator/denominator<t&&t_numerator/denominator>1)
        {
            t=t_numerator/denominator;
            reflectedRay.originalPoint.x=u*b1+v*c1+face.A.x;
            reflectedRay.originalPoint.y=u*b2+v*c2+face.A.y;
            reflectedRay.originalPoint.z=u*b3+v*c3+face.A.z;
            // Plane normal = (B-A) x (C-A), normalized before reflecting.
            Point n;
            n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
            UitizeVector(&n.x,&n.y,&n.z);
            reflectedRay.direction=GetReflectedVector(incidentRay.direction,n);
        }
    }
    return reflectedRay;
}
// Same nearest-hit ray/triangle reflection as CalculateReflectedRayOnCPU,
// but tests EVERY face (no mod-12 index filter) — used for terrain meshes.
// On a hit: *flag = 1, *reflectedFace = face index; returns an all-zero
// ray when nothing is hit.
Ray CalculateReflectedRayWithTerrainOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag)
{
    Ray reflectedRay;
    Point defaultPoint;
    defaultPoint.x=0;defaultPoint.y=0;defaultPoint.z=0;
    reflectedRay.originalPoint=defaultPoint;
    reflectedRay.direction=defaultPoint;
    float t=50000;   // nearest hit distance so far; doubles as max range
    for (int i=0;i<faceCount;i++)
    {
        // Cramer's-rule ray/triangle intersection (see sibling function).
        float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
        float b1=face[i].B.x-face[i].A.x,b2=face[i].B.y-face[i].A.y,b3=face[i].B.z-face[i].A.z;
        float c1=face[i].C.x-face[i].A.x,c2=face[i].C.y-face[i].A.y,c3=face[i].C.z-face[i].A.z;
        float x1=incidentRay.originalPoint.x-face[i].A.x,x2=incidentRay.originalPoint.y-face[i].A.y,x3=incidentRay.originalPoint.z-face[i].A.z;
        float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
        float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
        float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
        float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
        if (abs(denominator)>0.000001)
        {
            float u=u_numerator/denominator;
            float v=v_numerator/denominator;
            // Nearer hit with barycentric coordinates inside the triangle.
            if(t_numerator/denominator<t&&t_numerator/denominator>1&&u>=0&&u<=1&&v>0&&(u+v)<1&&v<=1)
            {
                *flag=1;
                *reflectedFace=i;
                t=t_numerator/denominator;
                reflectedRay.originalPoint.x=u*b1+v*c1+face[i].A.x;
                reflectedRay.originalPoint.y=u*b2+v*c2+face[i].A.y;
                reflectedRay.originalPoint.z=u*b3+v*c3+face[i].A.z;
                Point n;
                n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
                UitizeVector(&n.x,&n.y,&n.z);
                reflectedRay.direction=GetReflectedVector(incidentRay.direction,n);
            }
        }
    }
    return reflectedRay;
}
//߽ ⷢ㣩
// Intersection point of two rays. The scalar triple product `det` of
// (d1, -d2, o2-o1) is ~0 iff the two lines are coplanar; only then is a
// 2-D subsystem solved for the parameter u along ray1. Returns (0,0,0)
// when the rays are skew.
// NOTE(review): when the first 2x2 system is degenerate the function
// returns ray1's origin — confirm that fallback is intended for
// parallel/coincident rays.
Point GetIntersectionOfRays(Ray ray1,Ray ray2)
{
    float a1=ray1.direction.x,a2=ray1.direction.y,a3=ray1.direction.z;
    float b1=-ray2.direction.x,b2=-ray2.direction.y,b3=-ray2.direction.z;
    float c1=ray2.originalPoint.x-ray1.originalPoint.x,c2=ray2.originalPoint.y-ray1.originalPoint.y,c3=ray2.originalPoint.z-ray1.originalPoint.z;
    float det=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
    Point intersection;
    intersection.x=0;intersection.y=0;intersection.z=0;
    if(abs(det)<0.001)
    {
        // Coplanar: solve the 2x2 system in the x/y components.
        if (abs(c1*b2-c2*b1)<0.001)
        {
            intersection.x=ray1.originalPoint.x;
            intersection.y=ray1.originalPoint.y;
            intersection.z=ray1.originalPoint.z;
        }
        else if ((abs(b2*a1-b1*a2)>0.00001))
        {
            float u=(c1*b2-c2*b1)/(b2*a1-b1*a2);
            //float v=(c2*a1-c1*a2)/(b2*a1-b1*a2);
            intersection.x=ray1.originalPoint.x+u*a1;
            intersection.y=ray1.originalPoint.y+u*a2;
            intersection.z=ray1.originalPoint.z+u*a3;
        }
    }
    else
    {
        // Skew rays: no intersection; return the origin sentinel.
        intersection.x=0;intersection.y=0;intersection.z=0;
    }
    return intersection;
}
// Exact component-wise equality of two points (no epsilon tolerance).
bool JudgePointEqual(Point a, Point b)
{
    return (a.x == b.x) && (a.y == b.y) && (a.z == b.z);
}
// Swap the contents of the two rays.
void ExchangeTwoRay(Ray *ray1, Ray *ray2)
{
    Ray saved = *ray2;
    *ray2 = *ray1;
    *ray1 = saved;
}
12,935 | #include <iostream>
#include <string>
#include <math.h>
#include <stdio.h>
// Mark run starts for RLE: mask[i] = 1 where input[i] differs from
// input[i-1]; mask[0] is always 1.
// NOTE(review): work division looks inconsistent — thread 0 covers indices
// stride, 2*stride, ... up to n, while all other threads stop at n-2, so
// element n-1 is only written when (n-1) is a multiple of stride; the
// second loop's bound should presumably be i < n. Verify.
__global__ void backWardMask(char * input, int * mask, int n) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    if(index == 0) {
        mask[0] = 1;
        for(int i = index + stride; i < n; i+=stride) {
            mask[i] = input[i] == input[i - 1] ? 0 : 1;
        }
        return;
    }
    for(int i = index; i < n - 1; i+=stride) {
        mask[i] = input[i] == input[i - 1] ? 0 : 1;
    }
}
// NVIDIA's upsweep and downsweep prefix sum (prescan)
// TO-DO -> eliminate bank conflicts
// Blelloch-style work-efficient scan over one 2*blockDim.x-element tile,
// shifted at the end so g_odata receives the INCLUSIVE scan; each tile's
// total is written to blockSums[blockIdx.x].
// REQUIRES the launch to pass 2*blockDim.x*sizeof(int) of dynamic shared
// memory for the `extern __shared__` array.
// NOTE(review): the load indexing looks wrong for blockDim.x > 1 — thread
// t loads g_idata[index] and g_idata[index+1] with index = 2*blockIdx.x*
// blockDim.x + t, so consecutive threads load OVERLAPPING elements and the
// upper half of the tile is never loaded; the canonical form uses
// index+blockDim.x (or 2*t). Verify against the caller's expectations.
// NOTE(review): g_idata[index+1] and the two final stores are guarded only
// by index < n (or not at all), so the last tile can read/write one past
// the end; the final shift reads temp[2*thid+2] while neighbors rewrite it
// with no __syncthreads() in between (race).
__global__ void prescan(int *g_odata, int *g_idata, int * blockSums, int n) {
    extern __shared__ int temp[];
    int thid = threadIdx.x;
    int index = 2 * blockIdx.x * blockDim.x + threadIdx.x;
    int offset = 1;
    temp[2*thid] = 0;
    temp[2*thid+1] = 0;
    if(index < n) {
        temp[2*thid] = g_idata[index];
        temp[2*thid+1] = g_idata[index+1];
    }
    // build sum in place up the tree (upsweep)
    for (int d = 2*blockDim.x>>1; d > 0; d >>= 1) {
        __syncthreads();
        if (thid < d) {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;
            temp[bi] += temp[ai];
        }
        offset *= 2;
    }
    // clear the last element (root), saving the tile total first
    if (thid == 0) {
        blockSums[blockIdx.x] = temp[2*blockDim.x - 1];
        temp[2*blockDim.x - 1] = 0;
    }
    // traverse down tree & build scan (downsweep)
    for (int d = 1; d < 2*blockDim.x; d *= 2) {
        offset >>= 1;
        __syncthreads();
        if (thid < d) {
            int ai = offset*(2*thid+1)-1;
            int bi = offset*(2*thid+2)-1;
            int t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }
    __syncthreads();
    // Shift the exclusive scan left by one to make it inclusive.
    temp[2*thid] = temp[2*thid + 1];
    if(thid == blockDim.x - 1) {
        temp[2*thid + 1] = blockSums[blockIdx.x];
    } else {
        temp[2*thid + 1] = temp[2*thid + 2];
    }
    g_odata[index] = temp[2*thid];
    g_odata[index+1] = temp[2*thid+1];
}
// Add the scanned per-block totals to every element of all blocks after
// the first, completing the multi-block prefix sum.
// NOTE(review): the caller launches this with 2048 threads per block,
// which exceeds the 1024-thread hardware limit — verify the launch.
__global__ void addOffsets(int * preScannedMask, int * blockScan) {
    if (blockIdx.x == 0) return;
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    preScannedMask[i] += blockScan[blockIdx.x - 1];
}
// Scatter run boundaries: wherever the inclusive-scanned mask increases, a
// new run starts at i; the final element also stores the end sentinel and
// the total run count.
__global__ void compactKernel(int * scannedMask, int * compactedMask, int * totalRuns, int n) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    if (index == 0) {
        compactedMask[0] = 0;   // the first run always starts at 0
    }
    for (int i = index; i < n; i+=stride) {
        if (i == (n - 1)) {
            compactedMask[scannedMask[i]] = i + 1;   // end sentinel
            *totalRuns = scannedMask[i];
        }
        // FIX: guard i > 0 — the original read scannedMask[i-1] at i == 0,
        // an out-of-bounds access before the start of the array.
        if (i > 0 && scannedMask[i] != scannedMask[i - 1]) {
            compactedMask[scannedMask[i] - 1] = i;
        }
    }
}
// Emit (symbol, run length) pairs from the compacted run-boundary table.
__global__ void scatterKernel(int * compactedMask, int * totalRuns, int * in, int * symbolsOut, int * countsOut) {
    const int runs = *totalRuns;
    const int step = blockDim.x * gridDim.x;
    for (int r = blockIdx.x * blockDim.x + threadIdx.x; r < runs; r += step) {
        const int start = compactedMask[r];
        const int end = compactedMask[r + 1];
        symbolsOut[r] = in[start];
        countsOut[r] = end - start;
    }
}
// Build the RLE backward mask and its prefix scan for the string given on
// the command line, then print the input characters and the mask.
int main(int argc, char ** argv) {
    // FIX: guard argv — the original dereferenced argv[1] unconditionally.
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <string>" << std::endl;
        return 1;
    }
    char * in = argv[1];
    int input_size = (int)strlen(argv[1]);
    // GPU (managed) buffers
    int * mask;
    int * scannedMask;
    char * input;
    int * block_sums;
    int * scannedBlockSums;
    int * bs;
    int gridSize = (input_size + 1024 - 1) / 1024;   // ceil-divide
    cudaMallocManaged(&input, input_size * sizeof(char));
    cudaMallocManaged(&mask, input_size * sizeof(int));
    cudaMallocManaged(&scannedMask, input_size * sizeof(int));
    cudaMallocManaged(&block_sums, gridSize * sizeof(int));
    cudaMallocManaged(&scannedBlockSums, gridSize * sizeof(int));
    cudaMallocManaged(&bs, gridSize * sizeof(int));
    cudaMemcpy(input, in, input_size * sizeof(char), cudaMemcpyHostToDevice);
    // FIX: pass the managed copy `input`, not the host argv pointer `in`,
    // which the kernel could not legally dereference.
    backWardMask<<<8, 1024>>>(input, mask, input_size);
    cudaDeviceSynchronize();
    // FIX: prescan declares `extern __shared__` storage, so every launch
    // must supply 2 * blockDim.x * sizeof(int) of dynamic shared memory;
    // the original passed none, giving a zero-sized shared array.
    prescan<<<gridSize, 1024, 2 * 1024 * sizeof(int)>>>(scannedMask, mask, block_sums, input_size);
    cudaDeviceSynchronize();
    if(input_size > 2048) {
        // scan of the per-block sums (gridSize is an int; no ceil needed)
        prescan<<<1, gridSize, 2 * gridSize * sizeof(int)>>>(scannedBlockSums, block_sums, bs, gridSize);
        cudaDeviceSynchronize();
        // add the per-block offsets
        // NOTE(review): 2048 threads per block exceeds the 1024-thread
        // limit; this launch configuration needs rework.
        addOffsets<<<gridSize, 2048>>>(scannedMask, scannedBlockSums);
    }
    for(int i = 0; i < input_size; i++) {
        std::cout << input[i] << " ";
    }
    std::cout << std::endl;
    for(int i = 0; i < input_size; i++) {
        std::cout << mask[i] << " ";
    }
    // FIX: release the managed allocations the original leaked.
    cudaFree(input);
    cudaFree(mask);
    cudaFree(scannedMask);
    cudaFree(block_sums);
    cudaFree(scannedBlockSums);
    cudaFree(bs);
}
12,936 | #include "includes.h"
// Flatten the 3-D grid and 3-D block coordinates into one global thread id.
// In-block ordering used here: x fastest, then y, then z.
__device__ unsigned int getGid3d3d(){
    const int block = blockIdx.x
                    + blockIdx.y * gridDim.x
                    + blockIdx.z * gridDim.x * gridDim.y;
    const int local = threadIdx.x
                    + threadIdx.y * blockDim.x
                    + threadIdx.z * blockDim.x * blockDim.y;
    return block * (blockDim.x * blockDim.y * blockDim.z) + local;
}
// Integer power of a complex number (double2 as re/im) in polar form:
// (r e^{i theta})^b = r^b (cos(b theta) + i sin(b theta)).
__device__ double2 pow(double2 a, int b){
    double r = sqrt(a.x*a.x + a.y*a.y);
    // FIX: atan(y/x) loses the quadrant (wrong for x < 0) and divides by
    // zero on the imaginary axis; atan2 handles all quadrants correctly.
    double theta = atan2(a.y, a.x);
    return{pow(r,b)*cos(b*theta),pow(r,b)*sin(b*theta)};
}
// Initialize a torus-shaped Gaussian wavefunction: the magnitude falls off
// with distance from a ring of radius 0.5*items[0] centred at
// (items[6], items[7]) in the x-y plane, and with |z|.
// NOTE(review): the items[] index meanings (0 = diameter?, 6/7 = centre,
// 14/15/17 = width factors) are inferred from usage here — confirm against
// the host-side layout.
// NOTE(review): `winding` and `phi` are accepted but never used, and the
// imaginary part is always zeroed; x/y/z reads are unguarded, so the
// launch must exactly match the grid dimensions.
__global__ void ktorus_wfc(double *x, double *y, double *z, double *items, double winding, double *phi, double2 *wfc){
    int gid = getGid3d3d();
    int xid = blockDim.x*blockIdx.x + threadIdx.x;
    int yid = blockDim.y*blockIdx.y + threadIdx.y;
    int zid = blockDim.z*blockIdx.z + threadIdx.z;
    // Signed distance from the ring in the x-y plane.
    double rad = sqrt((x[xid] - items[6]) * (x[xid] - items[6])
                      + (y[yid] - items[7]) * (y[yid] - items[7]))
                 - 0.5*items[0];
    wfc[gid].x = exp(-( pow((rad)/(items[14]*items[15]*0.5),2) +
                        pow((z[zid])/(items[14]*items[17]*0.5),2) ) );
    wfc[gid].y = 0.0;
}
12,937 | #include "includes.h"
// For each text position, derive a position value from a forest of
// per-block binary trees: each block owns a tree of `order` levels stored
// in a flat slice of `step` bools, with leaves at [blockDim.x, 2*blockDim.x).
// The walk climbs toward the root while left siblings are set (possibly
// crossing into the previous block's tree), then descends back to a leaf
// to compute the result. pos[i] = 0 when this thread's own leaf is unset.
// NOTE(review): the exact meaning of the forest bits and the returned
// positions depends on the (unseen) builder kernel — the comments below
// describe only the traversal mechanics.
__global__ void FindPos(int *pos, bool *forest, int text_size, int order, int step)
{
    int text_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int offset = blockIdx.x*step;   // start of this block's tree slice
    if(text_idx < text_size) {
        if(!forest[offset+blockDim.x+threadIdx.x]) {
            pos[text_idx] = 0;
        } else {
            bool isCurBlock = true;               // still inside our own tree?
            bool isLeftMost = (blockIdx.x < 1);   // no previous block to cross into
            int nodeIdx = blockDim.x+threadIdx.x; // start at our leaf
            int leftBound = blockDim.x;           // first node of current level
            int rightBound = 2*blockDim.x-1;      // last node of current level
            int alignOrder = 0;                   // levels climbed so far
            // bottom-up: climb while the left sibling is set
            while(alignOrder != order) {
                int leftInx;
                if(nodeIdx-1 < leftBound) {
                    // No left sibling on this level: cross into the
                    // previous block's tree (if any).
                    if(isLeftMost) break;
                    isCurBlock = false;
                    leftInx = offset-step+rightBound;
                } else {
                    leftInx = offset+nodeIdx-1;
                }
                if(!forest[leftInx]) break;
                rightBound = leftBound-1;
                leftBound /= 2;
                nodeIdx /= 2;
                alignOrder++;
            }
            // top-down: descend back to the leftmost qualifying leaf
            if(alignOrder == order && !isLeftMost) isCurBlock = false;
            nodeIdx = (!isCurBlock)? rightBound
                     :(nodeIdx-1 < leftBound)? nodeIdx
                     :nodeIdx-1;
            offset = offset - ((isCurBlock)? 0:step);
            while(alignOrder != 0) {
                if((alignOrder == order && isCurBlock) || forest[offset+2*nodeIdx+1]) {
                    nodeIdx = 2*nodeIdx;
                } else {
                    nodeIdx = 2*nodeIdx+1;
                }
                alignOrder--;
            }
            pos[text_idx] = (isCurBlock)? (threadIdx.x-(nodeIdx-blockDim.x)+(forest[offset+nodeIdx]))
                           :(step-nodeIdx+threadIdx.x);
        }
    }
}
12,938 | const int threadsPerBlock = 128;
unsigned int hostOffsetSize;
unsigned int hostTargetSize;
unsigned int hostSourceSize;
static unsigned int is_set=0;
static unsigned int deviceOffsetSize;
static unsigned int deviceTargetSize;
static unsigned int deviceSourceSize;
static int *deviceOffset;
static float *deviceTargetX;
static float *deviceTargetY;
static float *deviceTargetZ;
static float *deviceTargetW;
static float *deviceSourceX;
static float *deviceSourceY;
static float *deviceSourceZ;
static float *deviceSourceG;
// Gaussian-blob summation: one thread per target point (threadsPerBlock
// targets per block). For target i this accumulates
//   w_i = (coef/pi) * sum_j g_j * exp(-|t_i - s_j|^2 * coef),  coef = 1/(2 sigma^2)
// over the source points of this block's cluster (deviceOffset gives each
// cluster's [start, end) range), staging sources through shared memory in
// tiles of threadsPerBlock.
// NOTE(review): the tail tile loads deviceSourceX..G[jb] for ALL
// threadIdx.x even when jb runs past the cluster end (and the main loop is
// skipped entirely when jsize == 0) — confirm the source arrays are padded
// to a multiple of threadsPerBlock.
// NOTE(review): the final scale targetW/M_PI*coef — check against the
// intended 3-D Gaussian normalization.
__global__ void kernel(int* deviceOffset, float* deviceTargetX, float* deviceTargetY, float* deviceTargetZ, float* deviceTargetW,
                       float sigma, float* deviceSourceX, float* deviceSourceY, float* deviceSourceZ, float* deviceSourceG)
{
    int i = blockIdx.x * threadsPerBlock + threadIdx.x;
    int jbase,jsize,jblok,j,jb,jj;
    float targetX,targetY,targetZ,targetW,dx,dy,dz,coef;
    __shared__ float sharedSourceX[threadsPerBlock];
    __shared__ float sharedSourceY[threadsPerBlock];
    __shared__ float sharedSourceZ[threadsPerBlock];
    __shared__ float sharedSourceG[threadsPerBlock];
    targetX = deviceTargetX[i];
    targetY = deviceTargetY[i];
    targetZ = deviceTargetZ[i];
    targetW = 0;
    coef = 0.5f/(sigma*sigma);
    jbase = deviceOffset[blockIdx.x];                          // cluster start
    jsize = deviceOffset[blockIdx.x+1]-deviceOffset[blockIdx.x]; // cluster size
    jblok = (jsize + threadsPerBlock - 1) / threadsPerBlock;   // number of tiles
    // Full tiles: cooperatively stage threadsPerBlock sources, then sum.
    for (j = 0; j < jblok-1; j++) {
        jb = jbase + j * threadsPerBlock + threadIdx.x;
        __syncthreads();
        sharedSourceX[threadIdx.x] = deviceSourceX[jb];
        sharedSourceY[threadIdx.x] = deviceSourceY[jb];
        sharedSourceZ[threadIdx.x] = deviceSourceZ[jb];
        sharedSourceG[threadIdx.x] = deviceSourceG[jb];
        __syncthreads();
#pragma unroll 32
        for(jj = 0; jj < threadsPerBlock; jj++){
            dx = targetX-sharedSourceX[jj];
            dy = targetY-sharedSourceY[jj];
            dz = targetZ-sharedSourceZ[jj];
            targetW += sharedSourceG[jj]*exp(-(dx*dx+dy*dy+dz*dz)*coef);
        }
    }
    // Tail tile: only the remaining jsize - j*threadsPerBlock sources count.
    jb = jbase + j * threadsPerBlock + threadIdx.x;
    __syncthreads();
    sharedSourceX[threadIdx.x] = deviceSourceX[jb];
    sharedSourceY[threadIdx.x] = deviceSourceY[jb];
    sharedSourceZ[threadIdx.x] = deviceSourceZ[jb];
    sharedSourceG[threadIdx.x] = deviceSourceG[jb];
    __syncthreads();
    for(jj = 0; jj < jsize - (j * threadsPerBlock); jj++){
        dx = targetX-sharedSourceX[jj];
        dy = targetY-sharedSourceY[jj];
        dz = targetZ-sharedSourceZ[jj];
        targetW += sharedSourceG[jj]*exp(-(dx*dx+dy*dy+dz*dz)*coef);
    }
    deviceTargetW[i] = targetW/M_PI*coef;
}
// Host wrapper: uploads targets/sources, launches kernel() with one block per
// cluster (iblok blocks of threadsPerBlock threads), and copies the resulting
// weights back into hostTargetW.
// Device buffers are cached in the static globals above and only reallocated
// when a call needs more capacity than previously allocated.
// NOTE(review): no CUDA error checking anywhere -- a failed cudaMalloc leaves
// stale pointers in use; consider checking return codes.
void gpumatmult(float *hostTargetX, float *hostTargetY, float *hostTargetZ, float *hostTargetW,
float *hostSourceX, float *hostSourceY, float *hostSourceZ, float *hostSourceG,
int *hostOffset, int iblok, float sigma, int numCluster, int numTrunc)
{
// Required sizes (bytes) for this call.
hostOffsetSize = sizeof(int) * (numCluster+1);
hostTargetSize = sizeof(float) * numCluster * threadsPerBlock;
hostSourceSize = sizeof(float) * numCluster * numTrunc;
// Select the device exactly once per process.
if (is_set==0) {
cudaSetDevice(0);
is_set=1;
}
// Grow-only reallocation of the cached device buffers.
if (hostOffsetSize>deviceOffsetSize) {
if(deviceOffsetSize!=0) cudaFree(deviceOffset);
cudaMalloc((void**)&deviceOffset,hostOffsetSize);
deviceOffsetSize=hostOffsetSize;
}
if (hostTargetSize>deviceTargetSize) {
if(deviceTargetSize!=0) {
cudaFree(deviceTargetX);
cudaFree(deviceTargetY);
cudaFree(deviceTargetZ);
cudaFree(deviceTargetW);
}
cudaMalloc((void**)&deviceTargetX,hostTargetSize);
cudaMalloc((void**)&deviceTargetY,hostTargetSize);
cudaMalloc((void**)&deviceTargetZ,hostTargetSize);
cudaMalloc((void**)&deviceTargetW,hostTargetSize);
deviceTargetSize=hostTargetSize;
}
if (hostSourceSize>deviceSourceSize) {
if(deviceSourceSize!=0) {
cudaFree(deviceSourceX);
cudaFree(deviceSourceY);
cudaFree(deviceSourceZ);
cudaFree(deviceSourceG);
}
cudaMalloc((void**)&deviceSourceX,hostSourceSize);
cudaMalloc((void**)&deviceSourceY,hostSourceSize);
cudaMalloc((void**)&deviceSourceZ,hostSourceSize);
cudaMalloc((void**)&deviceSourceG,hostSourceSize);
deviceSourceSize=hostSourceSize;
}
// Upload inputs (synchronous copies on the default stream).
cudaMemcpy(deviceOffset,hostOffset,hostOffsetSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceTargetX,hostTargetX,hostTargetSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceTargetY,hostTargetY,hostTargetSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceTargetZ,hostTargetZ,hostTargetSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceSourceX,hostSourceX,hostSourceSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceSourceY,hostSourceY,hostSourceSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceSourceZ,hostSourceZ,hostSourceSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceSourceG,hostSourceG,hostSourceSize,cudaMemcpyHostToDevice);
// One block per cluster; the final blocking copy also synchronizes with the
// kernel (default-stream ordering).
dim3 block(threadsPerBlock);
dim3 grid(iblok);
kernel<<< grid, block >>>(deviceOffset,deviceTargetX,deviceTargetY,deviceTargetZ,deviceTargetW,
sigma,deviceSourceX,deviceSourceY,deviceSourceZ,deviceSourceG);
cudaMemcpy(hostTargetW,deviceTargetW,hostTargetSize,cudaMemcpyDeviceToHost);
}
|
12,939 | extern "C"
// Element-wise derivative of the cross-entropy cost w.r.t. the network output.
// Grid-stride loop, so any launch configuration covers all `length` elements.
// The small epsilons (1e-5) guard against division by zero when the network
// output reaches exactly 0 or 1.
__global__
void crossEntropyCostDerivative(float *desiredOutput, unsigned int length, float *networkOutput, float* result)
{
    unsigned int stride = blockDim.x * gridDim.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    while (idx < length)
    {
        float y = desiredOutput[idx];
        float o = networkOutput[idx];
        result[idx] = -y / (0.00001f + o) + (1.0f - y) / (1.00001f - o);
        idx += stride;
    }
}
|
12,940 | #include <stdio.h>
#include <cuda_runtime_api.h>
// Single-thread kernel: stores a + b into the device location *result.
__global__ void cuda_add(int a, int b, int *result)
{
    result[0] = a + b;
}
// Reads two integers from stdin, adds them on the GPU, prints the sum.
// Fixes: "Pleas" typo in the prompt; scanf result is now checked so garbage
// stack values are never sent to the kernel on malformed input.
int main()
{
    int a, b;
    int h_result, *d_result;
    printf("Please, enter two ints\n");
    if (scanf("%d%d", &a, &b) != 2) {
        fprintf(stderr, "Invalid input: expected two integers\n");
        return 1;
    }
    cudaMalloc((void **)&d_result, sizeof(int));
    cuda_add<<<1, 1>>>(a, b, d_result);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(&h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_result);
    printf("a + b == %d\n", h_result);
    return 0;
}
12,941 | /*********************************************************************
* Copyright © 2011-2012,
* Marwan Abdellah: <abdellah.marwan@gmail.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
********************************************************************/
/*!
* CUDA : This kernel adds a constant value to the input vector into the
* output vector with length N.
*
* @param devArrayInput
* Input vector.
*
* @param constVal
* Constant value to be added to the input device vector.
*
* @param devArrayOutput
* Sum vector.
*
* @param N
* Vector length.
*
* @author
* Marwan Abdellah <abdellah.marwan@gmail.com>
*
* @date
* Created: August, 2012.
* @date
* Last Update: September, 2012.
*
* @note
* Minimum CUDA version 3.2.
* @note
* Minimum Device Compute Capability 1.0.
*/
/*!
 * Adds a scalar constant to every element of a device vector:
 * devArrayOutput[i] = devArrayInput[i] + constVal, one thread per element.
 * Compiling with VEC_CHECK enables the out-of-range guard for launches whose
 * thread count exceeds N.
 */
template <typename T>
__global__
void Constant_Add_1D_Array_Kernel(T* devArrayInput,
                                  T constVal,
                                  T* devArrayOutput,
                                  int N)
{
    const int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
#ifdef VEC_CHECK
    if (globalIdx < N)
        devArrayOutput[globalIdx] = devArrayInput[globalIdx] + constVal;
#else
    devArrayOutput[globalIdx] = devArrayInput[globalIdx] + constVal;
#endif
}
|
12,942 | #include "includes.h"
// Packs three device arrays back-to-back into af:
// af = [ac(0..n-1) | ac1(0..n-1) | ac2(0..n-1)] with n = nextsize.
// One thread per element index.
__global__ void reposition (double4 *ac, double4 *ac1, double4 *ac2, double4 *af, unsigned long nextsize)
{
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= nextsize) return;  // tail guard
    af[gid]                = ac[gid];
    af[gid + nextsize]     = ac1[gid];
    af[gid + 2 * nextsize] = ac2[gid];
}
12,943 | /*
Author : Kim, KyoungHo (rain_woo@korea.ac.kr)
Ki-Hwan Kim (wbkifun@korea.ac.kr)
Written date : 2009. 6. 11
last update :
Copyright : GNU GPL
*/
// Zero-fills a[idx0 .. Ntot-1], one element per thread (tail-guarded).
__global__ void initmem( int Ntot, int idx0, float *a ) {
    const int idx = idx0 + blockIdx.x*blockDim.x + threadIdx.x;
    if ( idx < Ntot ) {
        a[idx] = 0.0f;
    }
}
|
12,944 | #include<iostream>
#include<ctime>
using namespace std;
// Tiled matrix transpose: each block moves a DIMY x (2*DIMX) tile of the
// nx x ny input through shared memory; each thread handles two elements.
// The +2 column padding on the shared tile is the original's anti-bank-conflict
// padding ("padding = 2").
// FIX: __syncthreads() used to sit inside the bounds-check branch; if any
// thread of a block fails the guard (a partial edge tile), a barrier inside
// divergent control flow is undefined behavior. The barrier is now executed
// unconditionally and the guard is applied separately to the load and store
// phases, which preserves the original behavior for fully in-bounds blocks.
template<unsigned int DIMX, unsigned int DIMY, typename T>
__global__ void transpose(T *in_data, T *out_data, unsigned int nx, unsigned int ny) {
    __shared__ T tile[DIMY][DIMX*2 + 2];
    unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x * 2;
    unsigned int idy = threadIdx.y + blockDim.y * blockIdx.y;
    const bool active = (idx + blockDim.x < nx && idy < ny);
    if (active) {
        // Coalesced loads of the tile's two halves.
        tile[threadIdx.y][threadIdx.x] = in_data[idy * nx + idx];
        tile[threadIdx.y][threadIdx.x + blockDim.x] = in_data[idy * nx + idx + blockDim.x];
    }
    __syncthreads();  // reached by ALL threads of the block
    if (active) {
        // Re-map this thread's linear position so the transposed stores stay coalesced.
        unsigned int posB = threadIdx.y * blockDim.x + threadIdx.x;
        unsigned int column = posB / blockDim.y;
        unsigned int row = posB % blockDim.y;
        idx = column + blockDim.x * blockIdx.x * 2;
        idy = row + blockDim.y * blockIdx.y;
        out_data[idx * ny + idy] = tile[row][column];
        out_data[(idx + blockDim.x) * ny + idy] = tile[row][column + blockDim.x];
    }
}
// CPU reference transpose: out (nx rows x ny cols) = transpose of
// in (ny rows x nx cols), both row-major.
template<typename T>
void transposeHost(T *in, T* out, unsigned int nx, unsigned int ny) {
    for (unsigned int col = 0; col < ny; ++col) {
        for (unsigned int row = 0; row < nx; ++row) {
            out[row * ny + col] = in[col * nx + row];
        }
    }
}
// Driver: transposes a 512x512 int matrix on both CPU and GPU and counts
// mismatches (prints 0 on success).
// NOTE(review): in/out are ~1 MiB variable-length arrays on the stack each --
// fine for 512x512 but fragile if nx/ny grow; heap allocation would be safer.
// No CUDA error checking is performed anywhere.
int main(int argc, char *argv[]) {
unsigned int nx = 1 << 9;
unsigned int ny = 1 << 9;
constexpr unsigned int blockx = 32;
constexpr unsigned int blocky = 32;
clock_t start, end;
int in[nx * ny], out[nx * ny], *in_dev, *out_dev;
// Fill the input with pseudo-random values in [0, 999].
auto init = [](auto*in ,unsigned int size)->void {
for(int i = 0;i < size;++i) {
in[i] = random()%1000;
}
};
init(in, nx * ny);
cudaMalloc((void**)&in_dev, sizeof(in));
cudaMalloc((void**)&out_dev, sizeof(in));
cudaMemcpy(in_dev, in ,sizeof(in), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
// CPU reference result into `out`.
transposeHost(in, out, nx, ny);
// Grid x-dimension halved: each thread of the kernel handles two elements.
dim3 block(blockx, blocky);
dim3 grid((nx + blockx - 1) / blockx / 2, (ny + blocky - 1) / blocky);
start = clock();
transpose<blockx, blocky><<<grid, block>>>(in_dev, out_dev, nx,ny);
cudaDeviceSynchronize(); // wait for the kernel so the timing is meaningful
end = clock();
cout <<" gpu time: " << end - start<<endl;
// GPU result is copied back into `in` and compared against the CPU `out`.
cudaMemcpy(in, out_dev,sizeof(in), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(in_dev);
cudaFree(out_dev);
int n = 0;
for (int i = 0;i < nx * ny;++i) {
if(out[i] != in[i]) {
n++;
}
}
cout << n << endl; // mismatch count; 0 expected
return 0;
}
|
12,945 | #include<stdio.h>
// Single-block tree reduction: sums a_d[0..n-1] in place (result lands in
// a_d[0]) and writes the total to maxsum[0]. Launch with one block of n
// threads; n must be a power of two.
// FIX: the original had every thread execute `maxsum[0] = a_d[0]` immediately
// after the loop, racing with thread 0's final in-loop write to a_d[0]
// (no barrier after the last reduction step, and an unguarded many-writer
// store). Now a barrier publishes the final value and only thread 0 stores it.
__global__ void sum(int *a_d, int n, int* maxsum){
    int stride = n / 2;
    int t = threadIdx.x;
    while (stride >= 1) {
        // All threads iterate the same number of times (stride depends only
        // on n), so this barrier is never divergent.
        __syncthreads();
        if (t < stride) {
            a_d[t] = a_d[t] + a_d[stride + t];
        }
        stride /= 2;
    }
    __syncthreads();          // make thread 0's last write visible
    if (t == 0) {
        maxsum[0] = a_d[0];   // single writer
    }
}
// Sums 0..511 on the GPU and prints the total (130816) plus a_d[0].
// FIX: `printf("%d ", max_val)` passed the array itself (a pointer) to a %d
// conversion instead of the value; now prints max_val[0]. Device allocations
// were also never freed.
int main(){
    int n=512;
    int a[n];
    for(int i=0; i<n; i++){
        a[i]=i;
    }
    int *a_d,*maxsum;
    cudaMalloc((void**)&a_d,n*sizeof(int));
    cudaMalloc((void**)&maxsum,sizeof(int));
    cudaMemcpy(a_d,a,n*sizeof(int),cudaMemcpyHostToDevice);
    // Single block of n threads; the kernel reduces in place.
    sum<<<1,n>>>(a_d,n,maxsum);
    int maxi[n];
    int max_val[1];
    // Blocking copies synchronize with the kernel.
    cudaMemcpy(maxi,a_d,n*sizeof(int),cudaMemcpyDeviceToHost);
    cudaMemcpy(max_val,maxsum,sizeof(int),cudaMemcpyDeviceToHost);
    printf("%d ",max_val[0]); // was: printf("%d ", max_val) -- printed the address
    printf("%d ",maxi[0]);
    cudaFree(a_d);            // release device memory (was leaked)
    cudaFree(maxsum);
    return 0;
}
|
12,946 | #include "CosmicConstants.cuh"
#include <stdio.h>
// Debug kernel: prints the current value of the device constant K0
// (declared in CosmicConstants.cuh).
__global__ void mykernel() {
printf("value is %f\n", K0);
}
// Copies newDT into the device symbol `dt` (from CosmicConstants.cuh).
// NOTE(review): the cudaMemcpyToSymbol return code is ignored.
void setDT(float newDT) {
cudaMemcpyToSymbol(dt, &newDT, sizeof(newDT));
}
// Copies newK0 into the device symbol K0. The mykernel launches before and
// after the copy print the old and new values (debug aid -- presumably
// intentional; remove once verified).
void setK0(float newK0) {
mykernel << <1, 1 >> >();
cudaMemcpyToSymbol(K0, &newK0, sizeof(newK0), 0, cudaMemcpyHostToDevice);
mykernel << <1, 1 >> >();
}
|
12,947 | #include "includes.h"
#define SIZ 20
#define num_inp 4
using namespace std;
/* Pair of vertex indices describing one edge. */
typedef struct edge {
int first, second;
} edges;
// Gradient-descent weight update: W1 -= learning_rate * grads_W1, one thread
// per matrix element. Launch layout: blockIdx.x selects the row, threadIdx.x
// the column; `size` is the row stride (number of columns).
__global__ void w1_kernel(double * grads_W1, double * W1, double learning_rate, int size)
{
    const int row  = blockIdx.x;
    const int col  = threadIdx.x;
    const int cell = row * size + col;
    W1[cell] -= learning_rate * grads_W1[cell];
}
12,948 | #include "includes.h"
// Applies x[k] = cos(W0 * x[k]) in place to row (idx-1) of a row-major buffer
// whose rows have length N, using a grid-stride loop.
// FIXES: the loop index was `int` while N is size_t -- for N >= 2^31 the index
// overflowed; now size_t throughout. cosf makes the single-precision intent
// explicit instead of relying on C++ overload resolution of cos(float).
__global__ void Cos( float * x, size_t idx, size_t N, float W0)
{
    const size_t row = (idx - 1) * N;
    const size_t stride = (size_t)blockDim.x * gridDim.x;
    for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
    {
        x[row + i] = cosf( W0 * x[row + i] );
    }
}
12,949 | //xfail:BOOGIE_ERROR
//--blockDim=512 --gridDim=64 --loop-unwind=2 --no-inline
//kernel.cu: error: possible write-write race on B
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <assert.h>
#define N 2//512
extern "C" {
// Verifier test kernel -- INTENTIONALLY RACY (this file is an xfail case; the
// header comment expects "possible write-write race on B"). Every thread of
// every block writes the same B[0..2N-1] range, a deliberate write-write race
// on shared memory. Do not "fix" this; the verifier must report it.
__global__ void helloCUDA(float *A)
{
__shared__ float B[256];
for(int i = 0; i < N*2; i ++) {
B[i] = A[i];
}
}
}
// Host driver for the racy verifier kernel: uploads N fives, launches 64
// blocks of N threads, copies back, and cleans up.
int main() {
    float *hostBuf;
    float *devBuf;
    float bytes = N * sizeof(float);
    hostBuf = (float*)malloc(bytes);
    cudaMalloc((void**)&devBuf, bytes);
    for (int k = 0; k < N; ++k) {
        hostBuf[k] = 5;
    }
    cudaMemcpy(devBuf, hostBuf, bytes, cudaMemcpyHostToDevice);
    helloCUDA<<<64, N>>>(devBuf);
    //ESBMC_verify_kernel(helloCUDA, 1, N, dev_A);
    cudaMemcpy(hostBuf, devBuf, bytes, cudaMemcpyDeviceToHost);
    cudaFree(devBuf);
    free(hostBuf);
}
|
12,950 | #include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <string.h>
#define BLOCK_SIZE 16
// Dumps the interesting fields of a cudaDeviceProp to stdout, one per line.
void printDeviceProp(const cudaDeviceProp &prop) {
printf("Device Name : %s.\n", prop.name);
printf("totalGlobalMem : %lu.\n", prop.totalGlobalMem);
printf("sharedMemPerBlock : %lu.\n", prop.sharedMemPerBlock);
printf("regsPerBlock : %d.\n", prop.regsPerBlock);
printf("warpSize : %d.\n", prop.warpSize);
printf("memPitch : %lu.\n", prop.memPitch);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("totalConstMem : %lu.\n", prop.totalConstMem);
printf("major.minor : %d.%d.\n", prop.major, prop.minor);
printf("clockRate : %d.\n", prop.clockRate);
printf("textureAlignment : %lu.\n", prop.textureAlignment);
printf("deviceOverlap : %d.\n", prop.deviceOverlap);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
}
// Enumerates CUDA devices, prints each device's properties, and selects the
// first device with compute capability >= 1.0 via cudaSetDevice.
// Returns false (with a message on stderr) if no device qualifies.
// NOTE(review): cudaGetDeviceProperties is called twice per device -- the
// second call only exists for its status check; one checked call would do.
bool InitCUDA() {
int count;
cudaGetDeviceCount(&count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
int i;
for (i = 0; i < count; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printDeviceProp(prop);
if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if (prop.major >= 1) {
break;
}
}
}
if (i == count) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
cudaSetDevice(i);
return true;
}
//CPU
// CPU reference GEMM: res = matrixA * matrixB, all row-major.
// Per this file's (confusing) naming: matrixA is rowsA x colsA, matrixB is
// colsA x rowsB, res is rowsA x rowsB -- "rowsB" is really B's column count.
void matrixMulCPU(float* res,const float *matrixA,const float *matrixB,int colsA,int rowsA,int rowsB) {
    for (int row = 0; row < rowsA; ++row) {
        for (int col = 0; col < rowsB; ++col) {
            float acc = 0.0f;
            for (int k = 0; k < colsA; ++k) {
                acc += matrixA[row * colsA + k] * matrixB[k * rowsB + col];
            }
            res[row * rowsB + col] = acc;
        }
    }
}
// GPU
// C(i,j) = sum{A(i, k)* B(k ,j)}
// each thread cal C(i, j)
// Naive matmul: one thread computes one output element C(row, col).
// NOTE(review): no bounds guard on row/col -- correct only when both matrix
// dimensions are exact multiples of the block dimensions (the caller sizes
// the grid that way for N = 2^11). Also note the launch site passes rowsA as
// the rowsB argument; that works only because the matrices are square here --
// confirm before reusing with non-square shapes.
__global__ void matrixMulGPUKernal0(float* matrixC,const float* matrixA,const float *matrixB,int colsA,int rowsB) {
float sum = 0;
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
for (int i = 0; i < colsA; ++i) {
sum += matrixA[row*colsA + i] * matrixB[i*rowsB + col];
}
matrixC[row*rowsB + col] = sum;
}
// Csub(i,j) = sum{A(i,ksub+offsetA)*B(ksub+offsetB,j)} 0 <= ksub < blockSize
// C(i,j) = sum{Csub(i,j)}
// each thread cal each block
// Shared-memory tiled matmul: each BLOCK_SIZE x BLOCK_SIZE thread block walks
// matching tiles of A (across a row band) and B (down a column band),
// accumulating one output element per thread.
// Assumes dimensions are exact multiples of BLOCK_SIZE (no tail guards).
__global__ void matrixMulGPUKernal1(float* matrixC,const float* matrixA,const float *matrixB,int colsA,int rowsB) {
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// First/last element index of this block's row band in A, and the strides
// that advance both tile cursors one tile at a time.
int aBegin = colsA*(by*BLOCK_SIZE);//A(0,by)
int aEnd = aBegin + colsA - 1;
int aStep = BLOCK_SIZE;//offsetA
int bBegin = BLOCK_SIZE*bx;//B(bx,0)
int bStep = BLOCK_SIZE*rowsB;//offsetB
float cSub = 0;
for (int a = aBegin,b = bBegin; a <= aEnd; a += aStep,b += bStep) {
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Cooperative load: one element of each tile per thread.
As[ty][tx] = matrixA[a + colsA*ty + tx];
Bs[ty][tx] = matrixB[b + rowsB*ty + tx];
__syncthreads(); // tiles fully loaded before use
//i * j for each thread
for (int k = 0; k < BLOCK_SIZE; ++k) {
cSub += As[ty][k]*Bs[k][tx];
}
__syncthreads(); // done reading before the next iteration overwrites
}
int cIndex = (by*BLOCK_SIZE + ty)*rowsB + (bx*BLOCK_SIZE + tx);
matrixC[cIndex] = cSub;
}
// H2D helper: copies n floats from host buffer matrixA into device buffer d_a.
void copyFromCPUToGPU(const float *matrixA, float *d_a, int n) {
    const size_t bytes = sizeof(float) * n;
    cudaMemcpy(d_a, matrixA, bytes, cudaMemcpyHostToDevice);
}
// D2H helper: copies n floats from device buffer d_c into host buffer matrixC.
void copyFromGPUToCPU(const float *d_c, float *matrixC, int n) {
    const size_t bytes = sizeof(float) * n;
    cudaMemcpy(matrixC, d_c, bytes, cudaMemcpyDeviceToHost);
}
// Allocates device buffers, runs the naive GPU matmul (Kernal0), reports the
// event-timed kernel duration and GFlop/s, and copies the product back.
// FIX: cudaThreadSynchronize() is deprecated; replaced by the equivalent
// cudaDeviceSynchronize() (redundant here anyway, since cudaEventSynchronize
// on `stop` already waited for the kernel -- kept to preserve behavior).
// NOTE(review): the kernel is launched with rowsA where its rowsB parameter
// is expected -- correct only for the square matrices used here.
void matrixMulGPU(float* matrixC,const float *matrixA,const float *matrixB,int colsA,int rowsA,int rowsB) {
    float *d_a, *d_b, *d_c;
    cudaMalloc((void**) &d_a, sizeof(float) * colsA*rowsA);
    cudaMalloc((void**) &d_b, sizeof(float) * rowsB*colsA);
    cudaMalloc((void**) &d_c, sizeof(float) * rowsB*rowsA);
    copyFromCPUToGPU(matrixA,d_a,colsA*rowsA);
    copyFromCPUToGPU(matrixB,d_b,rowsB*colsA);
    // One thread per output element; assumes dims divide by BLOCK_SIZE.
    dim3 blocks(rowsB/BLOCK_SIZE, rowsA/BLOCK_SIZE);
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    // Event-based timing brackets only the kernel execution.
    float time_elapsed = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    matrixMulGPUKernal0<<<blocks,threads>>>(d_c,d_a,d_b,colsA,rowsA);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(start);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_elapsed, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf(" - Running time: %f ms\n", time_elapsed);
    // 2*N^3 flops; time_elapsed is in ms, hence the 1e-6 factor for GFlop/s.
    double gflop = (2.0 * (double)colsA * colsA * colsA) * 0.000001;
    printf(" - GFlop: %.5f GFlop/sec\n\n", gflop/time_elapsed);
    cudaDeviceSynchronize(); // was deprecated cudaThreadSynchronize()
    copyFromGPUToCPU(d_c,matrixC,rowsB*rowsA);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
// Fills _data with pseudo-random floats in [0, 1] using rand()/RAND_MAX.
void randomInit(float* _data,int size) {
    for (int idx = 0; idx < size; ++idx) {
        _data[idx] = rand() / (float)RAND_MAX;
    }
}
// Element-wise comparison of two float arrays with absolute tolerance 1e-3.
// Prints the first mismatching index and values, then returns false;
// returns true when every pair is within tolerance.
bool checkError(const float* matrixA, const float* matrixB, int size) {
    for (int idx = 0; idx < size; ++idx) {
        if (fabs(matrixA[idx] - matrixB[idx]) > 1.0e-3) {
            printf(" ! Wrong index: %d\n", idx);
            printf("%f \t %f\n",matrixA[idx],matrixB[idx]);
            return false;
        }
    }
    return true;
}
// Benchmark driver: multiplies two random 2048x2048 matrices on CPU and GPU
// and verifies the results agree within 1e-3.
// FIXES: "Worng Answer!" typo; the CPU timing printf claimed "ms" but printed
// seconds -- now scaled by 1000 to match its label. Dead commented-out cutil
// timer scaffolding removed.
int main(int argc, char* argv[]) {
    if (!InitCUDA()) return 0;
    srand(63); // fixed seed for reproducible matrices
    printf("\n - BLOCK_SIZE: %d\n", BLOCK_SIZE);
    int N = (1 << 11);
    int colsA, colsB, colsC, rowsA, rowsB, rowsC;
    colsA = colsB = colsC = rowsA = rowsB = rowsC = N; // square matrices
    printf(" - Matrix size: %d * %d\n", rowsC, rowsC);
    float* A , *B, *C, *C2;
    A = (float*) malloc(sizeof(float) * colsA * rowsA);
    B = (float*) malloc(sizeof(float) * colsB * rowsB);
    randomInit(A,colsA*rowsA);
    randomInit(B,colsB*rowsB);
    C = (float*) malloc(sizeof(float) * colsC * rowsC);   // GPU result
    memset(C,0,sizeof(float)*colsC*rowsC);
    C2 = (float*) malloc(sizeof(float) * colsC * rowsC);  // CPU reference
    memset(C2,0,sizeof(float)*colsC*rowsC);
    clock_t tick1 = clock();
    matrixMulCPU(C2,A,B,colsA,rowsA,colsB);
    printf(" - CPU use Time : %f ms\n",(double)(clock() - tick1)/CLOCKS_PER_SEC*1000);
    matrixMulGPU(C,A,B,colsA,rowsA,colsB);
    if (checkError(C,C2,colsC*rowsC)) {
        printf("Right Answer!\n");
    }else{
        printf("Wrong Answer!\n");
    }
    free(A);
    free(B);
    free(C);
    free(C2);
    return 0;
}
|
12,951 |
// Sum-of-squared-errors kernel: one thread per batch element.
// For batch element i, evaluates fx(j) = sum_k DA[i-block, sample j, coeff k]
// * CA[i, k] at each of the rbs*rScale sample points and stores
// sum_j (EA[j] - fx(j))^2 into SA[i].
extern "C"
__global__ void sumSquareError
(int nBatch, int rbs, int rScale, int nCoeff,
float *DA, float *CA, float *EA, float *SA)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= nBatch) return;  // tail guard
    const int nSamples = rbs * rScale;
    const int daOffset = i * nSamples * nCoeff;
    const int caOffset = i * nCoeff;
    const int eaOffset = i * nSamples;
    // Accumulate in a register instead of read-modify-writing SA[i] each
    // iteration; the final stored value is identical.
    float acc = 0.0f;
    for (int j = 0; j < nSamples; j++) {
        float fx = 0.0f;
        for (int k = 0; k < nCoeff; k++) {
            fx += DA[daOffset + nSamples * k + j] * CA[caOffset + k];
        }
        const float err = EA[eaOffset + j] - fx;
        acc += err * err;
    }
    SA[i] = acc;
}
|
12,952 | #include<cuda.h>
#include<iostream>
// Branch-divergence demo kernel (teaching example).
// Even-indexed threads write data[i+1]; odd-indexed threads write data[i] --
// both target the odd-numbered slots, so thread i (even) and thread i+1 (odd)
// write the same element data[i+1]: an intentional race. The commented-out
// `volatile` and __threadfence_block() suggest the race/ordering behavior is
// the point of the exercise -- do not "fix" it.
__global__ void divergence(/* volatile */ int* data)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ((i & 0x01) == 0)
{
data[i+1] = data[i+1] + i; // if even, come here
//__threadfence_block();
}
else
{
data[i] = data[i] + 2*i; // if odd, come here
}
}
// Driver: zero-initializes a 4-element device array, runs the divergence demo
// kernel on a single block of four threads, and prints the results.
int main()
{
    const int numElems = 4;
    const size_t numBytes = sizeof(int) * numElems;
    int hostArray[numElems];
    int *devArray;
    // Device buffer, zeroed so the kernel's read-modify-writes start from 0.
    cudaMalloc((void**)&devArray, numBytes);
    cudaMemset(devArray, 0, numBytes);
    // One block, one thread per element.
    divergence<<<1, numElems>>>(devArray);
    // Blocking copy back; also synchronizes with the kernel.
    cudaMemcpy(&hostArray, devArray, numBytes, cudaMemcpyDeviceToHost);
    std::cout << "Values stored in hostArray: " << std::endl;
    for (int k = 0; k < numElems; ++k)
        std::cout << hostArray[k] << std::endl;
    cudaFree(devArray);
    return 0;
}
|
12,953 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
// Naive square matmul c = a * b (n x n, row-major): one thread per output
// element, with bounds guards for partial edge blocks. Expects a 2D launch.
__global__ void mult(int *a, int *b, int *c, int n) {
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int q = blockIdx.x * blockDim.x + threadIdx.x;
    if (r < n && q < n) {
        int acc = 0;
        for (int k = 0; k < n; k++) {
            acc += a[r * n + k] * b[k * n + q];
        }
        c[r * n + q] = acc;
    }
}
// Host driver for the GPU matmul benchmark.
// FIXES vs. original:
//  * mult() uses 2D indexing (blockIdx.y/threadIdx.y), but the original
//    launched <<<n, n>>> (1D), so blockDim.y == 1 and blockIdx.y == 0 and
//    only row 0 of the product was computed. Now launched on a 2D grid of
//    16x16 blocks covering the whole matrix.
//  * the timer stopped right after the asynchronous launch, measuring only
//    launch overhead; the product was also never copied back. The blocking
//    cudaMemcpy into c both retrieves the result and synchronizes, so the
//    timing now covers the actual work.
//  * scanf result checked; unused variable k removed; pointless round() on
//    an int expression dropped (rand()%2 is already an int).
int main() {
    int n;
    int i, j;
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    printf("Please enter the size of matrix: \n");
    if (scanf("%d", &n) != 1 || n <= 0) {
        fprintf(stderr, "Invalid matrix size\n");
        return 1;
    }
    cudaMalloc((void**)&dev_a, sizeof(int) * n * n);
    cudaMalloc((void**)&dev_b, sizeof(int) * n * n);
    cudaMalloc((void**)&dev_c, sizeof(int) * n * n);
    // Pinned host buffers for faster transfers.
    cudaMallocHost((void**)&a, sizeof(int) * n * n);
    cudaMallocHost((void**)&b, sizeof(int) * n * n);
    cudaMallocHost((void**)&c, sizeof(int) * n * n);
    for (i = 0; i < n; i++){
        for (j = 0; j < n; j++){
            a[i * n + j] = rand() % 2;
            b[i * n + j] = rand() % 2;
        }
    }
    printf("Start calculating...\n");
    clock_t start_time = clock();
    cudaMemcpy(dev_a, a, sizeof(int) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(int) * n * n, cudaMemcpyHostToDevice);
    // 2D launch: one thread per output element, ceil-div grid for any n.
    dim3 block(16, 16);
    dim3 grid((n + block.x - 1) / block.x, (n + block.y - 1) / block.y);
    mult<<<grid, block>>>(dev_a, dev_b, dev_c, n);
    // Blocking copy: waits for the kernel, so the timer covers the compute.
    cudaMemcpy(c, dev_c, sizeof(int) * n * n, cudaMemcpyDeviceToHost);
    clock_t end_time = clock();
    printf("Time consuming of calculating %dx%d matrix using GPU is %f ms.\n", n, n, static_cast<double>(end_time - start_time)/CLOCKS_PER_SEC*1000);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(c);
    return 0;
}
|
12,954 | //#include "Field.hpp"
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//we are computing FFT of the size 2**N
#define N 1024
#define LOG_N 10
/*using namespace arithmetic;
std::initializer_list<uint_default_t> p = { 0x2370fb049d410fbe, 0x4e761a9886e50241, 0x7d023f4018000001, 0x7e80600000000001 };
static const bignum<MAX_BITSIZE> modulus(p);
using field = Field<MAX_BITSIZE, modulus>;
__constant__ field dev_roots_of_unity[LOG_N];
__global__ void FFTKernel(field* input_buf, field* output_buf);
int main()
{
//first find suitable Cuda device
//TBD: or split between several CUDA devices if possible
int device_count;
cudaError_t cudaStatus = cudaGetDeviceCount(&device_count);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaGetDeviceCount failed!");
return 1;
}
if (device_count == 0)
{
fprintf(stderr, "No suitable CUDA devices were found!");
return 1;
}
cudaDeviceProp prop;
cudaStatus = cudaGetDeviceProperties(&prop, 0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaGetDeviceCount failed!");
return 1;
}
//TODO: check if there are enough constant memory and other additional checks
//set appropriate device
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
field input[N];
field output[N];
//we are goint to calculate roots of unity in corresponding powers
field roots_of_unity[LOG_N];
//TODO: compute them in advance before the kernel starts
for (size_t i = 0; i < N; i++)
input[i] = field::random();
field* dev_input = nullptr;
field* dev_output = nullptr;
// Allocate GPU buffers for three vectors (one input, one output) .
cudaStatus = cudaMalloc((void**)&dev_input, N * sizeof(field));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_output, N * sizeof(field));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vector from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_input, input, N * sizeof(field), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
//copy precomputed roots of unity to constant memory
cudaMemcpyToSymbol(dev_roots_of_unity, roots_of_unity, LOG_N * sizeof(field));
// Launch a kernel on the GPU with one thread for each element.
FFTKernel << <1, size >> > (dev_input, dev_output);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "FFTKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(output, dev_output, N * sizeof(field), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_input);
cudaFree(dev_output);
return 0;
}
//NB: we have precomputed the powers of roots of unity and put them into constant memory
__global__ void FFTKernel(const field* input_buf, field* output_buf)
{
field temp_buf[N];
const field* in_buf = input_buf;
field* out_buf = (LOG_N % 2 ? output_buf : temp_buf);
int thread_idx = threadIdx.x + blockIdx.x * blockDim.x;
for (size_t i = 0; i < LOG_N; i++)
{
int idx = thread_idx;
while (idx < N)
{
idx += blockDim.x * gridDim.x;
}
}
}*/
|
12,955 | #include "includes.h"
// Per-pixel update f = div_partial(z1, z2) - g * invlambda on an nx x ny grid
// (one thread per pixel, 2D launch). This "dummy" variant keeps only the
// current-cell divergence contributions; the left/down neighbor terms are
// intentionally disabled (they appear commented out in the original).
__global__ void fupdate_dummy(float *f, float *z1, float *z2, float *g, float invlambda, int nx, int ny)
{
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= nx || py >= ny) return;  // outside the image
    const int idx = px + py * nx;
    // Partial divergence of (z1, z2): current-cell terms only, skipped on the
    // last column/row respectively.
    float div = 0.0f;
    const float z1Here = z1[idx];
    const float z2Here = z2[idx];
    // (left neighbor term z1[idx-1] disabled in this variant)
    if (px != nx - 1) div += z1Here;
    // (down neighbor term z2[idx-nx] disabled in this variant)
    if (py != ny - 1) div += z2Here;
    f[idx] = div - g[idx] * invlambda;
}
12,956 | /*
* a simple test
*/
// Block-shared 32x32 scratch tiles used by the kernels below (file-scope
// __shared__ declarations; contents are uninitialized at kernel entry).
__shared__ float data1[32][32];
__shared__ float data2[32][32];
__shared__ float data3[32][32];
// Column combine: for every row r of the 32x32 tiles,
// d1[r][idx0] = d2[r][idx1] + d3[r][idx2].
__device__ void mult(float d1[32][32],
                     float d2[32][32],
                     float d3[32][32],
                     int idx0,
                     int idx1,
                     int idx2) {
    for (int row = 0; row < 32; row++) {
        d1[row][idx0] = d2[row][idx1] + d3[row][idx2];
    }
}
// Sweeps mult() over shared tile data1 with all three column indices moving
// together through start..end-1.
__global__ void doit(int start, int end) {
    for (int col = start; col < end; col++) {
        mult(data1, data1, data1, col, col, col);
    }
}
// Column combine, variant 1 (currently an exact duplicate of mult()):
// d1[r][idx0] = d2[r][idx1] + d3[r][idx2] for every row r.
__device__ void mult1(float d1[32][32],
                      float d2[32][32],
                      float d3[32][32],
                      int idx0,
                      int idx1,
                      int idx2) {
    for (int row = 0; row < 32; row++) {
        d1[row][idx0] = d2[row][idx1] + d3[row][idx2];
    }
}
// Same column sweep as doit(), exercising the mult1 variant.
// FIX: the body called mult() (copy-paste from doit), leaving mult1 dead
// code; the parallel naming (mult/doit vs mult1/doit1) indicates mult1 was
// intended. mult1 is an exact copy of mult, so observable behavior is
// unchanged.
__global__ void doit1(int start, int end) {
    int i;
    int id0 = start;
    int id1 = start;
    int id2 = start;
    for (i = start; i < end; i++) {
        mult1(data1, data1, data1, id0, id1, id2);
        id0 += 1;
        id1 += 1;
        id2 += 1;
    }
}
|
12,957 | #include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
// Stub: per the name this should convert x to a binary representation, but it
// currently always returns the placeholder value 5 -- TODO implement.
__device__ int intToBin(int x){
return 5;
}
// For an m x n matrix launched as a single m x n thread block: border
// elements (row 0, row m-1, column 0, column n-1) are copied from A to B
// unchanged; interior elements pass through intToBin().
__global__ void add(int *A, int *B, int m, int n) {
    const int r = threadIdx.y;
    const int c = threadIdx.x;
    const int pos = r * n + c;
    const bool onBorder = (r % (m - 1) == 0) || (c % (n - 1) == 0);
    B[pos] = onBorder ? A[pos] : intToBin(A[pos]);
}
// Reads an m x n matrix, runs the border-preserving add kernel, prints B.
// FIXES: a and b hold at most 100 ints but m*n was never validated (stack
// overflow on larger input); m or n of 1 would make the kernel divide by zero
// in its % (m-1) / % (n-1) tests; scanf results were unchecked.
int main(){
    int a[100], b[100], n, m;
    printf("Enter m: ");
    if (scanf("%d",&m) != 1) return 1;
    printf("Enter n: ");
    if (scanf("%d",&n) != 1) return 1;
    // Guard the fixed-size buffers and the kernel's (m-1)/(n-1) modulus.
    if (m < 2 || n < 2 || m * n > 100) {
        fprintf(stderr, "m and n must be >= 2 with m*n <= 100\n");
        return 1;
    }
    printf("Enter Matrix:\n");
    for(int i=0;i<n*m;i++)
        if (scanf("%d",&a[i]) != 1) return 1;
    int *d_a,*d_b;
    int size = sizeof(int)*m*n;
    cudaMalloc((void**)&d_a,size);
    cudaMalloc((void**)&d_b,size);
    cudaMemcpy(d_a,&a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,&b,size,cudaMemcpyHostToDevice);
    // Single block, one thread per element (n along x, m along y).
    dim3 block(m, n, 1);
    add<<<1, block>>>(d_a, d_b, m, n);
    cudaMemcpy(&b,d_b,size,cudaMemcpyDeviceToHost);
    for(int i=0;i<n*m;i++){
        if (i % n == 0)
            printf("\n");
        printf("%d ",b[i]);
    }
    printf("\n");
    cudaFree(d_a);
    cudaFree(d_b);
}
|
12,958 | #include<iostream>
#include<string>
#include<cstring>
#include<ctime>
#include<cstdlib>
#include<sys/time.h>
#include<stdio.h>
#include<iomanip>
/* we need these includes for CUDA's random number stuff */
#include<curand.h>
#include<curand_kernel.h>
using namespace std;
#define MAX 26
//int a[1000]; //array of all possible password characters
int b[1000]; //array of attempted password cracks
unsigned long long tries = 0;
char alphabet[] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' };
size_t result = 1000 * sizeof(float);
int *a = (int *) malloc(result);
// Brute-force search: repeatedly increments the global attempt array b like a
// base-26 odometer until it matches the target password in global array a.
// Progress dots are printed every ~2^25 attempts and the line is cleared
// every ~2^27; the global `tries` counts attempts.
// NOTE(review): the carry threshold `b[i] >= 26 + alphabet[i]` implies digit i
// is expected to live in [alphabet[i], alphabet[i]+26), yet b starts all-zero
// (below every threshold) -- confirm the intended digit encoding; the search
// still terminates because a[] values ('a'..'z') are reachable on the way up.
void serial_passwordCrack(int length){
bool cracked = false;
do{
// Odometer step: bump the lowest digit, propagate carries upward.
b[0]++;
for(int i =0; i<length; i++){
if (b[i] >= 26 + alphabet[i]){
b[i] -= 26;
b[i+1]++;
}else break;
}
// Compare the current attempt against the target password.
cracked=true;
for(int k=0; k<length; k++)
if(b[k]!=a[k]){
cracked=false;
break;
}
// Lightweight progress indicator keyed off the attempt counter.
if( (tries & 0x7ffffff) == 0 )
cout << "\r \r ";
else if( (tries & 0x1ffffff) == 0 )
cout << ".";
tries++;
}while(cracked==false);
}
// GPU brute-force attempt: every thread runs the same odometer search over a
// private attempt array newB; the first thread to match records its id in
// shared idT and writes its guess to d_output.
// NOTE(review): several serious defects, flagged rather than silently fixed:
//  * newB is a local array that is NEVER initialized, yet newB[0]++ reads it
//    immediately -- undefined behavior; the search starts from garbage.
//  * nIter / idT / totalAttempt are __shared__ but read and written with no
//    synchronization or atomics; nIter is reset to 0 by thread 0 on EVERY
//    loop iteration, so the "someone else finished" check is unreliable.
//  * every thread explores the identical sequence, so the parallelism adds
//    no coverage. Confirm intent before relying on this kernel.
__global__ void parallel_passwordCrack(int length,int*d_output,int *a)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
bool cracked = false;
char alphabetTable[] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' };
int newB[1000]; // NOTE(review): uninitialized -- see header comment
__shared__ int nIter;
__shared__ int idT;
__shared__ long totalAttempt;
do{
if(idx == 0){
nIter = 0;
totalAttempt = 0;
}
// Odometer step (same scheme as serial_passwordCrack).
newB[0]++;
for(int i =0; i<length; i++){
if (newB[i] >= 26 + alphabetTable[i]){
newB[i] -= 26;
newB[i+1]++;
}else break;
}
// Compare attempt against the target in a[].
cracked=true;
for(int k=0; k<length; k++)
{
if(newB[k]!=a[k]){
cracked=false;
break;
}else
{
cracked = true;
}
}
// First matching thread claims idT; others bail when nIter is set.
if(cracked && nIter == 0){
idT = idx;
break;
}
else if(nIter){
break;
}
totalAttempt++;
}while(!cracked || !nIter);
// Winning thread publishes its guess.
if(idx == idT){
for(int i = 0; i< length; i++){
d_output[i] = newB[i];
}
}
}
// Returns the current wall-clock time in microseconds since the Unix epoch.
long long start_timer() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (long long)now.tv_sec * 1000000 + now.tv_usec;
}
// Prints the time elapsed since the specified time
// Prints the elapsed wall-clock time since start_time (in seconds, 5
// significant digits) labeled with `name`, and returns the elapsed
// microseconds.
long long stop_timer(long long start_time, std::string name) {
    struct timeval now;
    gettimeofday(&now, NULL);
    const long long end_time = (long long)now.tv_sec * 1000000 + now.tv_usec;
    const long long elapsed = end_time - start_time;
    std::cout << std::setprecision(5);
    std::cout << name << ": " << ((float) elapsed) / (1000 * 1000) << " sec\n";
    return elapsed;
}
// Driver: generates a random lowercase password of the requested length,
// cracks it serially on the CPU, then attempts the GPU crack and prints both
// results with timings.
// NOTE(review): the "Parallel Run Time" brackets only the asynchronous kernel
// launch -- there is no sync before stop_timer, so it measures launch
// overhead, not the crack. Also d_input is first malloc'd on the host and
// then overwritten by cudaMalloc (host block leaked), and the GPU kernel has
// the defects flagged on parallel_passwordCrack above.
int main()
{
int length; //length of password
int random; //random password to be generated
int *d_input = (int *) malloc(result); // NOTE(review): leaked -- see above
cout << "Enter a password length: ";
cin >> length;
int *h_gpu_result = (int*)malloc(1000*sizeof(int));
srand(time(NULL));
//generating random password
cout << "Random generated password: " << endl;
for (int i =0; i<length; i++){
random = alphabet[(rand()%26)];
a[i] = random; //adding random password to array
cout << char(a[i]);
}cout << "\n" << endl;
// CPU baseline crack with wall-clock timing.
long long serial_start_time = start_timer();
cout << "Serial Password Cracked: " << endl;
serial_passwordCrack(length);
cout << "\n";
long long serial_end_time = stop_timer(serial_start_time, "\nSerial Run Time");
for(int i=0; i<length; i++){
cout << char(b[i]);
}cout << "\nNumber of tries: " << tries << endl;
//long long serial_end_time = stop_timer(serial_start_time, "\nSerial Run Time");
//declare GPU memory pointers
int *d_output;
//allocate GPU memory
cudaMalloc((void **) &d_output,1000*sizeof(int));
cudaMalloc((void **) &d_input, result); // overwrites the host malloc above
cudaError_t err = cudaSuccess;
//transfer the array to the GPU
err = cudaMemcpy(d_input, a, result,cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy d_S from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//launch the kernel
int threads =length;
long long parallel_start_time = start_timer();
parallel_passwordCrack<<<1,threads>>>(length,d_output,d_input);
// NOTE(review): no cudaDeviceSynchronize() here -- the timing below only
// measures the launch; the blocking cudaMemcpy afterwards is what waits.
long long parallel_end_time = stop_timer(parallel_start_time, "\nParallel Run Time");
//copy back the result array to the CPU
cudaMemcpy(h_gpu_result,d_output,1000*sizeof(int),cudaMemcpyDeviceToHost);
cout << "\nParallel Password Cracked: " << endl;
for(int i=0; i<length; i++){
printf("%c\n", char(h_gpu_result[i]));
}
printf("\n");
cudaFree(d_output);
cudaFree(d_input);
free(h_gpu_result);
return 0;
}
|
12,959 | #include "includes.h"
// Each of the 256 threads reverses its own contiguous chunk of A into the
// corresponding chunk of B (chunk boundaries computed from ArraySize).
__global__ void reverseArray(int *A, int *B) {
    int threadID = threadIdx.x;
    int start = (threadID * ArraySize) / 256;
    int end = ( ( (threadID + 1 ) * ArraySize) / 256) - 1;
    // BUG FIX: the loop previously ran while (end > 0), so every thread
    // walked all the way down to index 1 — threads overwrote each other's
    // chunks and B[0] was never written. Stop at this thread's own chunk start.
    int chunkStart = start;
    while(end >= chunkStart)
    {
        B[end] = A[start];
        end--;
        start++;
    }
}
12,960 | /* Justinas Linkevicius
* IFF-3/2
* L1c
*
1. Kokia tvarka startuoja procesai?
**** tokia, kokia užrašyti
2. Kokia tvarka vykdomi procesai?
**** tokia, kokia startuoja
3. Kiek iteracijų iš eilės padaro vienas procesas?
**** vieną pilna
4. Kokia tvarka atspausdinami to paties masyvo duomenys?
**** tokia, kokia surašyti.
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <string>
#include <iostream>
#include <fstream>
#include <vector>
#include <stdio.h>
using namespace std;
#define INPUTFILE "LinkeviciusJ.txt"
#define CUDA_THREADS 10
#define MAX_DATA_PER_THREADS 10
/* Struktura saugoti vienai duomenu eilutei */
/* Holds one parsed data row: a short text field plus an int and a double. */
struct FileData
{
    char stringField[255];
    int intField;
    double doubleField;
    // Default: an empty padding record.
    FileData()
    {
        stringField[0] = '\0';
        intField = 0;
        doubleField = 0.0;
    }
    FileData(string a, int b, double c)
    {
        // BUG FIX: strncpy does not null-terminate when the source is at
        // least as long as the count; copy at most 254 chars and terminate.
        strncpy(stringField, a.c_str(), 254);
        stringField[254] = '\0';
        intField = b;
        doubleField = c;
    }
};
/* Struktura saugoti visiems duomenims */
/* Fixed-size batch of rows handled by one CUDA thread (copied to the device
 * by value, hence the plain array rather than a dynamic container). */
struct ThreadData
{
    FileData array[ MAX_DATA_PER_THREADS ];
};
// lauku pavadinimai
string stringFieldName, intFieldName, doubleFieldName;
// nuskaito pradinius duomenis
// Reads INPUTFILE: three column headers, the element count, then the rows.
// Rows are split into CUDA_THREADS consecutive groups of threadDataSize
// entries; positions past the real data are padded with empty records.
void readData(ThreadData* threadDataArrays, int & threadDataSize, int & dataElementsCount)
{
    ifstream input(INPUTFILE);
    input >> stringFieldName >> intFieldName >> doubleFieldName;
    input >> dataElementsCount;
    threadDataSize = ceil((double)dataElementsCount / CUDA_THREADS);
    int line = 0;
    for (int block = 0; block < CUDA_THREADS; block++)
    {
        for (int slot = 0; slot < threadDataSize; slot++)
        {
            string textValue;
            int intValue;
            double doubleValue;
            input >> textValue >> intValue >> doubleValue;
            // Past the real data: fill the slot with an empty record.
            if (line < dataElementsCount)
                threadDataArrays[block].array[slot] = FileData(textValue, intValue, doubleValue);
            else
                threadDataArrays[block].array[slot] = FileData();
            line++;
        }
    }
    input.close();
}
// spausdina pradinius duomenis
// Prints the loaded data grouped per thread-array, skipping padding records,
// and stops after dataElementsCount real rows.
void writeData(ThreadData* threadDataArrays, int & threadDataSize, int & dataElementsCount)
{
    int line = 0;
    cout << stringFieldName << "\t" << intFieldName << "\t" << doubleFieldName << "\r\n";
    for (int i = 0; i < CUDA_THREADS; i++)
    {
        cout << endl << "**** Array" << i << " ****" << endl;
        for (int j = 0; j < threadDataSize; j++)
        {
            line++;
            // BUG FIX: `stringField != ""` compared two pointers (array decay
            // vs. a string literal), which is always true; test the contents.
            if (threadDataArrays[i].array[j].stringField[0] != '\0')
            {
                cout.precision(2);
                cout << j << ") " << threadDataArrays[i].array[j].stringField << "\t" << threadDataArrays[i].array[j].intField << "\t" << fixed << threadDataArrays[i].array[j].doubleField << "\r\n";
            }
            if (line == dataElementsCount)
                break;
        }
    }
    cout << endl;
}
// lygiagrecioji programos dalis
// perduodamas duomenu masyvas ir kiekvieno proceso apdorojamu elementu kiekis
// Device-side print: each thread dumps the *size entries of its own
// ThreadData array (one thread per array; launched as <<<1, threadsCount>>>).
__global__ void printKernel(ThreadData *threadData, int* size)
{
    const int tid = threadIdx.x;
    for (int item = 0; item < *size; item++)
    {
        FileData &rec = threadData[tid].array[item];
        printf("process_%d: %d\t%s\t%d\t%.2f\n", tid, item, rec.stringField, rec.intField, rec.doubleField);
    }
}
// Helper function for using CUDA to add vectors in parallel.
// Copies the host data to the GPU and launches printKernel with one thread
// per ThreadData array; returns the first CUDA error encountered. All exit
// paths funnel through the Error label so both device buffers are freed.
// NOTE(review): the std::cin.get() before returning looks like a
// keep-console-open pause for IDE runs — confirm before reusing elsewhere.
cudaError_t printWithCuda(ThreadData* hostData, int threadsCount, int threadDataSize)
{
    cudaError_t cudaStatus;
    ThreadData* devData = 0;
    int* devSize = 0;
    // choose gpu
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers (one ThreadData per launched thread + the size scalar)
    cudaStatus = cudaMalloc((void**)&devData, threadsCount * sizeof(ThreadData));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc(&devSize, sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(devData, hostData, threadsCount * sizeof(ThreadData), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(devSize, &threadDataSize, sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    printf("Starting CUDA threads!\n");
    printKernel <<< 1, threadsCount >>> (devData, devSize);
    // Check for any errors launching the kernel (bad config etc.)
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch (also flushes device printf).
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    printf("End of CUDA threads!\n");
Error:
    cudaFree(devData);
    cudaFree(devSize);
    std::cin.get();
    return cudaStatus;
}
// Loads the input file on the host, echoes it, then prints the same data
// from the GPU via printWithCuda.
int main()
{
    ThreadData threadDataArrays[CUDA_THREADS];
    int dataElementsCount;
    int threadDataSize;
    readData(threadDataArrays, threadDataSize, dataElementsCount);
    writeData(threadDataArrays, threadDataSize, dataElementsCount);
    // Start CUDA
    const cudaError_t cudaStatus = printWithCuda(threadDataArrays, CUDA_THREADS, threadDataSize);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    // cudaDeviceReset must be called before exiting so profiling and tracing
    // tools such as Nsight and Visual Profiler show complete traces.
    if (cudaDeviceReset() != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    std::cin.get();
    return 0;
}
12,961 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
/* -------- KERNEL -------- */
// In-place tree reduction over global memory: each block sums its segment of
// d_in and thread 0 writes the block's partial sum to d_out[blockIdx.x].
// Note: this mutates d_in.
__global__ void reduce_kernel(unsigned int * d_out, unsigned int * d_in, unsigned int SIZE)
{
    // position and threadId
    unsigned int pos = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int tid = threadIdx.x;
    // BUG FIX: the old halving loop started at blockDim.x / 2, which silently
    // dropped the last element whenever blockDim.x was odd (the wrapper's
    // final <<<1, SIZE>>> launch can have any size). Track the count of
    // still-active elements and round the split point up instead.
    for (unsigned int active = blockDim.x; active > 1; )
    {
        unsigned int s = (active + 1) / 2;   // round up so no element is orphaned
        if (tid < active - s)                // partner tid+s stays inside the active range
        {
            if (pos + s < SIZE)              // handling out of bounds
            {
                d_in[pos] = d_in[pos] + d_in[pos + s];
            }
        }
        __syncthreads();
        active = s;
    }
    // only thread 0 of each block writes that block's partial sum
    if ((tid==0) && (pos < SIZE))
    {
        d_out[blockIdx.x] = d_in[pos];
    }
}
/* -------- KERNEL -------- */
// Benchmark variant of reduce_kernel: the loop's initial stride goes through
// a volatile local, presumably to stop the compiler from constant-folding
// blockDim.x / 2 — confirm intent with the timing harness below.
// NOTE(review): like the plain kernel, the halving loop assumes blockDim.x is
// a power of two; odd block sizes would drop the last element.
__global__ void reduce_kernel_volatile(unsigned int * d_out, unsigned int * d_in, unsigned int SIZE)
{
    // position and threadId
    unsigned int pos = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int tid = threadIdx.x;
    // volatile forces the initial stride to be materialized at runtime
    volatile unsigned int a = blockDim.x / 2;
    // do reduction in global memory
    for (unsigned int s = a; s>0; s>>=1)
    {
        if (tid < s)
        {
            if (pos+s < SIZE) // Handling out of bounds
            {
                d_in[pos] = d_in[pos] + d_in[pos+s];
            }
        }
        __syncthreads();
    }
    // only thread 0 of each block writes that block's partial sum
    if ((tid==0) && (pos < SIZE))
    {
        d_out[blockIdx.x] = d_in[pos];
    }
}
/* -------- KERNEL WRAPPER -------- */
// Host-side driver: repeatedly reduces d_in (SIZE device elements) until one
// block's worth of partials remains, then writes the final sum to d_out[0].
// The input is copied to a scratch buffer first, so the caller's d_in is
// preserved. NOTE(review): the final <<<1, SIZE>>> launch assumes the
// remaining SIZE fits in one block (<= 1024 threads here) — confirm for
// other NUM_THREADS configurations.
void reduce(unsigned int * d_out, unsigned int * d_in, unsigned int SIZE, unsigned int NUM_THREADS)
{
    // Setting up blocks and intermediate result holder
    unsigned int NUM_BLOCKS = SIZE/NUM_THREADS + ((SIZE % NUM_THREADS)?1:0); // ceil(SIZE / NUM_THREADS)
    unsigned int * d_intermediate_in;
    unsigned int * d_intermediate_out;
    cudaMalloc(&d_intermediate_in, sizeof(unsigned int)*SIZE);
    cudaMalloc(&d_intermediate_out, sizeof(unsigned int)*NUM_BLOCKS);
    cudaMemcpy(d_intermediate_in, d_in, sizeof(unsigned int)*SIZE, cudaMemcpyDeviceToDevice);
    // Recursively solving, will run approximately log base NUM_THREADS times.
    do
    {
        reduce_kernel<<<NUM_BLOCKS, NUM_THREADS>>>(d_intermediate_out, d_intermediate_in, SIZE);
        // Updating SIZE: one partial sum per block remains
        SIZE = NUM_BLOCKS;
        // Updating input to intermediate
        cudaMemcpy(d_intermediate_in, d_intermediate_out, sizeof(unsigned int)*NUM_BLOCKS, cudaMemcpyDeviceToDevice);
        // Updating NUM_BLOCKS to reflect how many blocks we now want to compute on
        NUM_BLOCKS = SIZE/NUM_THREADS + ((SIZE % NUM_THREADS)?1:0);
    }
    while(SIZE > NUM_THREADS); // if it is too small, compute rest.
    // Computing rest: one block reduces the final <= NUM_THREADS partials
    reduce_kernel<<<1, SIZE>>>(d_out, d_intermediate_out, SIZE);
    cudaFree(d_intermediate_in);
    cudaFree(d_intermediate_out);
}
/* -------- MAIN -------- */
// Benchmarks the reduction for sizes 2^0 .. 2^29 (each averaged over `times`
// runs) and appends the per-round average milliseconds to par_reduce.csv.
int main(int argc, char **argv)
{
    std::ofstream myfile;
    myfile.open ("par_reduce.csv");
    const unsigned int times = 10;
    for (unsigned int rounds = 0; rounds<30; rounds++)
    {
        unsigned int NUM_THREADS = 1<<10;
        // Input of SIZE ones, so the expected reduction result equals SIZE.
        unsigned int SIZE = 1<<rounds;
        unsigned int * d_in;
        unsigned int * d_out;
        cudaMalloc(&d_in, sizeof(unsigned int)*SIZE);
        cudaMalloc(&d_out, sizeof(unsigned int)*SIZE);
        unsigned int * h_in = (unsigned int *)malloc(SIZE*sizeof(unsigned int));
        for (unsigned int i = 0; i < SIZE; i++) h_in[i] = 1;
        cudaMemcpy(d_in, h_in, sizeof(unsigned int)*SIZE, cudaMemcpyHostToDevice);
        // timing setup
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        for (unsigned int i = 0; i < times; i++)
        {
            reduce(d_out, d_in, SIZE, NUM_THREADS);
        }
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        float elapsedTime;
        cudaEventElapsedTime(&elapsedTime, start, stop);
        elapsedTime = elapsedTime / ((float) times);
        unsigned int h_out;
        cudaMemcpy(&h_out, d_out, sizeof(unsigned int), cudaMemcpyDeviceToHost);
        printf("%d \n", h_out);
        myfile << elapsedTime << ",";
        // BUG FIX: h_in, d_in, d_out and both events were leaked every round
        // (30 rounds, buffers up to 2 GB) — release them before the next size.
        free(h_in);
        cudaFree(d_in);
        cudaFree(d_out);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }
    myfile.close();
    return 0;
}
|
12,962 | #include <iostream>
#include <random>
#include <cuda_runtime_api.h>
#include <sys/time.h>
#include <vector>
// A colored 3-D point: xyz position plus rgba color components, all floats.
// Default construction leaves every member uninitialized (intentional for
// bulk malloc'd buffers).
struct RGBPoint {
    float x;   // position
    float y;
    float z;
    float r;   // color
    float g;
    float b;
    float a;   // alpha; also used by the host below as a "was written" marker
    RGBPoint() {}
    RGBPoint(float x, float y, float z, float r, float g, float b, float a) : x(x), y(y), z(z), r(r), g(g), b(b), a(a) {}
};
// Writes marker values into every 120th point of the row*col image.
// Launched with a 2-D grid: x indexes rows, y indexes columns.
__global__ void TestKernel(RGBPoint *d_img_RGB, const int row, const int col) {
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r < row && c < col) {
        const int index = r * col + c;
        if (index % 120 == 0) {
            RGBPoint &p = d_img_RGB[index];
            p.a = 3.0f;
            p.r = 4.0f;
            p.g = 5.0f;
            p.b = 6.0f;
            p.x = 7.0f;
            p.y = 8.0f;
            p.z = 9.0f;
        }
    }
}
// Fills every 120th pixel on the GPU, copies the image back, then collects
// the touched pixels on the host; reports both phase timings.
int main() {
    struct timeval start, end;
    float t1, t2;
    const int row = 640;
    const int col = 480;
    const size_t size_RGB = row * col * sizeof(RGBPoint);
    RGBPoint *img_RGB = (RGBPoint*)malloc(size_RGB);
    RGBPoint *d_img_RGB;
    cudaMalloc(&d_img_RGB, size_RGB);
    // BUG FIX: the kernel only writes every 120th element, but the host loop
    // below reads .a of every element — zero the buffer so the untouched
    // entries are well-defined instead of uninitialized device memory.
    cudaMemset(d_img_RGB, 0, size_RGB);
    dim3 block_size(4, 32);
    dim3 grid_size((row - 1) / block_size.x + 1, (col - 1) / block_size.y + 1);
    gettimeofday(&start, nullptr);
    TestKernel<<<grid_size, block_size>>>(d_img_RGB, row, col);
    // blocking copy also synchronizes with the kernel on the default stream
    cudaMemcpy(img_RGB, d_img_RGB, size_RGB, cudaMemcpyDeviceToHost);
    gettimeofday(&end, nullptr);
    t1 = ((end.tv_sec - start.tv_sec) * 1000000.0f + (end.tv_usec - start.tv_usec)) / 1000.0f;
    std::vector<RGBPoint> result_RGB;
    gettimeofday(&start, nullptr);
    for (int h = 0; h < row; h++) {
        for (int w = 0; w < col; w++) {
            int index = h * col + w;
            if (img_RGB[index].a > 0) {
                result_RGB.push_back(img_RGB[index]);
            }
        }
    }
    gettimeofday(&end, nullptr);
    t2 = ((end.tv_sec - start.tv_sec) * 1000000.0f + (end.tv_usec - start.tv_usec)) / 1000.0f;
    free(img_RGB);
    cudaFree(d_img_RGB);
    std::cout << "kernel and data transfer time: " << t1 << " ms" << std::endl;
    std::cout << "postprocess time: " << t2 << " ms" << std::endl;
    return 0;
}
|
12,963 | #include <math.h>
#include <stdio.h>
#define N 2048*2048 // Number of elements in each vector
#define rowcol2idx(num, r, c) ((r)*(num)+(c))
/*
* Optimize this already-accelerated codebase. Work iteratively,
* and use nvprof to support your work.
*
* Aim to profile `saxpy` (without modifying `N`) running under
* 20us.
*
* Some bugs have been placed in this codebase for your edification.
*/
// Computes c[tid] = 2*a[tid] + b[tid] for the flat index derived from this
// thread's 2-D position, shifted by `base` so several launches can tile N.
__global__ void saxpy(int *a, int *b, int *c, int base)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int width = gridDim.x * blockDim.x;     // elements per logical row
    const int tid = row * width + col + base;
    if (tid < N)
        c[tid] = (a[tid] << 1) + b[tid];          // <<1 == multiply by 2
}
/*
__global__ void init_array(int *a, int *b, int *c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (; tid < N; tid += stride) {
a[tid] = 2;
b[tid] = 1;
c[tid] = 0;
}
}
*/
// Grid-stride fill: sets all N elements of a to `target`; `stride` is the
// total number of launched threads.
__global__ void init_array(int *a, int target, int stride)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < N) {
        a[i] = target;
        i += stride;
    }
}
// Drives the saxpy benchmark on managed memory: prefetch, init on device,
// then three tiled saxpy launches that together cover all N elements.
int main()
{
    int *a, *b, *c;
    int size = N*sizeof(int); // The total number of bytes per vector
    cudaMallocManaged(&a, size);
    cudaMallocManaged(&b, size);
    cudaMallocManaged(&c, size);
    int deviceId;
    cudaDeviceProp props;
    cudaGetDevice(&deviceId);
    cudaGetDeviceProperties(&props, deviceId);
    int multiProcessorCount = props.multiProcessorCount;
    size_t threadsPerBlock = 1024;
    // round block count up to a multiple of the SM count
    size_t numberOfBlocks = ((N>>10)/multiProcessorCount+1)*multiProcessorCount;
    // i first prefetch pages to the device to avoid page faults during init
    cudaMemPrefetchAsync(a, size, deviceId);
    cudaMemPrefetchAsync(b, size, deviceId);
    init_array<<<numberOfBlocks, threadsPerBlock>>>(a,2,threadsPerBlock*numberOfBlocks);
    init_array<<<numberOfBlocks, threadsPerBlock>>>(b,1,threadsPerBlock*numberOfBlocks);
    // we have no need to initialize array c, because of default value is 0.
    //init_array<<<numberOfBlocks, threadsPerBlock>>>(c,0,threadsPerBlock*numberOfBlocks);
    cudaDeviceSynchronize();
    //printf("sm numer = %d\n", multiProcessorCount);
    cudaMemPrefetchAsync(c, size, deviceId);
    //saxpy <<<numberOfBlocks, threadsPerBlock>>>(a,b,c,threadsPerBlock*numberOfBlocks);
    // Three 1280x1280-thread launches tile N = 4,194,304 elements:
    // 3 * 1600*1024 = 4,915,200 >= N, with the tid < N guard in the kernel.
    int len = 40;
    dim3 threads_per_block(32, 32, 1);
    dim3 number_of_blocks(len, len, 1);
    saxpy<<<number_of_blocks, threads_per_block>>>(a, b, c, 0);
    saxpy<<<number_of_blocks, threads_per_block>>>(a, b, c, 1600*1024);
    saxpy<<<number_of_blocks, threads_per_block>>>(a, b, c, 3200*1024);
    cudaDeviceSynchronize();
    // NOTE(review): host reads c[] right after this async prefetch without a
    // sync — correct for managed memory (on-demand migration), but confirm
    // the prefetch actually helps here.
    cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
    // Print out the first and last 5 values of c for a quality check
    for( int i = 0; i < 5; ++i )
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");
    for( int i = N-5; i < N; ++i )
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
}
|
12,964 | #include <stdio.h>
#include <cuda_runtime.h>
// Teaching kernel: shows per-thread automatic variables vs. a block-shared
// variable, and the data race on `b` — only thread 0 writes b, so without
// the (deliberately commented-out) __syncthreads() other threads may print
// b before or after that write.
__global__ void kernel() {
    double a = 2.71828; // register variable, automatic (private per thread)
    double c[100]; // automatic per-thread array — declared but never used
    __shared__ double b; // shared variable: one copy per block
    int tx = threadIdx.x; // register variable
    if (tx == 0) {
        b = 3.1415926f; // note: float literal stored into a double
    }
    //__syncthreads(); // run with/without this line
    printf("id = %d, a=%7.5f, b=%9.7f\n", tx, a, b);
}
// Launches the demo kernel with one block of 8 threads.
int main() {
    kernel<<<1,8>>>();
    // BUG FIX: the launch was never checked — a bad configuration or missing
    // device would previously exit 0 silently with no output.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // cudaDeviceReset synchronizes and flushes the device printf buffer.
    cudaDeviceReset();
    return 0;
}
|
// Element-wise C = A + B; one element per thread, guarded against the tail.
template<typename T>
__device__ void vectorAddVector(const T* A, const T* B, T* C, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= length) return;
    C[index] = A[index] + B[index];
}
// Element-wise C = A - B; one element per thread, guarded against the tail.
template<typename T>
__device__ void vectorSubVector(const T* A, const T* B, T* C, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= length) return;
    C[index] = A[index] - B[index];
}
// Element-wise C = A * B; one element per thread, guarded against the tail.
template<typename T>
__device__ void vectorTimesVector(const T* A, const T* B, T* C, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= length) return;
    C[index] = A[index] * B[index];
}
// Element-wise C = A / B; one element per thread, guarded against the tail.
// No zero-divisor check, matching the other ops in this family.
template<typename T>
__device__ void vectorDivVector(const T* A, const T* B, T* C, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= length) return;
    C[index] = A[index] / B[index];
}
12,966 | // From CUDA for Engineering
// dist_v2/kernel.cu
#include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
#include <iomanip>
#define TPB 32
#define N 256000
#define M 5 // number of times to do cudaMemcpy
#define DEBUG 0
__device__
// 1-D distance between two points on a line.
float distance(float x1, float x2)
{
    // sqrt((x2-x1)^2) is just the absolute difference; fabsf avoids the
    // unsuffixed sqrt(...), which promoted the whole computation to double.
    return fabsf(x2 - x1);
}
__global__
// One thread per element: d_out[i] = |d_in[i] - ref|, tail-guarded by len.
void distanceKernel(float *d_out, float *d_in, float ref, int len)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        d_out[idx] = distance(d_in[idx], ref);
    }
}
// Computes out[i] = |in[i] - ref| for len floats on the GPU, timing the
// host->device transfer (repeated M times for a stable number) and the
// kernel separately.
// NOTE(review): clock_gettime needs <time.h>; it is presumably included
// transitively by the headers above — confirm on stricter toolchains.
void distanceArray(float *out, float *in, float ref, int len)
{
    // alloc cuda memory
    float *d_in = 0;
    float *d_out = 0;
    cudaMalloc(&d_in, len * sizeof(float));
    cudaMalloc(&d_out, len * sizeof(float));
    // memcpy to device, repeated M times purely for the timing measurement
    struct timespec t0 = {0,0};
    struct timespec t1 = {0,0};
    clock_gettime(CLOCK_REALTIME, &t0);
    for (int i = 0; i < M; i++) {
        cudaMemcpy(d_in, in, len * sizeof(float), cudaMemcpyHostToDevice);
    }
    clock_gettime(CLOCK_REALTIME, &t1);
    std::cout << "Data transfer time (ms) = " << (t1.tv_sec-t0.tv_sec)*1e3 + (t1.tv_nsec-t0.tv_nsec)/1e6 << "\n";
    // launch and time the kernel; the sync makes the wall-clock meaningful
    clock_gettime(CLOCK_REALTIME, &t0);
    distanceKernel<<<(len+TPB-1)/TPB, TPB>>>(d_out, d_in, ref, len);
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_REALTIME, &t1);
    std::cout << "Kernel time (ms) = " << (t1.tv_sec-t0.tv_sec)*1e3 + (t1.tv_nsec-t0.tv_nsec)/1e6 << "\n";
    // memcpy from device (blocking, so results are ready on return)
    cudaMemcpy(out, d_out, len * sizeof(float), cudaMemcpyDeviceToHost);
    // free cuda memory
    cudaFree(d_in);
    cudaFree(d_out);
}
// Maps index i of an n-point grid onto the unit interval [0, 1].
float scale(int i, int n) {
    return static_cast<float>(i) / (n - 1);
}
// Fills a host array with N evenly spaced points in [0,1], runs the GPU
// distance computation against ref = 0.5, and optionally dumps every value.
int main()
{
    std::cout << "dist_v2_cuda\n";
    const float ref = 0.5f;
    // calloc keeps both buffers zero-initialized
    float *in  = (float*)calloc(N, sizeof(float));
    float *out = (float*)calloc(N, sizeof(float));
    for (int i = 0; i < N; ++i)
        in[i] = scale(i, N);
    distanceArray(out, in, ref, N);
#if DEBUG
    std::cout << std::fixed << std::setprecision(4);
    for (int i = 0; i < N; i++) {
        std::cout << "i = " << i << "\tin: " << in[i] << "\tout: " << out[i] << "\n";
    }
#endif
    free(in);
    free(out);
    return 0;
}
12,967 | #include<stdio.h>
#include<cuda_runtime.h>
#include<math.h>
#define SIZE 1024
#define TILE_WIDTH 16
float h_M[SIZE*SIZE],h_N[SIZE*SIZE],h_P[SIZE*SIZE];
// Tiled matrix multiply: d_P = d_M * d_N for SIZE x SIZE row-major matrices.
// Each block computes one TILE_WIDTH x TILE_WIDTH output tile, staging tiles
// of M and N through shared memory. Assumes SIZE is a multiple of TILE_WIDTH
// (1024 / 16 here), so no bounds checks are needed.
__global__ void multiplication_kernel(float *d_M,float *d_N,float *d_P)
{
    __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];  // tile of M for this block
    __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];  // tile of N for this block
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int col = TILE_WIDTH * bx + tx;  // output column owned by this thread
    int row = TILE_WIDTH * by + ty;  // output row owned by this thread
    float prod_value = 0;
    int m,k;
    // Walk the SIZE/TILE_WIDTH tile pairs along the shared dimension.
    for(m=0;m<SIZE/TILE_WIDTH;m++)
    {
        ds_M[ty][tx] = d_M[row*SIZE+(m*TILE_WIDTH+tx)];
        ds_N[ty][tx] = d_N[(m*TILE_WIDTH+ty)*SIZE+col];
        __syncthreads();  // whole tile loaded before anyone reads it
        for(k=0;k<TILE_WIDTH;k++)
            prod_value+=ds_M[ty][k]*ds_N[k][tx];
        __syncthreads();  // all reads done before the tile is overwritten
    }
    d_P[row*SIZE+col] = prod_value;
}
// Launches the tiled kernel: one TILE_WIDTH x TILE_WIDTH block per output tile.
void matrix_multiplication(float *d_M,float *d_N,float *d_P)
{
    const dim3 threads(TILE_WIDTH, TILE_WIDTH, 1);
    const dim3 blocks(SIZE / TILE_WIDTH, SIZE / TILE_WIDTH, 1);
    multiplication_kernel<<<blocks, threads>>>(d_M, d_N, d_P);
}
// Prints a SIZE x SIZE row-major matrix, one row per output line.
void display_matrix(float mat[])
{
    for(int r = 0; r < SIZE; r++)
    {
        for(int c = 0; c < SIZE; c++)
            printf("%f ", mat[r*SIZE + c]);
        printf("\n");
    }
}
// Fills two SIZE x SIZE matrices with random values, multiplies them on the
// GPU, and reports the kernel time measured with CUDA events.
int main()
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if(!deviceCount){
        fprintf(stderr,"No devices supporting cuda\n");
        exit(EXIT_FAILURE);
    }
    int deviceId = 0;
    cudaSetDevice(deviceId);
    const int ARRAY_BYTES = SIZE*SIZE*sizeof(float);
    float *d_M,*d_N,*d_P;
    cudaMalloc((void**)&d_M,ARRAY_BYTES);
    cudaMalloc((void**)&d_N,ARRAY_BYTES);
    cudaMalloc((void**)&d_P,ARRAY_BYTES);
    // fill the host matrices with random values in [0, 100]
    int i,j;
    for(i=0;i<SIZE;i++)
    {
        for(j=0;j<SIZE;j++)
        {
            h_M[i*SIZE+j] = rand()%101;
            h_N[i*SIZE+j] = rand()%101;
        }
    }
    cudaMemcpy(d_M,h_M,ARRAY_BYTES,cudaMemcpyHostToDevice);
    cudaMemcpy(d_N,h_N,ARRAY_BYTES,cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    matrix_multiplication(d_M,d_N,d_P);
    // BUG FIX: kernel launch errors were silently ignored.
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess){
        fprintf(stderr,"Kernel launch failed: %s\n",cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime,start,stop);
    cudaMemcpy(h_P,d_P,ARRAY_BYTES,cudaMemcpyDeviceToHost);
    /*
    printf("M is \n");
    display_matrix(h_M);
    printf("N is \n");
    display_matrix(h_N);
    printf("Product of M and N is \n");
    display_matrix(h_P);
    */
    printf("Elapsed time is %f\n",elapsedTime);
    // BUG FIX: the CUDA events were never destroyed.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_M);
    cudaFree(d_N);
    cudaFree(d_P);
    return 0;
}
12,968 | #include <cuda.h>
#include <stdio.h>
const int N = 1 << 20;
// Busy-work kernel for the streams demo: fills x with sqrt(pi^i) via a
// grid-stride loop (safe to call with n == 0 and a null pointer).
__global__ void kernel(float *x, int n){
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for(int i = tid; i < n; i += blockDim.x * gridDim.x) {
        // BUG FIX: sqrt(pow(3.14159, i)) evaluated entirely in double
        // precision only to store a float; use the float-suffixed versions.
        x[i] = sqrtf(powf(3.14159f, (float)i));
    }
}
// Streams demo: launches one worker kernel per stream, plus a dummy kernel
// on the legacy default stream each iteration (which synchronizes with all
// other streams — presumably to demonstrate how the default stream blocks
// concurrency). Reports the total GPU time.
int main(int argc, char **argv){
    // number of streams comes from the command line
    if(argc != 2){
        fprintf(stderr, "run as ./prog numstreams\n");
        exit(EXIT_FAILURE);
    }
    const int num_streams = atoi(argv[1]);
    cudaStream_t streams[num_streams];
    float *data[num_streams];
    // create the streams and one device buffer per stream
    for(int i = 0; i < num_streams; i++){
        printf("creando stream %i\n", i);
        cudaStreamCreate(&streams[i]);
        cudaMalloc(&data[i], N * sizeof(float));
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    printf("ejecutando con %i streams....", num_streams); fflush(stdout);
    cudaEventRecord(start);
    for (int i = 0; i < num_streams; i++) {
        // launch one worker kernel per stream
        kernel<<<1, 64, 0, streams[i]>>>(data[i], N);
        // no-op launch (n == 0) on the legacy default stream
        kernel<<<1, 1>>>(0, 0);
    }
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    printf("ok\n"); fflush(stdout);
    cudaEventSynchronize(stop);
    float milliseconds = 0.0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Time GPU: %f\n", milliseconds );
    // BUG FIX: streams, events and device buffers were never released.
    for(int i = 0; i < num_streams; i++){
        cudaStreamDestroy(streams[i]);
        cudaFree(data[i]);
    }
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaDeviceReset();
    return 0;
}
|
12,969 |
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
/*
------------------------102- Introducction to parallel programing -------------------------------------------------------------------------------------------------------------------------------------------------------
Context (instruccion por turnos)
- Collection of data about process which allows processor to suspend or hold the execution of a process and restart the execution later.
- Memory addresses
- Program counter states
Thread (secuencia más pequeña de instrucciones programadas)
- Process
- threads (subprocess)
Parallel Process
- Tipos
- Paralelismo a nivel de tarea
- los nucleos realizan tareas distintas con datos distintos o los mismos
- Paralelismo a nivel de datos
- los nucleos realizan la misma tarea con diferentes datos
Paralelismo vs concurrencia.
- Concurrencia = realizacion de procesos en distintos tiempos(secuenciales) de milesimas de segundo que aparentan simultaneidad o paralelismo
- Paralelismo = distintos nucleos realizan tareas al mismo tiempo
------------------------104- Install -------------------------------------------------------------------------------------------------------------------------------------------------------
Revisar compatibilidad en wikipedia en ingles.
GPGPU-
windows + r (dxdiag) // visualiza las caracteristicas del PC
windows + r (cmd) //escribir (nvcc --version) para saber la version de CUDA instalado del PC
*/
//------------------------105 - Basic steps of a CUDA program----------------------------------------------------
/*RESUMEN
* - initization of data from CPU
* - transfer data from CPU context to GPU context
* - Kernel launc with needed grid/block size
* - Transfer results back to CPU context from CPU context
* - Reclaim the memory from both CPU and GPU
*
* - IMPORTANTE
* -Grid - Grid is a collection of all the threads launch for a kernel ( coleccion de todos los hilos lanzados para un kernel)
* - En el ejemplo hello CUDA world se tienen 20 subprocesos, los hilos en una cuadricula estan organizados
* en un grupo llamado bloques de hilos
* -Block - subconjunto de hilos dentro de un GRID que se pueden representar como un cubo(3d) mas pequeo que a su vez esta subdividido en pequeos cubos que representan a los hilos o threads
-GRID (cubo general en x, y z)
-BLOCK (subcubo dentro de GRID que forma un subconjunto de hilos)
-THREADS
kernel_ name <<<
number_of_blocks, // especifica cuantos bloques de hilos en la cuadracula en cada dimension
thread_per_block // especifica cuantos hilos en un bloque en cada dimension
>>> (arguments) // TODO ESTO ES EN UNA DIMENSION
* - Para especificar cuadriculas y bloques multidimensionales
* -dim3 variable_name (x,y,z) // se inicializa por defecto en 1
* - dim3 variable_name(x,y,z) // puede acceder a cada valor de dimension
* - variable_name.x
* * - variable_name.y
* * - variable_name.z
*EJEMPLO UNIDIMENSIONAL
* - 8 bloques de hilos, donde cada bloque tiene 4 hilos en la dimension x
-la dimension de nuestro bloque es de cuatro hilos en la dimension x y 1 hilo en las dimensiones Y y Z.
-dim3 grid(8,1,1) // nos referimos a todos los hilos lanzados para un kernel como grid.
-dim3 block(4,1,1)
_________________________ _________________________ _________________________ _________________________ _________________________ _________________________ _________________________ _________________________
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| |1| |2| |3| |4| | | |1| |2| |3| |4| | | |1| |2| |3| |4| | | |1| |2| |3| |4| | | |1| |2| |3| |4| | | |1| |2| |3| |4| | | |1| |2| |3| |4| | | |1| |2| |3| |4| |
|__|_|___|_|___|_|___|_|__| |__|_|___|_|___|_|___|_|__| |__|_|___|_|___|_|___|_|__| |__|_|___|_|___|_|___|_|__| |__|_|___|_|___|_|___|_|__| |__|_|___|_|___|_|___|_|__| |__|_|___|_|___|_|___|_|__| |__|_|___|_|___|_|___|_|__|
8 bloques unidimensionales con 8 hilos unidimensionales
- si no se especifican las dimensiones se inicializaran como 1
-LIMITES DE TAMAO DE BLOQUE
- 1024 HILOS PARA DIMENSION X
- 1024 HILOS PARA DIMENSION Y
- 64 HILOS PARA DIMENSION Z
- x* y* x <= 1024 la multiplicacion del numero de subprocesos en cada
dimension deber ser menor o igual a 1024
	-LIMITES DE TAMAÑO DE CUADRICULA
		- 2147483647 (2^31 - 1) BLOQUES PARA DIMENSION X
		- 65535 BLOQUES PARA DIMENSION Y,Z
*/
//// EJEMPLOS
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//using namespace std;
//
////kernel
///*
//* funcion asincrona ( el host puede continuar con las siguientes instrucciones) a
//* menos que se especifique que debe esperar (cudaDeviceSynchronize())
//*
//* cudaDeviceReset(); reestablece el dispositivo
//*/
//__global__ void hello_cuda()
//{
// printf("Hello CUDA world \n");
// //cout << "hello CPU world" << endl; // esta instruccion no funciona dentro del kernel
//}
//
//
//int main()
//{
// //************************// EJEMPLO 1 ***
//
// //hello_cuda <<<1,1 >>>(); // kernel con parametros de lanzamiento
// /*
// * - El segundo parametro hace referencia al numero de subprocesos que se ejecutanran en el DEVICE
// *
// */
// //hello_cuda << <1, 10 >> > (); // imprime 10 veces hello CUDA world
//
// //cudaDeviceSynchronize(); // hace que el CPU o host espere en este punto, hasta que termine el proceso de DEVICE
//
// //cudaDeviceReset(); //reestablece dispositivo
//
// //cout << "hello CPU world" << endl;
//
// //return 0;
//
// //************************// EJEMPLO 2 "8 BLOQUES(8X1) CON 4 HILOS(4X1)" imprime 32 veces hello CUDA world***
//
// //dim3 grid(8); // conjunto de 8 blocks en X y 1 en las dimensiones Y,Z.
// //dim3 block(4); // bloque con tamao 4 en X y 1 en Y,Z.
// //
// //// el primer parametro(grid) es el numero de bloques de hilos en cada dimension
// //// el segundo parametro(block) es el numero de hilos en cada dimension del bloque
//
// //hello_cuda << <grid, block >> > ();
//
// //cudaDeviceSynchronize(); // hace que el CPU o host espere en este punto, hasta que termine el proceso de DEVICE
//
// //cudaDeviceReset(); //reestablece dispositivo
//
// //cout << "hello CPU world" << endl;
//
// //return 0;
//
// //************************// EJEMPLO 3 "4 BLOQUES (2X2) CON 16 HILOS (8X2) " imprime 32 veces hello CUDA world***
//
//
// int nx; // variables dinamicas para ir modificando en tiempo de ejecucion
// int ny;
// nx = 16;
// ny = 4;
//
//
// dim3 block(8,2); // 16 hilos en cada bloque
// dim3 grid(nx/block.x, ny/block.y); // 16/8=2 , 4/2=2 4 bloques en total
//// _____________________________________________
//// | | | | | | | | | | | | | | | | |
//// | |1| |2| |3| |4| |5| |6| |7| |8| == 4 BLOQUES (GRID) IGUALES A ESTE.
//// | |1| |2| |3| |4| |5| |6| |7| |8|
//// |__|_|___|_|___|_|___|_|__|_|__|_|___|_|___|_|
//
// // el primer parametro(grid) es el numero de bloques de hilos en cada dimension
// // el segundo parametro(block) es el numero de hilos en cada dimension del bloque
//
// hello_cuda << <grid, block >> > ();
//
// cudaDeviceSynchronize(); // hace que el CPU o host espere en este punto, hasta que termine el proceso de DEVICE
//
// cudaDeviceReset(); //reestablece dispositivo
//
// cout << "hello CPU world" << endl;
//
// return 0;
//
//}
//------------------------106 - Organization of threads in a CUDA program 1----------------------------------------------------
//1D
// ______________________ ____________________
// | |A| |B| |C| |D| |E| |F| |G| |H|
//Threadlx.X| |0| |1| |2| |3| |0| |1| |2| |3|
//Threadlx.Y| |0| |0| |0| |0| |0| |0| |0| |0| 2 bloques : ejemplo de identificacion de hilo
//Threadlx.Z| |0| |0| |0| |0| |0| |0| |0| |0| C = 2,0,0
// |__|_|___|_|___|_|___|_| |_|__|_|_____|_|___|_|
//2D
// __0_____1____2____3___ _0___1_______2___3__
// | || |X| || || |P| || || ||
// | || |Y| || || || || |Q| ||
// ______________________ ____________________
// X Y P Q R S T U
// ______________________ ____________________ Threadlcx.X 1 1 0 2 0 3 1 0
// | |R| || || || || |T| || || Threadlcx.Y 0 1 0 1 0 1 0 1
// | || || || |S| |U| || || ||
// ______________________ ____________________
// //************************// EJEMPLO 1 -> GRID 2X2 CON 8 HILOS CADA BLOQUE ***
// //************************// EJEMPLO 1 -> IDENTIFICACION DE HILOS ***
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//using namespace std;
//
//__global__ void print_threadIds()
//{
// printf("threadIdx.x : %d, threadIdx.y : %d, threadIdx.z : %d \n", threadIdx.x, threadIdx.y, threadIdx.z);
//}
//
//int main()
//{
// int nx, ny;
// nx = 2;
// ny = 2;
//
// dim3 block(2, 2); // 8 subprocesos en la dimension X y 8 subprocesos en la dimension Y.
// dim3 grid(nx / block.x, ny / block.y); // grid de 2x2
//
// print_threadIds << <grid, block >> > ();
// cudaDeviceSynchronize(); // da la orden para que el host o int main espere a que termine el kernel o __global__.
// cudaDeviceReset();
// return 0;
//
// /*
// ORDEN GRID 2X2 = 4 BLOCKS , 4 HILOS POR BLOCK = 16 HILOS
//
// PRIMER BLOCK 1
// _____
// | |
// | A B |
// | C D |
// |_____|
//
// *BLOCKS 1 | 2 | 3 | 4
// A B C D E F G H I J K L M N O P
// Threadidx.X 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1
// Threadidx.Y 0 0 1 1 0 0 1 1 0 0 1 1 0 0 1 1
// Threadidx.Z 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
//
// */
//}
//------------------------107 - Organization of threads in a CUDA program 2----------------------------------------------------
/*
* En tiempo de ejecucion CUDA la variable blckldx inicializada de forma unica para cada hilo dependiendo de las coordenadas de la pertenencia
blockldx.X = coordenadas de cada hilo tomando como base cada block
//1D 0 1
// ____________________ __________________
// |P| || |Q| || || |R| || |S|
// 0 ____________________ __________________ P Q R S T U V X
// blockldx.X 0 0 1 1 0 0 1 1
// ____________________ __________________ blockldx.Y 0 0 0 0 1 1 1 1
// |T| || |U| || |V| || || |X|
// 1 ____________________ __________________
//--------------------------------------------------------------------------
// __________________X_______________
//2D 0 1
// ____________________ ___________________
// || |X| || || |P| || || ||
// |0
// | || |Y| || || || || |Q| ||
// | ______________________ ____________________
// Y| X Y P Q R S T U
// | ______________________ ____________________ blockldx.X 0 0 1 1 0 0 1 1
// | |R| || || || || |T| || || blockldx.Y 0 0 0 0 1 1 1 1
// |1 blockDim.X = 4
// || || || |S| |U| || || || blockDim.Y = 2
// ______________________ ____________________ GridDim.X = 2
GridDim.Y = 2
blockDim = es la dimension del bloque ej. blockDim.x=4 y blockDim.y = 2 da como resultado un bloque de 8 hilos
GridDim = es la dimension de la rejilla ej. gridDim.x = 2 y gridDim.y = 2 da como resultado 4 bloques de hilos
*/
//EJEMPLO
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//using namespace std;
//
//__global__ void print_details()
//{
// printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, gridDim.x : %d, gridDim.y : %d \n", blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
//}
//
//int main()
//{
// int nx, ny;
// nx = 4;
// ny = 1;
//
// dim3 block(1, 1); // 8 subprocesos en la dimension X y 8 subprocesos en la dimension Y.
// dim3 grid(nx / block.x, ny / block.y); // grid de 2x2
//
// print_details << <grid, block >> > ();
// cudaDeviceSynchronize(); // da la orden para que el host o int main espere a que termine el kernel o __global__.
// cudaDeviceReset();
// return 0;
//}
//------------------------108 - Ejercicio grid 3d y block 3d----------------------------------------------------
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//using namespace std;
//
//__global__ void print_details()
//{
// printf("threadIdx.x : %d, threadIdx.y : %d, threadIdx.z : %d \n", threadIdx.x, threadIdx.y, threadIdx.z);
// printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, blockDim.z : %d, gridDim.x : %d, gridDim.y : %d, gridDim.z : %d \n", blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
//}
//
//int main()
//{
// int nx, ny, nz;
// nx = 4;
// ny = 4;
// nz = 4;
//
// dim3 block(2, 2, 2); // 8 subprocesos en la dimension X y 8 subprocesos en la dimension Y.
// dim3 grid(nx / block.x, ny / block.y, nz / block.z); // grid de 2x2
//
// print_details << <grid, block >> > ();
// cudaDeviceSynchronize(); // da la orden para que el host o int main espere a que termine el kernel o __global__.
// cudaDeviceReset();
// return 0;
//}
//------------------------109 Unique index calculation using threadIdx blockId and blockDim--------------------
//************************************Ejemplo 1
////asignar valores de un array a cada hilo
//
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//using namespace std;
//
//__global__ void unique_idx_calc_threadIdx(int* input)
//{
// int tid = threadIdx.x;
// printf("threadIdx : %d, value : %d \n", tid, input[tid]);
//}
//
//int main()
//{
// int array_size = 8;
// int array_byte_size = sizeof(int) * array_size;
// int h_data[] = { 23,9,4,53,65,12,1,33 };
//
// for (int i = 0; i < array_size; i++)
// {
// cout << h_data[i] << " ";
// }
//
// cout << endl;
// cout << endl;
//
// int* d_data;
// cudaMalloc((void**)&d_data, array_byte_size);
// cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
//
// //dim3 block(8); //8 threads en un bloque
// //dim3 grid(1);
//
// dim3 block(4); // 8 threads en 2 bloques de 4 cada uno
// dim3 grid(2);
//
// unique_idx_calc_threadIdx << <grid, block >> > (d_data);
// cudaDeviceReset();
// return 0;
//
//
//}
//************************************//Ejemplo 2
//asignar valores de un array continuos a un grupo de blocks (grid 1D con 16 hilos en 4 bloques)
// gid = tid + offset
// gid = tid + blockIdx.x * blockDim.x
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//using namespace std;
//
//
//
//__global__ void unique_gid_calculation(int * input)
//{
// int tid = threadIdx.x;
// int offset = blockIdx.x * blockDim.x; // numero de hilos que componen un bloque
// int gid = tid + offset; //indice en el que empezara a asignar valores a cada bloque de hilos
//
// //ejemplo de 3 blocks
///*
//
////1D 0 1 3
//// _______________________ ________________________ ________________________
//// |23| |9| |4| |53| |65| |12| |1| |33| |65| |12| |1| |33|
//tid(threadIdx) = 0 1 2 3 0 1 2 3 0 1 2 3
//blockIdx.x = 0 0 0 0 1 1 1 1 2 2 2 2
//blockDim.x = 4 4 4 4 4 4 4 4 4 4 4 4
//offset = 0 0 0 0 4 4 4 4 8 8 8 8
//gid = 0 1 2 3 4 5 6 7 8 9 10 11
//
//*/
//
// printf("blockIdx.x : %d, threadIdx.x : %d, gid: %d, value : %d \n",
// blockIdx.x, tid, gid, input[gid]);
//
//}
//
//int main()
//{
// int array_size = 16;
// int array_byte_size = sizeof(int) * array_size;
// int h_data[] = { 23,9,4,53,65,12,1,33,22,1,1,3,5,2,1,3 };
//
// for (int i = 0; i < array_size; i++)
// {
// cout << h_data[i] << " ";
// }
//
// cout << endl;
// cout << endl;
//
// int* d_data;
// cudaMalloc((void**)&d_data, array_byte_size);
// cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
//
// //dim3 block(8); //8 threads en un bloque
// //dim3 grid(1);
//
// dim3 block(4); // 4 threads en 4 bloques
// dim3 grid(4);
//
// unique_gid_calculation << <grid, block >> > (d_data);
// cudaDeviceReset();
// return 0;
//
//
//}
//------------------------110 Unique index calculation for 2D grid 1--------------------
//------------------------110 calculo del indice global para cuadricula 2D 1 (GRID DE 2X2 CON 4x1 hilos) ------
/*
*
* Formula para calcular el indice unico para identificar los hilos que estan en una segunda fila
*
* Index = row offset + block offset + tid
* row offset = number of threads in one thread block row (blockldx.y)
* block offset = number of threads in thread block(blockldx.x)
* tid = threadldx.x
*
* gid = gridDim.x * blockDim.x * blockldx.y + blockldx.x * blockDim.x + threadldx.x
*/
//asignar valores de un array continuos a un grupo de blocks (grid 2D con 16 hilos en 4 bloques de 4x1 hilos)
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//using namespace std;
//
//
//
//__global__ void unique_gid_calculation_2d(int * input)
//{
// int tid = threadIdx.x;
// int offset = blockIdx.x * blockDim.x; // numero de hilos que componen un bloque
//
// int row_offset = blockDim.x * gridDim.x * blockIdx.y;
//
// int gid = tid + offset + row_offset; //indice en el que empezara a asignar valores a cada bloque de hilos
//ejemplo de 3 blocks
/*
//2D (4 BLOQUES EN UN GRID DE 2X2
// 0 1
// _______________________ ________________________
//fila 1 de bloques |23| |9| |4| |53| |22| |1| |1| |3|
// _______________________ ________________________
//fila 2 de bloques |65| |12| |1| |33| |5| |2| |1| |3|
//fila 1 de bloques |23| |9| |4| |53| |65| |12| |1| |33|
tid(threadIdx.X)= 0 1 2 3 0 1 2 3
blockIdx.x = 0 0 0 0 1 1 1 1
blockDim.x = 4 4 4 4 4 4 4 4
offset = 0 0 0 0 4 4 4 4
blockIdx.y = 0 0 0 0 0 0 0 0
gridDim.x = 2 2 2 2 2 2 2 2
rowOffset = 0 0 0 0 0 0 0 0
gid = 0 1 2 3 4 5 6 7
//fila 2 de bloques |65| |12| |1| |33| |5| |2| |1| |3|
tid(threadIdx.X)= 0 1 2 3 0 1 2 3
blockIdx.x = 0 0 0 0 1 1 1 1
blockDim.x = 4 4 4 4 4 4 4 4
offset = 0 0 0 0 4 4 4 4
blockIdx.y = 1 1 1 1 1 1 1 1
gridDim.x = 2 2 2 2 2 2 2 2
rowOffset = 8 8 8 8 8 8 8 8
gid = 8 9 10 11 12 13 14 15
rowOffset = blockDim.x * gridDim.x * blockIdx.y;
gid = tid + offset + row_offset; //indice en el que empezara a asignar valores a cada bloque de hilos
*/
// printf("blockIdx.x : %d, blockIdx.y: %d, threadIdx.x: %d, gid: %d - input: %d \n",
// blockIdx.x, blockIdx.y, tid, gid, input[gid]);
//
//}
//
//int main()
//{
// int array_size = 16;
// int array_byte_size = sizeof(int) * array_size;
// int h_data[] = { 23,9,4,53,65,12,1,33,22,1,1,3,5,2,1,3 };
//
// for (int i = 0; i < array_size; i++)
// {
// cout << h_data[i] << " ";
// }
//
// cout << endl;
// cout << endl;
//
// int* d_data;
// cudaMalloc((void**)&d_data, array_byte_size);
// cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
//
// //dim3 block(8); //8 threads en un bloque
// //dim3 grid(1);
//
// dim3 block(4); // 4 threads en 4 bloques
// dim3 grid(2,2);
//
// unique_gid_calculation_2d << <grid, block >> > (d_data);
// cudaDeviceReset();
// return 0;
//
//
//}
//-----------------111 Unique index calculation for 2D grid 2--------------------
//-----------------111 calculo del indice global para cuadricula 2D (GRID DE 2X2 CON 2x2 hilos) -----
/*
*
* Formula para calcular el indice unico para identificar los hilos que estan en una segunda fila
*
* Index = row offset + block offset + tid
* row offset = number of threads in one thread block row (blockldx.y)
* block offset = number of threads in thread block(blockldx.x)
* tid = threadldx.x
*
* gid = gridDim.x * blockDim.x * blockldx.y + blockldx.x * blockDim.x + threadldx.x
*/
//asignar valores de un array continuos a un grupo de blocks (grid 2D con 16 hilos en 4 bloques de 2x2 hilos)
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//using namespace std;
//
//
//
//__global__ void unique_gid_calculation_2d_2d(int * input)
//{
// int tid = blockDim.x * threadIdx.y + threadIdx.x;
//
// int num_threads_in_a_block = blockDim.x * blockDim.y;
// int block_offset = blockIdx.x * num_threads_in_a_block;
//
// int num_threads_in_a_row = num_threads_in_a_block * gridDim.x;
// int row_offset = num_threads_in_a_row * blockIdx.y;
//
// int gid = tid + block_offset + row_offset; //indice en el que empezara a asignar valores a cada bloque de hilos
//ejemplo de 3 blocks
/*
//2D (4 BLOQUES EN UN GRID DE 2X2
// 0 1
//bloques 1 y 2 ___________ __________
//fila 1 |23| |9| |22| |1|
//fila 2 |4| |53| |1| |3|
//bloques 3 y 4 ____________ __________
//fila 1 |65| |12| |5| |2|
//fila 2 |1| |33| |1| |3|
tid = blockDim.x * threadIdx.y + threadIdx.x;
num_threads_in_a_block = blockDim.x * blockDim.y;
block_offset = blockIdx.x * num_threads_in_a_block;
num_threads_in_a_row = num_threads_in_a_block * gridDim.x;
row_offset = num_threads_in_a_row * blockIdx.y;
gid = tid + block_offset + row_offset;
//fila 1 de bloques |23| |9| |4| |53| |22| |1| |1| |3|
blockDim.x = 2 2 2 2 2 2 2 2
treadsIdx.x = 0 1 0 1 0 1 0 1
treadsIdx.y = 0 0 1 1 0 0 1 1
tid = 0 1 2 3 0 1 2 3
blockDim.y = 2 2 2 2 2 2 2 2
num_threads_in_a_block = 4 4 4 4 4 4 4 4
blockIdx.x = 0 0 0 0 1 1 1 1
block_offset = 0 0 0 0 4 4 4 4
gridDim.x = 2 2 2 2 2 2 2 2
num_threads_in_a_row = 8 8 8 8 8 8 8 8
blockIdx.y = 0 0 0 0 0 0 0 0
rowOffset = 0 0 0 0 0 0 0 0
gid = 0 1 2 3 4 5 6 7
tid = blockDim.x * threadIdx.y + threadIdx.x;
num_threads_in_a_block = blockDim.x * blockDim.y;
block_offset = blockIdx.x * num_threads_in_a_block;
num_threads_in_a_row = num_threads_in_a_block * gridDim.x;
row_offset = num_threads_in_a_row * blockIdx.y;
gid = tid + block_offset + row_offset;
//fila 2 de bloques |65| |12| |1| |33| |5| |2| |1| |3|
blockDim.x = 2 2 2 2 2 2 2 2
treadsIdx.x = 0 1 0 1 0 1 0 1
treadsIdx.y = 0 0 1 1 0 0 1 1
tid = 0 1 2 3 0 1 2 3
blockDim.y = 2 2 2 2 2 2 2 2
num_threads_in_a_block = 4 4 4 4 4 4 4 4
blockIdx.x = 0 0 0 0 1 1 1 1
block_offset = 0 0 0 0 4 4 4 4
gridDim.x = 2 2 2 2 2 2 2 2
num_threads_in_a_row = 8 8 8 8 8 8 8 8
blockIdx.y = 1 1 1 1 1 1 1 1
rowOffset = 8 8 8 8 8 8 8 8
gid = 8 9 10 11 12 13 14 15
*/
// printf("blockIdx.x : %d, blockIdx.y: %d, threadIdx.x: %d, gid: %d - input: %d \n",
// blockIdx.x, blockIdx.y, tid, gid, input[gid]);
//
//}
//
//int main()
//{
// int array_size = 16;
// int array_byte_size = sizeof(int) * array_size;
// int h_data[] = { 23,9,4,53,65,12,1,33,22,1,1,3,5,2,1,3 };
//
// for (int i = 0; i < array_size; i++)
// {
// cout << h_data[i] << " ";
// }
//
// cout << endl;
// cout << endl;
//
// int* d_data;
// cudaMalloc((void**)&d_data, array_byte_size);
// cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
//
// //dim3 block(8); //8 threads en un bloque
// //dim3 grid(1);
//
// dim3 block(2,2); // 4 threads en cada block (2x2)
// dim3 grid(2,2); // 4 blocks (2x2)
//
// unique_gid_calculation_2d_2d << <grid, block >> > (d_data);
// cudaDeviceReset();
// return 0;
//
//
//}
//-----------------112 CUDA MEMORY TRANSFER --------------------------------------------------
/*
-Two devices
-HOST ( cpu- memory) - CPU - CACHES AND DRAM
-DEVICE ( gpu - internal gpu memory)- SM (stream multiprocess) - CACHES AND DRAM
- Para transferir memoria entre el host y el dispositivo
cudaMemCpy(
destination ptr, source ptr,
size in byte, direction)
* ptr = puntero
*destination ptr = hostToDevice o DeviceToHost o HostToHost (cudamemcpyhtod, cudamemcpydtoh, cudamemcpydtod)
*/
//Ejemplo 1. pasar datos a memoria del device en un solo bloque de hilos
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//#include <stdlib.h>
//#include <time.h>
//using namespace std;
//
//__global__ void mem_trs_test(int* input) // kernel que toma como un puntero a una matriz de enteros
//{
// //cuadricula 1D con 2 bloques de hilos
// int gid = blockIdx.x * blockDim.x + threadIdx.x; //indice global para acceder a elementos de la matriz
// printf("tid: %d, gid: %d, value: %d \n", threadIdx.x, gid, input[gid]);
//}
//
//
//
//int main()
//{
// int size = 128; // tamao de la matriz
// int byte_size = size * sizeof(int);// cantidad de bytes que necesitamos para asignar a esta matriz
// int* h_input; //asignar memoria del Host (la h_ es para indicar que es una variable del lenguaje principal)
//
// //asignacion de memoria usando funcion malloc.
// h_input = (int*)malloc(byte_size); // asinacion de bytes necesarios
//
// //inicializacion aleatoria de la matriz con secuencia aleatoria de numeros
// time_t t;
// srand((unsigned)time(&t));
// for (int i = 0; i < size; i++)
// {
// h_input[i] = (int)(rand() & 0xff);//valor aleatoria entre 0 y 255
// }
//
// int* d_input; // se utiliza d_ para indicar que es una variable de dispositivo
//
// //asignacion de memoria en el dispositivo(gpu)
// /*
// C CUDA
// malloc cudaMalloc -- asignar memoria
// memset cudaMemset -- establece valores para una ubicacion de memoria dada
// free cudaFree -- recupera la ubicacion de memoria especificada
//
// */
//
// // ** = puntero doble o puntero a un puntero
// // &d_input = especifica tamao de la memoria
// cudaMalloc((void**)&d_input,byte_size);
//
// cudaMemcpy(d_input,h_input,byte_size,cudaMemcpyHostToDevice);// tranferir la matriz inicializada en el host al dispositivo
// // h_input = puntero de origen
// // d_input = puntero de destino en el device
//
// //parametros de lanzamiento
// dim3 block(64); // TODO: POR LO GENERAL SE MANTIENE EL TAMAO EN MULTIPLOS DE 32
// dim3 grid(2);
//
// mem_trs_test << <grid, block >> > (d_input);
// cudaDeviceSynchronize();// hace que la ejecucion espere en este punto
//
// cudaFree(d_input); // recuperar memoria
// free(h_input); // recuperar memoria
//
// cudaDeviceReset();
// return 0;
//
//}
//***********************************************
//Ejemplo 2. pasar datos a memoria del device en varios bloques de hilos
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//#include <stdlib.h>
//#include <time.h>
//using namespace std;
//
//__global__ void mem_trs_test2(int* input, int size) // kernel que toma como un puntero a una matriz de enteros
//{ //int size = tamao matriz
//
// //cuadricula 1D con 2 bloques de hilos
// int gid = blockIdx.x * blockDim.x + threadIdx.x; //indice global para acceder a elementos de la matriz
//
//
// // CON ESTA VERIFICACION SOLO SE UTILIZAN LOS HILOS QUE MANEJARAN DATOS DADO EL INPUT
// /*if (gid < size)
// {
// printf("tid: %d, gid: %d, value: %d \n", threadIdx.x, gid, input[gid]);
// }*/
//
// // SIN LA VERIFICACION SE ACCEDE A LOS HILOS DE TODO EL GRID AUN CUANDO NO MANEJEN DATOS
// printf("tid: %d, gid: %d, value: %d \n", threadIdx.x, gid, input[gid]);
//}
//
//
//
//int main()
//{
// int size = 150; // tamao de la matriz
// int byte_size = size * sizeof(int);// cantidad de bytes que necesitamos para asignar a esta matriz
// int* h_input; //asignar memoria del Host (la h_ es para indicar que es una variable del lenguaje principal)
//
// //asignacion de memoria usando funcion malloc.
// h_input = (int*)malloc(byte_size); // asinacion de bytes necesarios
//
// //inicializacion aleatoria de la matriz con secuencia aleatoria de numeros
// time_t t;
// srand((unsigned)time(&t));
// for (int i = 0; i < size; i++)
// {
// h_input[i] = (int)(rand() & 0xff);//valor aleatoria entre 0 y 255
// }
//
// int* d_input; // se utiliza d_ para indicar que es una variable de dispositivo
//asignacion de memoria en el dispositivo(gpu)
/*
C CUDA
malloc cudaMalloc -- asignar memoria
memset cudaMemset -- establece valores para una ubicacion de memoria dada
free cudaFree -- recupera la ubicacion de memoria especificada
*/
// ** = puntero doble o puntero a un puntero
// &d_input = especifica tamao de la memoria
// cudaMalloc((void**)&d_input, byte_size);
//
// cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice);// tranferir la matriz inicializada en el host al dispositivo
// // h_input = puntero de origen
// // d_input = puntero de destino en el device
//
// //parametros de lanzamiento
// dim3 block(32); // TODO: POR LO GENERAL SE MANTIENE EL TAMAO EN MULTIPLOS DE 32
// dim3 grid(5);
//
// mem_trs_test2 << <grid, block >> > (d_input,size);
// cudaDeviceSynchronize();// hace que la ejecucion espere en este punto
//
// cudaFree(d_input); // recuperar memoria
// free(h_input); // recuperar memoria
//
// cudaDeviceReset();
// return 0;
//
//}
//-----------------112 exercise GRID 3D --------------------------------------------------
//-----------------114 Sum array example with validity check --------------------------------------------------
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
////#include "cuda_common.cuh"
//
//#include <stdio.h>
//#include "common.h" // incluye metodo para comparar matrices
//
//// for random initialize
//#include <stdlib.h>
//#include <time.h>
//
//// for memset
//#include <cstring>
//using namespace std;
//
//__global__ void sum_array_gpu(int* a, int* b, int* c, int size)
//{
// int gid = blockIdx.x * blockDim.x + threadIdx.x;
//
// if (gid < size) // verificar si el indice global esta dentro del tamao de nuestra matriz
// {
// c[gid] = a[gid] + b[gid];
// }
//}
//
//// funcion para verificar resultado de gpu
//void sum_array_cpu(int* a, int* b, int* c, int size)
//{
// for (int i = 0; i < size; i++)
// {
// c[i] = a[i] + b[i];
// }
//}
//
//int main()
//{
// int size = 10000; // tamao de la matriz
// int block_size = 128; // tamao del bloque en 128
// int num_bytes = size * sizeof(int); // tamao necesario en bytes
//
// // punteros host
// int* h_a, * h_b, * gpu_results;
//
// int* h_c; // para verificacion en cpu
//
// //asignacion de memoria para cada puntero
// h_a = (int*)malloc(num_bytes);
// h_b = (int*)malloc(num_bytes);
// gpu_results = (int*)malloc(num_bytes);
//
// h_c = (int*)malloc(num_bytes);// para verificacion en cpu
//
// //inicializacion aleatoria de cada matriz
// time_t t;
// srand((unsigned)time(&t));
// for (int i = 0; i < size; i++)
// {
// h_a[i] = (int)(rand() & 0xFF); // valor generado entre 0 y 255
// }
// for (int i = 0; i < size; i++)
// {
// h_b[i] = (int)(rand() & 0xFF);
// }
//
// sum_array_cpu(h_a, h_b, h_c, size);
//
// memset(gpu_results, 0, num_bytes);
//
// // punteros device
// int* d_a, * d_b, * d_c;
// cudaMalloc((int**)&d_a, num_bytes);
// cudaMalloc((int**)&d_b, num_bytes);
// cudaMalloc((int**)&d_c, num_bytes);
//
// //tranferencia de matriz h_a y h_b
// cudaMemcpy(d_a, h_a, num_bytes, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, h_b, num_bytes, cudaMemcpyHostToDevice);
//
// //launching the grid
// dim3 block(block_size); //tamao de bloque 128 en la dimension X
//	dim3 grid((size / block.x) + 1); // (10000 / 128) + 1 = GRID 1D de 79 block de 128 hilos cada uno
//
// sum_array_gpu << <grid, block >> > (d_a, d_b, d_c, size);
// cudaDeviceSynchronize();
//
// cudaMemcpy(gpu_results, d_c, num_bytes, cudaMemcpyDeviceToHost); // puntero de origen d_c, puntero de destino gpu_results
//
// // COMPARACION DE RESULTADOS CPU Y GPU
// compare_arrays(gpu_results, h_c, size);
//
// cudaFree(d_c);
// cudaFree(d_b);
// cudaFree(d_a);
//
// free(gpu_results);
// free(h_b);
// free(h_a);
//
// cudaDeviceReset();
// return 0;
//
//
//}
//-----------------115 Error handling --------------------------------------------------
/*Types error
* -Compile time errors
* -Errors language syntax.
*
* -Run time errors
* -Errors happens while program is running
*
*/
//ejemplo con la suma anterior
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
////#include "cuda_common.cuh"
//
//#include <stdio.h>
//#include "common.h" // incluye metodo para comparar matrices
//
//// for random initialize
//#include <stdlib.h>
//#include <time.h>
//
//// for memset
//#include <cstring>
//
//#include "cuda_common.cuh"
//
//using namespace std;
//
//__global__ void sum_array_gpu(int* a, int* b, int* c, int size)
//{
// int gid = blockIdx.x * blockDim.x + threadIdx.x;
//
// if (gid < size) // verificar si el indice global esta dentro del tamao de nuestra matriz
// {
// c[gid] = a[gid] + b[gid];
// }
//}
//
//// funcion para verificar resultado de gpu
//void sum_array_cpu(int* a, int* b, int* c, int size)
//{
// for (int i = 0; i < size; i++)
// {
// c[i] = a[i] + b[i];
// }
//}
//
//int main()
//{
// int size = 10000; // tamao de la matriz
// int block_size = 128; // tamao del bloque en 128
// int num_bytes = size * sizeof(int); // tamao necesario en bytes
//
// //ERROR (comprobacion)
// cudaError error;
//
// // punteros host
// int* h_a, * h_b, * gpu_results;
//
// int* h_c; // para verificacion en cpu
//
// //asignacion de memoria para cada puntero
// h_a = (int*)malloc(num_bytes);
// h_b = (int*)malloc(num_bytes);
// gpu_results = (int*)malloc(num_bytes);
//
// h_c = (int*)malloc(num_bytes);// para verificacion en cpu
//
// //inicializacion aleatoria de cada matriz
// time_t t;
// srand((unsigned)time(&t));
// for (int i = 0; i < size; i++)
// {
// h_a[i] = (int)(rand() & 0xFF); // valor generado entre 0 y 255
// }
// for (int i = 0; i < size; i++)
// {
// h_b[i] = (int)(rand() & 0xFF);
// }
//
// sum_array_cpu(h_a, h_b, h_c, size);
//
// memset(gpu_results, 0, num_bytes);
//
// // punteros device
// int* d_a, * d_b, * d_c;
//
// //---------------------------
// //ERROR FORMA MANUAL
// /*error = cudaMalloc((int**)&d_a, num_bytes);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Error : %s \n", cudaGetErrorString(error));
// }*/
//
// //ERROR UTILIZANDO cuda_common.cuh
//
// gpuErrchk(cudaMalloc((int**)&d_a, num_bytes));
// gpuErrchk(cudaMalloc((int**)&d_b, num_bytes));
// gpuErrchk(cudaMalloc((int**)&d_c, num_bytes));
//
// //-------------------------------
// //cudaMalloc((int**)&d_a, num_bytes);
// //cudaMalloc((int**)&d_b, num_bytes);
// //cudaMalloc((int**)&d_c, num_bytes);
//
// //tranferencia de matriz h_a y h_b
// cudaMemcpy(d_a, h_a, num_bytes, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, h_b, num_bytes, cudaMemcpyHostToDevice);
//
// //launching the grid
// dim3 block(block_size); //tamao de bloque 128 en la dimension X
// dim3 grid((size / block.x) + 1); // (10000 / 128) + 128 = GRID 1D de 79 block de 128 hilos cada uno
//
// sum_array_gpu << <grid, block >> > (d_a, d_b, d_c, size);
// cudaDeviceSynchronize();
//
// cudaMemcpy(gpu_results, d_c, num_bytes, cudaMemcpyDeviceToHost); // puntero de origen d_c, puntero de destino gpu_results
//
// // COMPARACION DE RESULTADOS CPU Y GPU
// compare_arrays(gpu_results, h_c, size);
//
// cudaFree(d_c);
// cudaFree(d_b);
// cudaFree(d_a);
//
// free(gpu_results);
// free(h_b);
// free(h_a);
//
// cudaDeviceReset();
// return 0;
//
//
//}
//-----------------116 Sum array example with timing --------------------------------------------------
// tiempo de ejecucion en cpu y gpu
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
////#include "cuda_common.cuh"
//
//#include <stdio.h>
//#include "common.h" // incluye metodo para comparar matrices
//
//// for random initialize
//#include <stdlib.h>
//#include <time.h>
//
//// for memset
//#include <cstring>
//
//#include "cuda_common.cuh"
//
//using namespace std;
//
//__global__ void sum_array_gpu(int* a, int* b, int* c, int size)
//{
// int gid = blockIdx.x * blockDim.x + threadIdx.x;
//
// if (gid < size) // verificar si el indice global esta dentro del tamao de nuestra matriz
// {
// c[gid] = a[gid] + b[gid];
// }
//}
//
//// funcion para verificar resultado de gpu
//void sum_array_cpu(int* a, int* b, int* c, int size)
//{
// for (int i = 0; i < size; i++)
// {
// c[i] = a[i] + b[i];
// }
//}
//
//int main()
//{
// int size = 10000; // tamao de la matriz
// int block_size = 128; // tamao del bloque en 128
// int num_bytes = size * sizeof(int); // tamao necesario en bytes
//
// //ERROR (comprobacion)
// cudaError error;
//
// // punteros host
// int* h_a, * h_b, * gpu_results;
//
// int* h_c; // para verificacion en cpu
//
// //asignacion de memoria para cada puntero
// h_a = (int*)malloc(num_bytes);
// h_b = (int*)malloc(num_bytes);
// gpu_results = (int*)malloc(num_bytes);
//
// h_c = (int*)malloc(num_bytes);// para verificacion en cpu
//
// //inicializacion aleatoria de cada matriz
// time_t t;
// srand((unsigned)time(&t));
// for (int i = 0; i < size; i++)
// {
// h_a[i] = (int)(rand() & 0xFF); // valor generado entre 0 y 255
// }
// for (int i = 0; i < size; i++)
// {
// h_b[i] = (int)(rand() & 0xFF);
// }
//
// //*******************
// // PARA SUMA DE TIEMPo en la funcion sum_array_cpu
// clock_t cpu_start, cpu_end;
// cpu_start = (double)clock();
// printf("cpu_start: %0.5f \n", (double)cpu_start);
// sum_array_cpu(h_a, h_b, h_c, size);
// cpu_end = (double)clock();
// printf("cpu_end: %0.5f \n", (double)cpu_end);
// //*******************
//
//
// memset(gpu_results, 0, num_bytes);
// memset(gpu_results, 0, num_bytes);
//
//
//
// // punteros device
// int* d_a, * d_b, * d_c;
//
// //---------------------------
// //ERROR FORMA MANUAL
// /*error = cudaMalloc((int**)&d_a, num_bytes);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Error : %s \n", cudaGetErrorString(error));
// }*/
//
// //ERROR UTILIZANDO cuda_common.cuh
//
// gpuErrchk(cudaMalloc((int**)&d_a, num_bytes));
// gpuErrchk(cudaMalloc((int**)&d_b, num_bytes));
// gpuErrchk(cudaMalloc((int**)&d_c, num_bytes));
//
// //-------------------------------
// //cudaMalloc((int**)&d_a, num_bytes);
// //cudaMalloc((int**)&d_b, num_bytes);
// //cudaMalloc((int**)&d_c, num_bytes);
//
// //*******************
// // TIEMPO QUE UTILIZA GPU PARA tranferir datos a memoria
// clock_t htod_start, htod_end;
// htod_start = clock();
// //tranferencia de matriz h_a y h_b
// cudaMemcpy(d_a, h_a, num_bytes, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, h_b, num_bytes, cudaMemcpyHostToDevice);
// htod_end = clock();
// //*******************
//
// //launching the grid
// dim3 block(block_size); //tamao de bloque 128 en la dimension X
// dim3 grid((size / block.x) + 1); // (10000 / 128) + 128 = GRID 1D de 79 block de 128 hilos cada uno
//
// //*******************
// // TIEMPO QUE UTILIZA GPU PARA ejecutar el kernel
// clock_t gpu_start, gpu_end;
// gpu_start = clock();
// sum_array_gpu << <grid, block >> > (d_a, d_b, d_c, size);
// gpu_end = clock();
// //*******************
//
// cudaDeviceSynchronize();
//
//
// //*******************
// // TIEMPO QUE UTILIZA GPU PARA tranferir datos a host
// clock_t dtoh_start, dtoh_end;
// dtoh_start = clock();
// cudaMemcpy(gpu_results, d_c, num_bytes, cudaMemcpyDeviceToHost); // puntero de origen d_c, puntero de destino gpu_results
// dtoh_end = clock();
// //*******************
//
// // COMPARACION DE RESULTADOS CPU Y GPU
// compare_arrays(gpu_results, h_c, size);
//
// cudaFree(d_c);
// cudaFree(d_b);
// cudaFree(d_a);
//
// // VELOCIDAD DE RELOJ DE CPU
// printf("sum array CPU execution time: %4.6f \n", (double)((double)(cpu_end - cpu_start) / CLOCKS_PER_SEC));
//
// // VELOCIDAD DE RELOJ DE CPU
// printf("sum array GPU execution time: %4.6f \n", (double)((double)(gpu_end - gpu_start) / CLOCKS_PER_SEC));
// printf("htod mem transfer time: %4.6f \n", (double)((double)(htod_end - htod_start) / CLOCKS_PER_SEC));
// printf("dtod mem transfer time: %4.6f \n", (double)((double)(dtoh_end - dtoh_start) / CLOCKS_PER_SEC));
//
// printf("Sum array GPU total execution time: %4.6f \n", (double)((double)(dtoh_end - htod_start) / CLOCKS_PER_SEC));
//
//
// free(gpu_results);
// free(h_b);
// free(h_a);
//
// cudaDeviceReset();
// return 0;
//
//
//}
//----------------------------------117 Device properties --------------------------------------------------
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Query CUDA device 0 and print its capabilities to stdout:
// multiprocessor count, clock rate, compute capability, memory sizes,
// warp size and the maximum grid/block launch dimensions.
// Prints an error and returns early if no CUDA device is available.
void query_device()
{
    int deviceCount = 0;
    cudaError_t err = cudaGetDeviceCount(&deviceCount);
    if (err != cudaSuccess || deviceCount == 0)
    {
        // Bug fix: the original printed this message but then queried
        // device 0 anyway, reading an uninitialized cudaDeviceProp.
        printf("No CUDA support device found\n");
        return;
    }

    int devNo = 0;
    cudaDeviceProp iProp;
    err = cudaGetDeviceProperties(&iProp, devNo);
    if (err != cudaSuccess)
    {
        printf("cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(err));
        return;
    }

    printf("Device %d: %s\n", devNo, iProp.name);
    printf(" Number of multiprocessors: %d\n",
        iProp.multiProcessorCount);
    // clockRate is reported in kHz by the runtime.
    printf(" clock rate : %d\n",
        iProp.clockRate);
    printf(" Compute capability : %d.%d\n",
        iProp.major, iProp.minor);
    printf(" Total amount of global memory: %4.2f KB\n",
        iProp.totalGlobalMem / 1024.0);
    printf(" Total amount of constant memory: %4.2f KB\n",
        iProp.totalConstMem / 1024.0);
    printf(" Total amount of shared memory per block: %4.2f KB\n",
        iProp.sharedMemPerBlock / 1024.0);
    printf(" Total amount of shared memory per MP: %4.2f KB\n",
        iProp.sharedMemPerMultiprocessor / 1024.0);
    printf(" Total number of registers available per block: %d\n",
        iProp.regsPerBlock);
    printf(" Warp size: %d\n",
        iProp.warpSize);
    printf(" Maximum number of threads per block: %d\n",
        iProp.maxThreadsPerBlock);
    printf(" Maximum number of threads per multiprocessor: %d\n",
        iProp.maxThreadsPerMultiProcessor);
    // 32 threads per warp.
    printf(" Maximum number of warps per multiprocessor: %d\n",
        iProp.maxThreadsPerMultiProcessor / 32);
    printf(" Maximum Grid size : (%d,%d,%d)\n",
        iProp.maxGridSize[0], iProp.maxGridSize[1], iProp.maxGridSize[2]);
    printf(" Maximum block dimension : (%d,%d,%d)\n",
        iProp.maxThreadsDim[0], iProp.maxThreadsDim[1], iProp.maxThreadsDim[2]);
}
// Entry point: report the properties of the default CUDA device.
int main()
{
    query_device();
    return 0;
}
12,970 | #include "includes.h"
// GPU step of a Sieve-of-Eratosthenes style marking pass.
// arr:    flag array, one int per candidate number (0 = candidate, 1 = composite)
// sqroot: largest base c to sieve with (presumably sqrt(limit) — TODO confirm at call site)
// limit:  number of valid entries in arr
//
// Each thread derives an index tid from its global thread id (offset by c+1)
// and, for every base c whose flag is still 0, marks arr[tid] composite when
// tid is a multiple of c.
//
// NOTE(review): arr[c] is read while other threads in the grid may be writing
// arr[] concurrently, and there is no grid-wide synchronization between
// iterations of c — verify under compute-sanitizer that the marking order
// cannot skip a base or read a half-updated flag.
__global__ void init(int *arr, int sqroot, int limit) {
int c;
// Iterate over every sieve base c up to sqroot.
for(c = 2; c <= sqroot; c++) {
if(arr[c] == 0) {
/*
#pragma omp parallel for shared(arr, limit, c) private(m)
for(m = c+1; m < limit; m++) {
if(m%c == 0) {
arr[m] = 1;
}
}
*/
// Global index shifted by c+1 so the smallest tid starts above c itself
// (c must stay unmarked — it is the prime being sieved with).
int tid = c+1+ threadIdx.x + (blockIdx.x * blockDim.x);
// Bounds check: grids rarely divide limit evenly.
if (tid<limit){
if (tid % c ==0) {
// tid is a proper multiple of c, hence composite.
arr[tid] = 1;
}
}
}
}
}
12,971 | #include <stdio.h>
// Placeholder kernel: exists only to demonstrate a device launch.
__global__ void kernel( void )
{
    // Intentionally empty — no device work is performed.
}
// Entry point: launch the empty demo kernel, then print a greeting.
// Returns 0 on success, 1 if the launch or execution reported an error.
int main(void)
{
    // Launch a single block with a single thread.
    kernel<<<1,1>>>();

    // Kernel launches are asynchronous and never return an error directly:
    // check the launch configuration, then block until the device finishes
    // so any execution error surfaces before the host exits.
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }

    printf( "Hello, World!\n" );
    return 0;
}
|
12,972 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <iostream>
#include <chrono>
// 5x5 convolution over an interleaved image stored as a flat int array
// (channel-fastest layout). One thread handles one color sample.
// Samples closer than 2 rows/columns to any border are written as 0.
// The kernel weights sum to 0; the trailing "/ 1" performs no
// normalization and is kept from the original.
__global__ void convoluteGPU(int* pixeliIntrare, int* pixeliIesire, int linii, int coloane, int canaleCuloare) {
int kernel[5][5] = {
{ 0, 0, -1, 0, 0},
{ 0, -1, -2, -1, 0},
{-1, -2, 16, -2, -1},
{ 0, -1, -2, -1, 0},
{ 0, 0, -1, 0, 0}
};
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < linii * coloane * canaleCuloare) {
//out[id] = in[id]+20;
//apply kernel
// Recover (row, column, channel) from the flat sample index.
int linie = id / (coloane * canaleCuloare);
int coloana = (id % (coloane * canaleCuloare)) / canaleCuloare;
// Channel index is computed but not used below.
int canalCuloare = id % canaleCuloare;
int pixel = 0;
// Only interior samples (at least 2 away from every edge) are convolved.
if (linie > 1 && linie < (linii - 2) && coloana > 1 && coloana < (coloane - 2)) {
//center
pixel += kernel[2][2] * pixeliIntrare[id];
//N
pixel += kernel[1][2] * pixeliIntrare[id - coloane * canaleCuloare];
//NE
pixel += kernel[1][3] * pixeliIntrare[id - coloane * canaleCuloare + canaleCuloare];
//E
pixel += kernel[2][3] * pixeliIntrare[id + canaleCuloare];
//SE
pixel += kernel[3][3] * pixeliIntrare[id + coloane * canaleCuloare + canaleCuloare];
//S
pixel += kernel[3][2] * pixeliIntrare[id + coloane * canaleCuloare];
//SW
pixel += kernel[3][1] * pixeliIntrare[id + coloane * canaleCuloare - canaleCuloare];
//W
pixel += kernel[2][1] * pixeliIntrare[id - canaleCuloare];
//NW
pixel += kernel[1][1] * pixeliIntrare[id - coloane * canaleCuloare - canaleCuloare];
//outer border of the kernel
//N
pixel += kernel[0][0] * pixeliIntrare[id - 2 * coloane * canaleCuloare - 2 * canaleCuloare];
pixel += kernel[0][1] * pixeliIntrare[id - 2 * coloane * canaleCuloare - canaleCuloare];
pixel += kernel[0][2] * pixeliIntrare[id - 2 * coloane * canaleCuloare];
pixel += kernel[0][3] * pixeliIntrare[id - 2 * coloane * canaleCuloare + canaleCuloare];
pixel += kernel[0][4] * pixeliIntrare[id - 2 * coloane * canaleCuloare + 2 * canaleCuloare];
//E
pixel += kernel[1][4] * pixeliIntrare[id - 1 * coloane * canaleCuloare + 2 * canaleCuloare];
pixel += kernel[2][4] * pixeliIntrare[id - 0 * coloane * canaleCuloare + 2 * canaleCuloare];
pixel += kernel[3][4] * pixeliIntrare[id + 1 * coloane * canaleCuloare + 2 * canaleCuloare];
//S
pixel += kernel[4][0] * pixeliIntrare[id + 2 * coloane * canaleCuloare - 2 * canaleCuloare];
pixel += kernel[4][1] * pixeliIntrare[id + 2 * coloane * canaleCuloare - canaleCuloare];
pixel += kernel[4][2] * pixeliIntrare[id + 2 * coloane * canaleCuloare];
pixel += kernel[4][3] * pixeliIntrare[id + 2 * coloane * canaleCuloare + canaleCuloare];
pixel += kernel[4][4] * pixeliIntrare[id + 2 * coloane * canaleCuloare + 2 * canaleCuloare];
//W
pixel += kernel[1][0] * pixeliIntrare[id - 1 * coloane * canaleCuloare - 2 * canaleCuloare];
pixel += kernel[2][0] * pixeliIntrare[id - 0 * coloane * canaleCuloare - 2 * canaleCuloare];
pixel += kernel[3][0] * pixeliIntrare[id + 1 * coloane * canaleCuloare - 2 * canaleCuloare];
pixel = pixel / 1;
}
else {
pixel = 0;
}
pixeliIesire[id] = pixel;
}
}
// Flatten a [rows][cols][channels] jagged image into one contiguous,
// row-major int buffer with the channel index varying fastest.
// The caller owns (and must free) the returned malloc'd array.
int* mapareMatricePixeliRGBLaVector(int*** imagine, int linii, int coloane, int canaleCuloare) {
    int* vector = (int*)malloc(linii * coloane * canaleCuloare * sizeof(int));
    int pos = 0;
    for (int r = 0; r < linii; ++r)
        for (int col = 0; col < coloane; ++col)
            for (int ch = 0; ch < canaleCuloare; ++ch)
                vector[pos++] = imagine[r][col][ch];
    return vector;
}
// Inflate a flat, channel-fastest pixel buffer back into a
// [rows][cols][channels] jagged array. Every level is malloc'd;
// the caller owns the entire structure.
int*** mapareVectorLaMatricePixeliRGB(int* vector, int linii, int coloane, int canaleCuloare) {
    int*** imagine = (int***)malloc(linii * sizeof(int**));
    int pos = 0;
    for (int r = 0; r < linii; ++r) {
        imagine[r] = (int**)malloc(coloane * sizeof(int*));
        for (int col = 0; col < coloane; ++col) {
            imagine[r][col] = (int*)malloc(canaleCuloare * sizeof(int));
            for (int ch = 0; ch < canaleCuloare; ++ch)
                imagine[r][col][ch] = vector[pos++];
        }
    }
    return imagine;
}
// Read the RGB pixel matrix from pixels.txt, apply the 5x5 convolution
// on the GPU and overwrite pixels.txt with the filtered pixels.
// Fixes in this revision: the input stream is closed before the same
// file is reopened for writing, and every host/device allocation is
// released (the original leaked all of them).
void aplicareFiltru() {
    // Read image dimensions.
    std::ifstream in("pixels.txt");
    int linii, coloane, canaleCuloare;
    in >> linii >> coloane >> canaleCuloare;
    int BLOCK_SIZE = 1000;
    int blockCount = ((linii * coloane * canaleCuloare) / BLOCK_SIZE) + 1;
    // Load the pixel matrix into host memory.
    int*** matrix = (int***)malloc(linii * sizeof(int**));
    for (int i = 0; i < linii; i++) {
        matrix[i] = (int**)malloc(coloane * sizeof(int*));
        for (int j = 0; j < coloane; j++) {
            int* line = (int*)malloc(canaleCuloare * sizeof(int));
            in >> line[0] >> line[1] >> line[2];
            matrix[i][j] = line;
        }
    }
    in.close(); // close before reopening the same file for output below
    int dimensiune = linii * coloane * canaleCuloare;
    // Flatten the matrix so the device sees one contiguous buffer.
    int* vector = mapareMatricePixeliRGBLaVector(matrix, linii, coloane, canaleCuloare);
    int* rezultat = (int*)malloc(dimensiune * sizeof(int));
    int* vectorDevice;
    int* rezultatDevice;
    cudaMalloc(&vectorDevice, dimensiune * sizeof(int));
    cudaMalloc(&rezultatDevice, dimensiune * sizeof(int));
    cudaMemcpy(
        vectorDevice, vector,
        dimensiune * sizeof(int),
        cudaMemcpyHostToDevice
    );
    // Launch the convolution kernel: one thread per color sample.
    convoluteGPU <<< blockCount, BLOCK_SIZE >>> (vectorDevice, rezultatDevice, linii, coloane, canaleCuloare);
    // Blocking copy; also synchronizes with the kernel above.
    cudaMemcpy(
        rezultat, rezultatDevice,
        dimensiune * sizeof(int),
        cudaMemcpyDeviceToHost
    );
    int*** imagine = mapareVectorLaMatricePixeliRGB(rezultat, linii, coloane, canaleCuloare);
    // Write the filtered image back to pixels.txt.
    std::ofstream out("pixels.txt");
    out << linii << " " << coloane << " " << canaleCuloare << "\n";
    for (int i = 0; i < linii; i++) {
        for (int j = 0; j < coloane; j++) {
            for (int k = 0; k < canaleCuloare; k++) {
                out << imagine[i][j][k] << " ";
            }
            out << "\n";
        }
    }
    out.close();
    // Release device and host memory (leak fix).
    cudaFree(vectorDevice);
    cudaFree(rezultatDevice);
    free(vector);
    free(rezultat);
    for (int i = 0; i < linii; i++) {
        for (int j = 0; j < coloane; j++) {
            free(matrix[i][j]);
            free(imagine[i][j]);
        }
        free(matrix[i]);
        free(imagine[i]);
    }
    free(matrix);
    free(imagine);
}
// Pipeline driver: a Python helper dumps the source image's pixels to
// pixels.txt, aplicareFiltru() filters them on the GPU, and a second
// helper rebuilds the filtered image from pixels.txt.
// Fix: string literals are now bound to const char* — binding them to a
// mutable char* is ill-formed in standard C++ (system() accepts const).
int main() {
    const char* pathFisierIntrare = "python in.py C:/Users/George/source/repos/P2/P2/landscape.png";
    const char* pathFisierIesire = "python out.py C:/Users/George/source/repos/P2/P2/landscape1.png";
    system(pathFisierIntrare); // dump source pixel values into pixels.txt
    auto start = std::chrono::steady_clock::now();
    aplicareFiltru();
    auto stop = std::chrono::steady_clock::now();
    system(pathFisierIesire); // write the filtered pixels into the output image
    // we cuda've done that
    auto diff = stop - start;
    std::cout << std::chrono::duration <double, std::milli>(diff).count() << " ms" << std::endl;
    return 0;
} |
12,973 | /*-----------
*
* atomics.cu
*
 * This is the source file of atomic operations.
*
* This kernel is based on CUDA samples. simpleAtomicIntrinsics.cuh
*
* streamsOptBenchmark/atomics.cu
*
* By Hao Li
*
*------------
*/
#include <time.h>
#include <cuda_runtime.h>
// #include "functions.cuh"
// Stress/benchmark kernel: repeatedly walks g_odata, refreshing each slot
// from g_idata and hitting it with a different atomic instruction, until
// a zero entry is reached (each comparison against NULL below is really a
// float-vs-zero test). The (int *) / (unsigned int *) casts deliberately
// reinterpret float storage for the integer-only atomics — the values
// produced are not meaningful, only the memory traffic is.
// NOTE(review): all launched threads execute this same sequential walk
// concurrently; the result is racy by design — confirm callers only use
// this for timing, not for values.
__global__ void atomicFunc(float *g_idata, float *g_odata)
{
for(int l = 0; l < 100000; l++)
{
// access thread id
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Test various atomic instructions
// Arithmetic atomic instructions
int i = 0;
// Walk until a zero sentinel (NULL here is just 0).
while(g_odata[i] != NULL)
{
g_odata[i] = g_idata[i];
// Atomic addition
atomicAdd(&g_odata[i], 10.0);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic subtraction (final should be 0)
atomicSub((int *)&g_odata[i], 10);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic exchange
atomicExch(&g_odata[i], (float)tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic maximum
atomicMax((int *)&g_odata[i], tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic minimum
atomicMin((int *)&g_odata[i], tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic increment (modulo 17+1)
atomicInc((unsigned int *)&g_odata[i], 17);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic decrement
atomicDec((unsigned int *)&g_odata[i], 137);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic compare-and-swap
atomicCAS((int *)&g_odata[i], tid-1, tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Bitwise atomic instructions
// Atomic AND
atomicAnd((int *)&g_odata[i], 2*tid+7);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic OR
atomicOr((int *)&g_odata[i], 1 << tid);
if(g_odata[i++] != NULL)
break;
g_odata[i] = g_idata[i];
// Atomic XOR
atomicXor((int *)&g_odata[i], tid);
i++;
}
}
}
// int main(int argc, char **argv)
// {
// unsigned int numThreads = 256;
// unsigned int numBlocks = 64;
// unsigned int numData = 1000000;
// unsigned int memSize = sizeof(int) * numData;
// //allocate mem for the result on host side
// int *hOData = (int *) malloc(memSize);
// //initalize the memory
// for (unsigned int i = 0; i < numData; i++)
// hOData[i] = 0;
// //To make the AND and XOR tests generate something other than 0...
// hOData[8] = hOData[10] = 0xff;
// // allocate device memory for result
// float *dOData;
// cudaMalloc((void **) &dOData, sizeof(float) * memSize);
// // copy host memory to device to initialize to zeros
// cudaMemcpy(dOData, hOData, sizeof(float) * memSize, cudaMemcpyHostToDevice);
// cudaEvent_t start;
// error = cudaEventCreate(&start);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// cudaEvent_t stop;
// error = cudaEventCreate(&stop);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // Record the start event
// error = cudaEventRecord(start, NULL);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // execute the kernel
// atomicFunc<<<numBlocks, numThreads>>>(dOData);
// // Record the stop event
// error = cudaEventRecord(stop, NULL);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // Wait for the stop event to complete
// error = cudaEventSynchronize(stop);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// float msecTotal = 0.0f;
// error = cudaEventElapsedTime(&msecTotal, start, stop);
// printf("Running Time: %f ms\n", msecTotal);
// cudaMemcpy(hOData, dOData, memSize, cudaMemcpyDeviceToHost);
// free(hOData);
// cudaFree(dOData);
// return 0;
// }
|
12,974 | #include <stdio.h>
#include <string>
#include <iostream>
using namespace std; //no longer require std:: prefix for string functions
// Return the total number of single-precision CUDA cores for a device:
// (SM count) x (cores per SM for its compute capability). Also writes a
// human-readable architecture name into `arch`.
// Cores-per-SM figures follow NVIDIA's helper_cuda.h _ConvertSMVer2Cores
// table; this revision adds the SM 6.2, 8.x (Ampere/Ada) and 9.0
// (Hopper) entries that were previously reported as unknown.
int getSPcores(cudaDeviceProp devProp, std::string& arch)
{
    int cores = 0;
    int mp = devProp.multiProcessorCount;
    arch="unknown";
    switch (devProp.major){
    case 2: // Fermi: 48 cores/SM on 2.1, 32 on 2.0
        arch="Fermi";
        if (devProp.minor == 1) cores = mp * 48;
        else cores = mp * 32;
        break;
    case 3: // Kepler: 192 cores/SM
        arch="Kepler";
        cores = mp * 192;
        break;
    case 5: // Maxwell: 128 cores/SM
        arch="Maxwell";
        cores = mp * 128;
        break;
    case 6: // Pascal: 64 (6.0) or 128 (6.1 / 6.2) cores/SM
        arch="Pascal";
        if (devProp.minor == 1 || devProp.minor == 2) cores = mp * 128;
        else if (devProp.minor == 0) cores = mp * 64;
        else printf("Unknown device type\n");
        break;
    case 7: // Volta (7.0) and Turing (7.5): 64 cores/SM
        arch="Volta|Turing";
        if ((devProp.minor == 0) || (devProp.minor == 5)) cores = mp * 64;
        else printf("Unknown device type\n");
        break;
    case 8: // Ampere (8.0: 64; 8.6/8.7: 128) and Ada (8.9: 128)
        arch="Ampere|Ada";
        if (devProp.minor == 0) cores = mp * 64;
        else if (devProp.minor == 6 || devProp.minor == 7 || devProp.minor == 9) cores = mp * 128;
        else printf("Unknown device type\n");
        break;
    case 9: // Hopper (9.0): 128 cores/SM
        arch="Hopper";
        if (devProp.minor == 0) cores = mp * 128;
        else printf("Unknown device type\n");
        break;
    default:
        printf("Unknown device type\n");
        break;
    }
    return cores;
}
// List every CUDA-capable device with its core count, architecture name
// and theoretical peak memory bandwidth (2 x clock x bus-width/8).
int main() {
    string arch("undefined");
    cudaDeviceProp prop;
    int deviceTotal;
    cudaGetDeviceCount(&deviceTotal);
    for (int dev = 0; dev < deviceTotal; ++dev) {
        cudaGetDeviceProperties(&prop, dev);
        printf("Device Number: %d\n", dev);
        printf(" Device name: %s\n", prop.name);
        printf(" CUDA Cores: %d\n", getSPcores(prop, arch));
        cout<< " Architecture: "+arch+"\n";
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
    }
    return 0;
}
|
12,975 | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
// Naive dense matrix multiply: c = a * b for square N x N row-major
// matrices. One thread computes one output element; threads that fall
// outside the matrix exit via the guard.
__global__ void matrixMult(int *a, int *b, int *c, int N) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int cIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= N || cIdx >= N)
        return;
    int acc = 0;
    for (int k = 0; k < N; ++k)
        acc += a[r * N + k] * b[k * N + cIdx];
    c[r * N + cIdx] = acc;
}
// Interactive matrix-multiply benchmark: reads N (matrix size), T
// (threads per block axis) and B (blocks per grid axis), multiplies two
// random N x N matrices on both CPU and GPU, and reports both timings,
// the accumulated error and the speedup. Repeats while the user enters 1.
// Fixes in this revision:
//  * the "sum of square difference" now actually squares each element
//    difference (it previously summed signed differences, which can
//    cancel and hide mismatches);
//  * deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize().
int main(void) {
    int N, T, B, repeat;
    repeat = 1;
    while (repeat == 1) {
        printf("Enter size of matrices: ");
        scanf("%d", &N);
        while (N <= 0) {
            printf(
                    "Size of matrices must be greater than 0. Enter a valid size of matrices: ");
            scanf("%d", &N);
        }
        printf("Enter number of threads in a block: ");
        scanf("%d", &T);
        while (T <= 0) {
            printf(
                    "Number of threads must be greater than 0. Enter number of threads in a block: ");
            scanf("%d", &T);
        }
        // NOTE(review): the launch below uses a T x T block, so the real
        // per-block thread count is T*T; values of T above 32 exceed the
        // 1024-threads-per-block limit even though they pass this check.
        while (T > 1024) {
            printf(
                    "Number of threads must not exceed the device bandwidth. Enter number of threads in a block: ");
            scanf("%d", &T);
        }
        printf("Enter number of blocks in a grid: ");
        scanf("%d", &B);
        while (B <= 0) {
            printf(
                    "Number of blocks must be greater than 0. Enter number of blocks in a grid: ");
            scanf("%d", &B);
        }
        while (B > 65535) {
            printf(
                    "Number of blocks must not exceed the device bandwidth. Enter number of blocks in a grid: ");
            scanf("%d", &B);
        }
        int *a, *b, *c, *deviceC;
        int *dev_a, *dev_b, *dev_c;
        int i, j, k;
        int ssd = 0;
        cudaEvent_t start, stop;
        float elapsedTime;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaMalloc((void**) &dev_a, (N * N) * sizeof(int));
        cudaMalloc((void**) &dev_b, (N * N) * sizeof(int));
        cudaMalloc((void**) &dev_c, (N * N) * sizeof(int));
        a = (int *) malloc((N * N) * sizeof(int));
        b = (int *) malloc((N * N) * sizeof(int));
        c = (int *) malloc((N * N) * sizeof(int));
        deviceC = (int *) malloc((N * N) * sizeof(int));
        srand(time(NULL));
        // Fill both operands with random digits 0-9.
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                a[j + (i * N)] = (int) rand() % 10;
                b[j + (i * N)] = (int) rand() % 10;
            }
        }
        /*******************begin host code*****************************/
        clock_t begin, end;
        begin = clock();
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                c[j + i * N] = 0;
                for (k = 0; k < N; k++) {
                    c[j + i * N] = c[j + i * N] + a[k + i * N] * b[j + k * N];
                }
            }
        }
        end = clock();
        printf("It took %f seconds for the host to do the matrix operation.\n",
                (float) (end - begin) / (CLOCKS_PER_SEC));
        /*******************end host code*****************************/
        /*******************begin device code*****************************/
        cudaMemcpy(dev_a, a, (N * N) * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(dev_b, b, (N * N) * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(dev_c, deviceC, (N * N) * sizeof(int),
                cudaMemcpyHostToDevice);
        dim3 grid(B, B);
        dim3 block(T, T);
        cudaEventRecord(start, 0);
        matrixMult<<<grid, block>>>(dev_a, dev_b, dev_c, N);
        cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaMemcpy(deviceC, dev_c, (N * N) * sizeof(int),
                cudaMemcpyDeviceToHost);
        cudaEventElapsedTime(&elapsedTime, start, stop);
        printf(
                "It took %f seconds for the device to do the matrix operation.\n",
                (elapsedTime / 1000));
        // Accumulate the squared element-wise difference so positive and
        // negative errors cannot cancel (bug fix: was a plain difference).
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                int diff = deviceC[j + (i * N)] - c[j + (i * N)];
                ssd += diff * diff;
            }
        }
        printf("The sum of square difference is %d.\n", ssd);
        printf("The speedup factor is %f.\n",
                ((float) (end - begin) / (CLOCKS_PER_SEC))
                        / (elapsedTime / 1000));
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        free(a);
        free(b);
        free(c);
        free(deviceC);
        cudaFree(dev_a);
        cudaFree(dev_b);
        cudaFree(dev_c);
        /*******************end device code*****************************/
        printf("Enter 1 to continue: ");
        scanf("%d", &repeat);
    }
    return 0;
}
|
12,976 | #include <chrono>
#include <iostream>
__global__ void _cuda_parallel_multiplication(int count, int* test_data, int magnitude);
// Benchmark scaffold: multiply 60M ints by 5 on the CPU (timed), then run
// the same multiply on the GPU, print the first few results and the CPU
// time.
// Fixes: the kernel launch now uses the block_count computed for it (it
// was calculated and then ignored in favor of a hard-coded 10 — the
// grid-stride kernel was still correct, just underprovisioned), the host
// buffer is released, and the stray "copy back" comment now sits on the
// transfer it describes.
// NOTE(review): the CPU pass scales the data in place, so the GPU pass
// multiplies the already-scaled values again (printed values are i*25);
// preserved as-is since both passes appear intentional for timing.
int main() {
    int count = 60000000; // 60 million elements
    int* test_data = new int[count];
    for (int i = 0; i < count; i++)
        test_data[i] = i;
    // Perform calculation on host CPU (timed).
    auto t1 = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < count; i++)
        test_data[i] = test_data[i] * 5;
    auto t2 = std::chrono::high_resolution_clock::now();
    // Copy data to device.
    int* d_test_data;
    cudaMalloc(&d_test_data, count * sizeof(int));
    cudaMemcpy(d_test_data, test_data, count * sizeof(int), cudaMemcpyHostToDevice);
    // Launch kernel with enough blocks to cover every element.
    int block_count = ceil((double)count / 1024);
    _cuda_parallel_multiplication<<<block_count, 1024>>>(count, d_test_data, 5);
    cudaDeviceSynchronize();
    // Copy results back to host.
    cudaMemcpy(test_data, d_test_data, count * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_test_data);
    for (int i = 0; i < 10; i++)
        std::cout << i << ": " << test_data[i] << std::endl;
    std::cout << "CPU time: "
              << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count()
              << "ms" << std::endl;
    delete[] test_data; // leak fix
}
// Grid-stride elementwise multiply: test_data[i] *= magnitude for every
// i < count, regardless of launch geometry.
// Fix: removed the __syncthreads() that sat inside the while loop —
// threads leave the loop after different trip counts, so the barrier was
// executed under divergent control flow (undefined behavior per the CUDA
// programming guide). No shared memory is used, so no barrier is needed.
__global__ void _cuda_parallel_multiplication(int count, int* test_data, int magnitude) {
    int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
    while (globalIdx < count) {
        test_data[globalIdx] = test_data[globalIdx] * magnitude;
        globalIdx += blockDim.x * gridDim.x;
    }
}
|
12,977 | #include <stdio.h>
#include <time.h>
int generateInitialPrimes(int *intialTempArray, int **PL, int initialPrimesRange);
__global__ void calcPrimes(int *d_IL, int *d_PL, int numOfPrimes, int lenInputList);
#define LEN_IL 1000000
#define LEN_INITIAL_PRIMES 1000
#define THREADS_PER_BLOCK 32
// Host driver: sieve the first LEN_INITIAL_PRIMES numbers on the CPU,
// ship those base primes to the GPU, and let calcPrimes mark every
// multiple of them in a LEN_IL-wide table; surviving 1-entries are new
// primes. Prints both prime lists and CPU/GPU timings.
// Fix: the new-primes array previously stored `num` (the 0/1 survivor
// flag, always 1 at that point) instead of the prime; it now stores `i`,
// matching what is printed.
int main() {
    int *IL = NULL, *PL = NULL, *tempPL = NULL;
    int *d_IL = NULL, *d_PL = NULL;
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    clock_t t;
    t = clock();
    int numOfInitialPrimes = generateInitialPrimes(tempPL, &PL, LEN_INITIAL_PRIMES);
    t = clock() - t;
    double time_taken = ((double)t)/CLOCKS_PER_SEC; // in seconds
    // Print the initial range of primes calculated in the CPU, which will be passed to the GPU:
    printf("\nThe initial primes calculated are:\n");
    for(int i=0; i < numOfInitialPrimes; i++) {
        printf("%d ", PL[i]);
    }
    printf("\nNumber of initial primes = %d\n\n", numOfInitialPrimes);
    // Space for host copies:
    IL = (int*) malloc(LEN_IL * sizeof(int));
    int size_IL = LEN_IL * sizeof(int);
    int size_PL = numOfInitialPrimes * sizeof(int);
    // Initialize input list: 1 = assumed prime until marked otherwise.
    for(int i=0; i<LEN_IL; i++) {
        IL[i] = 1;
    }
    // Space for device copies:
    cudaMalloc((void **) &d_IL, size_IL);
    cudaMalloc((void **) &d_PL, size_PL);
    // Copying the data to the device (GPU):
    cudaMemcpy(d_IL, IL, size_IL, cudaMemcpyHostToDevice);
    cudaMemcpy(d_PL, PL, size_PL, cudaMemcpyHostToDevice);
    // Launching the kernel and measuring the time taken:
    cudaEventRecord(start, 0);
    calcPrimes<<<(numOfInitialPrimes/THREADS_PER_BLOCK) + 1, THREADS_PER_BLOCK>>> (d_IL, d_PL, numOfInitialPrimes, LEN_IL);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    // Host copy of the marked table (1 = prime, 0 = composite):
    int *result = (int*) malloc(size_IL);
    cudaMemcpy(result, d_IL, size_IL, cudaMemcpyDeviceToHost);
    // Extract indexes of primes in 'result' to get the actual new prime numbers:
    printf("********* New Primes List **********\n");
    int *newPrimes = (int*)malloc(LEN_IL / 4 * sizeof(int)); // Arbitrary size; '1/4th' of the numbers list
    int newPrimesCount = 0;
    for(int i=LEN_INITIAL_PRIMES; i<LEN_IL; i++) {
        if(result[i] == 1) {
            newPrimes[newPrimesCount] = i; // store the prime itself (was the 0/1 flag)
            newPrimesCount++;
            printf("%d ", i);
        }
    }
    printf("\n\nNumber of new primes found = %d\n\n", newPrimesCount);
    printf("Time taken to find initial primes on CPU = %f ms\n", time_taken * 1000);
    printf("Parallel Job time for current iteration = %f ms\n\n", time);
    // Free memory:
    cudaFree(d_IL);
    cudaFree(d_PL);
    free(IL);
    free(PL);
    free(result);
    free(newPrimes);
    return 0;
}
// Generate initial prime numbers in the CPU:
// Returns: Number of primes found from 1 to 'LEN_INITIAL_PRIMES'
// Sieve of Eratosthenes on the CPU for numbers in [2, initialPrimesRange).
// Allocates *PL (caller frees) and fills it with the primes found;
// returns how many primes were stored.
// Fixes: the sieve previously indexed intialTempArray[initialPrimesRange]
// — one slot past the end of the allocation (heap overflow) — and sized
// its scratch buffer with the LEN_INITIAL_PRIMES macro instead of the
// initialPrimesRange parameter. Both loops now stay strictly below
// initialPrimesRange and the allocations follow the parameter, so the
// function works for any requested range.
// (The intialTempArray parameter is immediately replaced by a local
// allocation; it is kept only for signature compatibility.)
int generateInitialPrimes(int *intialTempArray, int **PL, int initialPrimesRange) {
    int primesCount = 0;
    intialTempArray = (int*) malloc(initialPrimesRange * sizeof(int));
    // Primes below n always fit in n/2 slots for the ranges used here.
    *PL = (int*) malloc(initialPrimesRange / 2 * sizeof(int));
    // Mark every candidate as "prime" to start.
    for(int i=0; i < initialPrimesRange; i++) {
        intialTempArray[i] = 1;
    }
    // Cross out multiples of every base i with i*i <= range (classic sieve).
    for(int i=2; i*i <= initialPrimesRange; i++) {
        for(int j=2*i; j < initialPrimesRange; j=j+i) {
            intialTempArray[j] = 0;
        }
    }
    // Surviving indices are the primes themselves.
    for(int i=2; i<initialPrimesRange; i++) {
        if(intialTempArray[i] == 1) {
            (*PL)[primesCount] = i;
            primesCount++;
        }
    }
    free(intialTempArray);
    return primesCount;
}
// GPU Kernel (Parallel Processing):
// Each thread owns one base prime d_PL[tid] and clears (sets to 0) every
// multiple of it in d_IL, scanning from just past the largest base prime
// up to lenInputList. Entries still holding 1 afterwards are new primes.
__global__ void calcPrimes(int *d_IL, int *d_PL, int numOfPrimes, int lenInputList) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= numOfPrimes)
        return;
    int p = d_PL[tid];
    for (int n = d_PL[numOfPrimes - 1] + 1; n < lenInputList; n++) {
        if (n % p == 0)
            d_IL[n] = 0;
    }
}
|
12,978 | //
// Created by Peter Rigole on 2019-04-26.
//
#ifndef AXONBITS_CYCLEPARITY_H
#define AXONBITS_CYCLEPARITY_H
/**
* All threads working on a cycle have the same cycle parity. This parity is used to identify the activity variable
* in the neuron that is to be updated (the next activity) versus the one that must be used as the neuron's current
* activity.
*/
// Parity alternates each update cycle; it selects which of a neuron's two
// activity variables is "current" and which is "next" (see comment above).
enum CycleParity { EvenCycle, OddCycle };
#endif //AXONBITS_CYCLEPARITY_H
|
12,979 | // 1D version of the code
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#define N 5
#define M 6
// Fill a row-major matrix with its own flat index: one block per row
// (blockIdx.x) and one thread per column (threadIdx.x); blockDim.x is
// the row width.
__global__ void dkernel(unsigned *mat)
{
    unsigned row = blockIdx.x;
    unsigned column = threadIdx.x;
    unsigned width = blockDim.x;
    unsigned flat = row * width + column;
    mat[flat] = flat;
}
// Launch one block per row and one thread per column to fill an N x M
// device matrix with flat indices, copy it back and print it.
// Fixes: %u is used for the unsigned values (was %d, a format/argument
// mismatch), and both the device and host buffers are released
// (previously leaked).
int main()
{
    unsigned *matrix, *hmatrix, i = 0, j = 0;
    cudaMalloc(&matrix, N * M * sizeof(unsigned));
    hmatrix = (unsigned *)malloc(N * M * sizeof(unsigned));
    dkernel<<<N, M>>>(matrix);
    // Blocking copy; also synchronizes with the kernel launch above.
    cudaMemcpy(hmatrix, matrix, N * M * sizeof(unsigned), cudaMemcpyDeviceToHost);
    for(i = 0; i < N; i++)
    {
        for(j = 0; j < M; j++)
        {
            printf("%u\t", hmatrix[i * M + j]);
        }
        printf("\n");
    }
    cudaFree(matrix);
    free(hmatrix);
    return 0;
} |
12,980 | #include <iostream>
#include <cuda.h>
using namespace std;
// Zero-fill arr[0..N) with a grid-stride loop, so any launch geometry
// covers the whole array.
__global__
void dkernel(int *arr, int N){
    unsigned stride = blockDim.x * gridDim.x;
    for (unsigned idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride)
        arr[idx] = 0;
}
// Add each element's own index to it (arr[i] += i) with a grid-stride
// loop, so any launch geometry covers the whole array.
__global__
void dkernel_add(int *arr, int N)
{
    unsigned stride = blockDim.x * gridDim.x;
    for (unsigned idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride)
        arr[idx] += idx;
}
// Demo of managed memory plus grid-stride kernels at three sizes (32,
// 1024, 8000). Each round zero-fills gpuArray, optionally adds the index,
// copies into cpuArray and prints it.
// Fixes: buffers are now freed before being re-allocated and again at the
// end of main — the original leaked every allocation, including the
// managed cpuArray that was shadowed by `new int[8000]`.
int main() {
    int *gpuArray, *cpuArray;
    cudaMallocManaged(&cpuArray, 32*sizeof(int));
    cudaMallocManaged(&gpuArray, 32*sizeof(int));
    dkernel<<<1, 32>>>(gpuArray,32);
    cudaMemcpy(cpuArray, gpuArray, 32*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 32 ; i++)
    {
        cout <<"cpuArray["<<i<<"]"<<cpuArray[i]<< endl;
    }
    // Round 2: 1024 elements — release the 32-element buffers first.
    cudaFree(gpuArray);
    cudaFree(cpuArray);
    cudaMallocManaged(&gpuArray, 1024*sizeof(int));
    dkernel<<<1, 1024>>>(gpuArray, 1024);
    dkernel_add<<<1, 1024>>>(gpuArray, 1024);
    cudaMallocManaged(&cpuArray, 1024*sizeof(int));
    cudaMemcpy(cpuArray, gpuArray, 1024*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0 ; i < 1024 ; i++)
    {
        cout << "cpuArray["<<i<<"]"<< cpuArray[i] << endl;
    }
    // Round 3: 8000 elements — release the 1024-element buffers first.
    cudaFree(gpuArray);
    cudaFree(cpuArray);
    cudaMallocManaged(&gpuArray, 8000*sizeof(int));
    dkernel<<<8000/128, 128>>>(gpuArray, 8000);
    dkernel_add<<<8000,128>>>(gpuArray, 8000);
    cpuArray = new int[8000];
    cudaMemcpy(cpuArray, gpuArray, 8000*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i = 0; i < 8000 ; i++)
    {
        cout << "cpuArray["<<i<<"]" << cpuArray[i] << endl;
    }
    cudaFree(gpuArray);
    delete[] cpuArray;
    return 0;
}
|
12,981 | /*Created by Alessandro Bigiotti*/
// Kernel Function to calculate the Unit Clause of the formula
// One thread per clause: every clause whose tipo[] entry equals 1 (a unit
// clause) gets its single literal asserted (lett[...] = 1) and is queued
// for propagation via nextpos; nextpos[m] is the global "work pending"
// flag. Several parameters are unused here but the signature is kept
// uniform with the other solver kernels.
__global__ void UnitClause(int NValid, int b, int t, int *nextpos, int *num_args, int *lett, int *clause, int *matrixelem, int *col, int *row, int *poslet, int *tipo, int n, int m, int nk)
{
    int gid = blockIdx.x * t + threadIdx.x;
    if (gid >= m)
        return;
    if (tipo[gid] != 1)
        return;
    int literalPos = poslet[gid];
    nextpos[gid] = 1;       // queue this clause for propagation
    nextpos[m] = 1;         // signal that at least one assignment happened
    lett[literalPos] = 1;   // assert the clause's literal
}
// kernel_function to calculate the Propagation of the assignments
// Propagation kernel: one thread per clause. A clause flagged in nextpos
// walks its literal's CSR row (row/col/matrixelem), atomically decrements
// the remaining-argument counter of each dependent clause, and when a
// counter hits zero either asserts that clause's literal and queues it
// (nextpos[col[i]] = 1, nextpos[m] = 1 as the "work pending" flag) or,
// if the literal position equals the NValid sentinel and the clause type
// is even, records a conflict in nextpos[m + 1].
// NOTE(review): lett/nextpos are written without atomics — presumably
// safe because both are only ever set to fixed values; confirm against
// the host-side iteration loop (not visible here).
__global__ void Propagate(int NValid, int b, int t, int *nextpos, int *num_args, int *lett, int *clause, int *matrixelem, int *col, int *row, int *poslet, int *tipo, int n, int m, int nk)
{
int idx = threadIdx.x;
int idblock = blockIdx.x;
int indexpos = 0;
int indexnextpos = 0;
if (idblock*t + idx < m){
// Only clauses queued by a previous pass are processed.
if (nextpos[idblock*t + idx] == 1){
indexpos = poslet[idblock*t + idx];
nextpos[idblock*t + idx] = 0;
// Walk this literal's occurrence list (CSR row).
for (int i = row[indexpos]; i < row[indexpos + 1]; i++){
if (matrixelem[i] == 2){
// One fewer unresolved argument in the dependent clause.
int old = atomicSub(num_args + col[i], 1);
if (old == 1){
indexnextpos = poslet[col[i]];
if (indexnextpos != NValid){
if (lett[indexnextpos] == 0){
lett[indexnextpos] = 1;
nextpos[col[i]] = 1;
nextpos[m] = 1;
}
}
else{
// Sentinel position: even clause type signals a conflict.
if (tipo[col[i]] % 2 == 0){
nextpos[m + 1] = NValid;
break;
}
}
}
}
}
}
}
}
|
12,982 | #include "includes.h"
//FILE IO RELATED
//max number of lines in the training dataset
#define MAX_ROWS_TRAINING 16896
// max number of columns/features in the training dataset
#define MAX_COLUMNS_TRAINING 26
// max number of rows in the testing dataset
#define MAX_ROWS_TESTING 4096
// max number of columns in the testing data
#define MAX_COLUMNS_TESTING 26
//max number of characters/line
#define MAX_CHAR 300
__constant__ int features = 26;
__constant__ int num_rows = 16896;
long mem_cpy_time = 0;
long beta_cpy_time = 0;
// parallelized across the rows
// parallelized across the features
// Computes one component of the logistic-regression gradient:
// gradient[f] = sum_i (log_func_v[i] - yvec[i]) * data[i][f],
// where log_func_v holds the per-row logistic values (computed elsewhere,
// per the original note). One thread per feature column; `features` and
// `num_rows` come from __constant__ memory.
// Fix: added a bounds guard — the original indexed data/gradient with an
// unchecked global thread id, reading and writing out of bounds whenever
// the launch rounds the thread count up past `features`.
__global__ void log_gradient(float* log_func_v, float* gradient, float* betas, float* data, int* yvec) {
    // the logistic function itself has been pulled out
    int feature_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (feature_index >= features) return; // guard the ragged final block
    float temp = 0.0f;
    for (int i = 0; i < num_rows; i++) {
        float sub = log_func_v[i] - yvec[i];
        float accessed_data = data[(i * features) + feature_index];
        temp += sub * accessed_data;
    }
    gradient[feature_index] = temp;
} |
12,983 | #include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#define MAP_COUNT __device__ void mapCount(char*key,char*value,size_t key_size, size_t value_size,int*key_im_size,int*value_im_size,int*map_im_num,int threadID)
#define EMIT_IM_COUNT(im_key_size,im_value_size) emitMapCount(im_key_size,im_value_size,word_num,key_im_size,value_im_size,map_im_num,threadID)
/*extern */MAP_COUNT;
typedef struct MapFileList {
char* filename;
struct MapFileList* next;
}MapFileList;
typedef enum InputFormat{TextInputFormat,KeyValueInputFormat,SequenceFileInputFormat} input_format;
typedef struct Index{
size_t key_offset;
size_t key_size;
size_t value_offset;
size_t value_size;
}Index;
typedef struct MapReduceSpec{
MapFileList* map_file_list;
char* map_input_keys;
char* map_input_values;
Index* map_input_index;
int* map_im_key_size;
int* map_im_value_size;
int* map_im_num;
int map_input_num;
int map_block_num;
int map_thread_num;
input_format map_input_format;
}MapReduceSpec;
// Reset a freshly allocated file-list node to the empty state.
void init_map_file_list(MapFileList* list){
    list->next = NULL;
    list->filename = NULL;
}
// Release an entire file list: walks the chain with a trailing pointer,
// freeing each node's filename (if set) and then the node itself; the
// final node is freed after the loop.
void free_map_file_list(MapFileList* list){
MapFileList* del;
MapFileList* tmp;
del=list;
tmp=list->next;
while(tmp){
if(del->filename!=NULL)
free(del->filename);
free(del);
del=tmp;
tmp=tmp->next;
}
// Free the last node, which the loop left behind.
if(del->filename!=NULL)
free(del->filename);
free(del);
}
// Put a freshly allocated spec into its default (empty) configuration:
// no input files or buffers, counts zeroed, 512 threads per map block,
// plain-text input format.
void init_mapreduce_spec(MapReduceSpec* spec){
spec->map_file_list=NULL;
spec->map_input_keys=NULL;
spec->map_input_values=NULL;
spec->map_input_index=NULL;
spec->map_im_key_size=NULL;
spec->map_im_value_size=NULL;
spec->map_im_num=NULL;
spec->map_input_num=0;
spec->map_block_num=0;
spec->map_thread_num=512;
spec->map_input_format=TextInputFormat;
}
// Release a spec and everything it owns: the input file list, the
// key/value/index buffers, the intermediate size arrays, and finally the
// spec struct itself. free(NULL) is a no-op, so unset members are safe.
void free_spec(MapReduceSpec* spec){
free_map_file_list(spec->map_file_list);
free(spec->map_input_keys);
free(spec->map_input_values);
free(spec->map_input_index);
free(spec->map_im_key_size);
free(spec->map_im_value_size);
free(spec->map_im_num);
free(spec);
}
// strncpy clone: copy at most n bytes of src into dest, stopping at the
// source terminator, then zero-fill the rest of the n-byte window.
// Like strncpy, dest is NOT NUL-terminated when strlen(src) >= n.
char *my_strncpy(char *dest, const char *src, size_t n)
{
    size_t pos = 0;
    while (pos < n && src[pos] != '\0') {
        dest[pos] = src[pos];
        ++pos;
    }
    while (pos < n) {
        dest[pos] = '\0';
        ++pos;
    }
    return dest;
}
// Split every input file into <key, value> records: for TextInputFormat
// each line becomes one record whose key is the line's byte offset
// (as a decimal string) and whose value is the line text. Keys, values
// and an Index table are appended to growing realloc'd buffers in the
// spec; files are skipped with a message once the 256 MB budget is hit.
// NOTE(review): the getline result is checked at the top of the loop, so
// the iteration in which getline returns -1 still appends one record
// from the previous buffer contents — confirm whether consumers tolerate
// this trailing entry. The 10-byte temp_key also bounds offsets to 9
// digits, and the final printf passes size_t values to %u.
void map_input_split(MapReduceSpec* spec){
MapFileList* file_list_entry;
size_t buffer_size=(size_t)256*1024*1024;
size_t buffer_used=0;
FILE* pFile;
file_list_entry=spec->map_file_list;
size_t file_size;
size_t key_array_size;
size_t value_array_size;
size_t index_array_size;
if(spec->map_input_format==TextInputFormat){
file_size=key_array_size=value_array_size=index_array_size=0;
while(file_list_entry->filename!=NULL){
pFile=fopen(file_list_entry->filename,"rb");
if (pFile==NULL) {fputs ("File error\n",stderr); exit (1);}
// Measure the file so the 256 MB budget can be enforced.
fseek (pFile , 0 , SEEK_END);
file_size = ftell (pFile);
rewind (pFile);
if(buffer_used+file_size<=buffer_size){
ssize_t result=0;
while (result!= -1) {
size_t value_size = 0;
size_t key_size=0;
char* temp_key=NULL;
char* temp_value=NULL;
// Key = current byte offset rendered as a decimal string.
temp_key=(char*)malloc(10);
sprintf(temp_key,"%d",(int)ftell(pFile));
key_size=strlen(temp_key)+1; //get the new key's size
spec->map_input_keys=(char*)realloc(spec->map_input_keys,key_array_size+key_size); //reallocate key_array, so that it can contain new keys
my_strncpy((spec->map_input_keys)+key_array_size,temp_key,key_size);
result=getline(&(temp_value), &value_size, pFile);
value_size=strlen(temp_value)+1;
spec->map_input_values=(char*)realloc(spec->map_input_values,value_array_size+value_size); //reallocate value_size, so that it can contain new values
strcpy((char*)(spec->map_input_values+value_array_size),temp_value);
spec->map_input_index=(Index*)realloc(spec->map_input_index,(index_array_size+1)*sizeof(Index)); //reallocate index array, so that it can contain new <key,value> information
spec->map_input_index[index_array_size].key_offset=key_array_size;
spec->map_input_index[index_array_size].key_size=key_size;
spec->map_input_index[index_array_size].value_offset=value_array_size;
spec->map_input_index[index_array_size].value_size=value_size;
key_array_size+=key_size;
value_array_size+=value_size;
index_array_size++;
free(temp_key); free(temp_value);
}
buffer_used=buffer_used+file_size;
}
else
printf("Buffer full!!\n");
file_list_entry=file_list_entry->next;
fclose(pFile);
}
spec->map_input_num=index_array_size;
printf("Map Input entry number: %i, %u, %u, %u\n",spec->map_input_num,key_array_size,value_array_size,index_array_size*sizeof(Index));
}
}
// True iff c is an ASCII letter (a-z or A-Z).
__device__ bool isChar(char c){
    return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
}
// Record one map task's intermediate sizes in its own slot (threadID):
// total key bytes, total value bytes and the word count.
__device__ void emitMapCount(int key_size, int value_size,int word_num,int*key_im_size_array,int*value_im_size_array,int*map_im_num,int threadID){
    key_im_size_array[threadID] = key_size;
    value_im_size_array[threadID] = value_size;
    map_im_num[threadID] = word_num;
}
// One thread per input record: locate record i's key/value through the
// index table and run the user-supplied mapCount over it, storing the
// per-record size counts at slot i.
__global__ void map_count_warp(char*keys,char*values,Index*index,int*map_im_key_size,int*map_im_value_size,int*map_im_num,int input_num){
    int rec = blockDim.x * blockIdx.x + threadIdx.x;
    if (rec >= input_num)
        return;
    Index* entry = index + rec;
    mapCount(keys + entry->key_offset, values + entry->value_offset,
             entry->key_size, entry->value_size,
             map_im_key_size, map_im_value_size, map_im_num, rec);
}
// Counting pass of the map phase: copies the split keys/values/index to
// the device, runs map_count_warp (one thread per record) to compute each
// record's intermediate key/value byte totals and word count, and copies
// those three arrays back into the spec.
// NOTE(review): device buffer sizes are taken from malloc_usable_size(),
// which is a glibc-specific call returning the usable (>= requested)
// allocation size — non-portable, and it over-copies padding bytes;
// confirm the target platform before reusing this code.
void map_count_phase(MapReduceSpec* spec){
char* d_map_input_keys;
char* d_map_input_values;
Index* d_map_input_index;
int* d_map_im_key_size;
int* d_map_im_value_size;
int* d_map_im_num;
size_t map_im_size=(spec->map_input_num)*sizeof(int);
spec->map_im_key_size=(int*)malloc(map_im_size);
spec->map_im_value_size=(int*)malloc(map_im_size);
spec->map_im_num=(int*)malloc(map_im_size);
size_t keys_size=malloc_usable_size(spec->map_input_keys);
size_t values_size=malloc_usable_size(spec->map_input_values);
size_t index_size=malloc_usable_size(spec->map_input_index);
//printf("%u,%u,%u\n",malloc_usable_size(spec->map_input_keys),malloc_usable_size(spec->map_input_values),malloc_usable_size(spec->map_input_index));
cudaMalloc(&d_map_input_keys,keys_size);
cudaMalloc(&d_map_input_values,values_size);
cudaMalloc(&d_map_input_index,index_size);
cudaMalloc(&d_map_im_key_size,map_im_size);
cudaMalloc(&d_map_im_value_size,map_im_size);
cudaMalloc(&d_map_im_num,map_im_size);
cudaMemcpy(d_map_input_keys,spec->map_input_keys,keys_size,cudaMemcpyHostToDevice);
cudaMemcpy(d_map_input_values,spec->map_input_values,values_size,cudaMemcpyHostToDevice);
cudaMemcpy(d_map_input_index,spec->map_input_index,index_size,cudaMemcpyHostToDevice);
// Ceiling division: enough blocks to cover every input record.
spec->map_block_num=((spec->map_input_num)+(spec->map_thread_num)-1)/(spec->map_thread_num);
// printf("%d\n",spec->map_block_num);
map_count_warp<<<spec->map_block_num,spec->map_thread_num>>>(d_map_input_keys,d_map_input_values,d_map_input_index,d_map_im_key_size,d_map_im_value_size,d_map_im_num,spec->map_input_num);
cudaMemcpy(spec->map_im_key_size,d_map_im_key_size,map_im_size,cudaMemcpyDeviceToHost);
cudaMemcpy(spec->map_im_value_size,d_map_im_value_size,map_im_size,cudaMemcpyDeviceToHost);
cudaMemcpy(spec->map_im_num,d_map_im_num,map_im_size,cudaMemcpyDeviceToHost);
printf("%s\n",(spec->map_input_values+((spec->map_input_index+1)->value_offset)));
printf("%d %d %d\n",*(spec->map_im_key_size+1),*(spec->map_im_value_size+1),*(spec->map_im_num+1));
cudaFree(d_map_input_keys);
cudaFree(d_map_input_values);
cudaFree(d_map_input_index);
cudaFree(d_map_im_key_size);
cudaFree(d_map_im_value_size);
cudaFree(d_map_im_num);
}
// Scan the directory `path` for regular files, build the spec's linked list
// of input file names, then run the split and map-count phases.
// Fixes over the original: the directory handle is now closed, opendir
// failure is reported instead of dereferencing NULL, and the trailing list
// node (and an empty directory's head node) is initialized so list walkers
// see a NULL-terminated list instead of uninitialized memory.
void add_input_path(char *path,MapReduceSpec* spec){
    MapFileList* plist = (MapFileList*)malloc(sizeof(MapFileList));
    plist->filename = NULL;   // sentinel: a NULL filename marks the list tail
    plist->next = NULL;
    spec->map_file_list = plist;
    DIR *pDir = opendir(path);
    if (pDir == NULL) {
        fprintf(stderr, "cannot open input directory %s\n", path);
        exit(1);
    }
    struct dirent* entry = NULL;
    while ((entry = readdir(pDir)) != NULL) {
        if (entry->d_type == DT_REG) {
            // full path = directory prefix + entry name (caller supplies the
            // trailing separator in `path`, matching the original behavior)
            plist->filename = (char*)malloc(strlen(path) + strlen(entry->d_name) + 1);
            strcpy(plist->filename, path);
            strcat(plist->filename, entry->d_name);
            plist->next = (MapFileList*)malloc(sizeof(MapFileList));
            plist = plist->next;
            plist->filename = NULL;   // keep the new tail node well-defined
            plist->next = NULL;
        }
    }
    closedir(pDir);   // the original leaked this handle
    map_input_split(spec);
    map_count_phase(spec);
}
// User map-count function body. MAP_COUNT is a project macro that expands to
// the mapCount(...) signature (value pointer/size, per-thread output arrays,
// threadID) -- TODO confirm the exact expansion against the framework header.
// For each run of isChar() characters (a "word") in the value it accumulates
// the bytes needed for intermediate keys, sizeof(int) per word for the
// intermediate values, and the word count.
MAP_COUNT{
unsigned int i;
unsigned int im_key_size=0;
unsigned int im_value_size=0;
int word_num=0;
for(i=0;i<value_size;){
// skip the separator run before the next word
while((i<value_size)&&!isChar(*(value+i)))
i++;
int start = i;
// consume the word itself
while((i<value_size)&&isChar(*(value+i)))
i++;
if(start<i){
im_key_size+=(i-start);
im_value_size+=sizeof(int);
word_num++;
}
}
// EMIT_IM_COUNT presumably wraps emitMapCount (see commented call below).
EMIT_IM_COUNT(im_key_size,im_value_size);
//emitMapCount(im_key_size,im_value_size,word_num,key_im_size,value_im_size,map_im_num,threadID);
}
// Entry point: argv[1] is the input directory to map over.
// Fix: guard argc before touching argv[1] (the original dereferenced it
// unconditionally and crashed when run without arguments).
int main(int argc, char **argv){
    if (argc < 2) {
        fprintf(stderr, "usage: %s <input_directory>\n", argv[0]);
        return 1;
    }
    MapReduceSpec* spec = (MapReduceSpec*)malloc(sizeof(MapReduceSpec));
    init_mapreduce_spec(spec);
    add_input_path(argv[1], spec);
    free(spec);
    return 0;
}
|
12,984 | #include <assert.h>
#include <stdio.h>
#include <cuda.h>
#define ARRAY_SIZE 10
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
//kernel
__global__ void addKernel( int *d_a, int *d_b, int *d_result){
    // Element-wise add; launched as a single block, one thread per element.
    const int i = threadIdx.x;
    d_result[i] = d_a[i] + d_b[i];
}
// Run the element-wise add on the GPU: copy h_a/h_b over, launch one block of
// ARRAY_SIZE threads, and copy the sums back into h_result.
// Improvement: a kernel launch returns no status directly, so check
// cudaGetLastError() after the launch to surface configuration errors.
void onDevice( int *h_a, int *h_b, int *h_result ){
    int *d_a, *d_b, *d_result;

    // allocate memory on the device
    cudaMalloc( (void**)&d_a, ARRAY_BYTES );
    cudaMalloc( (void**)&d_b, ARRAY_BYTES );
    cudaMalloc( (void**)&d_result, ARRAY_BYTES );

    // copy the arrays 'a' and 'b' to the device
    cudaMemcpy( d_a, h_a, ARRAY_BYTES, cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, h_b, ARRAY_BYTES, cudaMemcpyHostToDevice );

    // run the kernel
    addKernel<<<1,ARRAY_SIZE>>>( d_a, d_b, d_result);
    cudaError_t err = cudaGetLastError();   // catch launch-config errors
    if (err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    }

    // copy the array 'result' back from the device to the CPU
    // (a blocking memcpy also waits for the kernel to finish)
    cudaMemcpy( h_result, d_result, ARRAY_BYTES, cudaMemcpyDeviceToHost );

    // free device memory
    cudaFree( d_a );
    cudaFree( d_b );
    cudaFree( d_result );
}
void onHost(){
    // Host driver: build the inputs, run the device add, verify, clean up.
    int *h_a = (int*)malloc(ARRAY_BYTES);
    int *h_b = (int*)malloc(ARRAY_BYTES);
    int *h_result = (int*)malloc(ARRAY_BYTES);

    // a[i] = i, b[i] = i^2, result zeroed
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_a[i] = i;
        h_b[i] = i * i;
        h_result[i] = 0;
    }

    onDevice(h_a, h_b, h_result);

    // every element must satisfy a + b == result
    for (int i = 0; i < ARRAY_SIZE; i++) {
        assert( h_a[i] + h_b[i] == h_result[i] );
    }
    printf("-: successful execution :-\n");

    free(h_a);
    free(h_b);
    free(h_result);
}
int main(){
    onHost();   // all of the work happens in the host driver
    return 0;
}
|
12,985 | /*** substitui os valores aleatórios de determinado vetor de tamanho N por valores ordenados de 0 a N ***/
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#define N 2//64
__global__ void foo(int* glob) {
    // Each thread writes its own index into glob[threadIdx.x], routed
    // through a pointer to a local so the store is made indirectly.
    int slot = 0;
    int* ps = &slot;
    *ps = threadIdx.x;
    glob[*ps] = threadIdx.x;
}
|
12,986 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
extern "C"
// Tiled matrix multiply, C = A * B, using 16x16 shared-memory tiles.
// Must be launched with a 16x16 thread block; wA (width of A) and wB (width
// of B) are assumed to be multiples of 16 -- there are no tail guards.
// Improvement over the original: the barrier that protected the tiles was
// inside the inner k-loop, issuing 16 redundant __syncthreads() per tile;
// one barrier after the accumulation loop is sufficient and equivalent.
__global__ void matrixMulkernelShared(float * A, float *B, int wA, int wB, float *C)
{
    const int TILE = 16;   // tile edge, must match the thread-block shape

    // Block and thread coordinates.
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // First/last tile of A for this block, and the strides that walk the
    // tiles of A (rightwards) and of B (downwards).
    int aBegin = wA * TILE * by;
    int aEnd   = aBegin + wA - 1;
    int aStep  = TILE;
    int bBegin = TILE * bx;
    int bStep  = TILE * wB;

    // Accumulator for the single output element owned by this thread.
    float Csub = 0;

    // Walk the tile pairs that contribute to this block's output tile.
    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
    {
        __shared__ float As[TILE][TILE];
        __shared__ float Bs[TILE][TILE];

        // Each thread stages one element of each tile.
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];

        // Wait until both tiles are fully loaded before anyone reads them.
        __syncthreads();

        // Accumulate one row-of-As times column-of-Bs product.
        for (int k = 0; k < TILE; ++k)
        {
            Csub += As[ty][k] * Bs[k][tx];
        }

        // All reads must finish before the next iteration overwrites the
        // tiles (one barrier per tile instead of one per k-step).
        __syncthreads();
    }

    // Write this thread's element of the output tile.
    int c = wB * TILE * by + TILE * bx;
    C[c + wB * ty + tx] = Csub;
}
|
12,987 | #include<bits/stdc++.h>
using namespace std;
// Accumulate the first n elements of a into cpu_res. Note: cpu_res is NOT
// reset here -- callers zero it beforehand (the original contract), so
// repeated calls keep accumulating.
// Improvement: take the vector by const reference instead of copying it on
// every call; callers are unaffected.
void vector_sum(const std::vector<float>& a, float &cpu_res, int n) {
    for (int i = 0; i < n; i++) {
        cpu_res += a[i];
    }
}
// Store the minimum of the first n elements of a into cpu_res.
// Fixes: seed with the largest float instead of INT_MAX (which fails for
// inputs above 2^31), and take the vector by const reference (no copy).
void vector_min(const std::vector<float>& a, float &cpu_res, int n){
    cpu_res = std::numeric_limits<float>::max();   // neutral element for min
    for (int i = 0; i < n; i++) {
        cpu_res = std::min(cpu_res, a[i]);
    }
}
// Store the maximum of the first n elements of a into cpu_res.
// Fixes: seed with the most negative float instead of INT_MIN (which fails
// for inputs below -2^31), and take the vector by const reference (no copy).
void vector_max(const std::vector<float>& a, float &cpu_res, int n){
    cpu_res = std::numeric_limits<float>::lowest();   // neutral element for max
    for (int i = 0; i < n; i++) {
        cpu_res = std::max(cpu_res, a[i]);
    }
}
// Compute the population *variance* of the first n elements of a into
// cpu_res_sd (callers apply sqrt to obtain the standard deviation).
// `sum` is the precomputed total of the same n elements.
// Improvement: take the vector by const reference instead of copying it.
void vector_sd(const std::vector<float>& a, float sum, double &cpu_res_sd, int n){
    double mean = (double)sum / (double)n;
    double s = 0;
    for (int i = 0; i < n; i++) {
        double d = (double)a[i] - mean;
        s += d * d;
    }
    cpu_res_sd = s / (double)n;
}
// Tree reduction: after log2(n) rounds the sum of a[0..n) is left in a[0].
// Launched as a single block of n/2 threads.
// Fixes over the original: a __syncthreads() between rounds (each round must
// complete before the next reads its partial sums -- otherwise a data race),
// and a bounds guard (the original wrote a[ind] even when ind >= n).
__global__ void cuda_vector_sum(float* a,int n) {
    const int tid = threadIdx.x;
    for (int step = 1; step < n; step *= 2) {
        const int ind = 2 * step * tid;
        if (ind + step < n) {           // both operands stay in bounds
            a[ind] = a[ind] + a[ind + step];
        }
        __syncthreads();                // publish this round before the next
    }
}
// Tree reduction: after log2(n) rounds the minimum of a[0..n) is in a[0].
// Launched as a single block of n/2 threads.
// Fixes over the original: a __syncthreads() between rounds (data race
// otherwise) and a bounds guard (the original could access a[ind] with
// ind >= n). The no-op min(a[ind], FLT_MAX) branch is no longer needed.
__global__ void cuda_vector_min(float* a,int n) {
    const int tid = threadIdx.x;
    for (int step = 1; step < n; step *= 2) {
        const int ind = 2 * step * tid;
        if (ind + step < n) {
            a[ind] = min(a[ind], a[ind + step]);
        }
        __syncthreads();
    }
}
// Tree reduction: after log2(n) rounds the maximum of a[0..n) is in a[0].
// Launched as a single block of n/2 threads.
// Fixes over the original: a __syncthreads() between rounds (data race
// otherwise) and a bounds guard (the original could access a[ind] with
// ind >= n). The no-op max(a[ind], FLT_MIN) branch is no longer needed.
__global__ void cuda_vector_max(float* a,int n) {
    const int tid = threadIdx.x;
    for (int step = 1; step < n; step *= 2) {
        const int ind = 2 * step * tid;
        if (ind + step < n) {
            a[ind] = max(a[ind], a[ind + step]);
        }
        __syncthreads();
    }
}
__global__ void cuda_update_arr(float *a,double mean){
    // Replace each element with its squared deviation from the mean
    // (single-block launch, one thread per element).
    const int i = threadIdx.x;
    const double d = a[i] - mean;   // promoted to double, as in the original
    a[i] = d * d;
}
// Demo driver: compute sum/average/SD/min/max of N random floats on the CPU,
// then repeat each on the GPU via single-block reduction kernels.
// N must be a power of two here (the reductions halve it each round) and
// small enough that N/2 fits in one thread block.
int main() {
int N = 2048;
vector<float> a(N);
srand(time(0));
// Fill with uniform random floats in [0, 100].
generate(begin(a), end(a), []() { return (float(rand())/float((RAND_MAX)) * 100.0); });
for(auto item:a)
cout<<item<<" ";
cout<<'\n';
// NOTE(review): gpu_res_sd is declared but never used.
float cpu_res=0,gpu_res=0;
double cpu_res_sd = 0,gpu_res_sd = 0;
cout<<"CPU: "<<'\n';
//-------------------------------------------------------------------
// Sum calculation
vector_sum(a,cpu_res,N);
cout << "Vector Sum using CPU :"<<cpu_res<<" \n";
// Average calculation
cout << "Vector Average using CPU :"<<(double)cpu_res/(double)N<<" \n";
// vector_sd produces the variance; sqrt below yields the SD.
vector_sd(a,cpu_res,cpu_res_sd,N);
cout << "Vector Standard Deviation using CPU :"<<fixed<<setprecision(2)<<sqrt(cpu_res_sd)<<" \n";
vector_min(a,cpu_res,N);
cout << "Vector Min using CPU :"<<cpu_res<<" \n";
vector_max(a,cpu_res,N);
cout << "Vector Max using CPU :"<<cpu_res<<" \n";
cout<<"GPU: "<<'\n';
//-------------------------------------------------------------------
// Allocate memory on the device
size_t bytes = sizeof(float) * N;
float* d_a;
cudaMalloc(&d_a, bytes);
//-------------------------------------------------------------------
// Copy data from the host to the device (CPU to GPU)
cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
// Each reduction destroys d_a, so the input is re-uploaded before each one.
// The reduced value lands in d_a[0]; the blocking copy back also
// synchronizes with the kernel on the default stream.
cuda_vector_sum <<<1,N/2>>> (d_a,N);
cudaMemcpy(&gpu_res, d_a, sizeof(float), cudaMemcpyDeviceToHost);
cout << "Vector Sum using GPU :"<<gpu_res<<" \n";
//-------------------------------------------------------------------
cout << "Vector Average using GPU :"<<(double)gpu_res/(double)N<<" \n";
//-------------------------------------------------------------------
// Variance on the GPU: square the deviations in place, then sum them.
double mean = (double)gpu_res/(double)N;
cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
cuda_update_arr<<<1,N>>>(d_a,mean);
cuda_vector_sum<<<1,N/2>>>(d_a,N);
cudaMemcpy(&gpu_res, d_a, sizeof(float), cudaMemcpyDeviceToHost);
gpu_res = (double)gpu_res/(double)N;
cout << "Vector Standard Deviation using GPU :"<<fixed<<setprecision(2)<<sqrt(gpu_res)<<" \n";
//-------------------------------------------------------------------
// NOTE(review): these gpu_res seeds are overwritten by the copies below.
cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
gpu_res = INT_MAX;
cuda_vector_min <<<1,N/2>>> (d_a,N);
cudaMemcpy(&gpu_res, d_a, sizeof(float), cudaMemcpyDeviceToHost);
cout << "Vector Min using GPU :"<<gpu_res<<" \n";
//-------------------------------------------------------------------
cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
gpu_res = INT_MIN;
cuda_vector_max <<<1,N/2>>> (d_a,N);
cudaMemcpy(&gpu_res, d_a, sizeof(float), cudaMemcpyDeviceToHost);
cout << "Vector Max using GPU :"<<gpu_res<<" \n";
//-------------------------------------------------------------------
// Free memory on device
cudaFree(d_a);
}
12,988 | #include <cuda.h>
#include <stdio.h>
__global__ void sum(float* a, float* b, float* c)
{
    // Element-wise vector addition: one thread per element, flat 1D grid.
    const int i = threadIdx.x + blockDim.x * blockIdx.x;
    c[i] = a[i] + b[i];
}
// Report (but do not abort on) a failed CUDA call, tagging file and line.
// Fixes over the original: the macro argument is evaluated exactly once
// (the old ternary expanded `value` twice, re-executing the API call on the
// error path), and the do/while(0) form is statement-safe without the
// printf("") placeholder branch.
#define CUDA_CHECK_RETURN(value) do {                                        \
        cudaError_t _cc_err = (cudaError_t)(value);                          \
        if (_cc_err != cudaSuccess)                                          \
            printf("Error %s at line %d in the file %s\n",                   \
                   cudaGetErrorString(_cc_err), __LINE__, __FILE__);         \
    } while (0)
// Read grid size n and block size k from stdin, add two n*k-element vectors
// on the GPU, time the kernel with CUDA events, and print the tail elements.
// Fixes over the original: arrays allocated with new[] are released with
// delete[] (free() on new[] memory is undefined behavior), and the scanf
// result is checked so n and k are never used uninitialized.
int main()
{
    int n, k;
    if (scanf("%d%d", &n, &k) != 2) {
        fprintf(stderr, "expected two integers: <blocks> <threads>\n");
        return 1;
    }
    float* a = new float[n * k];
    float* b = new float[n * k];
    float* c = new float[n * k];
    for(int i = 0; i < n * k; i++)
    {
        a[i] = i;
        b[i] = i;
    }
    float* dev1;
    float* dev2;
    float* dev3;
    float elapsedTime;
    cudaEvent_t start, stop;
    CUDA_CHECK_RETURN(cudaEventCreate(&start));
    CUDA_CHECK_RETURN(cudaEventCreate(&stop));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev1, n * k * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev2, n * k * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev3, n * k * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMemcpy(dev1, a, n * k * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(dev2, b, n * k * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
    // NOTE: k must not exceed the device's max threads per block.
    sum <<< n, k >>> (dev1, dev2, dev3);
    CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
    CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
    CUDA_CHECK_RETURN(cudaGetLastError());   // surface launch errors
    CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
    fprintf(stderr, "gTest took %g\n", elapsedTime);
    CUDA_CHECK_RETURN(cudaEventDestroy(start));
    CUDA_CHECK_RETURN(cudaEventDestroy(stop));
    CUDA_CHECK_RETURN(cudaMemcpy(c, dev3, n * k * sizeof(float), cudaMemcpyDeviceToHost));
    for(int i = (n * k) - 5; i < n * k; i++)
    {
        printf("Element #%i: %f\n", i, c[i]);
    }
    delete[] a;   // was free(a): UB on new[] storage
    delete[] b;
    delete[] c;
    CUDA_CHECK_RETURN(cudaFree(dev1));
    CUDA_CHECK_RETURN(cudaFree(dev2));
    CUDA_CHECK_RETURN(cudaFree(dev3));
    return 0;
}
12,989 | //
// Created by root on 2020/11/30.
//
#include "kernel.cuh"
#include "stdio.h"
#define TPB 64
#define ATOMIC 1 // 0 for non-atomic add
// Dot product of d_a and d_b (n ints): each block reduces its slice in
// shared memory; thread 0 of each block folds the block sum into *d_res
// (atomically when ATOMIC != 0; the non-atomic path is a deliberate demo of
// the cross-block race).
// Fixes over the original: out-of-range threads no longer return before
// __syncthreads() -- an early return there is undefined behavior when a
// block straddles n -- and their shared slots are zero-filled instead of
// being left uninitialized and then summed.
__global__ void dotKernel(int *d_res, int *d_a, int *d_b, int n) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int s_dix = threadIdx.x;
    __shared__ int s_prod[TPB];
    // Zero-fill tail slots so the full-block sum below stays correct.
    s_prod[s_dix] = (idx < n) ? d_a[idx] * d_b[idx] : 0;
    __syncthreads();   // all products staged before thread 0 reads them
    if (s_dix == 0) {
        int blockSum = 0;
        for (int i = 0; i < blockDim.x; i++) {
            blockSum += s_prod[i];
        }
        printf("Block %d, block sum = %d\n", blockIdx.x, blockSum);
        if (ATOMIC) {
            atomicAdd(d_res, blockSum);
        } else {
            *d_res += blockSum;   // racy across blocks (demo path)
        }
    }
}
void dotLauncher(int *res, int *a, int *b, int n) {
    // Host driver: stage the two input vectors on the device, run the
    // dot-product kernel, and copy the scalar result back into *res.
    int *d_res;
    int *d_a = 0, *d_b = 0;
    const size_t vecBytes = n * sizeof(int);

    cudaMalloc(&d_res, sizeof(int));
    cudaMalloc(&d_a, vecBytes);
    cudaMalloc(&d_b, vecBytes);

    cudaMemset(d_res, 0, sizeof(int));   // accumulator starts at zero
    cudaMemcpy(d_a, a, vecBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, vecBytes, cudaMemcpyHostToDevice);

    const int blocks = (n + TPB - 1) / TPB;   // ceil-divide elements over blocks
    dotKernel<<<blocks, TPB>>>(d_res, d_a, d_b, n);

    // Blocking copy on the default stream also waits for the kernel.
    cudaMemcpy(res, d_res, sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(d_res);
    cudaFree(d_a);
    cudaFree(d_b);
}
12,990 | #include <cassert>
struct type { /* a regular type */ };
// Helper callable from host or device; asserts it is running as
// (block 0, thread 0) before touching tc_operand.
__host__ __device__
void ordinary_function(type* tc_operand, int block, int thread) {
assert(block == 0 && thread == 0);
// use tc_operand
}
// Kernel wrapper: forwards this thread's grid coordinates to the helper,
// so only a <<<1,1>>> launch passes the assert above.
__global__
void entry_point_function(type* tc_operand) {
ordinary_function(tc_operand, blockIdx.x, threadIdx.x);
}
#include <memory>
// Minimal allocator that hands out CUDA unified (managed) memory, so the
// same pointer is valid on both host and device.
template <class T> struct managed {
  typedef T value_type;
  managed () = default;
  template <class U> constexpr managed (const managed<U>&) noexcept {}
  // Allocate space for n objects; returns nullptr if cudaMallocManaged
  // fails (the pointer is pre-seeded to nullptr and only set on success).
  T* allocate(std::size_t n) {
    void* raw = nullptr;
    cudaMallocManaged(&raw, sizeof(T) * n);
    return static_cast<T*>(raw);
  }
  // The size argument is unused: cudaFree releases the whole allocation.
  void deallocate(T* p, std::size_t) noexcept { cudaFree(p); }
};
int main() {
  // Allocate one managed `type`, touch it from the device, then the host.
  type* obj = managed<type>().allocate(1);
  entry_point_function<<<1,1>>>(obj);
  cudaDeviceSynchronize();   // finish the kernel before host access
  ordinary_function(obj, 0, 0);
  managed<type>().deallocate(obj, 1);
  return 0;
}
|
12,991 | // nvcc ani2PCF.cu -o par.out && ./par.out data.dat rand0.dat 32768 30 180
#include <iostream>
#include <fstream> //manejo de archivos
#include <string.h>
#include <time.h>
#include <math.h>
using namespace std;
struct PointW3D{
float x;
float y;
float z;
float w;
};
struct Node{
int len; // Cantidad de elementos en el nodo.
PointW3D *elements; // Elementos del nodo.
};
void open_files(string name_file, int pts, PointW3D *datos){
/* Function to open our data files */
ifstream file;
// Hard-coded base directory prepended to the given file name.
string mypathto_files = "../../../fake_DATA/DATOS/";
//This creates the full path to where I have my data files
name_file.insert(0,mypathto_files);
// NOTE(review): opened with ios::binary but read below with the formatted
// >> operator, which parses text -- confirm the files are really ASCII.
file.open(name_file.c_str(), ios::in | ios::binary); //le indico al programa que se trata de un archivo binario con ios::binary
if (file.fail()){
cout << "Error al cargar el archivo " << endl;
exit(1);
}
// Read pts whitespace-separated (x, y, z, w) records; individual reads are
// not checked, so a short file leaves trailing entries unmodified.
for ( int c = 0; c < pts; c++)
{
file >> datos[c].x >> datos[c].y >> datos[c].z >> datos[c].w;
}
file.close();
}
//====================================================================
/*
void save_histogram(string name, int bns, double *histo){
//Función para guardar nuestros archivos de histogramas
int i, j;
ofstream file;
file.open(name.c_str(),ios::out | ios::binary);
if (file.fail()){
cout << "Error al guardar el archivo " << endl;
exit(1);
}
for (i=0; i<bns; ++i){
for (j=0; j<bns; ++j) {
file << histo[i][j] << " ";
}
file << "\n";
}
file.close();
}
*/
//===================================================================
// Append one weighted point to a managed-memory array, growing it by one
// element (grow-by-one reallocation is O(n^2) overall but kept to preserve
// the original behavior). `lon` is incremented to the new length.
// Bug fix: the original wrote the weight into .z (`array[lon-1].z = _w`),
// clobbering the z coordinate and leaving .w uninitialized.
void add(PointW3D *&array, int &lon, float _x, float _y, float _z, float _w){
    lon++;
    PointW3D *array_aux;
    cudaMallocManaged(&array_aux, lon*sizeof(PointW3D));
    for (int i = 0; i < lon-1; i++){
        array_aux[i] = array[i];   // copy existing points (x, y, z, w)
    }
    cudaFree(array);
    array = array_aux;
    array[lon-1].x = _x;
    array[lon-1].y = _y;
    array[lon-1].z = _z;
    array[lon-1].w = _w;           // was .z in the original
}
// Build the 3D spatial grid: clear all partitions^3 nodes, then drop each of
// the np points of `dat` into the node owning its (x, y, z) cell.
// NOTE(review): cell indices assume coordinates in [0, size_node*partitions);
// a coordinate at or beyond that bound indexes out of range unchecked.
void make_nodos(Node ***nod, PointW3D *dat, unsigned int partitions, float size_node, unsigned int np){
/*
Function to create the nodes holding the data and random points
Arguments
nod: array where the nodes are created.
dat: data to be split into nodes.
*/
int row, col, mom;
// Initialize the empty nodes (each gets a 1-element managed buffer):
for (row=0; row<partitions; row++){
for (col=0; col<partitions; col++){
for (mom=0; mom<partitions; mom++){
nod[row][col][mom].len = 0;
cudaMallocManaged(&nod[row][col][mom].elements, sizeof(PointW3D));
}
}
}
// Fill the nodes with the points of dat (add() grows each node by one):
for (int i=0; i<np; i++){
row = (int)(dat[i].x/size_node);
col = (int)(dat[i].y/size_node);
mom = (int)(dat[i].z/size_node);
add(nod[row][col][mom].elements, nod[row][col][mom].len, dat[i].x, dat[i].y, dat[i].z, dat[i].w);
}
}
//====================================================================
//============ Sección de Kernels ==================================
//===================================================================
// Count pair separations between points of a single node and bin them into
// the 2D histogram XX, split into parallel (z) and transverse (xy)
// components. dd_max is the squared maximum distance; ds = bins/dmax.
// NOTE(review): v = 2*w1*w2 is computed but never used -- the histogram is
// incremented by the constant 2 rather than the pair weight. Confirm intended.
__device__ void count_distances11(float **XX, PointW3D *elements, int len, float ds, float dd_max){
/*
Function to count the distances between points within the same node.
*/
int bi, bj;
float v;
float x1,y1,z1,w1,x2,y2,z2,w2;
float ddz, dd_ort;
// Unordered pairs (i, j) with j > i.
for (int i=0; i<len-1; ++i){
x1 = elements[i].x;
y1 = elements[i].y;
z1 = elements[i].z;
w1 = elements[i].w;
for (int j=i+1; j<len; ++j){
x2 = elements[j].x;
y2 = elements[j].y;
z2 = elements[j].z;
w2 = elements[j].w;
// Squared transverse and line-of-sight separations.
dd_ort = (x2-x1)*(x2-x1)+(y2-y1)*(y2-y1);
ddz = (z2-z1)*(z2-z1);
if (ddz < dd_max && dd_ort < dd_max){
bi = int(sqrt(ddz)*ds);
bj = int(sqrt(dd_ort)*ds);
v = 2*w1*w2;
atomicAdd(&XX[bi][bj],2);
}
}
}
}
// Count pair separations between the points of two different nodes and bin
// them into the 2D histogram XX (parallel vs transverse components).
// NOTE(review): `d` is declared but unused, and v = 2*w1*w2 is computed but
// never used -- the histogram is incremented by the constant 2 rather than
// the pair weight. Confirm intended.
__device__ void count_distances12(float **XX, PointW3D *elements1, int len1, PointW3D *elements2, int len2, float ds, float dd_max){
/*
Function to count the distances between points of two different nodes.
*/
int bi, bj;
float d, v;
float x1,y1,z1,w1,x2,y2,z2,w2;
float ddz, dd_ort;
// Full cross product of the two element lists.
for (int i=0; i<len1; ++i){
x1 = elements1[i].x;
y1 = elements1[i].y;
z1 = elements1[i].z;
w1 = elements1[i].w;
for (int j=0; j<len2; ++j){
x2 = elements2[j].x;
y2 = elements2[j].y;
z2 = elements2[j].z;
w2 = elements2[j].w;
dd_ort = (x2-x1)*(x2-x1)+(y2-y1)*(y2-y1);
ddz = (z2-z1)*(z2-z1);
if (ddz < dd_max && dd_ort < dd_max){
bi = int(sqrt(ddz)*ds);
bj = int(sqrt(dd_ort)*ds);
v = 2*w1*w2;
atomicAdd(&XX[bi][bj],2);
}
}
}
}
// Auto-correlation histogram: one thread per grid node. Each thread pairs
// its node with itself and with every later node within did_max cells, and
// accumulates into XX_A (even threads) or XX_B (odd threads) to halve
// atomic contention; the two halves are summed on the host.
__global__ void make_histoXX(float **XX_A, float **XX_B, Node ***nodeD, int partitions, float ds, float dd_max, int did_max, int did_max2){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<(partitions*partitions*partitions)){
//Get the node positon in this thread
int mom = (int) (idx/(partitions*partitions));
int col = (int) ((idx%(partitions*partitions))/partitions);
int row = idx%partitions;
if (nodeD[row][col][mom].len > 0){
// Pairs inside the node itself.
if (idx%2==0){
count_distances11(XX_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, ds, dd_max);
} else {
count_distances11(XX_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, ds, dd_max);
}
int u,v,w; //Position of node 2
unsigned int dx_nod12, dy_nod12, dz_nod12, dd_nod12;
// Node 2 moves only along z.
// NOTE(review): the bound uses `w-row` where every other loop bounds the
// z index against `mom` -- this looks like it should be `w-mom`; confirm.
for(w = mom+1; w<partitions && w-row<=did_max; w++){
// Even threads write histogram A, odd ones histogram B.
if (idx%2==0){
count_distances12(XX_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[row][col][w].elements, nodeD[row][col][w].len, ds, dd_max);
} else {
count_distances12(XX_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[row][col][w].elements, nodeD[row][col][w].len, ds, dd_max);
}
}
// Node 2 moves in Z and Y (same x-row).
for(v=col+1; v<partitions && v-col<=did_max; v++){
dy_nod12 = v-col;
for(w=(mom-did_max)*(mom>did_max); w<partitions && w-mom<=did_max; w++){
dz_nod12 = w-mom;
dd_nod12 = dz_nod12*dz_nod12 + dy_nod12*dy_nod12;
if (dd_nod12<=did_max2){
if (idx%2==0){
count_distances12(XX_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[row][v][w].elements, nodeD[row][v][w].len, ds, dd_max);
} else {
count_distances12(XX_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[row][v][w].elements, nodeD[row][v][w].len, ds, dd_max);
}
}
}
}
// Node 2 moves in X, Y and Z.
for(u = row+1; u < partitions && u-row< did_max; u++){
dx_nod12 = u-row;
for(v = (col-did_max)*(col>did_max); v < partitions && v-col< did_max; v++){
dy_nod12 = v-col;
for(w = (mom-did_max)*(mom>did_max); w < partitions && w-mom< did_max; w++){
dz_nod12 = w-mom;
dd_nod12 = dz_nod12*dz_nod12 + dy_nod12*dy_nod12 + dx_nod12*dx_nod12;
if (dd_nod12<=did_max2){
if (idx%2==0){
count_distances12(XX_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[u][v][w].elements, nodeD[u][v][w].len, ds, dd_max);
} else {
count_distances12(XX_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeD[u][v][w].elements, nodeD[u][v][w].len, ds, dd_max);
}
}
}
}
}
}
}
}
// Cross-correlation histogram (data x random): one thread per data node.
// Unlike make_histoXX the node-2 loops sweep both directions, since the two
// catalogs are distinct. Even threads accumulate into XY_A, odd into XY_B.
__global__ void make_histoXY(float **XY_A, float **XY_B, Node ***nodeD, Node ***nodeR, int partitions, float ds, float dd_max, int did_max, int did_max2){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<(partitions*partitions*partitions)){
//Get the node positon in this thread
int mom = (int) (idx/(partitions*partitions));
int col = (int) ((idx%(partitions*partitions))/partitions);
int row = idx%partitions;
if (nodeD[row][col][mom].len > 0){
int u,v,w; //Position of node 2
unsigned int dx_nod12, dy_nod12, dz_nod12, dd_nod12;
// Node 2 moves only along z.
// NOTE(review): the assignment below is immediately overwritten by the
// for-init; dead code kept as-is.
w = 0;//(mom-did_max)*(mom>did_max);
// NOTE(review): the bound uses `w-row`; elsewhere z is bounded against
// `mom` -- this looks like it should be `w-mom`; confirm.
for(w = (mom-did_max)*(mom>did_max); w<partitions && w-row<=did_max; w++){
// Even threads write histogram A, odd ones histogram B.
if (idx%2==0){
count_distances12(XY_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeR[row][col][w].elements, nodeR[row][col][w].len, ds, dd_max);
} else {
count_distances12(XY_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeR[row][col][w].elements, nodeR[row][col][w].len, ds, dd_max);
}
}
// Node 2 moves in Z and Y.
for(v = (col-did_max)*(col>did_max); v<partitions && v-col<=did_max; v++){
dy_nod12 = v-col;
for(w = (mom-did_max)*(mom>did_max); w<partitions && w-mom<=did_max; w++){
dz_nod12 = w-mom;
dd_nod12 = dz_nod12*dz_nod12 + dy_nod12*dy_nod12;
if (dd_nod12<=did_max2){
if (idx%2==0){
count_distances12(XY_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeR[row][v][w].elements, nodeR[row][v][w].len, ds, dd_max);
} else {
count_distances12(XY_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeR[row][v][w].elements, nodeR[row][v][w].len, ds, dd_max);
}
}
}
}
// Node 2 moves in X, Y and Z.
for(u = (row-did_max)*(row>did_max); u < partitions && u-row< did_max; u++){
dx_nod12 = u-row;
for(v = (col-did_max)*(col>did_max); v < partitions && v-col< did_max; v++){
dy_nod12 = v-col;
for(w = (mom-did_max)*(mom>did_max); w < partitions && w-mom< did_max; w++){
dz_nod12 = w-mom;
dd_nod12 = dz_nod12*dz_nod12 + dy_nod12*dy_nod12 + dx_nod12*dx_nod12;
if (dd_nod12<=did_max2){
if (idx%2==0){
count_distances12(XY_A, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeR[u][v][w].elements, nodeR[u][v][w].len, ds, dd_max);
} else {
count_distances12(XY_B, nodeD[row][col][mom].elements, nodeD[row][col][mom].len, nodeR[u][v][w].elements, nodeR[u][v][w].len, ds, dd_max);
}
}
}
}
}
}
}
}
// Anisotropic 2PCF driver. argv: [1] data file, [2] random file,
// [3] number of points, [4] number of bins, [5] dmax.
// Builds the spatial node grids, launches the DD/RR/DR histogram kernels,
// merges the A/B halves, prints the histograms, and releases everything.
// Bug fix throughout the teardown: the original called cudaFree(&ptr) /
// cudaFree(&*(p+i)), passing the address of the host pointer variable (or a
// slot *inside* a managed array) instead of the device allocation itself,
// so every free failed and every allocation leaked. All frees now pass the
// pointers that cudaMallocManaged actually returned.
int main(int argc, char **argv){
    unsigned int np = stoi(argv[3]), bn = stoi(argv[4]);
    float dmax = stof(argv[5]);
    float ds = (float)(bn)/dmax, dd_max=dmax*dmax, size_box = 250.0, alpha = 2.176;
    float size_node = alpha*(size_box/pow((float)(np),1/3.));
    int did_max = (int)(ceil(dmax/size_node));            // node reach in cells
    int did_max2 = (int)(ceil(dd_max/(size_node*size_node)));
    unsigned int partitions = (int)(ceil(size_box/size_node));
    //int np = 32768, bn = 10;
    //float dmax = 180.0;
    float **DD_A, **RR_A, **DR_A, **DD_B, **RR_B, **DR_B;
    double **DD, **RR, **DR;
    PointW3D *dataD;
    PointW3D *dataR;
    cudaMallocManaged(&dataD, np*sizeof(PointW3D));   // managed: host+device visible
    cudaMallocManaged(&dataR, np*sizeof(PointW3D));
    // Output file names (saving is currently commented out below).
    string nameDD = "DDiso.dat", nameRR = "RRiso.dat", nameDR = "DRiso.dat";
    // Host histograms (double) plus the managed A/B halves the kernels fill.
    DD = new double*[bn];
    RR = new double*[bn];
    DR = new double*[bn];
    cudaMallocManaged(&DD_A, bn*sizeof(float*));
    cudaMallocManaged(&RR_A, bn*sizeof(float*));
    cudaMallocManaged(&DR_A, bn*sizeof(float*));
    cudaMallocManaged(&DD_B, bn*sizeof(float*));
    cudaMallocManaged(&RR_B, bn*sizeof(float*));
    cudaMallocManaged(&DR_B, bn*sizeof(float*));
    for (int i=0; i<bn; ++i){
        DD[i] = new double[bn];
        RR[i] = new double[bn];
        DR[i] = new double[bn];
        cudaMallocManaged(&DD_A[i], bn*sizeof(float));
        cudaMallocManaged(&RR_A[i], bn*sizeof(float));
        cudaMallocManaged(&DR_A[i], bn*sizeof(float));
        cudaMallocManaged(&DD_B[i], bn*sizeof(float));
        cudaMallocManaged(&RR_B[i], bn*sizeof(float));
        cudaMallocManaged(&DR_B[i], bn*sizeof(float));
    }
    // Zero every histogram.
    for (int i = 0; i<bn; i++){
        for (int j=0; j<bn; j++){
            DD[i][j] = 0.0;
            RR[i][j] = 0.0;
            DR[i][j] = 0.0;
            DD_A[i][j] = 0.0;
            RR_A[i][j] = 0.0;
            DR_A[i][j] = 0.0;
            DD_B[i][j] = 0.0;
            RR_B[i][j] = 0.0;
            DR_B[i][j] = 0.0;
        }
    }
    // Load the data and random catalogs into the managed arrays.
    open_files(argv[1], np, dataD);
    open_files(argv[2], np, dataR);
    // Build the partitions^3 node grids.
    Node ***nodeD;
    Node ***nodeR;
    cudaMallocManaged(&nodeR, partitions*sizeof(Node**));
    cudaMallocManaged(&nodeD, partitions*sizeof(Node**));
    for (int i=0; i<partitions; i++){
        cudaMallocManaged(&nodeR[i], partitions*sizeof(Node*));
        cudaMallocManaged(&nodeD[i], partitions*sizeof(Node*));
        for (int j=0; j<partitions; j++){
            cudaMallocManaged(&nodeR[i][j], partitions*sizeof(Node));
            cudaMallocManaged(&nodeD[i][j], partitions*sizeof(Node));
        }
    }
    // Classify the points into the nodes.
    make_nodos(nodeD, dataD, partitions, size_node, np);
    make_nodos(nodeR, dataR, partitions, size_node, np);
    int blocks = (int)(ceil((float)((partitions*partitions*partitions)/(float)(1024))));
    dim3 grid(blocks,1,1);
    dim3 block(1024,1,1);
    clock_t begin = clock();
    make_histoXX<<<grid,block>>>(DD_A, DD_B, nodeD, partitions, ds, dd_max, did_max, did_max2);
    make_histoXX<<<grid,block>>>(RR_A, RR_B, nodeR, partitions, ds, dd_max, did_max, did_max2);
    make_histoXY<<<grid,block>>>(DR_A, DR_B, nodeD, nodeR, partitions, ds, dd_max, did_max, did_max2);
    //Waits for the GPU to finish
    cudaDeviceSynchronize();
    //Check here for errors
    cudaError_t error = cudaGetLastError();
    cout << "The error code is " << error << endl;
    if(error != 0)
    {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("\nTiempo en CPU usado = %.4f seg.\n", time_spent );
    // Merge the even/odd kernel halves into the double histograms.
    for (int i = 0; i < bn; i++){
        for (int j = 0; j < bn; j++){
            DD[i][j] = (double)(DD_A[i][j]+ DD_B[i][j]);
            RR[i][j] = (double)(RR_A[i][j]+ RR_B[i][j]);
            DR[i][j] = (double)(DR_A[i][j]+ DR_B[i][j]);
        }
    }
    cout << "Termine de hacer todos los histogramas" << endl;
    // Print the histograms.
    cout << "\nHistograma DD:" << endl;
    int sum = 0;
    for (int i = 0; i<bn; i++){
        for (int k = 0; k<bn; k++){
            cout << DD[i][k] << "\t";
            sum += DD[i][k];
        }
        cout << "\n";
    }
    cout << "Total: " << sum << endl;
    cout << "\nHistograma RR:" << endl;
    for (int i = 0; i<bn; i++){
        for (int k = 0; k<bn; k++){
            cout << RR[i][k] << "\t";
            sum += RR[i][k];
        }
    }
    cout << "\nHistograma DR:" << endl;
    for (int i = 0; i<bn; i++){
        for (int k = 0; k<bn; k++){
            cout << DR[i][k] << "\t";
            sum += DR[i][k];
        }
        cout << "\n";
    }
    // Save the histograms (currently disabled).
    //save_histogram(nameDD, bn, DD);
    cout << "Guarde histograma DD..." << endl;
    //save_histogram(nameRR, bn, RR);
    cout << "Guarde histograma RR..." << endl;
    //save_histogram(nameDR, bn, DR);
    cout << "Guarde histograma DR..." << endl;
    // Teardown: free the device allocations themselves (not &hostPtr).
    cudaFree(dataD);
    cudaFree(dataR);
    for (int i=0; i<bn; ++i){
        delete[] DD[i];
        delete[] RR[i];
        delete[] DR[i];
    }
    delete[] DD;
    delete[] DR;
    delete[] RR;
    for (int i=0; i<bn; ++i){
        cudaFree(DD_A[i]);
        cudaFree(RR_A[i]);
        cudaFree(DR_A[i]);
        cudaFree(DD_B[i]);
        cudaFree(RR_B[i]);
        cudaFree(DR_B[i]);
    }
    cudaFree(DD_A);
    cudaFree(RR_A);
    cudaFree(DR_A);
    cudaFree(DD_B);
    cudaFree(RR_B);
    cudaFree(DR_B);
    for (int i=0; i<partitions; i++){
        for (int j=0; j<partitions; j++){
            cudaFree(nodeR[i][j]);
            cudaFree(nodeD[i][j]);
        }
        cudaFree(nodeR[i]);
        cudaFree(nodeD[i]);
    }
    cudaFree(nodeR);
    cudaFree(nodeD);
    cout << "Programa Terminado..." << endl;
    return 0;
}
|
12,992 | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
__global__ void kernel( int *a, int dimx, int dimy ) {
    // Increment one element of the dimx-by-dimy array per thread (2D launch).
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    a[row*dimx + col] += 1;
}
// Allocate a 16x16 int array on host and device, zero it, bump every element
// on the GPU, and copy the result back.
// Bug fix: the host malloc had been swallowed into the trailing comment on
// the declaration line, so h_a stayed NULL and the program always printed
// "couldn't allocate memory" and exited.
int main() {
    int dimx = 16, dimy = 16;
    int num_bytes = dimx*dimy*sizeof(int);
    int *d_a=0, *h_a=0; // device and host pointers
    h_a = (int*)malloc(num_bytes);   // restored: was lost inside the comment
    cudaMalloc( (void**)&d_a, num_bytes );
    if( 0==h_a || 0==d_a ) {
        printf("couldn't allocate memory\n"); return 1;
    }
    cudaMemset( d_a, 0, num_bytes );
    // 4x4 threads per block, enough blocks to tile the whole array
    // (dimx/dimy must divide evenly by the block shape, as they do here).
    dim3 grid, block;
    block.x = 4; block.y = 4;
    grid.x = dimx / block.x;
    grid.y = dimy / block.y;
    kernel<<<grid, block>>>( d_a, dimx, dimy );
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_a, d_a, num_bytes, cudaMemcpyDeviceToHost );
    free( h_a ); cudaFree( d_a );
    return 0;
}
12,993 | #include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h>
#include <time.h>
#define X 1
#define O -1
#define BLANK 0
#define NUM_THREAD 500
#define RUNS_PER_THREAD 20
#define NUM_BLOCKS 20
#define NUM_SEQ_LOOPS 200000
//When a subBoard is won, it should be filled with that mark. (All X or all O) This is needed as a speed optimization
__device__ __host__ void printSquare(int x){
    // Render one cell: X, O, or _ for an empty square; anything else prints nothing.
    if (x == X) {
        printf("X");
    } else if (x == O) {
        printf("O");
    } else if (x == BLANK) {
        printf("_");
    }
}
bool printSquareWithNumber(int x, int num){
    // Print a square's mark, or its selectable number if it is blank.
    // Returns true exactly when the number was consumed (square was blank),
    // so the caller can advance its counter.
    if (x == X) { printf("XX"); return false; }
    if (x == O) { printf("OO"); return false; }
    if (x == BLANK) {
        printf("%d", num);
        if (num <= 9) { printf(" "); }   // pad single digits to width 2
        return true;
    }
    return false;
}
void PrintBoardWithNumbers(int* board){
    // Print the 9x9 meta-board, numbering every blank square consecutively
    // so a human can pick a move. Layout: board[sub*9 + cell] with
    // sub = 3*bigRow + bigCol and cell = 3*subRow + subCol.
    int num = 0;
    for (int bigRow = 0; bigRow < 3; bigRow++)
    {
        printf("_____________\n");
        for (int subRow = 0; subRow < 3; subRow++)
        {
            printf("|");
            for (int bigCol = 0; bigCol < 3; bigCol++)
            {
                for (int subCol = 0; subCol < 3; subCol++)
                {
                    if (printSquareWithNumber(board[(3*bigRow + bigCol)*9 + (3*subRow + subCol)], num))
                    {
                        num++;   // a blank square consumed this number
                    }
                }
                printf("|");
            }
            printf("\n");
        }
    }
    printf("_____________\n");
}
void PrintBoard(int* board){
    // Print the 9x9 meta-board with marks only (see PrintBoardWithNumbers
    // for the indexing scheme).
    for (int bigRow = 0; bigRow < 3; bigRow++)
    {
        printf("_____________\n");
        for (int subRow = 0; subRow < 3; subRow++)
        {
            printf("|");
            for (int bigCol = 0; bigCol < 3; bigCol++)
            {
                for (int subCol = 0; subCol < 3; subCol++)
                {
                    printSquare(board[(3*bigRow + bigCol)*9 + (3*subRow + subCol)]);
                }
                printf("|");
            }
            printf("\n");
        }
    }
    printf("_____________\n");
}
void PrintSubBoardWithNumbers(int* subBoard){
    // Print one 3x3 sub-board, numbering its blank squares from 0.
    int num = 0;
    for (int r = 0; r < 3; r++)
    {
        printf("|");
        for (int c = 0; c < 3; c++)
        {
            if (printSquareWithNumber(subBoard[3*r + c], num))
            {
                num++;
            }
        }
        printf("|\n");
    }
}
void PrintSubBoard(int* subBoard){
    // Print one 3x3 sub-board with marks only.
    for (int r = 0; r < 3; r++)
    {
        printf("|");
        for (int c = 0; c < 3; c++)
        {
            printSquare(subBoard[3*r + c]);
        }
        printf("|\n");
    }
}
__device__ __host__ int SubBoardWinner(int* subBoard){
    // Return X (1) or O (-1) if that mark owns a full line, else 0.
    // With cells encoded +1/-1/0, a line sums to +/-3 exactly when one
    // player holds all three of its cells.
    int line;
    // rows
    for (int r = 0; r < 3; r++)
    {
        line = subBoard[3*r] + subBoard[3*r + 1] + subBoard[3*r + 2];
        if (abs(line) == 3) { return line / 3; }
    }
    // columns
    for (int c = 0; c < 3; c++)
    {
        line = subBoard[c] + subBoard[c + 3] + subBoard[c + 6];
        if (abs(line) == 3) { return line / 3; }
    }
    // main diagonal
    line = subBoard[0] + subBoard[4] + subBoard[8];
    if (abs(line) == 3) { return line / 3; }
    // anti-diagonal
    line = subBoard[2] + subBoard[4] + subBoard[6];
    if (abs(line) == 3) { return line / 3; }
    return 0;
}
// Winner check for a sub-board stored as doubles.
// NOTE(review): `total` is an int, so each double line sum is truncated
// before the abs(total)==3 test. That is exact only when cells are exactly
// +/-1 or 0; fractional line sums (e.g. 2.9) truncate and never match.
// Confirm the double representation is restricted to whole marks.
int SubBoardWinner(double* subBoard){
int i, total;
//left to right wins
for(i = 0; i < 3; i++)
{
total = subBoard[3*i] + subBoard[3*i +1 ] + subBoard[3*i+2];
if(abs(total) == 3)
{
return (total/3);
}
}
//up to down
for(i = 0; i < 3; i++)
{
total = subBoard[i] + subBoard[ 3+i ] + subBoard[6+i];
if(abs(total) == 3)
{
return (total/3);
}
}
//Diagonals
total = subBoard[0] + subBoard[4] + subBoard[8];
if(abs(total) == 3)
{
return (total/3);
}
total = subBoard[2] + subBoard[4] + subBoard[6];
if(abs(total) == 3)
{
return (total/3);
}
return 0;
}
__device__ __host__ int BoardWinner(int* board){
    // Collapse each of the nine sub-boards to its winner mark, then score the
    // resulting 3x3 meta board with the same line test.
    int meta[9];
    for(int s = 0; s < 9; s++)
        meta[s] = SubBoardWinner(board + 9 * s);
    return SubBoardWinner(meta);
}
__device__ __host__ bool IsSubBoardFull(int* subBoard){
    // A sub-board is full when no square still holds 0 (blank).
    for(int sq = 0; sq < 9; ++sq)
    {
        if(subBoard[sq] == 0)
            return false;
    }
    return true;
}
__device__ __host__ bool IsSubBoardFull(double* subBoard){
    // Meta-board overload: a cell counts as "full" (decided) only when its
    // value is exactly +1 or -1, i.e. that sub-board has been won outright.
    //
    // Bug fix: the original condition was (v != 1 || v != -1), which is true
    // for EVERY value, so the function unconditionally returned false.  The
    // intended test is "v is neither +1 nor -1".
    int i;
    for(i = 0; i < 9; i++)
    {
        if(subBoard[i] != 1 && subBoard[i] != -1)
        {
            return false;
        }
    }
    return true;
}
__device__ __host__ bool IsBoardFull(int* board){
    // Full when every one of the 81 squares holds a non-BLANK mark.
    int sq = 0;
    while(sq < 81)
    {
        if(board[sq] == BLANK)
            return false;
        ++sq;
    }
    return true;
}
__device__ __host__ int NumberOfFreeSquaresInFullBoard(int* board){
    // Count blank (0) squares over the entire 81-square board.
    int freeCount = 0;
    for(int sq = 0; sq < 81; ++sq)
        freeCount += (board[sq] == 0) ? 1 : 0;
    return freeCount;
}
__device__ __host__ int NumberOfFreeSquaresInSubBoard(int* subBoard){
    // Count blank (0) squares within one 9-square sub-board.
    int freeCount = 0;
    for(int sq = 0; sq < 9; ++sq)
        freeCount += (subBoard[sq] == 0) ? 1 : 0;
    return freeCount;
}
__device__ __host__ int NumberOfPossibleMoves(int* board, int lastMove, bool fullBoard){
    // When play is unrestricted (fullBoard) count blanks everywhere;
    // otherwise only in the sub-board forced by the last move (lastMove % 9).
    if(fullBoard)
        return NumberOfFreeSquaresInFullBoard(board);
    return NumberOfFreeSquaresInSubBoard(board + 9 * (lastMove % 9));
}
int DoEvalRow(int a, int b, int c){
    // Weight a line's sum by how many of its squares are occupied (non-zero).
    int occupied = (a != 0) + (b != 0) + (c != 0);
    return (a + b + c) * occupied;
}
double DoEvalRow(double a, double b, double c){
    // Weight the line sum by the number of entries favouring the side that
    // currently leads the line (positive sum -> X supporters, else O's).
    double sum = a + b + c;
    int supporters;
    if(sum > 0)
        supporters = (a > 0) + (b > 0) + (c > 0);
    else
        supporters = (a < 0) + (b < 0) + (c < 0);
    return sum * (double)supporters;
}
int EvalRow(int a, int b, int c){
    // Score a line only when it is uncontested: all entries share a sign
    // (zeros allowed on either side).  A mixed line can never be completed
    // by one player, so it is worth 0.
    bool allNonNeg = (a >= 0) && (b >= 0) && (c >= 0);
    bool allNonPos = (a <= 0) && (b <= 0) && (c <= 0);
    if(allNonNeg || allNonPos)
        return DoEvalRow(a, b, c);
    return 0;
}
double EvalSubBoard(int* subBoard){
    // Heuristic value of one sub-board in [-1, 1]: +1/-1 when already won by
    // X/O, 0 when drawn, otherwise the sum of all eight line scores
    // normalised by 21 (the maximum attainable magnitude).
    int winner = SubBoardWinner(subBoard);
    if(winner == X)
        return 1;
    if(winner == O)
        return -1;
    // winner == BLANK from here on.
    if(IsSubBoardFull(subBoard))
        return 0;
    const int lines[8][3] = {
        {0,1,2}, {3,4,5}, {6,7,8},
        {0,3,6}, {1,4,7}, {2,5,8},
        {0,4,8}, {2,4,6}
    };
    double sum = 0;
    for(int l = 0; l < 8; l++)
        sum += EvalRow(subBoard[lines[l][0]], subBoard[lines[l][1]], subBoard[lines[l][2]]);
    return sum / 21;
}
double EvalMetaRow(double a, double b, double c){
    // A meta line is dead (worth 0) only when it already contains both a
    // locked X win (>= +1) and a locked O win (<= -1); otherwise score it
    // like a normal row of fractional values.
    bool hasLockedO = (a <= -1) || (b <= -1) || (c <= -1);
    bool hasLockedX = (a >= 1) || (b >= 1) || (c >= 1);
    if(!hasLockedO || !hasLockedX)
        return DoEvalRow(a, b, c);
    return 0;
}
double EvalMetaBoard(double* subBoard){
    // Value of the 3x3 meta board: +/-21 for a decided game, 0 for a locked
    // draw, otherwise the (unnormalised) sum of the eight meta-line scores.
    int winner = SubBoardWinner(subBoard);
    if(winner == X)
        return 21;
    if(winner == O)
        return -21;
    // winner == BLANK from here on.
    if(IsSubBoardFull(subBoard))
        return 0;
    const int lines[8][3] = {
        {0,1,2}, {3,4,5}, {6,7,8},
        {0,3,6}, {1,4,7}, {2,5,8},
        {0,4,8}, {2,4,6}
    };
    double sum = 0;
    for(int l = 0; l < 8; l++)
        sum += EvalMetaRow(subBoard[lines[l][0]], subBoard[lines[l][1]], subBoard[lines[l][2]]);
    return sum;
}
double EvalFullBoard(int* board){
    // Full-game evaluation: +/-21 when the game is won, 0 for a finished
    // draw, otherwise the meta-board heuristic built from per-sub-board
    // scores.  98 is the original's unreachable sentinel for an unexpected
    // BoardWinner result; kept for compatibility.
    int winner = BoardWinner(board);
    if(winner == X)
        return (double)21;
    if(winner == O)
        return (double)-21;
    if(winner == BLANK)
    {
        if(IsBoardFull(board))
            return 0;
        double meta[9];
        for(int s = 0; s < 9; s++)
            meta[s] = EvalSubBoard(board + 9 * s);
        return EvalMetaBoard(meta);
    }
    return 98;
}
__device__ int EvalFullBoardKenel(int* board){
    // Device-side terminal evaluation: the winner mark (+1 / -1) or 0 for an
    // undecided/drawn game.  (The "Kenel" spelling is kept: callers use it.)
    int winner = BoardWinner(board);
    if(winner == X)
        return 1;
    if(winner == O)
        return -1;
    return 0;
}
// Place `mark` on the `placement`-th free square of the sub-board forced by
// `lastMove` (sub-board index = lastMove % 9) and return the absolute board
// index of the move (the value used as the next `lastMove`).
//
// NOTE(review): on a successful placement `freeSquares` is reused to hold the
// square index `i`, so the return value is subBoard*9 + i.  If `placement` is
// >= the number of free squares nothing is placed and the returned value is
// just the running free-square count — callers must guarantee
// placement < NumberOfPossibleMoves(...).
__device__ __host__ int PlaceMoveinSubBoard(int* board, int lastMove, int placement, int mark){
int subBoard, freeSquares, i;
subBoard = lastMove % 9;
freeSquares = 0;
// Walk the sub-board counting blanks until the placement-th one is found.
for(i = 0; i < 9; i++)
{
if(board[subBoard* 9 + i] == 0)
{
if(freeSquares == placement)
{
board[subBoard* 9 + i] = mark;
freeSquares = i;
break;
}
freeSquares++;
}
}
// If this move decided the sub-board, stamp the whole sub-board with the
// mark just played so it no longer offers free squares.
// NOTE(review): this also overwrites the opponent's marks — presumably
// intentional (it makes the "full board" logic simpler); verify.
if( SubBoardWinner(board + subBoard * 9 ) != 0 )
{
for(i = 0; i < 9; i++)
{
board[subBoard* 9 + i] = mark;
}
}
return subBoard * 9 + freeSquares;
}
// Place `mark` on the `placement`-th free square anywhere on the 81-square
// board (used when play is unrestricted) and return the absolute index of the
// move.  `lastMove` is unused here but kept for signature symmetry with
// PlaceMoveinSubBoard.
//
// NOTE(review): if `placement` >= the number of free squares, the loop runs
// to completion with i == 81, making subBoard == 9 and the stamp loop below
// write board[81..89] out of bounds.  Callers must guarantee
// placement < NumberOfPossibleMoves(...).
__device__ __host__ int PlaceMarkinNthFree(int* board, int lastMove, int placement, int mark){
int subBoard, freeSquares, i;
freeSquares = 0;
for(i = 0; i < 81; i++)
{
if(board[i] == 0)
{
if(freeSquares == placement)
{
board[i] = mark;
// Reuse freeSquares to carry the chosen square's index-within-board.
freeSquares = i;
break;
}
freeSquares++;
}
}
// Sub-board containing the square just played.
subBoard = i / 9;
// If the move decided its sub-board, stamp the whole sub-board with `mark`
// so it no longer offers free squares (same convention as PlaceMoveinSubBoard).
if( SubBoardWinner(board + subBoard * 9 ) != 0 )
{
for(i = 0; i < 9; i++)
{
board[subBoard* 9 + i] = mark;
}
}
// NOTE(review): freeSquares here is an absolute board index already, so the
// return value is subBoard*9 + absoluteIndex — confirm callers only use the
// result modulo 9 (they do: lastMove % 9).
return subBoard * 9 + freeSquares;
}
int playRandomMove(int* board, int lastMove, int mark){
    // Host-side random playout step: play `mark` on a uniformly random legal
    // square.  Play is restricted to the forced sub-board unless that
    // sub-board is already won or full.
    int forced = lastMove % 9;
    bool anywhere = SubBoardWinner(board + 9 * forced) != 0
                 || IsSubBoardFull(board + 9 * forced);
    int moves = NumberOfPossibleMoves(board, lastMove, anywhere);
    int pick = rand() % moves;
    return anywhere ? PlaceMarkinNthFree(board, lastMove, pick, mark)
                    : PlaceMoveinSubBoard(board, lastMove, pick, mark);
}
// Device-side random playout step (cuRAND-driven version of playRandomMove).
//
// Bug fix: the RNG state was previously taken BY VALUE, so curand() advanced
// only a local copy and every call within a playout drew from the same state,
// producing heavily correlated "random" games.  Taking the state by reference
// keeps the caller's stream advancing; call sites are syntactically unchanged.
__device__ int playRandomMove(int* board, int lastMove, int mark, curandState_t &state){
    int subBoard = lastMove%9;
    // Unrestricted play when the forced sub-board is already won or full.
    bool fullBoard = SubBoardWinner(board+9*subBoard) != 0 || IsSubBoardFull(board+subBoard*9) ;
    int numOfMoves = NumberOfPossibleMoves(board, lastMove, fullBoard);
    // curand() returns an unsigned 32-bit value; reduce to a legal move index.
    int index = curand(&state) % (numOfMoves);
    if(fullBoard)
    {
        return PlaceMarkinNthFree(board, lastMove, index, mark);
    }
    else
    {
        return PlaceMoveinSubBoard(board, lastMove, index, mark);
    }
}
// Sequential Monte-Carlo move selection.  For each of `numRuns` playouts the
// candidate move (run index % number of legal moves) is tried, the game is
// finished with random play, and the terminal evaluation is accumulated per
// candidate.  The candidate with the best total (max for X, min for O) is
// then played on the real board; returns the absolute square index played.
int MonteCarlo(int* board, int lastMove, int mark, int numRuns){
int fakeBoard[81];
int fakeLastMove;
int fakeMark;
int subBoard = lastMove%9;
// Unrestricted play when the forced sub-board is already won or full.
bool fullBoard = SubBoardWinner(board+9*subBoard) != 0 || IsSubBoardFull(board+subBoard*9) ;
int numOfMoves = NumberOfPossibleMoves(board, lastMove, fullBoard);
// Accumulated playout score per candidate move.
// NOTE(review): 70 slots, but an unrestricted move count can in principle
// reach 81 free squares — confirm numOfMoves <= 70 always holds in practice.
double score [70];
for(int i = 0; i < 70; i++)
{
score[i] = 0;
}
for(int i = 0; i < numRuns; i++)
{
// Work on a scratch copy of the position for this playout.
// (fakeLastMove is redundantly reset on every j iteration; harmless.)
for(int j = 0; j < 81; j++)
{
fakeBoard[j] = board[j];
fakeLastMove = lastMove;
}
// Candidate moves are assigned to runs round-robin.
int index = i % (numOfMoves);
fakeMark = mark;
if(BoardWinner(fakeBoard) == 0 && !IsBoardFull(fakeBoard)){
if(fullBoard)
{
fakeLastMove = PlaceMarkinNthFree(fakeBoard, fakeLastMove, index, fakeMark);
}
else
{
fakeLastMove = PlaceMoveinSubBoard(fakeBoard, fakeLastMove, index, fakeMark);
}
fakeMark = fakeMark * -1;
// Random rollout until the game ends.
while(BoardWinner(fakeBoard) == 0 && !IsBoardFull(fakeBoard))
{
fakeLastMove = playRandomMove(fakeBoard, fakeLastMove, fakeMark);
fakeMark = -1 * fakeMark;
}
}
score[i % numOfMoves] = EvalFullBoard(fakeBoard) + score[i % numOfMoves];
}
// argmax for X, argmin for O over the candidates actually scored.
int winningIndex = 0;
if(mark == X)
{
double max = score[0];
for(int i = 0; i < numOfMoves; i++)
{
if(score[i] > max)
{
winningIndex = i;
max = score[i];
}
}
}
else
{
double min = score[0];
for(int i = 0; i < numOfMoves; i++)
{
if(score[i] < min)
{
winningIndex = i;
min = score[i];
}
}
}
// Play the winning candidate on the REAL board and report its square.
if(fullBoard)
{
return PlaceMarkinNthFree(board, lastMove, winningIndex, mark);
}
else
{
return PlaceMoveinSubBoard(board, lastMove, winningIndex, mark);
}
}
// GPU Monte-Carlo playouts.  Each thread runs `Runs` random games from the
// current position, crediting each game's terminal value (+1/0/-1) to a
// candidate move chosen round-robin, first into a block-shared tally and
// finally (one atomic per candidate per block) into the global score array.
//
// Launch expectations (from ParMonteCarlo): dynamic shared memory of
// sizeof(int) * 70, and *numOfMoves <= blockDim.x so every shared slot is
// zeroed and flushed — TODO confirm both bounds are always respected.
__global__ void MonteCarloKernel(int* board, int* lastMove, int* mark, bool* fullBoard, int* numOfMoves, int* score, int Runs){
extern __shared__ int shared[];
int tId = threadIdx.x + (blockIdx.x * blockDim.x);
int thread = threadIdx.x;
// Per-thread RNG seeded from the clock plus the global thread id.
curandState_t state;
curand_init((unsigned long long)clock() + tId, 0, 0, &state);
int o_board[81];
int fakeBoard[81];
int fakeLastMove;
int fakeMark;
// One thread per candidate zeroes the shared tally slot.
if(thread < *numOfMoves)
{
shared[thread] = 0;
}
// Cache the root position in thread-local memory.
for(int j = 0; j < 81; j++)
{
o_board[j] = board[j];
}
__syncthreads();
//offset by tID to reduce collisions on the scores
for(int i = 0+tId; i < Runs +tId; i++)
{
//reset the board in local mem
for(int j = 0; j < 81; j++)
{
fakeBoard[j] = o_board[j];
}
int index = i % (*numOfMoves);
fakeMark = *mark;
fakeLastMove = *lastMove;
if(BoardWinner(fakeBoard) == 0 && !IsBoardFull(fakeBoard)){
if(*fullBoard)
{
fakeLastMove = PlaceMarkinNthFree(fakeBoard, fakeLastMove, index, fakeMark);
}
else
{
fakeLastMove = PlaceMoveinSubBoard(fakeBoard, fakeLastMove, index, fakeMark);
}
fakeMark = fakeMark * -1;
// Random rollout to the end of the game.
while(BoardWinner(fakeBoard) == 0 && !IsBoardFull(fakeBoard))
{
fakeLastMove = playRandomMove(fakeBoard, fakeLastMove, fakeMark, state);
fakeMark = -1 * fakeMark;
}
}
// Credit the playout result to its candidate's shared tally.
atomicAdd(&shared[i%(*numOfMoves)], EvalFullBoardKenel(fakeBoard));
}
__syncthreads();
// Flush the block's tallies to the global score array.
if(thread < *numOfMoves)
{
atomicAdd(&score[thread], shared[thread]);
}
}
// Parallel Monte-Carlo move selection: mirrors MonteCarlo() but farms the
// playouts out to MonteCarloKernel, then plays the best-scoring candidate on
// the real board and returns the absolute square index of the move.
//
// Bug fix: the min/max scans below previously iterated over all 70 score
// slots instead of the `numOfMoves` slots the kernel actually writes.  The
// unwritten slots stay 0, so (for O especially, whose real scores can all be
// positive) a zero slot could win the argmin and select an out-of-range move
// index.  The sequential MonteCarlo() already scans only numOfMoves.
int ParMonteCarlo(int* board, int lastMove, int mark, int Runs)
{
    int *d_board, *d_score ,*d_numOfMoves, *d_mark, *d_lastMove;
    bool *d_fullBoard;
    int score[70];
    memset(score, 0, sizeof(int) * 70);
    int subBoard = lastMove%9;
    // Unrestricted play when the forced sub-board is already won or full.
    bool fullBoard = SubBoardWinner(board+9*subBoard) != 0 || IsSubBoardFull(board+subBoard*9);
    int numOfMoves = NumberOfPossibleMoves(board, lastMove, fullBoard);
    // Device mirrors of the kernel's inputs and the per-candidate tallies.
    cudaMalloc(&d_board, sizeof(int) * 81);
    cudaMalloc(&d_score, sizeof(int) * 70);
    cudaMalloc(&d_mark ,sizeof(int));
    cudaMalloc(&d_lastMove ,sizeof(int));
    cudaMalloc(&d_numOfMoves ,sizeof(int));
    cudaMalloc(&d_fullBoard ,sizeof(bool));
    cudaMemset(d_score, 0, sizeof(int) * 70);
    cudaMemcpy(d_board, board, sizeof(int) *81, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mark,&mark,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_numOfMoves,&numOfMoves,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_lastMove,&lastMove,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_fullBoard,&fullBoard,sizeof(bool),cudaMemcpyHostToDevice);
    // Dynamic shared memory holds the block-local tally (70 ints).
    MonteCarloKernel<<<NUM_BLOCKS,NUM_THREAD, sizeof(int) * 70>>>(d_board, d_lastMove, d_mark, d_fullBoard, d_numOfMoves, d_score, Runs);
    cudaDeviceSynchronize();
    cudaMemcpy(score, d_score, sizeof(int)*70, cudaMemcpyDeviceToHost);
    // argmax for X, argmin for O — over the candidates actually scored.
    int winningIndex = 0;
    if(mark == X)
    {
        double max = score[0];
        for(int i = 0; i < numOfMoves; i++)
        {
            if(score[i] > max)
            {
                winningIndex = i;
                max = score[i];
            }
        }
    }
    else
    {
        double min = score[0];
        for(int i = 0; i < numOfMoves; i++)
        {
            if(score[i] < min)
            {
                winningIndex = i;
                min = score[i];
            }
        }
    }
    cudaFree(d_board);
    cudaFree(d_score);
    cudaFree(d_mark);
    cudaFree(d_numOfMoves);
    cudaFree(d_lastMove);
    cudaFree(d_fullBoard);
    // Play the winning candidate on the REAL board and report its square.
    if(fullBoard)
    {
        return PlaceMarkinNthFree(board, lastMove, winningIndex, mark);
    }
    else
    {
        return PlaceMoveinSubBoard(board, lastMove, winningIndex, mark);
    }
}
// Plays 10 games of ultimate tic-tac-toe, alternating moves between the
// parallel (GPU) and sequential Monte-Carlo players, printing per-move and
// per-game timings and a final win tally.
//
// Bug fix: clock_t values were printed with "%d", which is undefined
// behaviour on LP64 platforms where clock_t is a long; all timing printfs now
// cast to long and use "%ld".
int main()
{
    clock_t start;
    clock_t diff;
    clock_t end;
    clock_t ParTime = 0;
    clock_t SeqTime = 0;
    int Xwin = 0;
    int Ywin = 0;
    srand(time(NULL));
    for(int i = 0; i < 10; i++)
    {
        ParTime = 0;
        SeqTime = 0;
        int board[81];
        // Byte-wise fill is only correct because BLANK is zero.
        memset(board, BLANK, sizeof(int)*81);
        int lastMove = 0;
        int mark = 1;
        // `test` alternates which engine moves: true -> GPU, false -> CPU.
        bool test= true;
        while(BoardWinner(board) == 0 && !IsBoardFull(board) )
        {
            if(test)
            {
                printf("Monte Carlo Turn in Parallel\n");
                start = clock();
                lastMove = ParMonteCarlo(board, lastMove, mark, RUNS_PER_THREAD);
                end = clock();
                diff =end -start;
                ParTime += diff;
                printf("Par Time: %ld\n", (long)diff);
            }
            else
            {
                printf("Monte Carlo Turn in Sequence\n");
                start = clock();
                lastMove = MonteCarlo(board, lastMove, mark, NUM_SEQ_LOOPS);
                end = clock();
                diff = end-start;
                SeqTime += diff;
                printf("Seq Time: %ld\n", (long)diff);
            }
            mark = mark * -1;
            test = !test;
            PrintBoard(board);
        }
        if(BoardWinner(board) == X)
        {
            Xwin++;
        }
        else if (BoardWinner(board)== O)
        {
            Ywin++;
        }
        printf("Parallel Time Total %ld, Seq Time Total: %ld\n",(long)ParTime, (long)SeqTime );
        printf("BoardWinner: ");
        printSquare(BoardWinner(board));
        printf("\n");
    }
    printf("X won %d times\n out of 10\n",Xwin);
    printf("O won %d times\n out of 10\n",Ywin);
    return 0;
}
|
12,994 | #include "includes.h"
// Single-block BFS with shared-memory frontier queues and spill-to-global.
// Iterates BFS levels entirely within one block: the current frontier lives
// in shared b_q, newly discovered vertices are appended to b_q2 (spilling to
// the global `frontier` array once max_local_mem entries are exceeded), and
// the queues are swapped each level.  Exits either when the frontier empties
// (frontier_length set to 0) or when it outgrows the block/shared capacity,
// in which case the partial frontier is flushed to global memory for a
// different kernel to continue.
//
// NOTE(review): correctness appears to assume a single-block launch
// (gridDim.x == 1) and blockDim.x >= frontier_len — confirm against the host
// launch code, which is outside this view.
__global__ void BFS_kernel_one_block_spill( volatile unsigned int *frontier, unsigned int frontier_len, volatile unsigned int *cost, volatile int *visited, unsigned int *edgeArray, unsigned int *edgeArrayAux, unsigned int numVertices, unsigned int numEdges, volatile unsigned int *frontier_length, const unsigned int max_local_mem)
{
extern volatile __shared__ unsigned int s_mem[];
//block queues
unsigned int *b_q=(unsigned int *)&s_mem[0];
unsigned int *b_q2=(unsigned int *)&s_mem[max_local_mem];
volatile __shared__ unsigned int b_offset[1];
volatile __shared__ unsigned int b_q_length[1];
//get the threadId
unsigned int tid=threadIdx.x;
//copy frontier queue from global queue to local block queue
if(tid<frontier_len)
{
b_q[tid]=frontier[tid];
}
unsigned int f_len=frontier_len;
// One iteration of this loop == one BFS level.
while(1)
{
//Initialize the block queue size to 0
if(tid==0)
{
b_q_length[0]=0;
b_offset[0]=0;
}
__syncthreads();
if(tid<f_len)
{
//get the nodes to traverse from block queue
unsigned int node_to_process=*(volatile unsigned int *)&b_q[tid];
//remove from frontier
visited[node_to_process]=0;
//get the offsets of the vertex in the edge list
unsigned int offset = edgeArray[node_to_process];
unsigned int next = edgeArray[node_to_process+1];
//Iterate through the neighbors of the vertex
while(offset<next)
{
//get neighbor
unsigned int nid=edgeArrayAux[offset];
//get its cost
// atomicMin both relaxes the neighbor's cost and returns the old value.
unsigned int v=atomicMin((unsigned int *)&cost[nid],
cost[node_to_process]+1);
//if cost is less than previously set add to frontier
if(v>cost[node_to_process]+1)
{
// visited doubles as an "already queued" flag; atomicExch claims it.
int is_in_frontier=atomicExch((int *)&visited[nid],1);
//if node already in frontier do nothing
if(is_in_frontier==0)
{
//increment the warp queue size
unsigned int t=
atomicAdd((unsigned int *)&b_q_length[0],1);
if(t< max_local_mem)
{
b_q2[t]=nid;
}
//write to global memory if shared memory full
else
{
int off=atomicAdd((unsigned int *)&b_offset[0],
1);
frontier[off]=nid;
}
}
}
offset++;
}
}
__syncthreads();
// Swap queues: next level's frontier (b_q2) becomes the working queue.
if(tid<max_local_mem)
b_q[tid]=*(volatile unsigned int *)&b_q2[tid];
__syncthreads();
//Traversal complete exit
if(b_q_length[0]==0)
{
if(tid==0)
frontier_length[0]=0;
return;
}
// If frontier exceeds one block in size copy warp queues to
//global frontier queue and exit
// NOTE(review): spilled entries are at frontier[0..b_offset); the shared
// portion is appended after them — relies on b_offset counting the spills.
else if( b_q_length[0] > blockDim.x || b_q_length[0] > max_local_mem)
{
if(tid<(b_q_length[0]-b_offset[0]))
frontier[b_offset[0]+tid]= *(volatile unsigned int *)&b_q[tid];
if(tid==0)
{
frontier_length[0] = b_q_length[0];
}
return;
}
f_len=b_q_length[0];
__syncthreads();
}
}
12,995 | #include <sys/time.h>
#include <cuda.h>
#include <stdio.h>
#include <cuda_runtime_api.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
//#include <cuda.h>
//#include <helper_cuda.h>
// time stamp function in seconds
// Wall-clock time stamp in seconds (microsecond resolution).
double getTimeStamp() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec / 1000000;
}
// host side matrix addition
// Host reference implementation: element-wise C = A + B for an nx-by-ny
// row-major matrix (indices flattened as i*ny + j).
void h_addmat(float *A, float *B, float *C, int nx, int ny){
    int total = nx * ny;
    for (int idx = 0; idx < total; idx++)
        C[idx] = A[idx] + B[idx];
}
// device-side matrix addition
// Device element-wise matrix add: one thread per element of an nx-by-ny
// row-major matrix.  Expects a 2D launch whose x dimension covers rows and
// y dimension covers columns; out-of-range threads return immediately.
__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
    int row = threadIdx.x + blockIdx.x * blockDim.x;
    int col = threadIdx.y + blockIdx.y * blockDim.y;
    if (row >= nx || col >= ny)
        return;
    int idx = row * ny + col;
    C[idx] = A[idx] + B[idx];
}
// Fill the top-left x-by-y region of a row-major matrix whose rows are
// `width` floats wide.  flag != 0 selects the "A" pattern (i+j)/3, flag == 0
// the "B" pattern 3.14*(i+j).  Prints which pattern was used.
void initData(float *M, int x, int y, int width, int flag ){
    printf(flag ? "A\n" : "B\n");
    for (int i = 0; i < x; i++){
        for (int j = 0; j < y; j++){
            if (flag)
                M[i*width+j] = (float)(i+j)/3.0;
            else
                M[i*width+j] = (float)3.14*(i+j);
        }
    }
}
// Driver: adds two nx-by-ny matrices on the GPU (padding ny up to a multiple
// of 1024 so the 1x1024 blocks tile evenly), times transfer/kernel/copy-back
// phases, verifies against the host reference, and prints a 0/1 mismatch flag.
int main( int argc, char *argv[] ) {
// get program arguments
if (argc!=3){
printf("Fail");
exit(1);
//printf("Fail");
}
int nx = atoi( argv[1] ) ; // should check validity
int ny = atoi( argv[2] ) ; // should check validity
// mx/my are row/column padding amounts; only ny is padded (to 1024).
int mx=0,my=0;
//if((nx%16) != 0){
// mx = 0;
// //mx = 16 - (nx%16);
//}
if((ny%1024) != 0){
my = 1024 - (ny%1024);
}
int noElems = (nx+mx)*(ny+my) ;
printf ("%d %d %d %d \n",(nx*ny),(noElems),mx,my);
int bytes = noElems * sizeof(float) ;
// but you may want to pad the matrices…
// alloc memory host-side
//float *h_A = (float *) malloc( bytes ) ;
//float *h_B = (float *) malloc( bytes ) ;
float *h_hC = (float *) malloc( bytes ) ; // host result
//float *h_dC = (float *) malloc( bytes ) ; // gpu result
// init matrices with random data
//initData(h_A,nx,ny,1); initData(h_B,nx,ny,0);
// alloc memory dev-side
float *d_A, *d_B, *d_C ;
cudaMalloc( (void **) &d_A, bytes ) ;
cudaMalloc( (void **) &d_B, bytes ) ;
cudaMalloc( (void **) &d_C, bytes ) ;
// Pinned host buffers for faster (and async-capable) transfers.
float *h_Ap, *h_Bp, *h_dCp;
cudaMallocHost( (float **) &h_Ap, bytes ) ;
cudaMallocHost( (float **) &h_Bp, bytes ) ;
// NOTE(review): cudaMemset on pinned HOST pointers relies on unified
// addressing; plain memset would be the conventional choice here.
cudaMemset(h_Ap,0,bytes);
cudaMemset(h_Bp,0,bytes);
// Only the nx-by-ny region is written; the padding stays zero.
initData(h_Ap,nx,ny,ny+my,1); initData(h_Bp,nx,ny,ny+my,0);
// NOTE(review): this loop body is fully commented out (leftover debugging);
// it does no work.
for(int i=0;i<(nx+mx);i++){
for(int j=0;j<(ny+my);j++){
//if(h_hC[i*(ny+my)+j] != h_dCp[i*(ny+my)+j])
//printf("%d ",j);
//printf("%f %f %d %d\n",h_Ap[i*(ny+my)+j],h_Bp[i*(ny+my)+j],i,j);
//if(h_hC[i*(ny+my)+j] != h_dCp[i*(ny+my)+j])
// flag=1;
}
//printf("\n");
}
cudaMallocHost( (float **) &h_dCp, bytes ) ;
cudaMemset(h_dCp,0,bytes);
double timeStampA = getTimeStamp() ;
//transfer data to dev
cudaMemcpy( d_A, h_Ap, bytes, cudaMemcpyHostToDevice ) ;
cudaMemcpy( d_B, h_Bp, bytes, cudaMemcpyHostToDevice ) ;
// note that the transfers would be twice as fast if h_A and h_B
// matrices are pinned
double timeStampB = getTimeStamp() ;
// invoke Kernel
// 1x1024 blocks: grid.y tiles the padded column count exactly.
dim3 block( 1, 1024) ; // you will want to configure this
dim3 grid( (nx+block.x-1)/block.x, (ny+my)/block.y) ; //(ny+block.y-1)/block.y)
printf("Grid %d %d \n",(nx+mx)/block.x,(ny+my)/block.y);
// The kernel is given the padded row width (ny+my) as its `ny`.
f_addmat<<<grid, block>>>( d_A, d_B, d_C, nx, ny+my ) ;
cudaDeviceSynchronize() ;
double timeStampC = getTimeStamp() ;
//copy data back
cudaMemcpyAsync(h_dCp, d_C, bytes, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize() ;
//learn how to comment and uncomment in one go
/*
printf("C\n");
for(int i=0;i<nx;i++){
for(int j=0;j<ny;j++){
//printf("%f ",h_dC[i*ny+j]);
}
//printf("\n");
}
*/
double timeStampD = getTimeStamp() ;
//for(int i=0; i<nx; i++){
// for(int j=0; j<ny; j++){
// printf("%f ",h_dC[i*ny+j]);
// }
// printf("\n");
//}
// free GPU resources
cudaFree( d_A ) ; cudaFree( d_B ) ; cudaFree( d_C ) ;
//cudaFreeHost(h_Ap); cudaFreeHost(h_Bp);
//cudaFreeHost(h_dCp);
//cudaDeviceReset() ;
// check result
// total / H2D / kernel / D2H phase times in seconds.
printf("%f %f %f %f\n",(timeStampD-timeStampA),(timeStampB-timeStampA),(timeStampC-timeStampB),(timeStampD-timeStampC));
h_addmat( h_Ap, h_Bp, h_hC, nx+mx, ny+my ) ;
// flag stays 0 when every padded element matches the host reference.
int flag = 0;
for(int i=0;i<(nx+mx);i++){
for(int j=0;j<(ny+my);j++){
//if(h_hC[i*(ny+my)+j] != h_dCp[i*(ny+my)+j])
//printf("%d ",j);
//printf("%f %f %d %d\n",h_hC[i*(ny+my)+j],h_dCp[i*(ny+my)+j],i,j);
if(h_hC[i*(ny+my)+j] != h_dCp[i*(ny+my)+j])
flag=1;
}
//printf("\n");
}
cudaFreeHost(h_Ap); cudaFreeHost(h_Bp); cudaFreeHost(h_dCp);
free(h_hC);
cudaDeviceReset() ;
printf("\n %d \n",flag);
}
|
12,996 | #include <iostream>
#include <random>
#include <functional>
#include <chrono>
#include <unistd.h>
#ifndef BUF_KIND
#define BUF_KIND 0
#endif
#if(BUF_KIND == 0)
#define BUF_TYPE float
#else
#define BUF_TYPE double
#endif
#define CudaWrap(EXP) \
{ \
auto ret = EXP; \
if (ret != cudaSuccess) { \
std::cerr << "Error! " << cudaGetErrorString(ret) << " (" << ret << ")" << std::endl; \
return 1; \
}\
}
// Element-wise in-place add: B[i] += A[i] for every i < size.
// The launcher must supply ceil(size / blockDim.x) blocks; out-of-range
// threads exit immediately.
__global__
void addKernel(const BUF_TYPE* A, BUF_TYPE* B, size_t size) {
    size_t idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx >= size)
        return;
    B[idx] = A[idx] + B[idx];
}
// Benchmark: fill two 1 GiB buffers with random values, add them on the GPU,
// and report the elapsed time plus a checksum of the result.
//
// Bug fix: the kernel was launched with the HOST pointers (array1/array2)
// instead of the device copies (dev_1/dev_2), so it dereferenced host memory
// on the device; the result copied back from dev_2 was never computed.  Also
// added CudaWrap checks on the transfers and a cudaGetLastError after launch.
int main() {
    // Deterministic RNG so runs are reproducible.
    std::mt19937_64 generator;
    generator.seed(42);
    std::uniform_real_distribution<BUF_TYPE> distribution(-1., 1.);
    auto gen = std::bind(distribution, generator);
    // 1 GiB worth of elements.
    size_t num_gen = (1<<(30))/sizeof(BUF_TYPE);
    // Allocate host arrays.
    BUF_TYPE* array1 = new BUF_TYPE[num_gen];
    BUF_TYPE* array2 = new BUF_TYPE[num_gen];
    // Allocate device arrays.
    BUF_TYPE* dev_1 = nullptr;
    BUF_TYPE* dev_2 = nullptr;
    CudaWrap(cudaMalloc(&dev_1, num_gen*sizeof(BUF_TYPE)));
    CudaWrap(cudaMalloc(&dev_2, num_gen*sizeof(BUF_TYPE)));
    // Fill host arrays with values.
    for(size_t i=0; i < num_gen; ++i) {
        array1[i] = gen();
        array2[i] = gen();
    }
    // Copy data to the device.
    CudaWrap(cudaMemcpy(dev_1, array1, num_gen*sizeof(BUF_TYPE), cudaMemcpyHostToDevice));
    CudaWrap(cudaMemcpy(dev_2, array2, num_gen*sizeof(BUF_TYPE), cudaMemcpyHostToDevice));
    CudaWrap(cudaDeviceSynchronize());
    auto start = std::chrono::high_resolution_clock::now();
    // Compute on the device — on the DEVICE buffers.
    int blockSize = 256;
    int numBlocks = (num_gen+blockSize-1)/(blockSize);
    addKernel<<<numBlocks,blockSize>>>(dev_1, dev_2, num_gen);
    CudaWrap(cudaGetLastError());   // catch launch-configuration errors
    // Copy the result out of the device (synchronizes with the kernel).
    CudaWrap(cudaMemcpy(array2, dev_2, num_gen*sizeof(BUF_TYPE), cudaMemcpyDeviceToHost));
    CudaWrap(cudaDeviceSynchronize());
    auto stop = std::chrono::high_resolution_clock::now();
    // Do something with the arrays so the addition isn't optimized out.
    BUF_TYPE sum = 0.;
    for(size_t i=0; i < num_gen; ++i) {
        sum += array2[i];
    }
    std::cout << sum << std::endl;
    std::cout << std::hexfloat;
    std::cout << sum << std::endl;
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop-start);
    std::cout << "Took " << duration.count() << " milliseconds" << std::endl;
    cudaFree(dev_1);
    cudaFree(dev_2);
    delete [] array1;
    delete [] array2;
    return 0;
}
|
12,997 | #include <cstdio>
// Prints the nvcc -gencode flags matching device 0's compute capability.
int main(void)
{
    cudaDeviceProp prop;
    // Bug fix: the query result was previously ignored; on failure `prop`
    // held uninitialized garbage that was then printed.
    if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess)
        return 1;
    printf("-gencode arch=compute_%d%d,code=sm_%d%d\n",
           prop.major, prop.minor, prop.major, prop.minor);
    return 0;
}
|
12,998 | #include "includes.h"
#define max(a, b) ((a > b)?a:b)
#define THREADSPERDIM 16
#define FALSE 0
#define TRUE !FALSE
// mX has order rows x cols
// vectY has length rows
// mX has order rows x cols
// vectY has length rows
// One-thread-per-problem least-squares solve via QR decomposition.
// Thread m (global index over `county` problems) builds its design matrix X
// (rows x cols, column-major, with an all-ones intercept column written into
// the first column), orthonormalises it with classical Gram-Schmidt into Q,
// forms R = Q^T X, and back-substitutes R b = Q^T y to obtain the regression
// coefficients in vectB.
//
// NOTE(review): `n` selects which response vector in vY is used (n = 0 when
// m == 1, else 1) — the intent of this special-casing isn't visible from
// here; confirm against the host code that packs vY.
// NOTE(review): only X's first column is initialised here; the remaining
// columns are presumably filled by the caller before launch — verify.
__global__ void getRestricted(int countx, int county, int rows, int cols, float * mX, int mXdim, float * vY, int vYdim, float * mQ, int mQdim, float * mR, int mRdim, float * vectB, int vectBdim) {
int
m = blockIdx.x * THREADSPERDIM + threadIdx.x, n,
i, j, k;
float
sum, invnorm,
* X, * Y, * Q, * R, * B,
* coli, * colj,
* colQ, * colX;
// Out-of-range threads (beyond the problem count) do nothing.
if(m >= county) return;
if(m == 1) n = 0;
else n = 1;
// Per-thread slices of the batched global buffers.
X = mX + (m * mXdim);
// initialize the intercepts
for(i = 0; i < rows; i++)
X[i] = 1.f;
Y = vY + (m * countx + n) * vYdim;
B = vectB + m * vectBdim;
Q = mQ + m * mQdim;
R = mR + m * mRdim;
// initialize Q with X ...
for(i = 0; i < rows; i++) {
for(j = 0; j < cols; j++)
Q[i+j*rows] = X[i+j*rows];
}
// gramm-schmidt process to find Q
for(j = 0; j < cols; j++) {
colj = Q+rows*j;
// Subtract the projections onto all previously orthonormalised columns.
for(i = 0; i < j; i++) {
coli = Q+rows*i;
sum = 0.f;
for(k = 0; k < rows; k++)
sum += coli[k] * colj[k];
for(k = 0; k < rows; k++)
colj[k] -= sum * coli[k];
}
// Normalise the column to unit length.
sum = 0.f;
for(i = 0; i < rows; i++)
sum += colj[i] * colj[i];
invnorm = 1.f / sqrtf(sum);
for(i = 0; i < rows; i++)
colj[i] *= invnorm;
}
// Build R and solve R b = Q^T y by back substitution, bottom row first.
for(i = cols-1; i > -1; i--) {
colQ = Q+i*rows;
// matmult Q * X -> R
for(j = 0; j < cols; j++) {
colX = X+j*rows;
sum = 0.f;
for(k = 0; k < rows; k++)
sum += colQ[k] * colX[k];
R[i+j*cols] = sum;
}
sum = 0.f;
// compute the vector Q^t * Y -> B
for(j = 0; j < rows; j++)
sum += colQ[j] * Y[j];
// back substitution to find the x for Rx = B
for(j = cols-1; j > i; j--)
sum -= R[i+j*cols] * B[j];
B[i] = sum / R[i+i*cols];
}
}
12,999 | // NV GPU compute capability detection.
// build: nvcc arch-test.cu && ./a.out
//
// Refs: <https://github.com/BVLC/caffe/blob/master/cmake/Cuda.cmake#L18-L31>
#include <cstdio>
// Lists the compute capability (major.minor) of every visible CUDA device,
// one per line; returns -1 when no device can be enumerated.
int main()
{
    int deviceCount = 0;
    if (cudaSuccess != cudaGetDeviceCount(&deviceCount) || deviceCount == 0)
        return -1;
    for (int dev = 0; dev < deviceCount; ++dev)
    {
        cudaDeviceProp props;
        if (cudaSuccess == cudaGetDeviceProperties(&props, dev))
            std::printf("%d.%d \n", props.major, props.minor);
    }
    return 0;
}
|
13,000 | //
// Average extreme spread of five-shot group assuming impact coordinates follow standard normal distribution
//
// Building:
// nvcc -std=c++11 es_cuda.cu -o es_cuda -lcurand
//
// Running:
// for run in {1..10}; do ./es_cuda 15 | tee -a es_cuda.csv; done
//
#include <string>
#include <vector>
#include <numeric>
#include <stdexcept>
#include <typeinfo>
#include <cooperative_groups.h>
#include <iostream>
#include <iomanip>
#include <stdexcept>
#include <cuda_runtime.h>
#include <math.h>
#include <chrono>
#include <curand.h>
namespace cg = cooperative_groups;
using std::string;
using std::vector;
// First level of reduction
// First level of reduction
// Block-wide sum of each thread's `in`, via tree reduction in dynamic shared
// memory; every thread of the block returns the block total (sdata[0]).
// Preconditions: blockDim.x is a power of two (halving schedule) and the
// launch supplies blockDim.x * sizeof(double) bytes of dynamic shared memory.
__device__ double reduce_sum(double in, cg::thread_block cta)
{
extern __shared__ double sdata[];
// Write to shared memory
unsigned ltid = threadIdx.x;
sdata[ltid] = in;
cg::sync(cta);
// Do reduction in shared memory
for (unsigned s = blockDim.x / 2 ; s > 0 ; s >>= 1) {
if (ltid < s) {
sdata[ltid] += sdata[ltid + s];
}
cg::sync(cta);
}
// Safe to read: the final cg::sync above ordered the last partial sums.
return sdata[0];
}
// Estimator kernel
// Estimator kernel
// Sums the extreme spread (largest pairwise distance) of five-shot groups.
// Groups are distributed across threads in a grid-stride fashion; each block
// reduces its partial sums via reduce_sum and block 0's lane writes the block
// total to results[blockIdx.x].  Requires blockDim.x to be a power of two and
// blockDim.x * sizeof(double) dynamic shared memory (see reduce_sum).
//
// NOTE(review): `points` holds 10*numGroups normals, x-coords first then
// y-coords (pointy = pointx + 5*numGroups), but pointx starts at points+tid
// and advances by step*5 — consecutive threads read overlapping 5-value
// windows rather than disjoint groups.  Since all inputs are i.i.d. normals
// the statistic is unaffected, but confirm this layout is intentional.
__global__ void computeValue(double* const results,
const double* const points,
const unsigned numGroups)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
// Determine thread ID
unsigned bid = blockIdx.x;
unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned step = gridDim.x * blockDim.x;
// Shift the input/output pointers
const double* pointx = points + tid;
const double* pointy = pointx + 5 * numGroups;
// Sum up extreme spread of all groups
double sum = 0;
for (unsigned i = tid ; i < numGroups; i += step, pointx += step * 5, pointy += step * 5) {
// Pairwise distances
double dx[10], dy[10];
// Unroll nested comparison loops
dx[0] = pointx[0] - pointx[1]; dy[0] = pointy[0] - pointy[1];
dx[1] = pointx[0] - pointx[2]; dy[1] = pointy[0] - pointy[2];
dx[2] = pointx[0] - pointx[3]; dy[2] = pointy[0] - pointy[3];
dx[3] = pointx[0] - pointx[4]; dy[3] = pointy[0] - pointy[4];
dx[4] = pointx[1] - pointx[2]; dy[4] = pointy[1] - pointy[2];
dx[5] = pointx[1] - pointx[3]; dy[5] = pointy[1] - pointy[3];
dx[6] = pointx[1] - pointx[4]; dy[6] = pointy[1] - pointy[4];
dx[7] = pointx[2] - pointx[3]; dy[7] = pointy[2] - pointy[3];
dx[8] = pointx[2] - pointx[4]; dy[8] = pointy[2] - pointy[4];
dx[9] = pointx[3] - pointx[4]; dy[9] = pointy[3] - pointy[4];
// Extreme spread = sqrt of the maximum squared pairwise distance.
double max_d2 = 0;
for (unsigned j = 0; j < 10; j++) {
auto candidate_d2 = dx[j] * dx[j] + dy[j] * dy[j];
max_d2 = max(max_d2, candidate_d2);
}
double es = sqrt(max_d2);
sum += es;
}
// Reduce within the block
sum = reduce_sum(sum, cta);
// Store the result
if (threadIdx.x == 0) {
results[bid] = sum;
}
}
// Average extreme spread of 4^power_of_4 five-shot groups whose impact
// coordinates are standard-normal, computed on device 0 with cuRAND-generated
// points and the computeValue kernel.  Throws std::runtime_error on any CUDA
// or cuRAND failure.
//
// Fixes:
//  - Error messages previously did `msg += curandResult`, which appends the
//    status enum as a SINGLE CHARACTER (its numeric code as a char); now the
//    code is formatted with std::to_string.
//  - grid.x could be 0 (invalid launch) when numGroups < threadBlockSize;
//    it is now clamped to at least 1.
//  - The kernel launch is now checked via cudaGetLastError().
double es_cuda(unsigned power_of_4, unsigned seed)
{
    // Get device properties
    struct cudaDeviceProp deviceProperties;
    cudaError_t cudaResult = cudaGetDeviceProperties(&deviceProperties, 0);
    if (cudaResult != cudaSuccess) {
        string msg("Could not get device properties: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Check precision is valid (double support arrived with SM 1.3)
    if (deviceProperties.major < 1 || (deviceProperties.major == 1 && deviceProperties.minor < 3)) {
        throw std::runtime_error("Device does not have double precision support");
    }
    // Check requested size is valid
    const unsigned threadBlockSize = 128;
    if (threadBlockSize > (deviceProperties.maxThreadsPerBlock)) {
        throw std::runtime_error("Thread block size is greater than maxThreadsPerBlock");
    }
    dim3 block;
    block.x = threadBlockSize;
    // Attach to GPU
    cudaResult = cudaSetDevice(0);
    if (cudaResult != cudaSuccess) {
        string msg("Could not set CUDA device: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Aim to launch around ten or more times as many blocks as there
    // are multiprocessors on the target device.
    dim3 grid;
    const unsigned numGroups = 1 << (2 * power_of_4);
    grid.x = numGroups / threadBlockSize;
    while (grid.x > 20 * deviceProperties.multiProcessorCount) {
        grid.x >>= 1;
    }
    // Guard against numGroups < threadBlockSize, which would otherwise yield
    // grid.x == 0 and an invalid launch configuration.
    if (grid.x == 0) {
        grid.x = 1;
    }
    // Get computeValue function properties and check the maximum block size
    struct cudaFuncAttributes funcAttributes;
    cudaResult = cudaFuncGetAttributes(&funcAttributes, computeValue);
    if (cudaResult != cudaSuccess) {
        string msg("Could not get function attributes: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    if (block.x > (unsigned)funcAttributes.maxThreadsPerBlock) {
        throw std::runtime_error("Block X dimension is too large for computeValue kernel");
    }
    // Check the dimensions are valid
    if (block.x > (unsigned)deviceProperties.maxThreadsDim[0]) {
        throw std::runtime_error("Block X dimension is too large for device");
    }
    if (grid.x > (unsigned)deviceProperties.maxGridSize[0]) {
        throw std::runtime_error("Grid X dimension is too large for device");
    }
    // Allocate memory for points
    // Each simulation has ten random numbers to give five pairs of X and Y coordinates
    double* d_points = 0;
    cudaResult = cudaMalloc((void **)&d_points, 10 * numGroups * sizeof(double));
    if (cudaResult != cudaSuccess) {
        string msg("Could not allocate memory on device for random numbers: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Allocate memory for result
    // Each thread block will produce one result
    double* d_results = 0;
    cudaResult = cudaMalloc((void**)&d_results, grid.x * sizeof(double));
    if (cudaResult != cudaSuccess) {
        string msg("Could not allocate memory on device for partial results: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Generate random points
    curandStatus_t curandResult;
    curandGenerator_t prng;
    curandResult = curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
    if (curandResult != CURAND_STATUS_SUCCESS) {
        string msg("Could not create pseudo-random number generator: ");
        msg += std::to_string(static_cast<int>(curandResult));
        throw std::runtime_error(msg);
    }
    curandResult = curandSetPseudoRandomGeneratorSeed(prng, seed);
    if (curandResult != CURAND_STATUS_SUCCESS) {
        string msg("Could not set seed for pseudo-random number generator: ");
        msg += std::to_string(static_cast<int>(curandResult));
        throw std::runtime_error(msg);
    }
    curandResult = curandGenerateNormalDouble(prng, (double*)d_points, 10 * numGroups, 0, 1);
    if (curandResult != CURAND_STATUS_SUCCESS) {
        string msg("Could not generate pseudo-random numbers: ");
        msg += std::to_string(static_cast<int>(curandResult));
        throw std::runtime_error(msg);
    }
    curandResult = curandDestroyGenerator(prng);
    if (curandResult != CURAND_STATUS_SUCCESS) {
        string msg("Could not destroy pseudo-random number generator: ");
        msg += std::to_string(static_cast<int>(curandResult));
        throw std::runtime_error(msg);
    }
    // Calculate and average group size (one partial sum per block)
    computeValue<<<grid, block, block.x * sizeof(double)>>>(d_results, d_points, numGroups);
    cudaResult = cudaGetLastError();
    if (cudaResult != cudaSuccess) {
        string msg("Could not launch computeValue kernel: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Copy the results back to host (blocking copy also synchronizes)
    vector<double> results(grid.x);
    cudaResult = cudaMemcpy(&results[0], d_results, grid.x * sizeof(double), cudaMemcpyDeviceToHost);
    if (cudaResult != cudaSuccess) {
        string msg("Could not copy results to host: ");
        msg += cudaGetErrorString(cudaResult);
        throw std::runtime_error(msg);
    }
    // Complete sum-reduction
    double sum = std::accumulate(results.begin(), results.end(), double(0));
    // Cleanup
    if (d_points) {
        cudaFree(d_points);
    }
    if (d_results) {
        cudaFree(d_results);
    }
    // Divide sum by count to get the average
    return sum / numGroups;
}
// Driver: runs es_cuda `nt` times with independent seeds and reports
// min/avg/max of the average extreme spread plus wall time, as one CSV row.
// argv[1] (optional) is power_of_4; values above 12 are capped at 12 and the
// excess is converted into extra repetitions (nt) instead, keeping the total
// simulated group count constant.
int main(int argc, char **argv)
{
unsigned power_of_4 = 12;
if (argc == 2) {
power_of_4 = atoi(argv[1]);
}
unsigned nt = 12;
if (power_of_4 > 12) {
nt <<= 2 * (power_of_4 - 12);
power_of_4 = 12;
}
try {
auto start_time = std::chrono::system_clock::now();
double avg = 0, min = 100, max = 0;
// 128-bit multiplicative congruential generator; the top 64 bits seed
// each es_cuda run.
__uint128_t mcg128_state = time(NULL) | 1; // can be seeded to any odd number
for (unsigned j = 0; j < nt; j++) {
double r = es_cuda(power_of_4, (unsigned)(mcg128_state >> 64));
avg += r;
min = fmin(r, min);
max = fmax(r, max);
mcg128_state *= 0xda942042e4dd58b5ULL;
}
avg /= nt;
auto end_time = std::chrono::system_clock::now();
std::chrono::duration<double> seconds = end_time - start_time;
std::cout.precision(14);
std::cout << "code,threads,power_of_4,min,avg,max,time\n";
std::cout << "CUDA," << nt << "," << power_of_4 << "," << min << "," << avg << "," << max << "," << seconds.count() << "\n";
} catch (std::runtime_error &e) { // es_cuda() can throw runtime exceptions
fprintf(stderr, "runtime error (%s)\n", e.what());
return(EXIT_FAILURE);
}
return(EXIT_SUCCESS);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.