serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
24,001 | #include "includes.h"
// Evaluates the polynomial 3x^2 - 7x + 5 in place for each of the first N
// elements of `poli`. One thread per element; excess threads do nothing.
//
// poli : device array of at least N floats (read and overwritten)
// N    : number of valid elements
__global__ void poli1(float* poli, const int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// Load only after the bounds check: the original read poli[idx]
// unconditionally, an out-of-bounds access for tail threads of the
// last block whenever N is not a multiple of the block size.
float x = poli[idx];
poli[idx] = 3 * x * x - 7 * x + 5;
}
} |
24,002 | #include <cstdio>
// Element-wise vector sum on the host: pC[i] = pA[i] + pB[i] for
// i in [0, vectorSize).
void VecAdd( const float* pA, const float* pB, float* pC, int vectorSize)
{
    int idx = 0;
    while (idx < vectorSize)
    {
        pC[idx] = pA[idx] + pB[idx];
        ++idx;
    }
}
// Fills the vector with an arithmetic progression:
// pVector[i] = firstValue + i * increment.
void VecFill( float * pVector, int vectorSize, float firstValue, float increment )
{
    for (int idx = 0; idx != vectorSize; ++idx)
        pVector[idx] = firstValue + (increment * idx);
}
// Prints a label header followed by one "[index] : value" line per element.
void VecPrint( const float * pVector, int vectorSize, const char * pLabel="noname" )
{
    printf("%s : \n", pLabel);
    int idx = 0;
    while (idx < vectorSize)
    {
        printf("[%d] : %f\n", idx, pVector[idx]);
        ++idx;
    }
}
// Demo driver: fill two 10-element vectors, print them, add them, print
// the sum, and release the buffers.
int main(int argc, char* argv[])
{
    const int vectorSize = 10;
    const size_t vectorMemSize = vectorSize * sizeof(float);

    float* pA = (float*)malloc(vectorMemSize);
    float* pB = (float*)malloc(vectorMemSize);
    float* pC = (float*)malloc(vectorMemSize);

    VecFill(pA, vectorSize, 0.0f, 1.0f);    // A = 0, 1, 2, ...
    VecPrint(pA, vectorSize, "A");
    VecFill(pB, vectorSize, 10.0f, -0.5f);  // B = 10, 9.5, 9, ...
    VecPrint(pB, vectorSize, "B");

    VecAdd(pA, pB, pC, vectorSize);
    VecPrint(pC, vectorSize, "A+B");

    free(pA);
    free(pB);
    free(pC);
    return 0;
}
|
24,003 | #include "includes.h"
// Branchless grade-rounding kernel: for each thread's value media[t] it
// writes an integer grade to final[t]. Exactly one of the four product
// terms below is nonzero for a given input:
//   - media is already an integer            -> keep it as-is
//   - fractional and in (4, 5)               -> round DOWN to 4
//   - fractional and > 9                     -> clamp to 9
//   - fractional and (< 4, or in (5, 9))     -> round UP (trunc + 1)
// NOTE(review): there is no bounds guard on `thread`, so the launch must
// cover exactly the length of `media`/`final` — confirm at the call site.
__global__ void marks(float * media, int * final){
int thread = blockIdx.x*blockDim.x + threadIdx.x;
final[thread] = (media[thread] == (int)media[thread]) * (int)media[thread] +
(media[thread] != (int)media[thread] && media[thread] > 4 && media[thread] < 5)* 4 +
(media[thread] != (int)media[thread] && media[thread] > 9)* 9 +
(media[thread] != (int)media[thread] && (media[thread] < 4 || (media[thread] > 5 && media[thread] < 9))) * ((int)media[thread] + 1);
} |
24,004 | #include <stdio.h>
#include <stdlib.h>
#include <algorithm>
// Change the code here:
// This should be changed to GPU kernel definition
// CPU reference implementation of element-wise matrix addition for two
// row-major (height x width) matrices: C = A + B.
void matAdd(int width, int height, const float* A, const float* B, float* C)
{
    const int total = height * width;
    for (int index = 0; index < total; ++index)
    {
        C[index] = A[index] + B[index];
    }
}
// Exercise skeleton: fills two 1000x100 matrices with deterministic
// pseudo-random values (fixed srand seed), adds them with the CPU
// matAdd, and prints the top-left 5x5 corner of the result. The numbered
// comments mark where the student is expected to insert CUDA calls.
int main()
{
int width = 1000;
int height = 100;
int numElements = width*height;
// calloc zero-initializes, so C is all zeros until matAdd writes it.
float* A = (float*)calloc(numElements, sizeof(float));
float* B = (float*)calloc(numElements, sizeof(float));
float* C = (float*)calloc(numElements, sizeof(float));
// Fixed seed keeps the printed output reproducible across runs.
srand(1214134);
for (int i = 0; i < numElements; i++)
{
// rand()/(RAND_MAX+1.0) yields a value in [0, 1).
A[i] = float(rand())/float(RAND_MAX + 1.0);
B[i] = float(rand())/float(RAND_MAX + 1.0);
}
// Insert your code here:
// 1. Create GPU device buffers
// 2. Copy input data from host to device (matrixes A and B)
// 3. Change the CPU function call to the GPU kernel call
matAdd(width, height, A, B, C);
// 4. Copy the result back (matrix C)
// Print only the 5x5 top-left corner as a spot check.
for (int i = 0; i < std::min(5, height); i++)
{
for (int j = 0; j < std::min(5, width); j++)
{
int index = i*width + j;
printf("%3.2f + %3.2f = %3.2f;\t", A[index], B[index], C[index]);
}
printf("...\n");
}
printf("...\n");
free(A);
free(B);
free(C);
// Free GPU memory here
return 0;
}
|
24,005 | #include "includes.h"
// Row-brightness reduction: the data is laid out as `size` images of
// (rows x cols); one thread handles one (image, row) pair and sums that
// row's values from column `startIndex` to cols-1 into BrightArray[id].
//
// DataArray   : input, size * rows * cols floats, row-major per image
// BrightArray : output, size * rows floats (one partial row-sum each)
// startIndex  : first column included in the sum
__global__ static void ZCalcBrightness(float* DataArray, float* BrightArray, int size, int rows, int cols, int startIndex)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= size * rows) // out of range
return;
// Decompose the flat thread id into (image, row).
int sizeIndex = id / rows;
int rowIndex = id % rows;
BrightArray[id] = 0;
for (int i = startIndex; i < cols; i++)
{
int currentID = sizeIndex * rows * cols + rowIndex * cols + i;
BrightArray[id] += DataArray[currentID];
}
} |
24,006 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <set>
#include <utility>
using namespace std;
// Multiplicative update for factor matrix A (beta-divergence style CP
// factorization). Thread (j, i) = (latent column, mode-i row) updates one
// entry A_n[i*k+j]. T_i is the per-row offset table into the unfolded
// sparse data T_d, which stores (t, q, val) triples; T_i[i]..end delimits
// row i's triples (l_d = total data length is the sentinel for the last row).
// sum_n / sum_d start at 0.1f — presumably a damping/epsilon term to avoid
// 0/0 on empty rows; confirm against the original algorithm.
// NOTE(review): t and q are assigned from float T_d values WITHOUT an
// explicit cast (implicit truncation) — the B and C kernels cast
// explicitly; same effect, inconsistent style. No bounds guard on i/j:
// the launch geometry must be exactly (k, l_i).
__global__ void factorAKernel ( int *T_i, float *T_d ,float *A, float *B, float *C, float *A_n, int l_i, int l_t, int l_q, int l_d, int k,float beta)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
float sum_n = 0.1f, sum_d = 0.1f;
float temp = 0.0f, error = 0.0f;
int start = T_i[i];
int end = ((i+1) < l_i) ? T_i[i+1] : l_d;
int q = 0, t=0;
for(int ind=start+2; ind<end; ind+=3){
t = T_d[ind-2];
q = T_d[ind-1];
error = 0.0f;
// Reconstructed value <A_i, B_t, C_q> for this nonzero.
for(int j_i=0;j_i<k;j_i++){
error += A[i*k+j_i]*B[t*k+j_i]*C[q*k+j_i];
}
temp = B[t*k+j]*C[q*k+j];
sum_n += temp * T_d[ind] / powf(error,1.0f - beta);
sum_d += temp * powf(error,beta);
}
A_n[i*k+j] = A[i*k+j]*(sum_n/sum_d);
}
// Multiplicative update for factor matrix B — the mode-t analogue of
// factorAKernel. Thread (j, t) updates B_n[t*k+j]. T_t is the per-row
// offset table into T_d, whose triples here are (q, i, val).
// FIX: the inner reconstruction loop originally declared `int j`,
// shadowing the latent-column index `j` computed above. The shadowed name
// was only used inside that loop, so behavior was correct, but it was a
// bug magnet and inconsistent with factorAKernel; renamed to `j_i`.
// No bounds guard on t/j: launch geometry must be exactly (k, l_t).
__global__ void factorBKernel ( int *T_t, float *T_d, float *A, float *B, float *C, float *B_n, int l_i, int l_t, int l_q, int l_d, int k, float beta){
int t = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
float sum_n = 0.1f, sum_d = 0.1f;
float temp = 0.0f, error = 0.0f;
int start = T_t[t];
int end = ((t+1) < l_t) ? T_t[t+1] : l_d;
int i,q;
for(int ind=start+2; ind<end; ind+=3){
q = (int)T_d[ind-2];
i = (int)T_d[ind-1];
error = 0.0f;
// Reconstructed value <A_i, B_t, C_q> for this nonzero.
for(int j_i=0;j_i<k;j_i++){
error += A[i*k+j_i]*B[t*k+j_i]*C[q*k+j_i];
}
temp = A[i*k+j]*C[q*k+j];
sum_n += temp * T_d[ind] / powf(error, 1.0f - beta);
sum_d += temp * powf(error, beta);
}
B_n[t*k+j] = B[t*k+j]*(sum_n/sum_d);
}
// Multiplicative update for factor matrix C — the mode-q analogue of
// factorAKernel. Thread (j, q) updates C_n[q*k+j]. T_q is the per-row
// offset table into T_d, whose triples here are (i, t, val).
// FIX: the inner reconstruction loop originally declared `int j`,
// shadowing the latent-column index `j` computed above (same defect as in
// factorBKernel); renamed to `j_i` to match factorAKernel.
// No bounds guard on q/j: launch geometry must be exactly (k, l_q).
__global__ void factorCKernel ( int *T_q, float *T_d, float *A, float *B, float *C, float *C_n, int l_i, int l_t, int l_q, int l_d, int k, float beta){
int q = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
float sum_n = 0.1f, sum_d = 0.1f;
float temp = 0.0f, error = 0.0f;
int start = T_q[q];
int end = ((q+1) < l_q) ? T_q[q+1] : l_d;
int i,t;
for(int ind=start+2; ind<end; ind+=3){
i = (int)T_d[ind-2];
t = (int)T_d[ind-1];
error = 0.0f;
// Reconstructed value <A_i, B_t, C_q> for this nonzero.
for(int j_i=0;j_i<k;j_i++){
error += A[i*k+j_i]*B[t*k+j_i]*C[q*k+j_i];
}
temp = A[i*k+j]*B[t*k+j];
sum_n += temp * T_d[ind] / powf(error, 1.0f - beta);
sum_d += temp * powf(error, beta);
}
C_n[q*k+j] = C[q*k+j]*sum_n/sum_d;
}
// One nonzero entry of a sparse 3-way tensor: coordinates (i, t, q)
// and its value.
typedef struct tensor_elem{
int i,t,q;
float val;
} tensor_elem;
// qsort comparator: ascending lexicographic order on (i, t, q).
// FIX: the original returned only 0 or 1 (a boolean), which violates the
// qsort contract (negative / zero / positive) and makes the resulting
// order unspecified; this version returns a proper three-way result.
int comp_i(const void* el1, const void* el2){
const tensor_elem* a = (const tensor_elem*)el1;
const tensor_elem* b = (const tensor_elem*)el2;
if (a->i != b->i) return (a->i < b->i) ? -1 : 1;
if (a->t != b->t) return (a->t < b->t) ? -1 : 1;
if (a->q != b->q) return (a->q < b->q) ? -1 : 1;
return 0;
}
// qsort comparator: ascending lexicographic order on (t, q, i).
// FIX: the original returned only 0/1 (violating the qsort contract) and
// compared q with `<` while every other key used `>` — an apparent typo.
// Only the primary grouping by t matters to unfoldTensor (order within a
// t-segment does not affect the kernels' sums), so the secondary keys are
// normalized to ascending here.
int comp_t(const void* el1, const void* el2){
const tensor_elem* a = (const tensor_elem*)el1;
const tensor_elem* b = (const tensor_elem*)el2;
if (a->t != b->t) return (a->t < b->t) ? -1 : 1;
if (a->q != b->q) return (a->q < b->q) ? -1 : 1;
if (a->i != b->i) return (a->i < b->i) ? -1 : 1;
return 0;
}
// qsort comparator: ascending lexicographic order on (q, i, t).
// FIX: same contract violation as comp_i — the original returned only
// 0/1; qsort requires negative / zero / positive.
int comp_q(const void* el1, const void* el2){
const tensor_elem* a = (const tensor_elem*)el1;
const tensor_elem* b = (const tensor_elem*)el2;
if (a->q != b->q) return (a->q < b->q) ? -1 : 1;
if (a->i != b->i) return (a->i < b->i) ? -1 : 1;
if (a->t != b->t) return (a->t < b->t) ? -1 : 1;
return 0;
}
// ---- helpers for unfoldTensor ------------------------------------------
// Field selectors for the three unfolding modes (0 = i-major, 1 = t-major,
// 2 = q-major). The per-element triple stored in the data array is
// (minor1, minor2, val):
//   mode 0: (t, q, val)   mode 1: (i, q, val)   mode 2: (i, t, val)
static int unfold_major(const tensor_elem* e, int mode){
return (mode == 0) ? e->i : ((mode == 1) ? e->t : e->q);
}
static float unfold_minor1(const tensor_elem* e, int mode){
return (float)((mode == 0) ? e->t : e->i);
}
static float unfold_minor2(const tensor_elem* e, int mode){
return (float)((mode == 2) ? e->t : e->q);
}
// Sorts T with `comp`, then builds a CSR-like unfolding for one mode:
// (*pInd)[r] is the offset into *pData where major index r begins, and
// *pData holds 3*n floats of (minor1, minor2, val) triples. Both arrays
// are malloc'd here and owned by the caller (release with free()).
static void unfold_one_mode(tensor_elem* T, int n, int len_ind,
int** pInd, float** pData,
int (*comp)(const void*, const void*), int mode){
qsort(T, n, sizeof(tensor_elem), comp);
*pInd = (int*)malloc(len_ind*sizeof(int));
*pData = (float*)malloc(3*n*sizeof(float));
int* ind_arr = *pInd;
float* data = *pData;
int ind_data = 0;
int ind_sparse = 0;
ind_arr[0] = 0;
for(int ind=0; ind<(len_ind - 1); ind++){
// Emit every element whose major index equals `ind`.
// (ind_sparse < 3*n also guarantees ind_data < n, so the
// T[ind_data] read below stays in bounds.)
while((ind_sparse < (3*n)) && (unfold_major(&T[ind_data], mode) == ind)){
data[ind_sparse] = unfold_minor1(&T[ind_data], mode);
data[ind_sparse+1] = unfold_minor2(&T[ind_data], mode);
data[ind_sparse+2] = T[ind_data].val;
ind_data++;
ind_sparse+=3;
}
ind_arr[ind+1]=ind_sparse;
}
// Remaining elements all belong to the last major index.
while(ind_sparse < (3*n)){
data[ind_sparse] = unfold_minor1(&T[ind_data], mode);
data[ind_sparse+1] = unfold_minor2(&T[ind_data], mode);
data[ind_sparse+2] = T[ind_data].val;
ind_data++;
ind_sparse+=3;
}
}
// Builds the three mode unfoldings (i-, t-, q-major) of the sparse tensor
// T for the factorization kernels. Signature unchanged; as before, T is
// left sorted by comp_q on return. The original inlined the same pass
// three times; it is now factored into unfold_one_mode.
void unfoldTensor(tensor_elem* T, int** Ti_ind, float** Ti_data, int** Tt_ind, float** Tt_data, int** Tq_ind, float** Tq_data, int* leni_ind, int* lent_ind, int* lenq_ind, int* num_values){
int n = *num_values;
unfold_one_mode(T, n, *leni_ind, Ti_ind, Ti_data, comp_i, 0);
unfold_one_mode(T, n, *lent_ind, Tt_ind, Tt_data, comp_t, 1);
unfold_one_mode(T, n, *lenq_ind, Tq_ind, Tq_data, comp_q, 2);
}
// Reads a sparse tensor from a text file and builds its three unfoldings.
// Expected format: "I T Q" (dimensions), then N (nonzero count), then N
// lines of "i t q val".
// FIX: the original never checked fopen/fscanf, so a missing or malformed
// file crashed or silently used garbage; now it reports and exits.
void parseTensorFile(char* fileName, int** Ti_ind, float** Ti_data, int** Tt_ind, float** Tt_data, int** Tq_ind, float** Tq_data, int* leni_ind, int* lent_ind, int* lenq_ind, int* num_values){
FILE* f = fopen(fileName, "r");
if (f == NULL) {
fprintf(stderr, "parseTensorFile: cannot open %s\n", fileName);
exit(1);
}
int n = 0;
int i,t,q;
float val;
if (fscanf(f,"%d %d %d",&i,&t,&q) != 3 || fscanf(f,"%d",&n) != 1) {
fprintf(stderr, "parseTensorFile: malformed header in %s\n", fileName);
fclose(f);
exit(1);
}
*leni_ind = i;
*lent_ind = t;
*lenq_ind = q;
*num_values = n;
tensor_elem* T = (tensor_elem*)malloc(n*sizeof(tensor_elem));
for(int ind=0;ind<n;ind++){
if (fscanf(f,"%d %d %d %f",&i,&t,&q,&val) != 4) {
fprintf(stderr, "parseTensorFile: malformed entry %d in %s\n", ind, fileName);
fclose(f);
free(T);
exit(1);
}
tensor_elem cur;
cur.i = i; cur.t = t; cur.q = q; cur.val = val;
T[ind] = cur;
}
fclose(f);
unfoldTensor(T,Ti_ind,Ti_data,Tt_ind,Tt_data,Tq_ind,Tq_data,leni_ind,lent_ind,lenq_ind,num_values);
free(T);
}
// Writes the n tensor entries to a text file in the same format
// parseTensorFile reads, sorted by (i, t, q). Note: sorts T in place.
// FIX: the original dereferenced the fopen result without a NULL check.
void printTensorToFile(char* fileName, tensor_elem* T, int i, int t, int q, int n){
FILE* f = fopen(fileName, "w");
if (f == NULL) {
fprintf(stderr, "printTensorToFile: cannot open %s for writing\n", fileName);
return;
}
fprintf(f,"%d %d %d\n",i,t,q);
fprintf(f,"%d\n",n);
qsort(T, n, sizeof(tensor_elem),comp_i);
for(int ind=0;ind<n;ind++){
tensor_elem cur = T[ind];
fprintf(f,"%d %d %d %f\n",cur.i,cur.t, cur.q, cur.val);
}
fclose(f);
}
// Generates a random sparse tensor with `num_values` distinct nonzero
// cells inside a (leni_ind x lent_ind x lenq_ind) grid, dumps it to
// "tensor_input.txt", and builds the three unfoldings.
// NOTE(review): rejection sampling via index_set never terminates if
// num_values > leni_ind*lent_ind*lenq_ind — confirm callers respect that.
// NOTE(review): rand() is never seeded here, so the tensor is identical
// on every run — presumably intentional for reproducibility; confirm.
void genTensor(int** Ti_ind, float** Ti_data, int** Tt_ind, float** Tt_data, int** Tq_ind, float** Tq_data, int leni_ind, int lent_ind, int lenq_ind, int num_values){
int n = num_values;
tensor_elem* T = (tensor_elem*)malloc(n*sizeof(tensor_elem));
//tensor_elem* T = new tensor_elem[num_values];
// Tracks already-used (i, (t, q)) coordinates so each cell is unique.
set<pair<int,pair<int,int> > > index_set;
int ind=0;
while(ind<n){
tensor_elem cur;
int i = rand() % leni_ind;
int t = rand() % lent_ind;
int q = rand() % lenq_ind;
if( index_set.find(make_pair(i,make_pair(t,q))) == index_set.end() ){
cur.i = i;
cur.t = t;
cur.q = q;
cur.val = rand() % 100;
T[ind] = cur;
index_set.insert(make_pair(i,make_pair(t,q)));
ind++;
}
}
qsort(T, n, sizeof(tensor_elem),comp_i);
printTensorToFile("tensor_input.txt",T,leni_ind,lent_ind,lenq_ind,num_values);
unfoldTensor(T,Ti_ind,Ti_data,Tt_ind,Tt_data,Tq_ind,Tq_data,&leni_ind,&lent_ind,&lenq_ind,&num_values);
free(T);
}
// Densely evaluates the rank-k model <A_i, B_t, C_q> at every cell of the
// (leni_ind x lent_ind x lenq_ind) grid and returns the result as a
// malloc'd array of tensor elements in (i, t, q) row-major order.
// Caller owns the returned buffer (release with free()).
tensor_elem* buildErrorTensor(float* A, float* B,float* C,int k,int leni_ind, int lent_ind, int lenq_ind){
tensor_elem* T = (tensor_elem*)malloc(leni_ind*lent_ind*lenq_ind*sizeof(tensor_elem));
int pos = 0;
for(int i=0;i<leni_ind;i++){
for(int t=0;t<lent_ind;t++){
for(int q=0;q<lenq_ind;q++){
// Reconstructed value: sum over latent factors.
float acc = 0.0f;
for(int j=0;j<k;j++)
acc += A[i*k+j]*B[t*k+j]*C[q*k+j];
T[pos].i = i;
T[pos].t = t;
T[pos].q = q;
T[pos].val = acc;
++pos;
}
}
}
return T;
}
// Writes one row-major (rows x k) factor matrix as text, k space-separated
// values per line.
static void write_factor_matrix(FILE* f, const float* M, int k, int rows){
for(int ind=1;ind<=(k*rows);ind++){
fprintf(f,"%f",M[ind-1]);
if(ind % k == 0) fprintf(f,"\n");
else fprintf(f," ");
}
}
// Dumps the three factor matrices A (i x k), B (t x k), C (q x k) to
// their respective files. The original repeated the same loop three times
// and never checked fopen; both are fixed here.
void printToFile(char* fileNameA, char* fileNameB, char* fileNameC, int k, float* A, int i, float* B, int t, float* C, int q){
//open files
FILE* fA = fopen(fileNameA, "w");
FILE* fB = fopen(fileNameB, "w");
FILE* fC = fopen(fileNameC, "w");
if (fA == NULL || fB == NULL || fC == NULL) {
fprintf(stderr, "printToFile: cannot open output file(s)\n");
if (fA) fclose(fA);
if (fB) fclose(fB);
if (fC) fclose(fC);
return;
}
write_factor_matrix(fA, A, k, i);
write_factor_matrix(fB, B, k, t);
write_factor_matrix(fC, C, k, q);
fclose(fA);
fclose(fB);
fclose(fC);
}
// Driver: generates a small random 3x3x3 tensor, runs 100 alternating
// multiplicative updates of A, B, C on the GPU (ping-ponging between the
// *_cuda and *_next_cuda buffers), then writes factors and the dense
// reconstruction to disk.
// FIX: Ti_ind/Ti_data/Tt_ind/Tt_data/Tq_ind/Tq_data are allocated with
// malloc() inside unfoldTensor but were released with delete[] — undefined
// behavior; they are now released with free(). A/B/C keep delete[]
// because they are allocated with new[].
int main ( int argc, char * argv [] )
{
char fileName[] = "tensor_input.txt";
char fileA[] = "A.txt";
char fileB[] = "B.txt";
char fileC[] = "C.txt";
int *Ti_ind, *Tt_ind, *Tq_ind;
float* Ti_data, *Tt_data, *Tq_data;
//int i, t, q, n;
//parseTensorFile(fileName,&Ti_ind,&Ti_data,&Tt_ind,&Tt_data,&Tq_ind,&Tq_data,&i,&t,&q,&n);
int i = 3, t = 3, q = 3, n = 27;
genTensor(&Ti_ind,&Ti_data,&Tt_ind,&Tt_data,&Tq_ind,&Tq_data,i,t,q,n);
/*
for(int ind=0;ind<i;ind++){
printf("%d ", Ti_ind[ind]);
}
printf("\n\n");
*/
/*
for(int ind=0;ind<(3*n);ind+=3){
printf("%f %f %f\n",Ti_data[ind],Ti_data[ind+1],Ti_data[ind+2]);
}
*/
printf("\n");
int k = 2;
//scanf("%d", &k);
float beta = 0.0f;
// Random initial factors (rand() unseeded -> deterministic runs).
float* A = new float[k*i];
float* B = new float[k*t];
float* C = new float[k*q];
for(int ind=0;ind<(k*i);ind++) A[ind] = (float)rand();
for(int ind=0;ind<(k*t);ind++) B[ind] = (float)rand();
for(int ind=0;ind<(k*q);ind++) C[ind] = (float)rand();
// Device copies of the three unfoldings and the factor matrices.
float *Ti_data_cuda = NULL, *Tt_data_cuda = NULL, *Tq_data_cuda = NULL;
int *Ti_ind_cuda = NULL, *Tt_ind_cuda = NULL, *Tq_ind_cuda = NULL;
cudaMalloc ( (void**)&Ti_data_cuda, 3*n*sizeof(float) );
cudaMalloc ( (void**)&Tt_data_cuda, 3*n*sizeof(float) );
cudaMalloc ( (void**)&Tq_data_cuda, 3*n*sizeof(float) );
cudaMalloc ( (void**)&Ti_ind_cuda, i*sizeof(int) );
cudaMalloc ( (void**)&Tt_ind_cuda, t*sizeof(int) );
cudaMalloc ( (void**)&Tq_ind_cuda, q*sizeof(int) );
float *A_cuda = NULL, *B_cuda = NULL, *C_cuda = NULL, *A_next_cuda = NULL, *B_next_cuda = NULL, *C_next_cuda = NULL;
int numBytesA = (k*i)*sizeof(float);
int numBytesB = (k*t)*sizeof(float);
int numBytesC = (k*q)*sizeof(float);
cudaMalloc ( (void**)&A_cuda, numBytesA );
cudaMalloc ( (void**)&B_cuda, numBytesB );
cudaMalloc ( (void**)&C_cuda, numBytesC );
cudaMalloc ( (void**)&A_next_cuda, numBytesA );
cudaMalloc ( (void**)&B_next_cuda, numBytesB );
cudaMalloc ( (void**)&C_next_cuda, numBytesC );
// Single block; each kernel's block is (k, rows) threads.
dim3 blocks = dim3(1, 1);
cudaEvent_t start, stop;
float gpuTime = 0.0f;
cudaEventCreate ( &start );
cudaEventCreate ( &stop );
cudaEventRecord ( start, 0 );
cudaDeviceSynchronize();
cudaMemcpy ( Ti_data_cuda, Ti_data, 3*n*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy ( Ti_ind_cuda, Ti_ind, i*sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy ( Tt_data_cuda, Tt_data, 3*n*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy ( Tt_ind_cuda, Tt_ind, t*sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy ( Tq_data_cuda, Tq_data, 3*n*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy ( Tq_ind_cuda, Tq_ind, q*sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy ( A_cuda, A, numBytesA, cudaMemcpyHostToDevice );
cudaMemcpy ( B_cuda, B, numBytesB, cudaMemcpyHostToDevice );
cudaMemcpy ( C_cuda, C, numBytesC, cudaMemcpyHostToDevice );
// Ping-pong between (A,B,C) and (A_next,B_next,C_next) each iteration.
bool flag = true;
for(int ind=0;ind<100;ind++){
if(flag){
cudaDeviceSynchronize();
factorAKernel<<<blocks, dim3(k,i)>>>(Ti_ind_cuda, Ti_data_cuda, A_cuda, B_cuda, C_cuda, A_next_cuda,i,t,q,3*n,k,beta);
cudaDeviceSynchronize();
factorBKernel<<<blocks, dim3(k,t)>>>(Tt_ind_cuda, Tt_data_cuda, A_next_cuda, B_cuda, C_cuda, B_next_cuda, i,t,q,3*n,k,beta);
cudaDeviceSynchronize();
factorCKernel<<<blocks, dim3(k,q)>>>(Tq_ind_cuda, Tq_data_cuda, A_next_cuda, B_next_cuda,C_cuda,C_next_cuda,i,t,q,3*n,k,beta);
}
else{
cudaDeviceSynchronize();
factorAKernel<<<blocks, dim3(k,i)>>>(Ti_ind_cuda, Ti_data_cuda, A_next_cuda, B_next_cuda, C_next_cuda, A_cuda, i,t,q,3*n,k,beta);
cudaDeviceSynchronize();
factorBKernel<<<blocks, dim3(k,t)>>>(Tt_ind_cuda, Tt_data_cuda, A_cuda, B_next_cuda, C_next_cuda, B_cuda, i,t,q,3*n,k,beta);
cudaDeviceSynchronize();
factorCKernel<<<blocks, dim3(k,q)>>>(Tq_ind_cuda, Tq_data_cuda, A_cuda, B_cuda, C_next_cuda, C_cuda, i,t,q,3*n,k,beta);
}
flag = !flag;
}
cudaDeviceSynchronize();
// After an even iteration count the freshest factors are in whichever
// buffer set was written last; flag tells us which.
if(flag == false){
cudaMemcpy ( A, A_next_cuda, numBytesA, cudaMemcpyDeviceToHost );
cudaMemcpy ( B, B_next_cuda, numBytesB, cudaMemcpyDeviceToHost );
cudaMemcpy ( C, C_next_cuda, numBytesC, cudaMemcpyDeviceToHost );
}
else{
cudaMemcpy ( A, A_cuda, numBytesA, cudaMemcpyDeviceToHost );
cudaMemcpy ( B, B_cuda, numBytesB, cudaMemcpyDeviceToHost );
cudaMemcpy ( C, C_cuda, numBytesC, cudaMemcpyDeviceToHost );
}
cudaEventRecord ( stop, 0 );
cudaEventSynchronize ( stop );
cudaEventElapsedTime ( &gpuTime, start, stop );
printf("\ntime spent executing by the GPU: %.2f millseconds\n", gpuTime );
printToFile(fileA,fileB,fileC,k,A,i,B,t,C,q);
tensor_elem* Q = buildErrorTensor(A,B,C,k,i,t,q);
printTensorToFile("error_tensor.txt",Q,i,t,q,i*t*q);
free(Q);
cudaEventDestroy ( start );
cudaEventDestroy ( stop );
cudaFree(Ti_ind_cuda);
cudaFree(Ti_data_cuda);
cudaFree(Tt_ind_cuda);
cudaFree(Tt_data_cuda);
cudaFree(Tq_ind_cuda);
cudaFree(Tq_data_cuda);
cudaFree(A_cuda);
cudaFree(B_cuda);
cudaFree(C_cuda);
cudaFree(A_next_cuda);
cudaFree(B_next_cuda);
cudaFree(C_next_cuda);
// malloc'd in unfoldTensor -> must be free(), not delete[].
free(Ti_ind);
free(Ti_data);
free(Tt_ind);
free(Tt_data);
free(Tq_ind);
free(Tq_data);
// allocated with new[] above -> delete[] is correct here.
delete[] A;
delete[] B;
delete[] C;
return 0;
} |
24,007 | #ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#include <stdio.h>
#include <random>
#include <iomanip>
#include <iostream>
#define N 256
#define BLOCKSIZE 16
cudaError_t histogramCuda(int *freq, int *freq2, const int *vals, int bin, float &time, float &shared_time);
// Naive histogram: one thread per input value, each atomically
// incrementing its bin directly in global memory. Assumes the launch
// covers exactly the input length (no bounds guard on id) and that every
// vals[id] is a valid bin index.
__global__ void naivehistKernel(int *freq, const int *vals) {
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
atomicAdd(&freq[vals[id]], 1);
}
// Shared-memory histogram: each block accumulates a private histogram in
// shared memory, then merges it into the global one with one atomicAdd
// per thread. Requires vals[id] < BLOCKSIZE so the shared `temp` index is
// in range.
// NOTE(review): every thread executes atomicAdd(&freq[tid], ...) for
// tid in [0, BLOCKSIZE) — so `freq` must have at least BLOCKSIZE (16)
// entries. The caller in this file allocates only `bin` = 11 ints, which
// makes tid 11..15 out-of-bounds writes; confirm and size the buffer to
// max(bin, BLOCKSIZE).
__global__ void sharedhistKernel(int *freq, const int *vals) {
__shared__ int temp[BLOCKSIZE];
unsigned int tid = threadIdx.x;
temp[tid] = 0;
__syncthreads();
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
atomicAdd(&temp[vals[id]], 1);
__syncthreads();
atomicAdd(&(freq[tid]), temp[tid]);
}
// Builds a fixed 256-value data set (values 0..10), runs both histogram
// kernels via histogramCuda, and prints each histogram with its timing
// and effective bandwidth.
int main() {
const int bin = 10 + 1;
// 256 hard-coded samples, each in [0, 10].
const int vals[N] = {1,9,0,6,10,1,2,4,6,9,9,5,10,10,8,3,
7,7,5,0,4,8,5,10,0,8,9,10,10,5,1,0,
0,3,10,5,4,3,5,1,4,4,1,8,1,6,10,3,
0,2,5,7,10,4,1,6,6,5,4,0,5,0,4,4,
4,1,4,6,8,9,0,0,9,4,10,10,10,1,4,9,
0,1,7,9,7,10,10,0,5,9,1,6,7,0,3,9,
8,5,4,8,4,1,0,6,9,2,1,2,3,6,10,6,
4,9,6,0,2,6,2,6,3,8,6,0,2,2,1,1,
3,10,6,7,4,5,3,8,4,9,5,9,7,9,5,8,
9,6,8,8,7,10,10,7,9,6,3,7,5,3,8,10,
2,5,8,6,9,1,1,2,3,7,7,8,2,2,10,5,
7,3,9,4,1,9,7,7,6,9,3,5,8,8,8,2,
7,2,7,6,1,6,8,7,10,1,2,6,5,6,1,0,
6,8,9,6,1,9,10,4,1,7,1,8,5,0,9,10,
5,6,2,9,6,3,10,0,0,6,1,8,7,0,6,2,
3,10,1,1,10,10,5,6,9,0,2,8,5,5,10,4};
// freq: naive kernel result; freq2: shared-memory kernel result.
int freq[bin] = { 0 };
int freq2[bin] = { 0 };
float time = 0.0f;
float shared_time = 0.0f;
cudaError_t cudaStatus = histogramCuda(freq, freq2, vals, bin, time, shared_time);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "histogramCuda failed!");
return 1;
}
for(int i = 0; i < bin; i++) {
std::cout << std::left;
std::cout << std::setw(5) << i;
std::cout << std::setw(5) << freq[i];
std::cout << std::endl;
}
std::cout << "Histogram GPU Implementation" << std::endl;
std::cout << "Execution Time : " << time / 1000 << " seconds" << std::endl;
std::cout << "Effective Bandwidth : " << (N*sizeof(int)*2) / (time / 1000) << " GB/s" << std::endl;
std::cout << std::endl;
for(int i = 0; i < bin; i++) {
std::cout << std::left;
std::cout << std::setw(5) << i;
std::cout << std::setw(5) << freq2[i];
std::cout << std::endl;
}
std::cout << "Histogram Shared Memory GPU Implementation" << std::endl;
std::cout << "Execution Time : " << shared_time / 1000 << " seconds" << std::endl;
std::cout << "Effective Bandwidth : " << (N*sizeof(int)*2) / (shared_time / 1000) << " GB/s" << std::endl;
std::cout << std::endl;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Host wrapper: copies `vals` to the device, runs and times the naive and
// shared-memory histogram kernels, and copies both results back.
// FIX 1: sharedhistKernel merges with atomicAdd(&freq[tid], ...) for
// every tid in [0, BLOCKSIZE), but dev_freq2 was allocated with only
// `bin` ints (11 < 16) — an out-of-bounds device write. dev_freq2 is now
// allocated and zeroed with max(bin, BLOCKSIZE) ints (the extra slots
// only ever receive zero); only `bin` ints are copied back.
// FIX 2: deprecated cudaThreadSynchronize() replaced with
// cudaDeviceSynchronize().
cudaError_t histogramCuda(int *freq, int *freq2, const int *vals, int bin, float &time, float &shared_time) {
int *dev_vals = 0;
int *dev_freq = 0;
int *dev_freq2 = 0;
float milliseconds = 0.0f;
float milliseconds1 = 0.0f;
// sharedhistKernel writes indices [0, BLOCKSIZE); size for the larger.
int freq2Bins = (bin > BLOCKSIZE) ? bin : BLOCKSIZE;
cudaError_t cudaStatus;
dim3 dimBlock(BLOCKSIZE);
dim3 dimGrid(N/BLOCKSIZE);
cudaEvent_t start, stop;
cudaEvent_t start1, stop1;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_freq, bin * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemset(dev_freq, 0, bin * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_freq2, freq2Bins * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemset(dev_freq2, 0, freq2Bins * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_vals, N * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_vals, vals, N * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Time the naive kernel with events.
cudaEventRecord(start);
naivehistKernel<<<dimGrid, dimBlock>>>(dev_freq, dev_vals);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaDeviceSynchronize();
// Time the shared-memory kernel.
cudaEventRecord(start1);
sharedhistKernel<<<dimGrid, dimBlock>>>(dev_freq2, dev_vals);
cudaEventRecord(stop1);
cudaEventSynchronize(stop1);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "histKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching histKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(freq, dev_freq, bin * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Only the first `bin` entries are meaningful; the padding stays zero.
cudaStatus = cudaMemcpy(freq2, dev_freq2, bin * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaEventElapsedTime(&milliseconds, start, stop);
cudaEventElapsedTime(&milliseconds1, start1, stop1);
time = milliseconds;
shared_time = milliseconds1;
Error:
cudaFree(dev_freq);
cudaFree(dev_freq2);
cudaFree(dev_vals);
return cudaStatus;
}
|
24,008 | #include "matrix.cuh"
#define PRINT_MAX 10
#define TRUNC(A) ((A) < PRINT_MAX ? (A) : PRINT_MAX)
// Prints the full matrix, one comma-separated row per line. For a
// GPU-resident matrix the data is first copied to a temporary host buffer.
// NOTE(review): the `if (sz)` branch is empty, so any nonzero `sz`
// argument prints nothing at all — looks like unfinished "print a sz x sz
// corner" support; confirm intent.
// NOTE(review): the cudaMemcpy return value is not checked — a failed
// copy would print uninitialized host data.
void Matrix::print(int sz) const // prints the entire matrix
{
if (sz)
{
}
else
{
// print everything
if (gpu_enabled)
{
// Stage device data into a temporary host buffer before printing.
float* a = new float[dim1*dim2];
cudaMemcpy(a, matrix, sizeof(float)*dim1*dim2, cudaMemcpyDeviceToHost);
for (int i = 0; i < dim1; ++i)
{
for (int j = 0; j < dim2; ++j)
std::cout << a[INDEX(i,j,dim1,dim2)] << ",";
std::cout << "\n";
}
delete[] a;
}
else
{
// CPU-resident: print straight from the member buffer.
for (int i = 0; i < dim1; ++i)
{
for (int j = 0; j < dim2; ++j)
std::cout << matrix[INDEX(i,j,dim1,dim2)] << ",";
std::cout << "\n";
}
}
}
}
// Stream insertion: prints a truncated preview of the matrix (at most
// PRINT_MAX rows/columns, see TRUNC), with a header stating residency and
// dimensions. Column vectors (d2 == 1) are printed on one bracketed line.
// For large GPU matrices only the previewed elements are copied back.
std::ostream& operator<<(std::ostream& os, const Matrix& mat) // sets the cout to the default print size
{
os << "MATRIX " << (mat.is_gpu() ? "(GPU): (" : "(CPU): (");
os << mat.get_dim1() << " x " << mat.get_dim2() << ")\n";
int d1 = mat.get_dim1(), d2 = mat.get_dim2();
if (mat.is_gpu()) // gpu version
{
// copy every line of the matrix
if (mat.get_dim1() * mat.get_dim2() < 4096) // if there are less than 1024 elements, then just copy the cpu version
{
// Small matrix: one bulk device-to-host copy, then print from host.
float *a = new float[d1*d2];
cudaMemcpy(a, mat.get_matrix(), sizeof(float)*d1*d2, cudaMemcpyDeviceToHost);
if (d2 == 1)
{
os << "[";
for (int i = 0; i < TRUNC(d1); ++i)
os << a[i] << ",";
os << "...]\n";
}
else
{
for (int i = 0; i < TRUNC(d1); ++i)
{
for (int j = 0; j < TRUNC(d2); ++j)
os << a[INDEX(i,j,d1,d2)] << ",";
os << "...\n";
}
os << "......\n";
}
delete[] a;
}
else
{
// matrix is too big. too slow to copy the entire thing. we just copy the 100 numbers we want
float a[PRINT_MAX];
if (d2 == 1)
{
cudaMemcpy(a, mat.get_matrix(), sizeof(float)*PRINT_MAX, cudaMemcpyDeviceToHost);
os << "[";
for (int i = 0; i < TRUNC(d1); ++i)
os << a[i] << ",";
os << "...]\n";
}
else
{
for (int i = 0; i < TRUNC(d1); ++i)
{
// copy the memory of only one of the lines (of 10 things)
// NOTE(review): this assumes PRINT_MAX consecutive floats
// starting at INDEX(i,0,d1,d2) are row i's first PRINT_MAX
// elements — only true if INDEX is row-major contiguous;
// confirm against the INDEX macro in matrix.cuh.
cudaMemcpy(a, mat.get_matrix() + INDEX(i,0,d1,d2), sizeof(float)*PRINT_MAX, cudaMemcpyDeviceToHost);
// print the stuff
for (int j = 0; j < TRUNC(d2); ++j)
os << a[j] << ",";
os << "...\n";
}
os << "......\n";
}
}
}
else // cpu version
{
float *a = mat.get_matrix();
if (d2 == 1) // if it's just a column matrix, we can save some space on the terminal by putting it as a row. We will use square brackets to denote this
{
os << "[";
for (int i = 0; i < TRUNC(d1); ++i)
os << a[i] << ",";
os << "...]\n";
}
else
{
for (int i = 0; i < TRUNC(d1); ++i)
{
for (int j = 0; j < TRUNC(d2); ++j)
{
os << a[INDEX(i, j, d1, d2)] << ",";
}
os << "...\n";
}
os << "......\n";
}
}
return os;
}
|
24,009 | #include <stdlib.h>
#include <string.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>
// Vector add with manual unrolling: each thread sums `fatorUnroll`
// elements of its block's tile, spaced blockDim.x apart. The grid must
// satisfy gridDim.x * blockDim.x * fatorUnroll == array length (there is
// no length parameter to guard against a partial tail tile).
// FIX: the original computed `index = idx + fatorUnroll` on every
// iteration — the loop variable was never used, so the same element was
// summed fatorUnroll times and the rest of the tile was never written.
__global__ void sumArraysOnGpu(float *A, float *B, float *C, int fatorUnroll) {
unsigned int idx = blockIdx.x * blockDim.x * fatorUnroll + threadIdx.x;
for (int i = 0; i < fatorUnroll; i++) {
unsigned int index = idx + i * blockDim.x;
C[index] = A[index] + B[index];
}
}
// Fills ip[0..size-1] with pseudo-random floats in [0.0, 25.5]
// (rand() & 0xFF yields 0..255, divided by 10).
// FIX: the original seeded with `time(&t) - ip[0]`, reading ip[0] before
// the buffer was ever written — an uninitialized-memory read (UB). The
// seed is now derived from the time alone.
void initialData(float *ip, int size){
// generate different seed for random number
time_t t;
srand((unsigned int) time (&t));
for (int i=0; i<size; i++){
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
// Fills input[] with a ramp: element i gets the value i plus a small
// constant offset derived from the buffer size.
void linearData(float *input, int size) {
    int i = 0;
    while (i < size) {
        input[i] = i + (size / (1024 * 1e3));
        ++i;
    }
}
// Driver: allocates 2^expoente floats per array, fills them, and runs the
// unrolled vector-add kernel with the requested thread count and unroll
// factor.
// FIX 1: missing argc check — argv[1..3] were read unconditionally.
// FIX 2: the original computed nBytes = (2 << (expoente+1)) / sizeof(float),
// conflating elements with bytes (nElem came out as 2^expoente / 4,
// contradicting the "2^X = tamanho" comment). Now nElem = 2^expoente and
// nBytes = nElem * sizeof(float).
int main(int argc, char **argv){
if (argc < 4) {
fprintf(stderr, "uso: %s <expoente> <threads> <fatorUnroll>\n", argv[0]);
return 1;
}
int expoente = atoi(argv[1]); // Primeiro argumento é o expoente onde 2^X = tamanho do elemento
int threads = atoi(argv[2]); // Segundo argumento é o numero de threads
int fatorUnroll = atoi(argv[3]); // Terceiro argumento é o fator de unroll
int nElem = 1 << expoente;
size_t nBytes = (size_t)nElem * sizeof(float);
float *h_A, *h_B, *h_C;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
h_C = (float *)malloc(nBytes);
initialData(h_A, nElem);
linearData(h_B, nElem);
printf("Quantidade de elementos: %d \n Quantidade de MB: %lu MB\n\n", nElem, (nBytes / (1024*1024)));
float *d_A, *d_B, *d_C;
cudaMalloc((float**)&d_A, nBytes);
cudaMalloc((float**)&d_B, nBytes);
cudaMalloc((float**)&d_C, nBytes);
// Use cudaMemcpy to transfer the data from the host memory to the GPU global memory with the
// parameter cudaMemcpyHostToDevice specifying the transfer direction.
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
// Each thread handles fatorUnroll elements, hence the reduced grid.
sumArraysOnGpu<<<(nElem / fatorUnroll) / threads, threads>>>(d_A, d_B, d_C, fatorUnroll);
cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost);
free(h_A);
free(h_B);
free(h_C);
// use cudaFree to release the memory used on the GPU
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaDeviceReset();
return (0);
}
|
24,010 | // Date March 28 2029
//Programer: Hemanta Bhattarai
// Progarm : To add two arrays and compare computation time in host and device
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h> //for random numbers
#include <time.h>
#include <sys/time.h>
// Wrap every CUDA API call in gpuErrchk(...) so failures are reported
// with the file and line of the call site.
#define gpuErrchk(ans){ gpuAssert((ans),__FILE__, __LINE__);}
// Prints a CUDA error (with source location) to stderr; when `abort` is
// true (the default) the process exits with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if(code != cudaSuccess)
{
fprintf(stderr, "GPUassert : %s %s %d\n", cudaGetErrorString(code), file, line);
if(abort) exit(code);
}
}
// device kernal
__global__ void vecAdd(float *A, float *B, float *C, float *D, int array_size)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
while(i < array_size)
{
D[i] = A[i] + B[i] + C[i];
i += blockDim.x * gridDim.x; // each iteration will move the block-grid to access other element in matrix
}
}
// Adds three random arrays on both host and device and compares timings
// and results.
// FIX 1: the array size was read into a float with "%f" and the byte
// count kept as a float — both are now integral types.
// FIX 2: the byte count used sizeof(int) for float arrays (same size on
// common platforms, but wrong type) — now sizeof(float).
// FIX 3: hD was copied host->device before the kernel even though it was
// never initialized (an uninitialized read); the copy is removed because
// the kernel fully overwrites dD.
int main()
{
// host function definition
float get_random();
//variable definition
float *hA, *hB, *hC, *hD, *dA, *dB, *dC;
float *dD, *hE;
int size_of_array;
//define size of array
printf("Enter the size of array");
scanf("%d",&size_of_array);
size_t size = sizeof(float) * size_of_array;
//memory allocation in host
hA = (float*)malloc(size);
hB = (float*)malloc(size);
hC = (float*)malloc(size);
hD = (float*)malloc(size);
hE = (float*)malloc(size);
//memory allocation in device
gpuErrchk(cudaMalloc((void**)&dA,size));
gpuErrchk(cudaMalloc((void**)&dB,size));
gpuErrchk(cudaMalloc((void**)&dC,size));
gpuErrchk(cudaMalloc((void**)&dD,size));
//array initilization
for(int i=0; i<size_of_array; ++i) hA[i] = get_random();
for(int i=0; i<size_of_array; ++i) hB[i] = get_random();
for(int i=0; i<size_of_array; ++i) hC[i] = get_random();
clock_t host_begin, host_end;
//record begin of host computation
host_begin = clock();
//add vectors in host
for(int i=0; i<size_of_array; ++i) hE[i] = hA[i] + hB[i] + hC[i];
//record end of host computation
host_end = clock();
clock_t device_begin, device_end;
//record of device computation (includes the host-to-device transfers)
device_begin = clock();
//copy host data to device memory
gpuErrchk(cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(dC, hC, size, cudaMemcpyHostToDevice));
// add array in device (dD is fully written by the kernel)
vecAdd<<<128,128>>>(dA, dB, dC, dD, size_of_array);
gpuErrchk(cudaDeviceSynchronize());
//record end of device computation
device_end = clock();
//copy data from device to host
gpuErrchk(cudaMemcpy(hD, dD, size, cudaMemcpyDeviceToHost));
double host_time, device_time;
host_time = (double)((double)(host_end - host_begin)/(CLOCKS_PER_SEC));
device_time = (double)((double)(device_end - device_begin)/(CLOCKS_PER_SEC));
//print the time of host and device computation
printf("Host computation time: %f\n",host_time);
printf("Device computation time: %f\n",device_time);
//display the devation of device and host result
float sum = 0;
for(int i=0; i< size_of_array; ++i)
{
sum += hE[i] - hD[i];
}
printf("The deviation of host and device result is %f\n",sum);
//free host memory
free(hA);
free(hB);
free(hC);
free(hD);
free(hE);
//free device memory
gpuErrchk(cudaFree(dA));
gpuErrchk(cudaFree(dB));
gpuErrchk(cudaFree(dC));
gpuErrchk(cudaFree(dD));
}
//random number generator
// Pseudo-random integer in [1, 100], returned as a float.
float get_random()
{
    int r = rand() % 100;
    return (float)(r + 1);
}
|
24,011 | #include "includes.h"
// Phase 1 of a hierarchical (three-phase) inclusive scan. Each block
// scans one SECTION_SIZE-element section of X into Y and writes the
// section total into S[blockIdx.x] for the later cross-block pass.
// Relies on compile-time constants from includes.h: SECTION_SIZE,
// BLOCK_DIM, SUBSECTION_SIZE (= SECTION_SIZE / BLOCK_DIM), INPUT_SIZE.
// NOTE(review): every __syncthreads() below sits inside
// `if (i < INPUT_SIZE)` — if INPUT_SIZE is not a multiple of
// SECTION_SIZE, threads of the last block diverge around the barriers,
// which is undefined behavior. Likewise the section loads X[i + j] assume
// full sections. Confirm INPUT_SIZE % SECTION_SIZE == 0 at the call site.
__global__ void hierarchical_scan_kernel_phase1(int *X, int *Y, int *S) {
__shared__ int XY[SECTION_SIZE];
__shared__ int AUS[BLOCK_DIM];
int tx = threadIdx.x, bx = blockIdx.x;
int i = bx * SECTION_SIZE + tx;
if (i < INPUT_SIZE) {
// collaborative load in a coalesced manner
for (int j = 0; j < SECTION_SIZE; j+=BLOCK_DIM) {
XY[tx + j] = X[i + j];
}
__syncthreads();
// PHASE 1: scan inner own subsection
// At the end of this phase the last element of each subsection contains the sum of all alements in own subsection
for (int j = 1; j < SUBSECTION_SIZE; j++) {
XY[tx * (SUBSECTION_SIZE) + j] += XY[tx * (SUBSECTION_SIZE)+j - 1];
}
__syncthreads();
// PHASE 2: perform iterative kogge_stone_scan of the last elements of each subsections of XY loaded first in AUS
AUS[tx] = XY[tx * (SUBSECTION_SIZE)+(SUBSECTION_SIZE)-1];
int in;
for (unsigned int stride = 1; stride < BLOCK_DIM; stride *= 2) {
__syncthreads();
// Read and write are separated by a barrier to avoid a race on AUS.
if (tx >= stride) {
in = AUS[tx - stride];
}
__syncthreads();
if (tx >= stride) {
AUS[tx] += in;
}
}
__syncthreads();
// PHASE 3: each thread adds to its elements the new value of the last element of its predecessor's section
if (tx > 0) {
for (unsigned int stride = 0; stride < (SUBSECTION_SIZE); stride++) {
XY[tx * (SUBSECTION_SIZE)+stride] += AUS[tx - 1]; // <--
}
}
__syncthreads();
// store the result into output vector
for (int j = 0; j < SECTION_SIZE; j += BLOCK_DIM) {
Y[i + j] = XY[tx + j];
}
//The last thread in the block writes the output value of the last element in the scan block to the blockIdx.x position of S
if (tx == BLOCK_DIM - 1) {
S[bx] = XY[SECTION_SIZE - 1];
}
}
} |
24,012 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define MULTIPLICATIONS 4096
/**
* Multiply square matrix (n x n) by the vector of size n.
*
*
* @param mat Input matrix.
* @param vec Input vector.
* @param out Output vector.
* @param n Dimension.
*/
/* One thread per output element: thread `tid` computes out[tid] as the dot
 * product of column `tid` of `mat` with `vec` — repeated MULTIPLICATIONS
 * times into a single running sum (a benchmark workload, so the result is
 * MULTIPLICATIONS times the dot product). The mat[i*n+tid] access pattern
 * keeps adjacent threads on adjacent addresses (coalesced). */
__global__ void matrix_vector_multiplication(float *mat, float *vec, float *out, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;   // tail threads of the last block do nothing
    float acc = 0;
    for (int rep = 0; rep < MULTIPLICATIONS; rep++) {
        for (int row = 0; row < n; row++) {
            acc += mat[row * n + tid] * vec[row];
        }
    }
    out[tid] = acc;
}
/* Benchmark driver: fills an n x n matrix and an n-vector with 0.05, runs the
 * matrix-vector kernel, reports the elapsed time, and prints out[0].
 * BUG FIX: the original freed d_vec twice and never freed d_out; it also
 * leaked the two CUDA events. */
int main(int argc, char const *argv[])
{
    if (argc != 2) {
        fprintf(stderr, "usage: %s <n>\n", argv[0]);
        exit(1);
    }
    int n = atoi(argv[1]);
    if (n < 0) {
        fprintf(stderr, "Invalid size\n");
        exit(1);
    }
    // Host & device inputs and outputs
    float *h_mat, *h_vec, *h_out;
    float *d_mat, *d_vec, *d_out;
    // Allocate host memory
    h_mat = (float*) malloc(sizeof(float) * n * n);
    h_vec = (float*) malloc(sizeof(float) * n);
    h_out = (float*) malloc(sizeof(float) * n);
    // Allocate device memory
    cudaMalloc((void**) &d_mat, sizeof(float) * n * n);
    cudaMalloc((void**) &d_vec, sizeof(float) * n);
    cudaMalloc((void**) &d_out, sizeof(float) * n);
    // Initialize host matrix
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            h_mat[i * n + j] = 0.05;
    // Initialize host vector
    for (int i = 0; i < n; i++)
        h_vec[i] = 0.05;
    // Transfer data from host to device memory
    cudaMemcpy(d_mat, h_mat, sizeof(float) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_vec, h_vec, sizeof(float) * n, cudaMemcpyHostToDevice);
    cudaEvent_t start, finish;
    float elapsed_milliseconds;
    // Use events to time the kernel (millisecond resolution)
    cudaEventCreate(&start);
    cudaEventCreate(&finish);
    cudaEventRecord(start, 0);
    matrix_vector_multiplication<<<n/THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(d_mat, d_vec, d_out, n);
    cudaEventRecord(finish, 0);
    cudaEventSynchronize(finish); // Wait for stop event to complete
    cudaEventElapsedTime(&elapsed_milliseconds, start, finish);
    // Transfer data back to host memory
    cudaMemcpy(h_out, d_out, sizeof(float) * n, cudaMemcpyDeviceToHost);
    printf("%f\n", h_out[0]);
    printf("GPU Elapsed time = %.2fs\n", elapsed_milliseconds/1000.0);
    // Deallocate device memory (the original freed d_vec twice and leaked d_out)
    cudaFree(d_mat);
    cudaFree(d_vec);
    cudaFree(d_out);
    cudaEventDestroy(start);
    cudaEventDestroy(finish);
    // Deallocate host memory
    free(h_mat);
    free(h_vec);
    free(h_out);
    return 0;
}
24,013 | #include "includes.h"
/* Flag validity of nearest-neighbour slots:
 * d_nn[i] = 0 when nearest_neighbour_indexes[i] is negative (no neighbour),
 * d_nn[i] = 1 otherwise. One thread per slot. */
__global__ void kernel_fill_nn_cuda(unsigned int *d_nn, int *nearest_neighbour_indexes, unsigned int number_nearest_neighbour_indexes)
{
    int ind = blockIdx.x * blockDim.x + threadIdx.x;
    if (!(ind < number_nearest_neighbour_indexes))
        return;   // out-of-range tail threads
    d_nn[ind] = (nearest_neighbour_indexes[ind] < 0) ? 0u : 1u;
}
24,014 | #include <stdio.h>
/* Each thread stores its own index into data[threadIdx.x]; threads whose
 * index is >= numData (over-provisioned blocks) are inert. Single-block use. */
__global__ void doGPUWork(int numData, int *data) {
    unsigned int slot = threadIdx.x;
    if (slot < numData) {
        data[slot] = slot;
    }
}
/* Query the device count into *numDevices, run doGPUWork on `numData`
 * elements, copy the result back, and print it (expected output: 0, 1).
 * BUG FIX: the original declared dev_data as a host array and passed its
 * address to cudaMalloc, and sized both cudaMalloc and cudaMemcpy in
 * elements (numData) instead of bytes — the copy-back was truncated and the
 * printed values were garbage. It also leaked the device buffer. */
void sayHello(int *numDevices) {
    int numData = 2;
    int data[numData];
    int *dev_data = NULL;   // device pointer, not a host array
    int i;
    cudaGetDeviceCount(numDevices);
    cudaMalloc((void**)&dev_data, numData * sizeof(int));
    doGPUWork<<<1, numData>>>(numData, dev_data);
    cudaMemcpy(data, dev_data, numData * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_data);
    for (i = 0; i < numData; i++) {
        printf("%d\n", data[i]);
    }
}
|
24,015 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>
#include <iostream>
#include <string>
#include <vector>
#include <fstream>
#include <sstream>
#include<random>
using namespace std;
#define X_trn(x, y) X_trn[x * size_train + y] // 196 * 964
#define X_tst(x, y) X_tst[x * size_test + y] // 196 * 414
#define Y_trn(x, y) Y_trn[x * size_train + y] // 1 * 964
#define Y_tst(x, y) Y_tst[x * size_test + y] // 1 * 414
#define X(x, y) X[x * size_batch + y] // 196 * 964
#define Y(x, y) Y[x * size_batch + y] // 1 * 414
#define W1(x, y) W1[x * size_input + y] // 20 * 196
#define b1(x, y) b1[x * 1 + y] // 20 * 1
#define W2(x, y) W2[x * size_hidden + y] // 2 * 20
#define b2(x, y) b2[x * 1 + y] // 2 * 1
#define dW1(x, y) dW1[x * size_input + y] // 20 * 196
#define db1(x, y) db1[x * 1 + y] // 20 * 1
#define dW2(x, y) dW2[x * size_hidden + y] // 2 * 20
#define db2(x, y) db2[x * 1 + y] // 2 * 1
#define Z1(x, y) Z1[x * size_batch + y] // 20 * 964
#define A1(x, y) A1[x * size_batch + y] // 20 * 964
#define Z2(x, y) Z2[x * size_batch + y] // 2 * 964
#define A2(x, y) A2[x * size_batch + y] // 2 * 964
#define dZ1(x, y) dZ1[x * size_batch + y] // 20 * 964
#define dA1(x, y) dA1[x * size_batch + y] // 20 * 964
#define dZ2(x, y) dZ2[x * size_batch + y] // 2 * 964
#define dA2(x, y) dA2[x * size_batch + y] // 2 * 964
#define max_index(x, y) max_index[y] // 1 * 964
#define dev_X_trn(x, y) dev_X_trn[x * size_train + y] // 196 * 964
#define dev_X_tst(x, y) dev_X_tst[x * size_test + y] // 196 * 414
#define dev_Y_trn(x, y) dev_Y_trn[x * size_train + y] // 1 * 964
#define dev_Y_tst(x, y) dev_Y_tst[x * size_test + y] // 1 * 414
#define dev_X(x, y) dev_X[x * size_batch + y] // 196 * 964
#define dev_Y(x, y) dev_Y[x * size_batch + y] // 1 * 414
#define dev_W1(x, y) dev_W1[x * size_input + y] // 20 * 196
#define dev_b1(x, y) dev_b1[x * 1 + y] // 20 * 1
#define dev_W2(x, y) dev_W2[x * size_hidden + y] // 2 * 20
#define dev_b2(x, y) dev_b2[x * 1 + y] // 2 * 1
#define dev_dW1(x, y) dev_dW1[x * size_input + y] // 20 * 196
#define dev_db1(x, y) dev_db1[x * 1 + y] // 20 * 1
#define dev_dW2(x, y) dev_dW2[x * size_hidden + y] // 2 * 20
#define dev_db2(x, y) dev_db2[x * 1 + y] // 2 * 1
#define dev_Z1(x, y) dev_Z1[x * size_batch + y] // 20 * 964
#define dev_A1(x, y) dev_A1[x * size_batch + y] // 20 * 964
#define dev_Z2(x, y) dev_Z2[x * size_batch + y] // 2 * 964
#define dev_A2(x, y) dev_A2[x * size_batch + y] // 2 * 964
#define dev_dZ1(x, y) dev_dZ1[x * size_batch + y] // 20 * 964
#define dev_dA1(x, y) dev_dA1[x * size_batch + y] // 20 * 964
#define dev_dZ2(x, y) dev_dZ2[x * size_batch + y] // 2 * 964
#define dev_dA2(x, y) dev_dA2[x * size_batch + y] // 2 * 964
#define dev_max_index(x, y) dev_max_index[y] // 1 * 964
#define size_train 964
#define size_test 414
#define size_input 196
#define size_hidden 20
#define size_output 2
#define size_X size_input*size_batch
#define size_Y size_batch
#define size_W1 size_hidden*size_input
#define size_b1 size_hidden*1
#define size_W2 size_output*size_hidden
#define size_b2 size_output*1
#define size_dW1 size_hidden*size_input
#define size_db1 size_hidden*1
#define size_dW2 size_output*size_hidden
#define size_db2 size_output*1
#define size_Z1 size_hidden*size_batch
#define size_A1 size_hidden*size_batch
#define size_Z2 size_output*size_batch
#define size_A2 size_output*size_batch
#define size_dZ1 size_hidden*size_batch
#define size_dA1 size_hidden*size_batch
#define size_dZ2 size_output*size_batch
#define size_dA2 size_output*size_batch
#define size_max_index 1*size_batch
#define size_dev_max_index 1*size_batch
int size_batch = 0;
int *Y_trn, *Y_tst, *max_index, *dev_Y, *dev_max_index;
double *X_trn, *X_tst, *X, *W1, *b1, *W2, *b2, *dW1, *db1, *dW2, *db2, *Z1, *A1, *Z2, *A2, *dZ1, *dA1, *dZ2, *dA2;
double *dev_X, *dev_W1, *dev_b1, *dev_W2, *dev_b2, *dev_dW1, *dev_db1, *dev_dW2, *dev_db2, *dev_Z1, *dev_A1, *dev_Z2, *dev_A2, *dev_dZ1, *dev_dA1, *dev_dZ2, *dev_dA2;
/* Read a CSV file of doubles into `array`, flattening values in file order
 * (row by row, comma-separated). No bounds checking — the caller must size
 * `array` to hold every value in the file. */
void read_X(string data_path, double* array)
{
    ifstream inFile(data_path);
    string row, cell;
    int out = 0;
    while (getline(inFile, row)) {
        stringstream fields(row);
        while (getline(fields, cell, ',')) {
            array[out++] = stod(cell);
        }
    }
}
/* Read a CSV file of integer labels into `array`, flattening values in file
 * order. No bounds checking — the caller sizes `array`.
 * BUG FIX: the original parsed each label with stod() and let the double be
 * silently narrowed to int; labels are integers, so parse with stoi(). */
void read_Y(std::string data_path, int* array)
{
    std::ifstream inFile(data_path);
    std::string row;
    int p = 0;
    std::string value;
    while (std::getline(inFile, row)) {
        std::stringstream col(row);
        while (std::getline(col, value, ',')) {
            array[p] = std::stoi(value);
            p++;
        }
    }
}
/* Allocate the global train/test arrays and load them from the four CSV
 * files in the working directory.
 * Shapes: X_trn 196x964, Y_trn 1x964, X_tst 196x414, Y_tst 1x414.
 * BUG FIX: Y_tst was allocated with size_train (964) instead of
 * size_test (414) — a harmless over-allocation, but inconsistent with the
 * documented 1x414 shape. */
void read_data()
{
    X_trn = (double *) malloc(size_input*size_train * sizeof(double)); // 196*964
    Y_trn = (int *) malloc(size_train * sizeof(int)); // 1*964
    X_tst = (double *) malloc(size_input*size_test * sizeof(double)); // 196*414
    Y_tst = (int *) malloc(size_test * sizeof(int)); // 1*414
    string X_trn_path = "X_trn.csv"; // CSV file names (fixed)
    string Y_trn_path = "Y_trn.csv";
    string X_tst_path = "X_tst.csv";
    string Y_tst_path = "Y_tst.csv";
    read_X(X_trn_path, X_trn);
    read_Y(Y_trn_path, Y_trn);
    read_X(X_tst_path, X_tst);
    read_Y(Y_tst_path, Y_tst);
}
/* Allocate host weight/bias buffers (and their gradients) and initialise:
 * weights ~ Uniform(-1, 1) drawn from a default-seeded engine — W1 fully
 * first, then W2, so the draw order (and thus the run) is reproducible —
 * and biases zeroed. */
void initialize_Wb() {
    W1  = (double *) malloc(size_W1  * sizeof(double)); // 20*196
    b1  = (double *) malloc(size_b1  * sizeof(double)); // 20*1
    W2  = (double *) malloc(size_W2  * sizeof(double)); // 2*20
    b2  = (double *) malloc(size_b2  * sizeof(double)); // 2*1
    dW1 = (double *) malloc(size_dW1 * sizeof(double)); // 20*196
    db1 = (double *) malloc(size_db1 * sizeof(double)); // 20*1
    dW2 = (double *) malloc(size_dW2 * sizeof(double)); // 2*20
    db2 = (double *) malloc(size_db2 * sizeof(double)); // 2*1
    default_random_engine engine;
    uniform_real_distribution<double> dist(-1, 1);
    for (int i = 0; i < size_W1; i++)
        W1[i] = dist(engine);
    for (int i = 0; i < size_W2; i++)
        W2[i] = dist(engine);
    for (int i = 0; i < size_b1; i++)
        b1[i] = 0;
    for (int i = 0; i < size_b2; i++)
        b2[i] = 0;
}
/* Allocate the host-side activation/gradient buffers for one batch.
 * IMPORTANT: size_Z1, size_A1, ... are file-level macros that expand to
 * `size_hidden*size_batch` etc., so `size_batch` inside this function
 * resolves to THIS parameter (which shadows the global of the same name).
 * Renaming the parameter would silently change every allocation size.
 * NOTE(review): called on every forward() pass without freeing the previous
 * buffers — repeated calls leak host memory; confirm intent. */
void initialize_ZA(int size_batch)
{
    Z1 = (double *) malloc(size_Z1*sizeof(double)); // 20*964
    A1 = (double *) malloc(size_A1*sizeof(double)); // 20*964
    Z2 = (double *) malloc(size_Z2*sizeof(double)); // 2*964
    A2 = (double *) malloc(size_A2*sizeof(double)); // 2*964
    dZ1 = (double *) malloc(size_dZ1*sizeof(double)); // 20*964
    dA1 = (double *) malloc(size_dA1*sizeof(double)); // 20*964
    dZ2 = (double *) malloc(size_dZ2*sizeof(double)); // 2*964
    dA2 = (double *) malloc(size_dA2*sizeof(double)); // 2*964
    max_index = (int *) malloc(size_max_index*sizeof(int)); // 1*964
}
/* Allocate the device-side batch buffers (inputs, labels, activations,
 * predicted-class indexes).
 * IMPORTANT: size_X, size_Z1, ... are macros expanding via `size_batch`,
 * which binds to THIS parameter (shadowing the global) — do not rename it. */
void initialize_dev_ZA(int size_batch)
{
    cudaMalloc((void**)&dev_X, size_X * sizeof(double));
    cudaMalloc((void**)&dev_Y, size_Y * sizeof(int));
    cudaMalloc((void**)&dev_max_index, size_dev_max_index * sizeof(int));
    cudaMalloc((void**)&dev_Z1, size_Z1 * sizeof(double));
    cudaMalloc((void**)&dev_A1, size_A1 * sizeof(double));
    cudaMalloc((void**)&dev_Z2, size_Z2 * sizeof(double));
    cudaMalloc((void**)&dev_A2, size_A2 * sizeof(double));
}
/* Release every device buffer allocated by initialize_dev_ZA().
 * The frees are independent, so the order is immaterial. */
void free_dev_ZA()
{
    cudaFree(dev_Z1);
    cudaFree(dev_A1);
    cudaFree(dev_Z2);
    cudaFree(dev_A2);
    cudaFree(dev_max_index);
    cudaFree(dev_Y);
    cudaFree(dev_X);
}
/* Allocate the device weight/bias buffers and their gradients.
 * Sizes are fixed compile-time macros (no size_batch dependence here). */
void initialize_dev_Wb()
{
    // weights and biases
    cudaMalloc(&dev_W1, size_W1 * sizeof(double));
    cudaMalloc(&dev_b1, size_b1 * sizeof(double));
    cudaMalloc(&dev_W2, size_W2 * sizeof(double));
    cudaMalloc(&dev_b2, size_b2 * sizeof(double));
    // their gradients
    cudaMalloc(&dev_dW1, size_dW1 * sizeof(double));
    cudaMalloc(&dev_db1, size_db1 * sizeof(double));
    cudaMalloc(&dev_dW2, size_dW2 * sizeof(double));
    cudaMalloc(&dev_db2, size_db2 * sizeof(double));
}
/* Release the device weight/bias buffers and their gradients. */
void free_dev_Wb()
{
    cudaFree(dev_dW1);
    cudaFree(dev_db1);
    cudaFree(dev_dW2);
    cudaFree(dev_db2);
    cudaFree(dev_W1);
    cudaFree(dev_b1);
    cudaFree(dev_W2);
    cudaFree(dev_b2);
}
/* Allocate device gradient buffers for activations.
 * IMPORTANT: size_dZ1, ... expand via `size_batch`, which binds to THIS
 * parameter (shadowing the global) — do not rename it. */
void initialize_dev_dZA(int size_batch)
{
    cudaMalloc((void**)&dev_dZ1, size_dZ1 * sizeof(double));
    cudaMalloc((void**)&dev_dA1, size_dA1 * sizeof(double));
    cudaMalloc((void**)&dev_dZ2, size_dZ2 * sizeof(double));
    cudaMalloc((void**)&dev_dA2, size_dA2 * sizeof(double));
}
/* Release the device activation-gradient buffers. */
void free_dev_dZA()
{
    cudaFree(dev_dA2);
    cudaFree(dev_dZ2);
    cudaFree(dev_dA1);
    cudaFree(dev_dZ1);
}
/* Hidden layer forward pass with logistic-sigmoid activation:
 * Z1 = W1·X + b1, A1 = 1/(1+exp(-Z1)). 2-D launch: y indexes the hidden
 * row (max_row = size_hidden), x the batch column (max_col = size_batch);
 * K = size_input is the dot-product length.
 * NOTE: dev_W1(i,k), dev_X(k,j), ... are file-level macros that expand
 * using these exact parameter names (and the `size_batch` parameter as the
 * row stride) — renaming any parameter breaks the indexing. */
__global__ void HiddenLayer_Sigmoid(double* dev_X, double* dev_W1, double* dev_b1, double* dev_A1, double* dev_Z1, int K, int size_batch, int max_row, int max_col)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y;
    int j = threadIdx.x + blockIdx.x * blockDim.x;
    if(i >= max_row || j >= max_col)
        return;
    double partial = 0.0;
    // dot product of W1 row i with X column j
    for (int k = 0; k < K; k++)
        partial += dev_W1(i,k) * dev_X(k,j);
    dev_Z1(i,j) = partial + dev_b1(i,0);
    dev_A1(i,j) = 1 / (1 + exp(0 - dev_Z1(i,j)));  // logistic sigmoid
}
/* Hidden layer forward pass with ReLU activation:
 * Z1 = W1·X + b1, A1 = max(Z1, 0). Same launch contract and macro caveat
 * as HiddenLayer_Sigmoid: the dev_*(x,y) indexing macros expand using these
 * exact parameter names — do not rename them. */
__global__ void HiddenLayer_ReLU(double* dev_X, double* dev_W1, double* dev_b1, double* dev_A1, double* dev_Z1, int K, int size_batch, int max_row, int max_col)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y;
    int j = threadIdx.x + blockIdx.x * blockDim.x;
    if(i >= max_row || j >= max_col)
        return;
    double partial = 0.0;
    // dot product of W1 row i with X column j
    for (int k = 0; k < K; k++)
        partial += dev_W1(i,k) * dev_X(k,j);
    dev_Z1(i,j) = partial + dev_b1(i,0);
    dev_A1(i,j) = dev_Z1(i,j) * (dev_Z1(i,j) > 0);  // branchless ReLU
}
/* Output layer pre-activation: Z2 = W2·A1 + b2 (no activation here; Softmax
 * runs as a separate kernel). 2-D launch: y = output row (max_row =
 * size_output), x = batch column; K = size_hidden.
 * Macro caveat as above: dev_W2(i,k) etc. expand using these exact
 * parameter names — do not rename them. */
__global__ void OutputLayer(double* dev_A1, double* dev_W2, double* dev_b2, double* dev_Z2, int K, int size_batch, int max_row, int max_col)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y;
    int j = threadIdx.x + blockIdx.x * blockDim.x;
    if(i >= max_row || j >= max_col)
        return;
    double partial = 0.0;
    for (int k = 0; k < K; k++)
        partial += dev_W2(i,k) * dev_A1(k,j);
    dev_Z2(i,j) = partial + dev_b2(i,0);
}
/* Column-parallel softmax over Z2 plus argmax extraction: one thread per
 * batch column j; rows are walked sequentially (max_row = size_output = 2).
 * Label encoding of the argmax: dev_max_index[j] = 1 when row 0 holds the
 * maximum logit, 0 otherwise — i.e. row 0 appears to correspond to label 1
 * (NOTE(review): inverted-looking, but accuracy() compares it directly to Y;
 * confirm the label convention).
 * NOTE(review): exp() is applied to raw logits without subtracting the max
 * — can overflow for large Z2 values.
 * Macro caveat: dev_Z2(i,j)/dev_A2(i,j) expand using these parameter names
 * and the `size_batch` parameter as row stride. */
__global__ void Softmax(double* dev_Z2, double* dev_A2, int* dev_max_index, int size_batch, int max_row, int max_col)
{
    int j = threadIdx.x + blockIdx.x * blockDim.x;
    if(j >= max_col)
        return;
    double max = dev_Z2(0, j), sum = 0;
    dev_max_index[j] = 1;
    for (int i = 1; i < max_row; i++) {
        if (dev_Z2(i, j) > max){
            max = dev_Z2(i, j);
            dev_max_index[j] = 0;
        }
    }
    for (int i = 0; i < max_row; i++)
        sum += exp(dev_Z2(i, j));
    for (int i = 0; i < max_row; i++)
        dev_A2(i, j) = exp(dev_Z2(i, j)) / sum;
}
/* Gradient of softmax + cross-entropy at the output (2 classes):
 * dZ2 = (A2 - onehot(Y)) / batch. Because A2 row 1 = 1 - A2 row 0 under a
 * 2-way softmax, row 1's gradient is simply the negation of row 0's.
 * Launched with max_row = 1 so only the i == 0 thread row survives the
 * guard; each surviving thread writes BOTH rows of its column j.
 * Macro caveat: dev_A2/dev_Y_trn/dev_dZ2 macros expand using these exact
 * parameter names (dev_Y_trn's stride is the compile-time size_train). */
__global__ void Back_dZ2 (double* dev_A2, int* dev_Y_trn, double* dev_dZ2, int size_batch, int max_row, int max_col)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
    int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
    if(i >= max_row || j >= max_col)
        return;
    dev_dZ2(0, j) = (dev_A2(0, j) - dev_Y_trn(0, j)) / size_batch;
    dev_dZ2(1, j) = (dev_Y_trn(0, j) - dev_A2(0, j)) / size_batch;
}
// Weight gradient: dW = dZ · Aᵀ (both operands stored batch-major, stride
// size_batch, so the k-loop walks one batch column at a time).
// Used for both layers:
//   dW1(20*196) = dZ1(20*964) · X(196*964)ᵀ
//   dW2(2*20)   = dZ2(2*964)  · A1(20*964)ᵀ
// One thread per dW element; W_col is dW's row stride. Raw indexing here
// (no dev_*(x,y) macros), so locals are macro-safe.
__global__ void Back_dW (double* dev_A, double* dev_dZ, double* dev_dW, int size_batch, int W_col, int max_row, int max_col)
{
    int k;
    double tmp = 0.0;
    int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
    int j = threadIdx.x + blockIdx.x * blockDim.x; // j/x -> col
    if(i >= max_row || j >= max_col)
        return;
    for (k = 0; k < size_batch; k++)
        tmp += dev_dZ[i*size_batch+k] * dev_A[j*size_batch+k];
    dev_dW[i*W_col+j] = tmp;
}
// Bias gradient: db[i] = sum over the batch of row i of dZ (one thread per
// row; the column loop is sequential).
//   db1(20*1) from dZ1(20*964), db2(2*1) from dZ2(2*964)
__global__ void Back_db(double* dev_dZ, double* dev_db, int size_batch, int max_row, int max_col)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
    if(i >= max_row)
        return;
    double tmp = 0;
    for(int j = 0; j < max_col; j++) {
        tmp += dev_dZ[i*size_batch+j];   // row stride is size_batch
    }
    dev_db[i*1+0] = tmp;
}
/* Backprop through the output weights: dA1 = W2ᵀ · dZ2.
 * dA1(20*964) from W2(2*20) and dZ2(2*964); K = size_output is the summed
 * dimension. One thread per dA1 element.
 * Macro caveat: dev_W2(k,i)/dev_dZ2(k,j)/dev_dA1(i,j) expand using these
 * exact parameter names and the `size_batch` parameter — do not rename. */
__global__ void Back_dA1 (double* dev_W2, double* dev_dZ2, double* dev_dA1, int size_batch, int K, int max_row, int max_col)
{
    // dA1(20*964) = W2(2*20)^T * dZ2(2*964)
    int k;
    double partial = 0.0;
    int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
    int j = threadIdx.x + blockIdx.x * blockDim.x; // j/x -> col
    if(i >= max_row || j >= max_col)
        return;
    for (k = 0; k < K; k++)
        partial += dev_W2(k,i) * dev_dZ2(k,j);   // note transposed W2 access
    dev_dA1(i,j) = partial;
}
/* Backprop through the sigmoid: dZ1 = dA1 * A1 * (1 - A1), elementwise.
 * One thread per (row, column) element of the hidden activation.
 * Macro caveat: dev_dA1/dev_A1/dev_dZ1 macros expand using these exact
 * parameter names and the `size_batch` parameter. */
__global__ void Back_dZ1_Sigmoid (double* dev_dA1, double* dev_A1, double* dev_Z1, double* dev_dZ1, int size_batch, int max_row, int max_col)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
    int j = threadIdx.x + blockIdx.x * blockDim.x;
    if(i >= max_row || j >= max_col)
        return;
    dev_dZ1(i, j) = dev_dA1(i, j) * dev_A1(i, j) * (1-dev_A1(i, j)); // dZ1 = dA1*A1*(1-A1)
}
/* Backprop through the ReLU: dZ1 = dA1 where Z1 >= 0, else 0 (elementwise
 * mask on the pre-activation). One thread per element.
 * Macro caveat: the dev_*(x,y) macros expand using these exact parameter
 * names and the `size_batch` parameter. */
__global__ void Back_dZ1_ReLU (double* dev_dA1, double* dev_A1, double* dev_Z1, double* dev_dZ1, int size_batch, int max_row, int max_col)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
    int j = threadIdx.x + blockIdx.x * blockDim.x;
    if(i >= max_row || j >= max_col)
        return;
    if(dev_Z1(i, j) < 0)
        dev_dZ1(i, j) = 0;
    else
        dev_dZ1(i, j) = dev_dA1(i, j); // dZ1 = dA1 * (Z1 >= 0)
}
/* Vanilla SGD update, elementwise: Wb -= learn_rate * dWb.
 * Works for any parameter matrix; `col` is its row stride. Raw indexing
 * (no dev_*(x,y) macros), one thread per element. */
__global__ void update_Wb(double* dev_dWb, double* dev_Wb, int col, double learn_rate, int max_row, int max_col)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
    int j = threadIdx.x + blockIdx.x * blockDim.x;
    if(i >= max_row || j >= max_col)
        return;
    dev_Wb[i*col+j] = dev_Wb[i*col+j] - learn_rate * dev_dWb[i*col+j];
}
/* Forward pass over one full batch (the whole train or test set):
 * X -> (W1, b1, activation) -> A1 -> (W2, b2) -> Z2 -> softmax -> A2 and
 * predicted-class indexes in max_index.
 * Side effects: sets the GLOBAL size_batch (which the size_* macros below
 * expand through), (re)allocates host and device batch buffers, and copies
 * results back to the host arrays Z1/A1/Z2/A2/max_index.
 * acti_type: 1 = Sigmoid, 2 = ReLU.
 * NOTE(review): initialize_ZA() is called every invocation without freeing
 * the previous host buffers — 20000 training epochs leak accordingly. */
void forward(double* X, int* Y, string type, int acti_type, int block_size){
    if(type == "train"){
        size_batch = size_train;
    }
    else{
        size_batch = size_test;
    }
    // init Z and A in the host
    initialize_ZA(size_batch);
    // init X Y W b Z A in the device
    initialize_dev_ZA(size_batch);
    dim3 dimBlock(block_size, block_size);
    // hidden layer and activation function to get Z1 and A1
    dim3 dimGrid1((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden+ dimBlock.y - 1)/ dimBlock.y);
    cudaMemcpy(dev_W1, W1, size_W1 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b1, b1, size_b1 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_X, X, size_X * sizeof(double), cudaMemcpyHostToDevice);
    if(acti_type == 1)
        HiddenLayer_Sigmoid<<<dimGrid1, dimBlock>>>(dev_X, dev_W1, dev_b1, dev_A1, dev_Z1, size_input, size_batch, size_hidden, size_batch);
    else if(acti_type == 2)
        HiddenLayer_ReLU<<<dimGrid1, dimBlock>>>(dev_X, dev_W1, dev_b1, dev_A1, dev_Z1, size_input, size_batch, size_hidden, size_batch);
    cudaMemcpy(Z1, dev_Z1, size_Z1 * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(A1, dev_A1, size_A1 * sizeof(double), cudaMemcpyDeviceToHost);
    // output layer to get Z2
    dim3 dimGrid2((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
    cudaMemcpy(dev_W2, W2, size_W2 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b2, b2, size_b2 * sizeof(double), cudaMemcpyHostToDevice);
    OutputLayer<<<dimGrid2, dimBlock>>>(dev_A1, dev_W2, dev_b2, dev_Z2, size_hidden, size_batch, size_output, size_batch);
    cudaMemcpy(Z2, dev_Z2, size_Z2 * sizeof(double), cudaMemcpyDeviceToHost);
    // softmax layer to get A2 and max_index (kernel only uses the x dimension)
    dim3 dimGrid3((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
    Softmax<<<dimGrid3, dimBlock>>>(dev_Z2, dev_A2, dev_max_index, size_batch, size_output, size_batch);
    cudaMemcpy(A2, dev_A2, size_A2 * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(max_index, dev_max_index, size_max_index * sizeof(int), cudaMemcpyDeviceToHost);
    free_dev_ZA();
}
/* Mean binary cross-entropy over `col` batch columns, treating row 0 of A2
 * as p(label==1) (matching the Softmax kernel's encoding).
 * NOTE: the Y(x,c)/A2(x,c) macros textually capture these parameter names
 * and use the GLOBAL size_batch as row stride — the function is only
 * correct when col == size_batch at call time. Do not rename parameters. */
double cross_entropy_loss(int* Y, double* A2, int col)
{
    double loss = 0;
    for(int c = 0; c < col; c++) {
        loss += -log(A2(0, c)) * Y(0, c) - log(A2(1, c)) * (1-Y(0, c));
    }
    return loss/col;
}
/* Backward pass over the training batch: computes dZ2 -> dW2/db2 -> dA1 ->
 * dZ1 -> dW1/db1 and copies each gradient back to host.
 * Relies on the host arrays A1/A2/Z1 produced by a preceding forward() call.
 * acti_type: 1 = Sigmoid, 2 = ReLU (chooses the dZ1 kernel).
 * NOTE(review): the host<->device round trips between every kernel are
 * redundant (results are already on the device) but preserved here. */
void backprop(double* X, int* Y, int acti_type, int block_size) { // type = 1 is Sigmoid
    size_batch = size_train;     // size_* macros below expand through this global
    initialize_dev_ZA(size_batch);
    dim3 dimBlock(block_size, block_size);
    // get dZ2 (max_row = 1: only one thread row per column does the work)
    dim3 dimGrid1((size_batch + dimBlock.x - 1)/ dimBlock.x, (1 + dimBlock.y - 1)/ dimBlock.y);
    cudaMemcpy(dev_A2, A2, size_A2 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_Y, Y, size_Y * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_dZ2, dZ2, size_dZ2 * sizeof(double), cudaMemcpyHostToDevice);
    Back_dZ2<<<dimGrid1, dimBlock>>>(dev_A2, dev_Y, dev_dZ2, size_batch, 1, size_batch);
    cudaMemcpy(dZ2, dev_dZ2, size_dZ2 * sizeof(double), cudaMemcpyDeviceToHost);
    // get dW2 = dZ2 · A1^T
    dim3 dimGrid2((size_hidden + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
    cudaMemcpy(dev_A1, A1, size_A1 * sizeof(double), cudaMemcpyHostToDevice);
    Back_dW<<<dimGrid2, dimBlock>>>(dev_A1, dev_dZ2, dev_dW2, size_batch, size_hidden, size_output, size_hidden);
    cudaMemcpy(dW2, dev_dW2, size_dW2 * sizeof(double), cudaMemcpyDeviceToHost);
    // get db2 = row sums of dZ2
    dim3 dimGrid3((1 + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
    Back_db<<<dimGrid3, dimBlock>>>(dev_dZ2, dev_db2, size_batch, size_output, size_batch);
    cudaMemcpy(db2, dev_db2, size_db2 * sizeof(double), cudaMemcpyDeviceToHost);
    // get dA1 = W2^T · dZ2
    dim3 dimGrid4((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
    cudaMemcpy(dev_W2, W2, size_W2 * sizeof(double), cudaMemcpyHostToDevice);
    Back_dA1<<<dimGrid4, dimBlock>>> (dev_W2, dev_dZ2, dev_dA1, size_batch, size_output, size_hidden, size_batch);
    cudaMemcpy(dA1, dev_dA1, size_dA1 * sizeof(double), cudaMemcpyDeviceToHost);
    // get dZ1 through the chosen activation derivative
    dim3 dimGrid5((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
    cudaMemcpy(dev_A1, A1, size_A1 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_Z1, Z1, size_Z1 * sizeof(double), cudaMemcpyHostToDevice);
    if(acti_type == 1)
        Back_dZ1_Sigmoid<<<dimGrid5, dimBlock>>>(dev_dA1, dev_A1, dev_Z1, dev_dZ1, size_batch, size_hidden, size_batch);
    else if(acti_type == 2)
        Back_dZ1_ReLU<<<dimGrid5, dimBlock>>>(dev_dA1, dev_A1, dev_Z1, dev_dZ1, size_batch, size_hidden, size_batch);
    cudaMemcpy(dZ1, dev_dZ1, size_dZ1 * sizeof(double), cudaMemcpyDeviceToHost);
    // get dW1 = dZ1 · X^T
    dim3 dimGrid6((size_input + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
    cudaMemcpy(dev_X, X, size_X * sizeof(double), cudaMemcpyHostToDevice);
    Back_dW<<<dimGrid6, dimBlock>>>(dev_X, dev_dZ1, dev_dW1, size_batch, size_input, size_hidden, size_input);
    cudaMemcpy(dW1, dev_dW1, size_dW1 * sizeof(double), cudaMemcpyDeviceToHost);
    // get db1 = row sums of dZ1
    dim3 dimGrid7((1 + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
    Back_db<<<dimGrid7, dimBlock>>>(dev_dZ1, dev_db1, size_batch, size_hidden, size_batch);
    cudaMemcpy(db1, dev_db1, size_db1 * sizeof(double), cudaMemcpyDeviceToHost);
    // free ZA on device
    free_dev_ZA();
}
/* Apply one SGD step to all four parameter tensors:
 * W1, b1, W2, b2 each get Wb -= learn_rate * dWb via the update_Wb kernel.
 * Each update ships the host copies to the device, launches, and copies the
 * result back so the authoritative parameters stay on the host. */
void updateParameter(double learn_rate, int block_size)
{
    dim3 dimBlock(block_size, block_size);
    // update W1 (size_hidden x size_input)
    cudaMemcpy(dev_dW1, dW1, size_dW1 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_W1, W1, size_W1 * sizeof(double), cudaMemcpyHostToDevice);
    dim3 dimGrid1((size_input + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
    update_Wb<<<dimGrid1, dimBlock>>>(dev_dW1, dev_W1, size_input, learn_rate, size_hidden, size_input);
    cudaMemcpy(W1, dev_W1, size_W1 * sizeof(double), cudaMemcpyDeviceToHost);
    // update b1 (size_hidden x 1)
    cudaMemcpy(dev_db1, db1, size_db1 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b1, b1, size_b1 * sizeof(double), cudaMemcpyHostToDevice);
    dim3 dimGrid2((1 + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
    update_Wb<<<dimGrid2, dimBlock>>>(dev_db1, dev_b1, 1, learn_rate, size_hidden, 1);
    cudaMemcpy(b1, dev_b1, size_b1 * sizeof(double), cudaMemcpyDeviceToHost);
    // update W2 (size_output x size_hidden)
    cudaMemcpy(dev_dW2, dW2, size_dW2 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_W2, W2, size_W2 * sizeof(double), cudaMemcpyHostToDevice);
    dim3 dimGrid3((size_hidden + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
    update_Wb<<<dimGrid3, dimBlock>>>(dev_dW2, dev_W2, size_hidden, learn_rate, size_output, size_hidden);
    cudaMemcpy(W2, dev_W2, size_W2 * sizeof(double), cudaMemcpyDeviceToHost);
    // update b2 (size_output x 1)
    cudaMemcpy(dev_db2, db2, size_db2 * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b2, b2, size_b2 * sizeof(double), cudaMemcpyHostToDevice);
    dim3 dimGrid4((1 + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
    update_Wb<<<dimGrid4, dimBlock>>>(dev_db2, dev_b2, 1, learn_rate, size_output, 1);
    cudaMemcpy(b2, dev_b2, size_b2 * sizeof(double), cudaMemcpyDeviceToHost);
}
/* Fraction of batch columns whose predicted label (max_index) matches the
 * ground truth Y. Y(0,i)/max_index(0,i) are the file's indexing macros; for
 * row 0 they reduce to plain Y[i] and max_index[i]. */
double accuracy(int* max_index, int* Y, int size_batch)
{
    double hits = 0;
    for (int i = 0; i < size_batch; i++) {
        if (Y(0, i) == max_index(0, i))
            hits += 1;
    }
    return hits / double(size_batch);
}
/* One training epoch over the full training batch: forward pass,
 * backpropagation, then an SGD parameter update with learning rate 0.01.
 * acti_type: 1 = Sigmoid, 2 = ReLU. */
void train(double* X_trn, int* Y_trn, int acti_type, int block_size)
{
    const double learn_rate = 0.01;
    forward(X_trn, Y_trn, "train", acti_type, block_size);
    backprop(X_trn, Y_trn, acti_type, block_size);
    updateParameter(learn_rate, block_size);
}
/* Run a forward pass on the given split ("train" or anything else = test)
 * and return the classification accuracy against Y. */
double test(double* X, int* Y, string type, int acti_type, int block_size)
{
    forward(X, Y, type, acti_type, block_size);
    int count = (type == "train") ? size_train : size_test;
    return accuracy(max_index, Y, count);
}
/* Entry point: argv[1] = CUDA block size (threads per dimension of the 2-D
 * blocks), argv[2] = activation type (1 = Sigmoid, 2 = ReLU). Trains for
 * 20000 epochs, times the loop with CUDA events, then reports accuracy.
 * BUG FIX: the usage text described a "dimension of square matrix" argument
 * this program never takes; also frees the host buffers and destroys the
 * CUDA events (all previously leaked) and returns an explicit status. */
int main(int argc, char *argv[])
{
    int block_size;
    int epochs = 20000;
    int acti_type;
    double acc_trn, acc_tst;
    if ( argc < 3 ){
        printf(" Usage: first argument: size of CUDA block \n");
        printf(" second argument: activation type (1 = Sigmoid, 2 = ReLU) \n");
        return -1;
    } else if ( argc > 3 ) {
        printf("\n Too many arguments. \n");
        return -1;
    } else {
        block_size = atoi(argv[1]);
        acti_type = atoi(argv[2]);
    }
    initialize_Wb();
    initialize_dev_Wb();
    initialize_dev_dZA(size_train);
    read_data();
    float elapsed_time = 0.0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for(int e = 0; e < epochs; e++) {
        train(X_trn, Y_trn, acti_type, block_size);
        // double loss = cross_entropy_loss(Y_trn, A2, size_train);
        // printf("the %d epoch, the training loss is: %f \n", e, loss);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time, start, stop);
    printf( "Elapsed Time: %.4e msec. \n", elapsed_time );
    acc_trn = test(X_trn, Y_trn, "train", acti_type, block_size);
    acc_tst = test(X_tst, Y_tst, "test", acti_type, block_size);
    printf("the training accuracy is: %f, the test accuracy is: %f\n", acc_trn, acc_tst);
    // release everything the original leaked
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free_dev_Wb();
    free_dev_dZA();
    free(X_trn); free(Y_trn); free(X_tst); free(Y_tst);
    free(W1); free(b1); free(W2); free(b2);
    free(dW1); free(db1); free(dW2); free(db2);
    return 0;
}
24,016 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/* Grid-stride kernel: for each of N interleaved (x, y) points in `xy`, set
 * is_block[i] = true when the point lies strictly inside the axis-aligned
 * square (x_low, x_low+size) x (y_low, y_low+size). Points outside leave
 * is_block[i] untouched (caller pre-initialises the array). */
__global__ void isinblock_kernel(float *xy, float x_low, float y_low, float size, bool *is_block, int N) {
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < N;
         idx += blockDim.x * gridDim.x) {
        float px = xy[idx * 2];
        float py = xy[idx * 2 + 1];
        float x_up = x_low + size;
        float y_up = y_low + size;
        if (px > x_low && px < x_up && py > y_low && py < y_up)
            is_block[idx] = true;
    }
}
/* Host wrapper: upload the N interleaved (x, y) points and the caller's
 * is_block array, run isinblock_kernel, and copy the flags back.
 * BUG FIX: the final device-to-host copy used sizeof(float)*N for a bool
 * array — it read past the end of the device buffer and overflowed the
 * host buffer by 3*N bytes. It now copies sizeof(bool)*N. */
extern "C" void is_in_block(float *xy_host, bool *is_block_host, float &x_low, float &y_low, float &size, int &N){
    float *xy;
    bool *is_block;
    cudaError_t error;
    cudaMalloc((void**)&xy, sizeof(float)* N*2);
    cudaMalloc((void**)&is_block, sizeof(bool)* N);
    cudaMemcpy(xy, xy_host, sizeof(float)* N*2, cudaMemcpyHostToDevice);
    cudaMemcpy(is_block, is_block_host, sizeof(bool)* N, cudaMemcpyHostToDevice);
    isinblock_kernel<<<32786, 256>>>(xy, x_low, y_low, size, is_block, N);
    error = cudaDeviceSynchronize();
    if(error != cudaSuccess){
        printf("code: %d, reason: %s\n",error,cudaGetErrorString(error));
    }
    cudaMemcpy(is_block_host, is_block, sizeof(bool)*N, cudaMemcpyDeviceToHost);
    cudaFree(xy);
    cudaFree(is_block);
}
|
24,017 | #include <stdio.h>
#include <math.h>
#include <png.h>
#include <string.h>
#include <stdlib.h>
#define MAX_ITERATION 100
#define CEIL(a, b) (((a) + (b) - 1)/(b))
__global__ void kernel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration, float *buffer);
int writeImage(const char* filename, int width, int height, float *buffer, const char* title);
static void setRGB(png_byte *ptr, float val);
#define CEIL(a, b) (((a) + (b) - 1)/(b))
/* ./exec <min_real> <min_imag> <max_real> <max_imag> <W> <H> <CPU/GPU> <Treadhs> <Saida> */
/* ./exec <min_real> <min_imag> <max_real> <max_imag> <W> <H> <CPU/GPU> <Threads> <Output>
 * Renders a Mandelbrot escape-time image on the GPU and writes it as PNG.
 * BUG FIXES: (1) the kernel was called with max_imag and max_real swapped
 * relative to its signature; (2) cudaFree() was called on the HOST buffer
 * instead of the device buffer, leaking d_buffer; (3) the PNG writer's
 * return code was silently ignored. */
int main(int argc, char *argv[]){
    if(argc != 10){
        printf("Please specify output file\n");
        return 1;
    }
    /* Parse arguments */
    float min_real = atof(argv[1]);
    float min_imag = atof(argv[2]);
    float max_real = atof(argv[3]);
    float max_imag = atof(argv[4]);
    int width = atoi(argv[5]);
    int height = atoi(argv[6]);
    char accelerator[50];
    strcpy(accelerator, argv[7]);
    int threads = atoi(argv[8]);
    char file_name[50];
    strcpy(file_name, argv[9]);
    int iteration = MAX_ITERATION;
    float *buffer;
    float *d_buffer;
    /* Host buffer */
    buffer = (float*)malloc(width*height*sizeof(float));
    /* Device buffer */
    cudaMalloc(&d_buffer, width*height*sizeof(float));
    /* Launch configuration: one thread per pixel */
    int threads_per_block = threads;
    int num_blocks = CEIL((width*height), threads_per_block);
    /* Argument order now matches the kernel signature:
     * (width, height, min_real, min_imag, max_real, max_imag, ...) */
    kernel<<<num_blocks, threads_per_block>>>(width, height, min_real, min_imag, max_real, max_imag, iteration, d_buffer);
    cudaDeviceSynchronize();
    /* Copy the iteration counts back to the host */
    cudaMemcpy(buffer, d_buffer, width*height*sizeof(float), cudaMemcpyDeviceToHost);
    printf("Saving PNG\n");
    int result = writeImage(file_name, width, height, buffer, "MANDELBROT");
    if (result != 0) {
        printf("Failed to write %s\n", file_name);
    }
    /* Free the DEVICE buffer (the original mistakenly cudaFree'd the host one) */
    cudaFree(d_buffer);
    /* Free the host buffer */
    free(buffer);
    return result;
}
/* Mandelbrot escape-time kernel: one thread per pixel (flat 1-D launch,
 * row = index / width, col = index % width). Writes the iteration count at
 * which z escaped (|z|^2 > 4), or iteration_max if it never did.
 * BUG FIX: the launch rounds the grid up with CEIL(), so the tail threads
 * of the last block had index_vector >= width*height and wrote out of
 * bounds — they are now rejected up front. */
__global__ void kernel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int iteration_max, float *buffer){
    int index_vector = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index_vector >= width * height)
        return;   // excess threads from the rounded-up grid
    int i = index_vector / width;
    int j = index_vector % width;
    float del_x = (max_real - min_real)/width;   // pixel pitch, real axis
    float del_y= (max_imag - min_imag)/height;   // pixel pitch, imaginary axis
    float z_y = 0.0;
    float z_x = 0.0;
    int iteration = 0;
    float xtemp;
    float x_point = min_real + del_x * j;        // c for this pixel
    float y_point = max_imag - del_y * i;        // top row = max imaginary part
    while((((z_x * z_x) + (z_y * z_y)) <= 4) && (iteration < iteration_max)){
        xtemp = ((z_x * z_x) - (z_y * z_y)+x_point);
        z_y = (2.0 * z_x * z_y)+ y_point;
        z_x = xtemp;
        iteration++;
    }
    buffer[index_vector] = iteration;
}
/* Map an escape-time value (0..MAX_ITERATION) to an RGB pixel: black for
 * points inside the set (val == MAX_ITERATION), otherwise a fixed palette
 * bucketed by how quickly the point escaped.
 * NOTE(review): `(int)(val/MAX_ITERATION) * 255` truncates BEFORE the
 * multiply, so v is 255 for every val < MAX_ITERATION and 0 only at
 * val == MAX_ITERATION. Probably `(int)((val/MAX_ITERATION) * 255)` was
 * intended; with integral iteration counts the v == 0 test behaves the
 * same either way, so the code is left untouched. */
__host__ static void setRGB(png_byte *ptr, float val){
    int v = 255 - (int)(val/MAX_ITERATION) * 255;
    if(v == 0){
        // inside the set: black
        ptr[0] = v;
        ptr[1] = v;
        ptr[2] = v;
    }else{
        // escaped: palette keyed to escape speed
        if(val < 10){
            ptr[0] = 192;
            ptr[1] = 217;
            ptr[2] = 217;
        }else if(val < 15){
            ptr[0] = 95;
            ptr[1] = 159;
            ptr[2] = 159;
        }else if(val < 25){
            ptr[0] = 0;
            ptr[1] = 255;
            ptr[2] = 255;
        }else if(val < 50){
            ptr[0] = 255;
            ptr[1] = 0;
            ptr[2] = 255;
        }else if(val < 75){
            ptr[0] = 234;
            ptr[1] = 173;
            ptr[2] = 234;
        }else{
            ptr[0] = 79;
            ptr[1] = 47;
            ptr[2] = 79;
        }
    }
};
/* Write `buffer` (width*height escape-time values) to `filename` as an
 * 8-bit RGB PNG via libpng, colouring each pixel with setRGB().
 * Returns 0 on success, 1 on any failure. Uses libpng's setjmp-based error
 * handling; all exit paths funnel through the `finalise` cleanup label,
 * which is why the goto structure is kept exactly as-is.
 * NOTE: `title` is currently unused (no tEXt chunk is written). */
__host__ int writeImage(const char* filename, int width, int height, float *buffer, const char* title){
    int code = 0;
    FILE *fp = NULL;
    png_structp png_ptr = NULL;
    png_infop info_ptr = NULL;
    png_bytep row = NULL;
    // Open file for writing (binary mode)
    fp = fopen(filename, "wb");
    if (fp == NULL) {
        printf( "Could not open file %s for writing\n", filename);
        code = 1;
        goto finalise;
    }
    // Initialize write structure
    png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if (png_ptr == NULL) {
        printf( "Could not allocate write struct\n");
        code = 1;
        goto finalise;
    }
    // Initialize info structure
    info_ptr = png_create_info_struct(png_ptr);
    if (info_ptr == NULL) {
        printf( "Could not allocate info struct\n");
        code = 1;
        goto finalise;
    }
    // Setup Exception handling: libpng longjmps here on any internal error
    if (setjmp(png_jmpbuf(png_ptr))) {
        printf( "Error during png creation\n");
        code = 1;
        goto finalise;
    }
    png_init_io(png_ptr, fp);
    // Write header (8 bit colour depth)
    png_set_IHDR(png_ptr, info_ptr, width, height,
    8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
    PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
    png_write_info(png_ptr, info_ptr);
    // Allocate memory for one row (3 bytes per pixel - RGB)
    row = (png_bytep) malloc(3 * width * sizeof(png_byte));
    // Write image data row by row, colouring as we go
    int x, y;
    for (y=0 ; y<height ; y++) {
        for (x=0 ; x<width ; x++) {
            setRGB(&(row[x*3]), buffer[y*width + x]);
        }
        png_write_row(png_ptr, row);
    }
    // End write
    png_write_end(png_ptr, NULL);
    finalise:
    // Shared cleanup for both the success and every error path
    if (fp != NULL) fclose(fp);
    if (info_ptr != NULL) png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
    if (png_ptr != NULL) png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
    if (row != NULL) free(row);
    return code;
};
|
24,018 |
// Intentionally empty kernel. Launching any kernel forces CUDA context /
// device initialization, which is presumably this kernel's purpose
// (NOTE(review): no caller visible in this chunk — confirm intent).
__global__ void self_initialization(){
}
|
/* Grid-stride Mandelbrot kernel: each thread walks pixels p = index,
 * index+stride, ... over the w*h image, maps each pixel to a point c in the
 * complex plane (centered on (xOrigin, yOrigin), scaled by zoomFactor),
 * iterates z <- z^2 + c starting from z = c, and writes a brightness to
 * `result`: 255*(1 - k/maxIterations) at the escape iteration k, or 0 when
 * the point never escapes (inside the set). */
__global__
void iterateKernel(int w, int h, int maxIterations, double xOrigin, double yOrigin, double zoomFactor, int* result) {
    const int firstPixel = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int pixel = firstPixel; pixel < w * h; pixel += step) {
        // recover 2-D coordinates from the flat pixel index
        const int row = pixel / w;
        const int col = pixel % w;
        // pixel -> complex plane (the row offset recenters non-square images)
        double cx = xOrigin - (2 / zoomFactor) * (1 - 2 * ((double) col / w));
        double cy = yOrigin - (2 / zoomFactor) * (1 - 2 * ((double) (row+(w-h)/2) / w));
        // escape-time iteration, z starts at c
        double zx = cx;
        double zy = cy;
        bool escaped = false;
        for (int k = 0; k < maxIterations; ++k) {
            if (zx * zx + zy * zy > 4) {
                // fast escape -> bright, slow escape -> dark
                result[row * w + col] = 255 * (1 - (double) k / maxIterations);
                escaped = true;
                break;
            }
            double tx = zx * zx - zy * zy + cx;
            double ty = 2 * zx * zy + cy;
            zx = tx;
            zy = ty;
        }
        if (!escaped)
            result[row * w + col] = 0;   // never escaped: inside the set
    }
}
/*
 * Render one w*h Mandelbrot frame on the GPU.
 * Returns a malloc'd buffer of w*h grey values (caller frees it), or 0 if
 * the device allocation, the kernel, or the host allocation fails.
 * Fix: every CUDA call result was previously ignored, so a failed
 * allocation or launch silently produced garbage.
 */
extern "C"
int* iterateGPU(int w, int h, int maxIterations, double xOrigin, double yOrigin, double zoomFactor) {
    int* resultOnGPU = 0;
    if (cudaMalloc(&resultOnGPU, w * h * sizeof(int)) != cudaSuccess)
        return 0;                      /* device out of memory */
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, 0);
    /* Enough max-size blocks to give every pixel a thread; the kernel's
       grid-stride loop tolerates any configuration. */
    int blockSize = deviceProp.maxThreadsPerBlock;
    int numBlocks = (w * h - 1) / blockSize + 1;
    iterateKernel<<<numBlocks, blockSize>>>(w, h, maxIterations, xOrigin, yOrigin, zoomFactor, resultOnGPU);
    cudaDeviceSynchronize();
    /* Kernel launches return no status: fetch launch/exec errors explicitly. */
    if (cudaGetLastError() != cudaSuccess) {
        cudaFree(resultOnGPU);
        return 0;
    }
    int* result = (int*) malloc(w * h * sizeof(int));
    if (result)
        cudaMemcpy(result, resultOnGPU, w * h * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(resultOnGPU);
    return result;
}
|
24,020 | #include <stdio.h>
#include <stdlib.h>
#define N 10000000000
/*
 * Element-wise vector add: c[i] = a[i] + b[i] for i in [0, N).
 * Grid-stride loop: correct for any launch configuration and for element
 * counts beyond the maximum grid dimension. (The original indexed by
 * blockIdx.x alone, which required an illegal <<<N,1>>> launch for this N
 * and wasted 31 of every 32 lanes.)
 */
__global__ void add_gpu(float *a, float *b, float *c) {
    long long stride = (long long)gridDim.x * blockDim.x;
    for (long long tid = (long long)blockIdx.x * blockDim.x + threadIdx.x;
         tid < N; tid += stride)
        c[tid] = a[tid] + b[tid];
}
/*
 * Driver: fills two N-element vectors, adds them on the GPU, copies the
 * result back. Fixes: the original declared float a[N] etc. as automatic
 * arrays — with N = 10,000,000,000 that is ~40 GB on the stack, which can
 * never exist — and checked no allocation. Buffers now live on the heap
 * and every allocation is verified before use.
 */
int main(void) {
    size_t bytes = (size_t)N * sizeof(float);
    float *a = (float*)malloc(bytes);
    float *b = (float*)malloc(bytes);
    float *c = (float*)malloc(bytes);
    if (!a || !b || !c) {
        printf("host allocation failed\n");
        free(a); free(b); free(c);
        return 1;
    }
    float *dev_a, *dev_b, *dev_c;
    if (cudaMalloc((void**)&dev_a, bytes) != cudaSuccess ||
        cudaMalloc((void**)&dev_b, bytes) != cudaSuccess ||
        cudaMalloc((void**)&dev_c, bytes) != cudaSuccess) {
        printf("device allocation failed\n");
        cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
        free(a); free(b); free(c);
        return 1;
    }
    for (long long i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * 3;
    }
    cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice);
    /* Bounded launch: the grid-stride kernel covers any N with a grid that
       never exceeds hardware limits. */
    int threads = 256;
    long long wanted = (N + threads - 1) / threads;
    int blocks = wanted > 65535 ? 65535 : (int)wanted;
    add_gpu<<<blocks, threads>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost);
    // for (int i=0; i<N; i++) {
    //     printf("%d + %d = %d\n", a[i], b[i], c[i]);
    // }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    free(a); free(b); free(c);
    return 0;
}
|
24,021 | #include <stdio.h>
#include <cuda.h>
#define CUDA_CHECK(value, label) { \
cudaError_t c = (value); \
if (c != cudaSuccess) { \
fprintf(stderr, \
"Error: '%s' at line %d in %s\n", \
cudaGetErrorString(c),__LINE__,__FILE__); \
goto label; \
} }
// Device-side prefix scan of `in` into `out` (`size` elements).
// NOTE(review): the body is an unimplemented stub — the host wrapper below
// still copies d_out back, so callers currently receive whatever the
// uninitialized device buffer holds. Implement before use.
static __global__ void prefix_scan_device(float *in, float *out, int size) {
// Do CUDA stuff
}
/*
 * Host wrapper: copies `in` to the device, launches the scan kernel, and
 * copies the result back into `out`. Every CUDA call is checked with
 * CUDA_CHECK; on failure control jumps to cuda_error and whatever was
 * allocated is freed.
 * Fix: the kernel launch itself was unchecked — launches return no status,
 * so cudaGetLastError() is now fetched explicitly (a bad configuration
 * would otherwise only surface, confusingly, at the following memcpy).
 */
void prefix_scan(float *in, float *out, int size) {
float *d_in=0, *d_out=0;
CUDA_CHECK(cudaMalloc(&d_in, size * sizeof(float)), cuda_error)
CUDA_CHECK(cudaMalloc(&d_out, size * sizeof(float)), cuda_error)
CUDA_CHECK(cudaMemcpy(d_in, in, size * sizeof(float), cudaMemcpyHostToDevice), cuda_error)
prefix_scan_device<<<128, 1>>>(d_in, d_out, size);
CUDA_CHECK(cudaGetLastError(), cuda_error)
CUDA_CHECK(cudaMemcpy(out, d_out, size * sizeof(float), cudaMemcpyDeviceToHost), cuda_error)
cuda_error:
if(d_in) cudaFree(d_in);
if(d_out) cudaFree(d_out);
}
|
24,022 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <string.h> /* needed library to use strcmp function */
#include <cuda.h>
typedef struct {
float posx;
float posy;
float range;
float temp;
} heatsrc_t;
typedef struct {
unsigned maxiter; /* maximum number of iterations */
unsigned resolution; /* spatial resolution */
int algorithm; /* 0=>Jacobi, 1=>Gauss */
unsigned visres; /* visualization resolution */
float *u, *uhelp;
float *uvis;
unsigned numsrcs; /* number of heat sources */
heatsrc_t *heatsrcs;
} algoparam_t;
/* function declarations */
int read_input(FILE *infile, algoparam_t *param);
void print_params(algoparam_t *param);
int initialize(algoparam_t *param);
int finalize(algoparam_t *param);
void write_image(FILE * f, float *u, unsigned sizex, unsigned sizey);
int coarsen(float *uold, unsigned oldx, unsigned oldy, float *unew,
unsigned newx, unsigned newy);
__device__ float dev_residual = 0.0;
/*
 * One Jacobi sweep on the GPU: g = relax(h) over the interior of the
 * N*N grid (2D launch, one thread per point). Each thread also folds the
 * square of its update into the global accumulator dev_residual.
 */
__global__ void gpu_Heat(float *h, float *g, int N) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Interior points only: the boundary rows/columns are fixed.
    // (row > 0 is equivalent to the original blockIdx/threadIdx == 0 test.)
    if (row > 0 && row < N - 1 && col > 0 && col < N - 1) {
        int idx = row * N + col;
        // Four-point stencil average of the neighbours.
        g[idx] = 0.25 * (h[idx - 1] + h[idx - N] + h[idx + 1] + h[idx + N]);
        float delta = g[idx] - h[idx];
        atomicAdd(&dev_residual, delta * delta);
    }
}
#define NB 8
#define min(a,b) ( ((a) < (b)) ? (a) : (b) )
/* Sum of squared point-wise differences between utmp and u over the
   interior of a sizex*sizey row-major grid (boundary excluded). */
float cpu_residual(float *u, float *utmp, unsigned sizex, unsigned sizey) {
    float sum = 0.0;
    for (int row = 1; row < sizex - 1; row++) {
        for (int col = 1; col < sizey - 1; col++) {
            float delta = utmp[row * sizey + col] - u[row * sizey + col];
            sum += delta * delta;
        }
    }
    return sum;
}
/*
 * One Jacobi sweep over the interior of u (sizex*sizey, row-major) into
 * utmp, returning the sum of squared updates (the residual).
 * The iteration space is processed in NB*NB cache blocks of bx*by points.
 * Note: bx,by come from integer division, so for sizex,sizey < NB they are
 * 0, the loops do no work, and 0 is returned.
 */
float cpu_jacobi(float *u, float *utmp, unsigned sizex, unsigned sizey) {
float diff, sum = 0.0;
int nbx, bx, nby, by;
/* blocking parameters: NB blocks per dimension */
nbx = NB;
bx = sizex / nbx;
nby = NB;
by = sizey / nby;
for (int ii = 0; ii < nbx; ii++)
for (int jj = 0; jj < nby; jj++)
/* interior points of block (ii,jj); min() clips the last block at the border */
for (int i = 1 + ii * bx; i <= min((ii+1)*bx, sizex-2); i++)
for (int j = 1 + jj * by; j <= min((jj+1)*by, sizey-2); j++) {
/* four-point stencil average of the neighbours */
utmp[i * sizey + j] = 0.25 * (u[i * sizey + (j - 1)] + /* left */
u[i * sizey + (j + 1)] + /* right */
u[(i - 1) * sizey + j] + /* top */
u[(i + 1) * sizey + j]); /* bottom */
diff = utmp[i * sizey + j] - u[i * sizey + j];
sum += diff * diff;
}
return (sum);
}
/* Print command-line help for the solver to stderr. */
void usage(char *progname) {
    fprintf(stderr,
            "Usage: %s <input file> -t threads -b blocks\n"
            " -t number of threads per block in each dimension (e.g. 16)\n",
            progname);
}
/*
 * Driver: solve the heat equation with Jacobi relaxation, first on the CPU
 * and then on the GPU, timing both with CUDA events, then dump the final
 * field as a PPM image.
 *
 * Fixes applied:
 *  - "&param" had been mangled into "¶m" (HTML-entity corruption);
 *  - deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize();
 *  - the flop count is recomputed for the GPU run (it previously reused the
 *    CPU iteration count, skewing the reported MFlop/s);
 *  - residual_acc is initialized before first use.
 */
int main(int argc, char *argv[]) {
    unsigned iter;
    FILE *infile, *resfile;
    char *resfilename;
    /* algorithmic parameters */
    algoparam_t param;
    int np;
    /* check arguments */
    if (argc < 4) {
        usage(argv[0]);
        return 1;
    }
    /* check input file */
    if (!(infile = fopen(argv[1], "r"))) {
        fprintf(stderr, "\nError: Cannot open \"%s\" for reading.\n\n",
                argv[1]);
        usage(argv[0]);
        return 1;
    }
    /* check result file */
    resfilename = "heat.ppm";
    if (!(resfile = fopen(resfilename, "w"))) {
        fprintf(stderr, "\nError: Cannot open \"%s\" for writing.\n\n",
                resfilename);
        usage(argv[0]);
        return 1;
    }
    /* check input */
    if (!read_input(infile, &param)) {
        fprintf(stderr, "\nError: Error parsing input file.\n\n");
        usage(argv[0]);
        return 1;
    }
    /* full size (param.resolution are only the inner points) */
    np = param.resolution + 2;
    int Grid_Dim, Block_Dim; /* Grid and Block structure values */
    if (strcmp(argv[2], "-t") == 0) {
        Block_Dim = atoi(argv[3]);
        /* enough blocks per dimension to cover all np points */
        Grid_Dim = np / Block_Dim + ((np % Block_Dim) != 0);
        if ((Block_Dim * Block_Dim) > 512) {
            printf("Error -- too many threads in block, try again\n");
            return 1;
        }
    } else {
        fprintf(stderr, "Usage: %s <input file> -t threads -b blocks\n",
                argv[0]);
        fprintf(stderr,
                " -t number of threads per block in each dimension (e.g. 16)\n");
        return 0;
    }
    fprintf(stderr, "\nSolving Heat equation on the CPU and the GPU\n");
    fprintf(stderr, "--------------------------------------------\n");
    print_params(&param);
    fprintf(stdout,
            "\nExecution on CPU (sequential)\n-----------------------------\n");
    if (!initialize(&param)) {
        fprintf(stderr, "Error in Solver initialization.\n\n");
        return 1;
    }
    /* starting time */
    float elapsed_time_ms; /* which is applicable for asynchronous code also */
    cudaEvent_t start, stop; /* using cuda events to measure time */
    cudaEventCreate(&start); /* instrument code to measure start time */
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaEventSynchronize(start);
    iter = 0;
    float residual;
    /* CPU reference: sweep until converged or the iteration cap is hit */
    while (1) {
        residual = cpu_jacobi(param.u, param.uhelp, np, np);
        /* swap the two buffers for the next sweep */
        float * tmp = param.u;
        param.u = param.uhelp;
        param.uhelp = tmp;
        iter++;
        /* solution good enough ? */
        if (residual < 0.00005)
            break;
        /* max. iteration reached ? (no limit with maxiter=0) */
        if (iter >= param.maxiter)
            break;
    }
    cudaEventRecord(stop, 0); /* instrument code to measure end time */
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time_ms, start, stop);
    /* Flop count after iter iterations (11 flops per point per sweep) */
    float flop = iter * 11.0 * param.resolution * param.resolution;
    fprintf(stdout, "Time on CPU in ms.= %f ", elapsed_time_ms);
    fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n", flop / 1000000000.0,
            flop / elapsed_time_ms / 1000);
    fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual,
            iter);
    finalize(&param);
    fprintf(stdout, "\nExecution on GPU\n----------------\n");
    fprintf(stderr, "Number of threads per block in each dimension = %d\n",
            Block_Dim);
    fprintf(stderr, "Number of blocks per grid in each dimension = %d\n",
            Grid_Dim);
    if (!initialize(&param)) {
        fprintf(stderr, "Error in Solver initialization.\n\n");
        return 1;
    }
    dim3 Grid(Grid_Dim, Grid_Dim);
    dim3 Block(Block_Dim, Block_Dim);
    /* starting time */
    cudaEventRecord(start, 0);
    cudaEventSynchronize(start);
    float *dev_u, *dev_uhelp;
    /* Allocation on GPU for matrices u and uhelp */
    size_t size = np * np * sizeof(float);
    cudaMalloc(&dev_u, size);
    cudaMalloc(&dev_uhelp, size);
    /* Copy initial values in u and uhelp from host to GPU */
    cudaMemcpy(dev_u, param.u, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_uhelp, param.uhelp, size, cudaMemcpyHostToDevice);
    iter = 0;
    residual = 0.0;
    /* dev_residual accumulates across kernel launches, so the per-sweep
       residual is the difference between consecutive readings */
    float residual_aux = 0.0, residual_acc = 0.0;
    while (1) {
        if (iter > 0) {
            residual_aux = residual_acc;
        }
        gpu_Heat<<<Grid, Block>>>(dev_u, dev_uhelp, np);
        cudaDeviceSynchronize(); /* wait for all threads to complete
                                    (cudaThreadSynchronize is deprecated) */
        /* Get the residual computation from global memory (on device)
         * and copy it into residual variable (host memory)
         */
        cudaMemcpyFromSymbol(&residual_acc, dev_residual, sizeof(float), 0, cudaMemcpyDeviceToHost);
        /* Update the value of residual */
        residual = residual_acc - residual_aux;
        printf("Residual computation on GPU is ==> %f\n", residual);
        /* swap the device buffers for the next sweep */
        float * tmp = dev_u;
        dev_u = dev_uhelp;
        dev_uhelp = tmp;
        iter++;
        /* solution good enough ? */
        if (residual < 0.00005)
            break;
        /* max. iteration reached ? (no limit with maxiter=0) */
        if (iter >= param.maxiter)
            break;
    }
    /* get result matrix from GPU */
    cudaMemcpy(param.u, dev_u, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(param.uhelp, dev_uhelp, size, cudaMemcpyDeviceToHost);
    /* free memory used in GPU */
    cudaFree(dev_u);
    cudaFree(dev_uhelp);
    cudaEventRecord( stop, 0 ); /* instrument code to measure end time */
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time_ms, start, stop);
    /* recompute the flop count with the GPU iteration count (the original
       reused the CPU count, skewing the reported rate) */
    flop = iter * 11.0 * param.resolution * param.resolution;
    fprintf(stdout, "\nTime on GPU in ms. = %f ", elapsed_time_ms);
    fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n", flop / 1000000000.0,
            flop / elapsed_time_ms / 1000);
    fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual,
            iter);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /* for plot... */
    coarsen(param.u, np, np, param.uvis, param.visres + 2, param.visres + 2);
    write_image(resfile, param.uvis, param.visres + 2, param.visres + 2);
    finalize(&param);
    return 0;
}
/*
* Initialize the iterative solver
* - allocate memory for matrices
* - set boundary conditions according to configuration
*/
/*
 * Initialize the iterative solver:
 *  - allocate the np*np solution buffers (u, uhelp) and the visualization
 *    buffer (uvis), zero-filled via calloc;
 *  - superimpose every heat source onto the four boundary rows/columns of
 *    u (each source contributes linearly with distance, out to `range`,
 *    with source positions given in normalized [0,1] coordinates);
 *  - copy u into uhelp.
 * Returns 1 on success, 0 if any allocation fails.
 */
int initialize(algoparam_t *param)
{
int i, j;
float dist;
/* total number of points (including border) */
const int np = param->resolution + 2;
/* allocate memory */
(param->u) = (float*) calloc(sizeof(float), np * np);
(param->uhelp) = (float*) calloc(sizeof(float), np * np);
(param->uvis) = (float*) calloc(sizeof(float),
(param->visres + 2) * (param->visres + 2));
if (!(param->u) || !(param->uhelp) || !(param->uvis)) {
fprintf(stderr, "Error: Cannot allocate memory\n");
return 0;
}
for (i = 0; i < param->numsrcs; i++) {
/* top row: distance from source i to boundary point (j/(np-1), 0) */
for (j = 0; j < np; j++) {
dist = sqrt(
pow((float) j / (float) (np - 1) - param->heatsrcs[i].posx,
2) + pow(param->heatsrcs[i].posy, 2));
if (dist <= param->heatsrcs[i].range) {
(param->u)[j] += (param->heatsrcs[i].range - dist)
/ param->heatsrcs[i].range * param->heatsrcs[i].temp;
}
}
/* bottom row: boundary point (j/(np-1), 1) */
for (j = 0; j < np; j++) {
dist = sqrt(
pow((float) j / (float) (np - 1) - param->heatsrcs[i].posx,
2) + pow(1 - param->heatsrcs[i].posy, 2));
if (dist <= param->heatsrcs[i].range) {
(param->u)[(np - 1) * np + j] += (param->heatsrcs[i].range
- dist) / param->heatsrcs[i].range
* param->heatsrcs[i].temp;
}
}
/* leftmost column (corners already covered by the rows above) */
for (j = 1; j < np - 1; j++) {
dist = sqrt(
pow(param->heatsrcs[i].posx, 2)
+ pow(
(float) j / (float) (np - 1)
- param->heatsrcs[i].posy, 2));
if (dist <= param->heatsrcs[i].range) {
(param->u)[j * np] += (param->heatsrcs[i].range - dist)
/ param->heatsrcs[i].range * param->heatsrcs[i].temp;
}
}
/* rightmost column */
for (j = 1; j < np - 1; j++) {
dist = sqrt(
pow(1 - param->heatsrcs[i].posx, 2)
+ pow(
(float) j / (float) (np - 1)
- param->heatsrcs[i].posy, 2));
if (dist <= param->heatsrcs[i].range) {
(param->u)[j * np + (np - 1)] += (param->heatsrcs[i].range
- dist) / param->heatsrcs[i].range
* param->heatsrcs[i].temp;
}
}
}
/* Copy u into uhelp (pointer walk over the whole np*np buffer) */
float *putmp, *pu;
pu = param->u;
putmp = param->uhelp;
for (j = 0; j < np; j++)
for (i = 0; i < np; i++)
*putmp++ = *pu++;
return 1;
}
/* free used memory */
/* Release the three solver buffers and null the pointers so that a second
   call is harmless. Always reports success (returns 1). */
int finalize(algoparam_t *param) {
    float **buffers[] = { &param->u, &param->uhelp, &param->uvis };
    for (int b = 0; b < 3; b++) {
        if (*buffers[b]) {
            free(*buffers[b]);
            *buffers[b] = 0;
        }
    }
    return 1;
}
/*
* write the given temperature u matrix to rgb values
* and write the resulting image to file f
*/
/*
 * Render the temperature field u (sizex*sizey floats, row-major) as an
 * ASCII PPM (P3) heat map on stream f, scaling values linearly between the
 * field's minimum and maximum through a 1024-entry RGB ramp
 * (blue = cold ... red = hot).
 *
 * Fixes: min/max were seeded with DBL_MAX but stored in floats (overflow,
 * undefined behaviour); the colour index reached 1024 for u == max,
 * reading past the 1024-entry tables; and a constant field divided by
 * zero. The index is now clamped and a flat field renders safely.
 */
void write_image(FILE * f, float *u, unsigned sizex, unsigned sizey) {
    /* RGB table: four 256-entry ramps, filled from the hot end down */
    unsigned char r[1024], g[1024], b[1024];
    int i, j, k;
    float min, max;
    j = 1023;
    for (i = 0; i < 256; i++) { r[j] = 255;     g[j] = i;       b[j] = 0;   j--; }
    for (i = 0; i < 256; i++) { r[j] = 255 - i; g[j] = 255;     b[j] = 0;   j--; }
    for (i = 0; i < 256; i++) { r[j] = 0;       g[j] = 255;     b[j] = i;   j--; }
    for (i = 0; i < 256; i++) { r[j] = 0;       g[j] = 255 - i; b[j] = 255; j--; }
    min = FLT_MAX;   /* was DBL_MAX: overflows a float */
    max = -FLT_MAX;
    /* find minimum and maximum */
    for (i = 0; i < sizey; i++) {
        for (j = 0; j < sizex; j++) {
            if (u[i * sizex + j] > max)
                max = u[i * sizex + j];
            if (u[i * sizex + j] < min)
                min = u[i * sizex + j];
        }
    }
    float range = max - min;
    if (range <= 0.0f)
        range = 1.0f;  /* constant field: avoid division by zero */
    fprintf(f, "P3\n");
    fprintf(f, "%u %u\n", sizex, sizey);
    fprintf(f, "%u\n", 255);
    for (i = 0; i < sizey; i++) {
        for (j = 0; j < sizex; j++) {
            k = (int) (1024.0 * (u[i * sizex + j] - min) / range);
            if (k > 1023) k = 1023;  /* u == max used to index r[1024] (OOB) */
            if (k < 0) k = 0;
            fprintf(f, "%d %d %d ", r[k], g[k], b[k]);
        }
        fprintf(f, "\n");
    }
}
/*
 * Subsample uold (oldx*oldy) into unew (newx*newy) by strided point
 * picking. When the new grid is not smaller in a dimension the stride is 1
 * and the copy is clipped to the old extent.
 * NOTE: this only takes the top-left corner sample of each cell and loops
 * stop one short of the full extent — no real coarsening/averaging.
 * Always returns 1.
 */
int coarsen(float *uold, unsigned oldx, unsigned oldy, float *unew,
        unsigned newx, unsigned newy) {
    int stepx = 1, stepy = 1;
    int stopx = newx, stopy = newy;
    if (oldx > newx)
        stepx = oldx / newx;
    else
        stopx = oldx;
    if (oldy > newy)
        stepy = oldy / newy;
    else
        stopy = oldy;
    for (int i = 0; i < stopy - 1; i++)
        for (int j = 0; j < stopx - 1; j++)
            unew[i * newx + j] = uold[i * oldx * stepy + j * stepx];
    return 1;
}
#define BUFSIZE 100
/*
 * Parse the solver configuration from `infile`:
 *   line 1: maxiter, line 2: resolution, line 3: number of heat sources,
 *   then one "posx posy range temp" line per source.
 * visres is set equal to resolution. The heat-source array is malloc'd
 * into param->heatsrcs (caller owns it).
 * Returns 1 on success, 0 on any read/parse/allocation failure.
 * Fix: fgets and malloc results were previously ignored, so a truncated
 * file or failed allocation parsed stale/garbage data.
 */
int read_input(FILE *infile, algoparam_t *param) {
    int i, n;
    char buf[BUFSIZE];
    /* maximum iteration count */
    if (!fgets(buf, BUFSIZE, infile))
        return 0;
    n = sscanf(buf, "%u", &(param->maxiter));
    if (n != 1)
        return 0;
    /* spatial resolution */
    if (!fgets(buf, BUFSIZE, infile))
        return 0;
    n = sscanf(buf, "%u", &(param->resolution));
    if (n != 1)
        return 0;
    param->visres = param->resolution;
    /* number of heat sources */
    if (!fgets(buf, BUFSIZE, infile))
        return 0;
    n = sscanf(buf, "%u", &(param->numsrcs));
    if (n != 1)
        return 0;
    (param->heatsrcs) = (heatsrc_t*) malloc(
            sizeof(heatsrc_t) * (param->numsrcs));
    if (!param->heatsrcs)
        return 0;
    for (i = 0; i < param->numsrcs; i++) {
        if (!fgets(buf, BUFSIZE, infile))
            return 0;
        n = sscanf(buf, "%f %f %f %f", &(param->heatsrcs[i].posx),
                &(param->heatsrcs[i].posy), &(param->heatsrcs[i].range),
                &(param->heatsrcs[i].temp));
        if (n != 4)
            return 0;
    }
    return 1;
}
/* Dump the parsed solver configuration (iterations, resolution, and one
   line per heat source) to stdout. */
void print_params(algoparam_t *param) {
    fprintf(stdout, "Iterations : %u\n", param->maxiter);
    fprintf(stdout, "Resolution : %u\n", param->resolution);
    fprintf(stdout, "Num. Heat sources : %u\n", param->numsrcs);
    for (int s = 0; s < param->numsrcs; s++)
        fprintf(stdout, " %2d: (%2.2f, %2.2f) %2.2f %2.2f \n", s + 1,
                param->heatsrcs[s].posx, param->heatsrcs[s].posy,
                param->heatsrcs[s].range, param->heatsrcs[s].temp);
}
|
24,023 | #include <stdio.h>
/* Enumerate CUDA devices and print each one's compute capability as
   "major.minor ". Returns -1 when the device count cannot be read or no
   device exists; devices whose properties fail to load are skipped. */
int main()
{
    int deviceCount = 0;
    if (cudaGetDeviceCount(&deviceCount) != cudaSuccess)
        return -1;
    if (deviceCount == 0)
        return -1;
    for (int dev = 0; dev < deviceCount; ++dev)
    {
        cudaDeviceProp props;
        if (cudaGetDeviceProperties(&props, dev) != cudaSuccess)
            continue;
        printf("%d.%d ", props.major, props.minor);
    }
    return 0;
}
|
// Fill a float array with a single constant value.
// Index layout inferred from the arithmetic: blockIdx.x selects a chunk of
// `numberEntries` elements, blockIdx.y adds an offset of `numberRows`, and
// threadIdx.x picks the element within the chunk — TODO(review): confirm
// the intended grid/block shape at the launch site. There is no bounds
// check, so every (blockIdx, threadIdx) combination of the launch must map
// inside the allocation.
__global__ void fillOneFloatArrayKernel(
int numberRows,
int numberEntries,
float* array,
float constant) {
int index = blockIdx.x * numberEntries + blockIdx.y * numberRows + threadIdx.x;
array[index] = constant;
}
24,025 | #define BLOCK_DIM 4
#define TILE_DIM BLOCK_DIM
#include <stdio.h>
#include <stdlib.h>
#include<time.h>
void Print_Matrix( int* mtxArray , int n, int m );
void PrintMatrixToText(int* mtxArray, int height, int width, const char* fileName);
// Matrix Mult Kernel
// Tiled matrix multiply C = A*B for row-major int matrices.
// Each block computes one TILE_DIM x TILE_DIM tile of C, staging matching
// tiles of A and B through shared memory phase by phase.
// Requires blockDim == (TILE_DIM, TILE_DIM).
__global__ void matrixMult(int* A, int* B, int* C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols) {
    __shared__ int As[TILE_DIM][TILE_DIM];
    __shared__ int Bs[TILE_DIM][TILE_DIM];
    int bx = blockIdx.x; int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;
    int Row = by * blockDim.y + ty;
    int Col = bx * blockDim.x + tx;
    int CValue = 0;
    // Fix: plain integer ceil-division. The old expression
    // ceil((float)(ACols + BLOCK_DIM - 1) / BLOCK_DIM) rounded up twice,
    // running one extra all-zero phase whenever ACols was a multiple of
    // the tile size (harmless to the result, pure waste).
    int NUM_PHASE = (ACols + TILE_DIM - 1) / TILE_DIM;
    for (int k = 0; k < NUM_PHASE; ++k) {
        // Stage one tile of A and one of B, zero-padding past the edges.
        if (k*TILE_DIM + tx < ACols && Row < ARows)
            As[ty][tx] = A[Row*ACols + k*TILE_DIM + tx];
        else
            As[ty][tx] = 0;
        if (k*TILE_DIM + ty < BRows && Col < BCols)
            Bs[ty][tx] = B[(k*TILE_DIM + ty)*BCols + Col];
        else
            Bs[ty][tx] = 0;
        __syncthreads();   // both tiles fully loaded before use
        for (int i = 0; i < TILE_DIM; ++i)
            CValue += As[ty][i] * Bs[i][tx];
        __syncthreads();   // everyone done reading before the next load
    }
    if (Row < CRows && Col < CCols)
        C[(Row*CCols)+Col] = CValue;
}
/*
 * Driver: reads n, m, k from the console, computes C(n x k) = A(n x m) *
 * B(m x k) on the GPU with the tiled kernel, prints all three matrices and
 * writes them to text files.
 * Fix: the grid dimensions were swapped (x sized from n, y from k); the
 * kernel derives rows from blockIdx.y and columns from blockIdx.x, so for
 * n != k part of C was never computed. The grid's x now covers the k
 * columns and y the n rows.
 */
int main(){
    int *h_a, *h_b, *h_c; // Host Variables
    int *d_a, *d_b, *d_c; // Device Variables
    int n,m,k;
    // Read matrix dimensions from the console.
    printf("n = ");
    scanf("%d", &n);
    printf("m = ");
    scanf("%d", &m);
    printf("k = ");
    scanf("%d", &k);
    printf("\n");
    // Alloc & Initialize Host Input Matrices
    int sizeA = (n) * (m) * sizeof(int);
    int sizeB = (m) * (k) * sizeof(int);
    int sizeC = (n) * (k) * sizeof(int);
    h_a = (int*)malloc( sizeA );
    h_b = (int*)malloc( sizeB );
    h_c = (int*)malloc( sizeC );
    // Initialize A and B with random values in [-10, 9].
    srand(time(NULL));
    for ( int i=0 ; i<n ; i++ )
        for ( int j=0 ; j<m ; j++ )
            h_a[i*m+j] = rand() % 20 - 10;
    for ( int i=0 ; i<m ; i++ )
        for ( int j=0 ; j<k ; j++ )
            h_b[i*k+j] = rand() % 20 - 10;
    // Allocate Device Memory
    cudaMalloc((void **) &d_a, sizeA);
    cudaMalloc((void **) &d_b, sizeB);
    cudaMalloc((void **) &d_c, sizeC);
    // Copy Input Matrices to Device Memory
    cudaMemcpy(d_a, h_a, sizeA, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeB, cudaMemcpyHostToDevice);
    // Grid covers C: x spans the k columns, y spans the n rows (ceil-div).
    dim3 dimGrid((k + BLOCK_DIM - 1) / BLOCK_DIM, (n + BLOCK_DIM - 1) / BLOCK_DIM);
    dim3 dimBlock(BLOCK_DIM,BLOCK_DIM);
    // Launch Kernel
    matrixMult<<<dimGrid ,dimBlock >>>(d_a , d_b , d_c , n, m, m, k, n, k);
    // Copy Result to Host (synchronizes with the kernel)
    cudaMemcpy( h_c, d_c, sizeC, cudaMemcpyDeviceToHost );
    // Free Device Memory
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    // Print Results
    printf("[Input matrix A]\n"); Print_Matrix( h_a , n, m );
    printf("[Input matrix B]\n"); Print_Matrix( h_b , m, k );
    printf("[GPU Result]\n"); Print_Matrix( h_c , n, k );
    // Print Results
    PrintMatrixToText(h_a,n,m,"matrixA.txt");
    PrintMatrixToText(h_b,m,k,"matrixB.txt");
    PrintMatrixToText(h_c,n,k,"matrixC.txt");
    // Free Host Memory
    free(h_a); free(h_b); free(h_c);
    return 0;
}
// Utilities
// Print an n x m row-major int matrix to stdout, one row per line,
// followed by a trailing blank line.
void Print_Matrix( int* mtxArray , int n, int m )
{
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < m; col++)
            printf( "%d " , mtxArray[row * m + col] );
        printf( "\n" );
    }
    printf( "\n" );
}
// Write a height x width row-major int matrix to `fileName`, tab-separated,
// one row per line.
// Fix: fopen failure (read-only directory, bad path, ...) previously
// dereferenced a NULL FILE*; it is now reported to stderr and skipped.
void PrintMatrixToText(int* mtxArray, int height, int width, const char* fileName){
    FILE *f = fopen(fileName, "w");
    if (f == NULL) {
        fprintf(stderr, "Could not open %s for writing\n", fileName);
        return;
    }
    for ( int i=0 ; i<height ; i++ )
    {
        for ( int j=0 ; j<width ; j++ )
        {
            fprintf(f, "%d\t" , mtxArray[i*width+j] );
        }
        fprintf(f,"\n" );
    }
    fclose(f);
}
|
24,026 | //#include<stdio.h>
#include <iostream>
#include <vector>
/*
 * y = A*x for a row-major m x n matrix: one block per row, one thread per
 * column (launched <<<m, n, m*n*sizeof(double)>>>). Dynamic shared memory
 * holds the per-element products, indexed as an m x n array of which each
 * block only touches its own row.
 *
 * Fix: __syncthreads() sat inside a divergent conditional. A barrier must
 * be reached by every thread of the block (undefined behaviour otherwise),
 * so the guards are now split around an unconditional barrier; results are
 * unchanged for conforming launches.
 */
__global__ void gaxpy(double *y, double *a, double *x, int m, int n){
    int row = blockIdx.x;
    int col = threadIdx.x;
    extern __shared__ double dots_s[];
    /* phase 1: each thread stores one product a[row][col] * x[col] */
    if (row < m && col < n)
        dots_s[row * n + col] = a[row * n + col] * *(x + col);
    __syncthreads();   /* all products visible block-wide; reached by all */
    /* phase 2: thread 0 of the block reduces its row and writes y[row] */
    if (row < m && col == 0) {
        for (int i = 1; i < n; i++) {
            dots_s[row * n] += dots_s[row * n + i];
        }
        *(y + row) = dots_s[row * n];
    }
}
/*
 * Host wrapper: returns y = A*x for a dense row-major mc x nc matrix `a`
 * and vector `x` (nc elements), computed on the GPU with one block per row.
 *
 * Fix: the original also allocated, copied, and freed device_m/device_n,
 * which the kernel never receives — m and n are passed by value in the
 * launch — so those four round-trips were dead weight and are removed.
 */
std::vector<double> matrixVectorMultiplication(double* a, int mc, int nc, double* x){
    std::vector<double> y(mc, 0);
    double *device_y, *device_a, *device_x;
    // Device allocations for the output, matrix, and input vector.
    cudaMalloc((void **)&device_y, sizeof(double)*mc);
    cudaMalloc((void **)&device_a, sizeof(double)*mc*nc);
    cudaMalloc((void **)&device_x, sizeof(double)*nc);
    // Copy the inputs (and the zeroed output) to the GPU.
    cudaMemcpy(device_a, a, mc*nc*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(device_x, x, nc*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(device_y, y.data(), mc*sizeof(double), cudaMemcpyHostToDevice);
    // One block per row, one thread per column; the dynamic shared buffer
    // holds the per-element products (kernel indexes it as an m x n array).
    gaxpy<<<mc, nc, sizeof(double)*mc*nc>>>(device_y, device_a, device_x, mc, nc);
    // cudaMemcpy on the default stream synchronizes with the kernel above.
    cudaMemcpy(y.data(), device_y, mc*sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(device_y);
    cudaFree(device_a);
    cudaFree(device_x);
    return y;
}
|
24,027 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define WIDTH 10
#define HEIGHT 10
#define channels 3
#define Mask_width 5
#define Mask_radius Mask_width/2
#define O_TILE_WIDTH 12
#define BLOCK_WIDTH (O_TILE_WIDTH+Mask_width-1)
#define min(x,y) ((x)<(y)?(x):(y))
#define max(x,y) ((x)>(y)?(x):(y))
#define clamp(x) (min(max((x),0.0),1.0))
/*
 * CPU reference 2-D convolution of an interleaved ch-channel image with a
 * Mask_width x Mask_width kernel M. Taps falling outside the image
 * contribute zero (zero padding).
 *
 * Fix: the pixel indexing previously used the global `channels` macro
 * instead of the `ch` parameter, so any call with ch != channels addressed
 * the wrong bytes; it now honours the parameter (callers passing
 * ch == channels are unaffected). The no-op `accum += 0` branch is gone.
 */
void imageConvolution(float *input,float *output,const float* __restrict__ M,int width, int height, int ch)
{
    int i, j, k, x, y, xOffset, yOffset;
    float accum, maskValue, imagePixel;
    for (i = 0; i < height; i++) {
        for (j = 0; j < width; j++) {
            for (k = 0; k < ch; k++) {
                accum = 0;
                for (y = 0; y < Mask_width; y++) {
                    for (x = 0; x < Mask_width; x++) {
                        xOffset = j + x - Mask_radius;
                        yOffset = i + y - Mask_radius;
                        // Skip taps outside the image (zero padding).
                        if (xOffset >= 0 && xOffset < width && yOffset >= 0 && yOffset < height) {
                            imagePixel = input[(yOffset * width + xOffset) * ch + k];
                            maskValue = M[y * Mask_width + x];
                            accum += imagePixel * maskValue;
                        }
                    }
                }
                output[(i * width + j) * ch + k] = accum;// (float) clamp(accum);
            }
        }
    }
}
/*
 * Tiled GPU convolution. Expected launch: blockDim = (BLOCK_WIDTH,
 * BLOCK_WIDTH) where BLOCK_WIDTH = O_TILE_WIDTH + Mask_width - 1; each
 * block produces one O_TILE_WIDTH x O_TILE_WIDTH output tile, the
 * surrounding "halo" threads only help load shared memory.
 * NOTE(review): the shared tile's channel dimension is the compile-time
 * `channels` macro while the loop runs to the `ch` parameter — assumes
 * ch <= channels; confirm with callers.
 */
__global__ void imageTiledConvolution_kernel(float *input,float *output,const float * __restrict__ M,int width, int height, int ch)
{
int i=0,j=0;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Output coordinates of this thread; the input tile origin is shifted
// back by the mask radius so the halo is covered.
int row_o = blockIdx.y*O_TILE_WIDTH+ty;
int col_o = blockIdx.x*O_TILE_WIDTH+tx;
int row_i = row_o - Mask_radius;
int col_i = col_o - Mask_radius;
float cValue = 0.0f;
__shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH][channels];
for(int chIdx=0;chIdx<ch;chIdx++){
// Stage this channel's tile, zero-padding outside the image.
if(row_i>=0 && row_i<height && col_i>=0 && col_i<width){
Ns[ty][tx][chIdx] = input[(row_i*width+col_i)*ch+chIdx];
}else{
Ns[ty][tx][chIdx] = 0.0f;
}
__syncthreads(); // tile fully loaded before anyone reads it
cValue = 0.0f;
// Only the inner O_TILE_WIDTH x O_TILE_WIDTH threads produce output.
if(ty<O_TILE_WIDTH && tx<O_TILE_WIDTH){
for( i=0;i<Mask_width;i++){
for( j=0;j<Mask_width;j++){
cValue +=M[i*Mask_width+j]*Ns[ty+i][tx+j][chIdx];
}
}
}
__syncthreads(); // everyone done reading before the next channel reloads
if(row_o<height && col_o<width && ty<O_TILE_WIDTH && tx<O_TILE_WIDTH)
output[(row_o*width+col_o)*ch+chIdx] = cValue;//min(max(cValue,0),1);
}
}
/* Fill the test fixtures: image of all ones, cleared output buffer, and a
   box filter (all ones) of Mask_width x Mask_width taps. */
void loadData(float *input, float *output, float *maskData)
{
    const int numValues = WIDTH * HEIGHT * channels;
    for (int i = 0; i < numValues; i++) {
        input[i] = 1.0;
        output[i] = 0.0;
    }
    for (int i = 0; i < Mask_width * Mask_width; i++)
        maskData[i] = 1.0;
}
/* Print the interleaved image channel by channel (HEIGHT x WIDTH grid of
   "%2.1f" values per channel), pausing after each channel with the
   Windows "pause" command. */
void dispRes(float *arr)
{
    printf("Results of the calculation\n");
    for (int ch = 0; ch < channels; ch++) {
        for (int row = 0; row < HEIGHT; row++) {
            for (int col = 0; col < WIDTH; col++)
                printf("%2.1f ", arr[(row * WIDTH + col) * channels + ch]);
            printf("\n");
        }
        printf("k = %d\n", ch);
        system("pause");
    }
}
/*
 * Driver: builds a constant test image and an all-ones box mask, runs the
 * tiled GPU convolution and the CPU reference on the same data, then
 * prints the GPU result.
 */
int main(void)
{
int maskRows = Mask_width;
int maskColumns = Mask_width;
int imageChannels = channels;
int imageWidth = WIDTH;
int imageHeight = HEIGHT;
float * hostInputImageData;
float * hostOutputImageData;
float * hostOutputImageDataCPU;
float * hostMaskData;
float * deviceInputImageData;
float * deviceOutputImageData;
float * deviceMaskData;
//allocate Memory on the host
// NOTE(review): none of the malloc/cudaMalloc results below are checked.
hostInputImageData = (float*)malloc(imageWidth*imageHeight*channels*sizeof(float));
hostOutputImageData = (float*)malloc(imageWidth*imageHeight*channels*sizeof(float));
hostOutputImageDataCPU = (float*)malloc(imageWidth*imageHeight*channels*sizeof(float));
hostMaskData = (float*)malloc(Mask_width*Mask_width*sizeof(float));
//load data to host memory
loadData(hostInputImageData,hostOutputImageData,hostMaskData);
//cuda memory allocation on the device
cudaMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float));
//cuda memory copy from host to device
cudaMemcpy(deviceInputImageData,hostInputImageData,imageWidth * imageHeight * imageChannels * sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(deviceMaskData,hostMaskData,maskRows * maskColumns * sizeof(float),cudaMemcpyHostToDevice);
// Grid of output tiles; each block is BLOCK_WIDTH^2 threads (tile + halo).
dim3 DimGrid((imageWidth-1)/O_TILE_WIDTH+1,(imageHeight-1)/O_TILE_WIDTH+1,1);
dim3 DimBlock(BLOCK_WIDTH, BLOCK_WIDTH,1);
imageTiledConvolution_kernel<<<DimGrid,DimBlock>>>(deviceInputImageData,deviceOutputImageData,deviceMaskData,imageWidth,imageHeight,imageChannels);
// CPU reference runs while the (asynchronous) kernel launch executes.
imageConvolution(hostInputImageData,hostOutputImageDataCPU,hostMaskData,imageWidth,imageHeight,imageChannels);
//cuda memory copy from device to host (synchronizes with the kernel)
cudaMemcpy(hostOutputImageData,deviceOutputImageData,imageWidth * imageHeight * imageChannels * sizeof(float),cudaMemcpyDeviceToHost);
//dispRes(hostOutputImageDataCPU);
dispRes(hostOutputImageData);
free(hostInputImageData);
free(hostOutputImageData);
free(hostOutputImageDataCPU);
free(hostMaskData);
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
cudaFree(deviceMaskData);
return 0;
}
24,028 |
/*
Kernel to do temperature update in explicit finite difference
solution to 3-D heat equation. Works for a block size of 16 x 16.
Make copies for other block sizes. Can be easily extended to
arbitrary sized stencils.
*/
# include <stdio.h>
# include <cuda.h>
/*
 * One explicit time step of the 3-D heat equation on an N_x*N_y*N_z grid.
 * A 16x16 thread slab (blockDim must be (16,16)) marches along z: each
 * column keeps its z-neighbours in the registers behind/current/infront,
 * while the current xy-slice plus a one-point halo is staged in shared
 * memory. Boundary faces are left untouched (compute_if).
 */
__global__ void temperature_update16x16(float* temp1_d, float* temp2_d, float alpha,
float dt,
const int N_x, const int N_y, const int N_z,
const float dx, const float dy, const float dz){
#define BDIMX 16
#define BDIMY 16
__shared__ float slice[BDIMX+2][BDIMY+2];
// Global xy position and position inside the padded shared slice.
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
int tx = threadIdx.x + 1;
int ty = threadIdx.y + 1;
int stride = N_x*N_y; // distance between consecutive z-slices
int i2d = iy*N_x + ix; // read index (slice being fetched ahead)
int o2d = 0;           // write index (slice being updated)
// Interior test in x/y; boundary threads still participate in barriers.
bool compute_if = ix > 0 && ix < (N_x-1) && iy > 0 && iy < (N_y-1);
float behind;
// Prime the register pipeline with slices z=0 and z=1.
float current = temp1_d[i2d]; o2d = i2d; i2d += stride;
float infront = temp1_d[i2d]; i2d += stride;
for(int i=1; i<N_z-1; i++){
// These go in registers: shift the z pipeline by one slice.
behind = current;
current= infront;
infront= temp1_d[i2d];
i2d += stride;
o2d += stride;
__syncthreads();
// Shared memory: edge threads fetch the four halo strips of this slice.
if (compute_if){
if(threadIdx.x == 0){ // Halo left
slice[ty][tx-1] = temp1_d[o2d - 1];
}
if(threadIdx.x == BDIMX-1){ // Halo right
slice[ty][tx+1] = temp1_d[o2d + 1];
}
if(threadIdx.y == 0){ // Halo bottom
slice[ty-1][tx] = temp1_d[o2d - N_x];
}
if(threadIdx.y == BDIMY-1){ // Halo top
slice[ty+1][tx] = temp1_d[o2d + N_x];
}
}
__syncthreads();
slice[ty][tx] = current;
__syncthreads(); // whole slice (centre + halo) visible before reading
if (compute_if){
// 7-point stencil: x/y neighbours from shared, z neighbours from registers.
temp2_d[o2d] = current + (alpha*dt)*(
(slice[ty][tx-1] - 2*slice[ty][tx]
+slice[ty][tx+1])/(dx*dx) +
(slice[ty-1][tx] - 2*slice[ty][tx]
+slice[ty+1][tx])/(dy*dy) +
(behind - 2*current + infront)/(dz*dz));
}
__syncthreads(); // protect the slice from the next iteration's halo loads
}
}
|
24,029 | /*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.05
typedef float typeId;
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
/*
 * CPU reference row (horizontal) convolution: each output pixel is the
 * (2*filterR+1)-tap weighted sum of its row neighbours using the reversed
 * filter; taps outside the image contribute zero.
 * Fix: the output store sat inside the tap loop, rewriting each pixel once
 * per tap — hoisted out of the loop (the final value is unchanged).
 */
void convolutionRowCPU(typeId *h_Dst, typeId *h_Src, typeId *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
typeId sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
}
h_Dst[y * imageW + x] = sum;
}
}
}
/***************************************************
********** ROW CONVOLUTION GPU ********************
***************************************************
*/
// GPU row convolution over a padded image. The index math offsets row/col
// by filterR and uses a padded row pitch of imageW + 2*filterR, so d_Src
// and d_Dst are assumed to be images padded by filterR on every side with
// the padding zeroed — NOTE(review): confirm the caller allocates and
// zeroes the padding; unlike the CPU reference there is no boundary test.
__global__ void convolutionRowGPU(typeId *d_Dst, typeId *d_Src, typeId *d_Filter, int imageW, int imageH, int filterR){
int k;
typeId sum = 0;
// Coordinates inside the padded image (one thread per output pixel).
int row = blockDim.y * blockIdx.y + threadIdx.y + filterR;
int col = blockDim.x * blockIdx.x + threadIdx.x + filterR;
int padW = imageW + filterR*2;
for (k = -filterR; k <= filterR; k++) {
int d = col + k;
sum += d_Src[row * padW + d] * d_Filter[filterR - k];
}
d_Dst[row * padW + col] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
/*
 * CPU reference column (vertical) convolution: each output pixel is the
 * (2*filterR+1)-tap weighted sum of its column neighbours using the
 * reversed filter; taps outside the image contribute zero.
 * Fix: the output store sat inside the tap loop, rewriting each pixel once
 * per tap — hoisted out of the loop (the final value is unchanged).
 */
void convolutionColumnCPU(typeId *h_Dst, typeId *h_Src, typeId *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
typeId sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
}
h_Dst[y * imageW + x] = sum;
}
}
}
/***********************************************************
******** COLUMN CONVOLUTION GPU ***************************
***********************************************************
*/
// GPU column convolution over a padded image: same layout assumptions as
// convolutionRowGPU (image padded by filterR on every side, padded pitch
// imageW + 2*filterR, zeroed padding) — NOTE(review): confirm with the
// caller; there is no boundary test here.
__global__ void convolutionColumnGPU(typeId *d_Dst, typeId *d_Src, typeId *d_Filter, int imageW, int imageH, int filterR) {
int k;
typeId sum = 0;
// Coordinates inside the padded image (one thread per output pixel).
int row = blockDim.y * blockIdx.y + threadIdx.y + filterR;
int col = blockDim.x * blockIdx.x + threadIdx.x + filterR;
int padW = imageW + filterR*2;
for (k = -filterR; k <= filterR; k++) {
int d = row + k;
sum += d_Src[col + padW * d] * d_Filter[filterR - k];
}
d_Dst[row * padW + col] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
// Driver: run the separable convolution on CPU and GPU, time both, and
// compare results.  filter_radius, FILTER_LENGTH, ABS and accuracy are
// declared earlier in the file (not visible here) -- presumably
// FILTER_LENGTH == 2*filter_radius+1; confirm against the header section.
int main(int argc, char **argv) {
// h_* are host buffers, d_* device buffers; `result` receives the GPU
// output, which lives in *padded* coordinates.
typeId
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU,
*h_PaddedInput,
*d_Filter,
*d_Input,
*d_Buffer,
*d_OutputGPU,
*result,
diff;
float elapsedTime;
cudaSetDevice(0);
struct timespec tv1, tv2;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int imageW;
int imageH;
unsigned int i, j;
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
scanf("%d", &imageW);
imageH = imageW;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
h_Filter = (typeId *)malloc(FILTER_LENGTH * sizeof(typeId));
h_Input = (typeId *)malloc(imageW * imageH * sizeof(typeId));
h_Buffer = (typeId *)malloc(imageW * imageH * sizeof(typeId));
h_OutputCPU = (typeId *)malloc(imageW * imageH * sizeof(typeId));
// Padded buffers carry a filter_radius halo on every side.
result = (typeId *)malloc((imageW+2*filter_radius) * (imageH+2*filter_radius)* sizeof(typeId));
h_PaddedInput = (typeId *)malloc((imageW+filter_radius*2 )*(2*filter_radius+ imageH) * sizeof(typeId));
// Memory allocation check if any of them not allocated then error
if(!(h_Filter && h_Input && h_Buffer && h_OutputCPU && h_PaddedInput && result)) {
printf("Error allocating memory\n");
exit(EXIT_FAILURE);
}
// Memory allocation on Device
cudaMalloc(&d_Filter,FILTER_LENGTH*sizeof(typeId));
cudaMalloc(&d_Input,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));
cudaMalloc(&d_Buffer,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));
cudaMalloc(&d_OutputGPU,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));
// Check memory allocation on Device, if any of them failed, exit
// NOTE(review): pointer checks only work if the pointers start out NULL;
// checking the cudaMalloc return codes would be more reliable.
if (!(d_Filter && d_Input && d_Buffer && d_OutputGPU)) {
printf("Cuda memory allocation failed\n");
exit(EXIT_FAILURE);
}
// Initializing device values
cudaMemset(d_OutputGPU,0,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));
cudaMemset(d_Buffer,0,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));
cudaMemset(d_Input,0,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));
srand(200); // fixed seed => reproducible inputs
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (typeId)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (typeId)rand() / ((typeId)RAND_MAX / 255) + (typeId)rand() / (typeId)RAND_MAX;
}
// CPU computation
printf("CPU computation...\n");
clock_gettime(CLOCK_MONOTONIC_RAW, &tv1);
convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius);
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius);
clock_gettime(CLOCK_MONOTONIC_RAW, &tv2);
printf ("CPU time: %10g seconds\n\n",
(double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +
(double) (tv2.tv_sec - tv1.tv_sec));
// 32x32 blocks; the grid exactly tiles the image when imageW is a
// power of two >= 32 (as the prompt requires).
dim3 dimGrid((imageH-1)/32 + 1,(imageH-1)/32 + 1);
dim3 dimBlock(32,32);
// init padded Input
for(i=0;i<(imageW+2*filter_radius)*(imageW+2*filter_radius);i++) h_PaddedInput[i]=0;
// filling the cells (copy image into the center of the padded buffer)
for(i=0;i<imageH;i++){
for(j=0;j<imageW;j++){
h_PaddedInput[(i+filter_radius)*(2*filter_radius+imageW)+filter_radius+j]=h_Input[i*imageW+j];
}
}
printf("GPU computation... \n");
cudaMemcpy(d_Filter,h_Filter,FILTER_LENGTH*sizeof(typeId),cudaMemcpyHostToDevice);
cudaMemcpy(d_Input,h_PaddedInput,(imageH+2*filter_radius)*(imageW+2*filter_radius)*sizeof(typeId),cudaMemcpyHostToDevice);
cudaEventRecord(start,0);
// kernel invocation: rows first, then columns on the row result
// NOTE(review): cudaThreadSynchronize() is deprecated; the modern
// equivalent is cudaDeviceSynchronize().
convolutionRowGPU <<< dimGrid,dimBlock >>>(d_Buffer,d_Input, d_Filter, imageW, imageH, filter_radius);
cudaThreadSynchronize();
//Check for errors
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess){
printf("Device Error:%s\n",cudaGetErrorString(err));
cudaDeviceReset();
return 0;
}
convolutionColumnGPU <<< dimGrid,dimBlock >>>(d_OutputGPU,d_Buffer, d_Filter, imageW, imageH, filter_radius);
cudaThreadSynchronize();
//Check for errors
err = cudaGetLastError();
if(err != cudaSuccess) {
printf("Device Error:%s\n",cudaGetErrorString(err));
cudaDeviceReset();
return 0;
}
cudaEventRecord(stop,0);
//Copy results to host
cudaMemcpy(result, d_OutputGPU, (imageH+2*filter_radius)*(imageW+2*filter_radius)*sizeof(typeId), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("GPU time: %f ms.\n\n",elapsedTime);
// Checking accuracy error CPU vs CPU
// Compares the unpadded CPU output to the interior of the padded GPU
// result; frees everything and aborts on the first mismatch.
for(i=0; i<imageW; i++) {
for(j=0; j<imageH; j++){
diff= h_OutputCPU[i*imageW+j]-result[(i+filter_radius)*(imageW+2*filter_radius)+filter_radius+j];
if(ABS(diff) > accuracy) {
printf("Accuracy error <<%f>>\n ",ABS(diff));
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
free(h_PaddedInput);
cudaFree(d_OutputGPU);
cudaFree(d_Buffer);
cudaFree(d_Input);
cudaFree(d_Filter);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
}
}
// free all the allocated memory
// NOTE(review): `result` is never freed on either path (leak at exit).
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
free(h_PaddedInput);
cudaFree(d_OutputGPU);
cudaFree(d_Buffer);
cudaFree(d_Input);
cudaFree(d_Filter);
// Do a device reset just in case... Bgalte to sxolio otan ylopoihsete CUDA
cudaDeviceReset();
return 0;
}
|
24,030 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <errno.h>
#include <error.h>
#include <unistd.h>
const long BOX_SIZE = 23000; /* size of the data box on one dimension */
#define BLOCK_SIZE 512
#define EXITERROR() error_at_line(errno, errno, __FILE__, __LINE__, "pid %llu", (long long unsigned)getpid())
const char *argv0;
typedef struct hist_entry {
//float min;
//float max;
unsigned long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
struct debuginfo {
int idx;
int ran;
int i;
int j;
float dist;
int which_bucket;
};
bucket * histogram; /* list of all buckets in the histogram */
unsigned long long num_points; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double resolution; /* value of w */
double4 *h_points;
// Euclidean distance between two 3-D points packed in double4 (w ignored).
inline __device__ double dist3(double4 a, double4 b) {
    const double dx = a.x - b.x;
    const double dy = a.y - b.y;
    const double dz = a.z - b.z;
    return norm3d(dx, dy, dz);
}
/*
 * Pairwise-distance histogram kernel.  Each thread owns one anchor point and
 * accumulates its pairs: inter-block pairs against each "R" block staged in
 * shared memory (block-level load balancing over M/2 partners), then
 * intra-block pairs within its own "L" block.
 *
 * Fixes vs original:
 *  - `bin <= n_bins` allowed an out-of-bounds write at g_bins[n_bins];
 *    now `bin < n_bins`.
 *  - Threads past n_points returned before the __syncthreads()/shared loads;
 *    in the partial last block that left stale shared-memory entries that
 *    other blocks' valid indices would read.  All threads now stay alive for
 *    the barriers and loads, and only threads with a valid anchor accumulate.
 */
__global__
void PDH_kernel(bucket *g_bins, size_t n_bins, double4 *g_points, size_t n_points, double res) {
    __shared__ double4 s_block[BLOCK_SIZE]; // R for inter-block, L for intra-block
    unsigned int t = threadIdx.x;
    unsigned int b = blockIdx.x;
    unsigned int B = blockDim.x;
    unsigned int M = gridDim.x;
    bool has_anchor = (b*B + t < n_points);
    double4 l_point;
    if (has_anchor)
        l_point = g_points[b*B + t];
    // Inter-block pairs, with block-level load balancing
    for (unsigned int i=1; i<=M/2; i++) {
        // Load balancing edge case: for even M, only half the blocks take
        // the "opposite" partner so each pair is counted once.
        if (M%2 == 0 && i >= M/2 && b >= M/2)
            continue;
        // Stage R block in shared memory.
        unsigned int r = (b + i) % M;
        __syncthreads();
        if (r*B + t < n_points)
            s_block[t] = g_points[r*B + t];
        __syncthreads();
        // inter-block pairs
        if (has_anchor) {
            for (unsigned int j=0; j<B; j++) {
                if (r*B + j < n_points) {
                    double d = dist3(l_point, s_block[j]);
                    unsigned long long bin = (unsigned long long)(d / res);
                    if (bin < n_bins)
                        atomicAdd(&g_bins[bin].d_cnt, 1);
                }
            }
        }
    }
    // Stage L (own) block.
    __syncthreads();
    if (has_anchor)
        s_block[t] = g_points[b*B + t];
    __syncthreads();
    // intra-block pairs: each thread pairs with higher-indexed teammates only.
    if (has_anchor) {
        for (unsigned int i=t+1; i<B && b*B+i<n_points; i++) {
            double d = dist3(s_block[t], s_block[i]);
            unsigned long long bin = (unsigned long long)(d / res);
            if (bin < n_bins)
                atomicAdd(&g_bins[bin].d_cnt, 1);
        }
    }
}
/*
 * Host wrapper: allocate device histogram and points, launch PDH_kernel,
 * and copy the histogram back into the global `histogram` array.
 * Uses globals num_points, num_buckets, resolution, h_points, histogram.
 */
void PDH_gpu() {
    // Allocate and zero the device histogram.
    bucket *d_buckets;
    cudaMalloc(&d_buckets, sizeof(*histogram) * num_buckets);
    cudaMemset(d_buckets, 0, sizeof(*histogram) * num_buckets);
    // Copy points to device
    double4 *d_points;
    cudaMalloc(&d_points, sizeof(*h_points) * num_points);
    cudaMemcpy(d_points, h_points, sizeof(*h_points) * num_points, cudaMemcpyHostToDevice);
    // The kernel declares its shared memory statically (BLOCK_SIZE double4s),
    // so no dynamic shared-memory size is needed.  (The original passed a
    // spurious BLOCK_SIZE bytes of unused dynamic shared memory; an unused
    // `num_threads` local has also been removed.)
    PDH_kernel<<<(num_points + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE>>>(
        d_buckets, num_buckets, d_points, num_points, resolution);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("CUDA ERROR: %s\n", cudaGetErrorString(err));
        puts("This is probably due to a too-large block count");
    }
    // Copy histogram from device (blocking cudaMemcpy synchronizes) and clean up.
    cudaFree(d_points);
    cudaMemcpy(histogram, d_buckets, sizeof(*histogram) * num_buckets, cudaMemcpyDeviceToHost);
    cudaFree(d_buckets);
}
/*
print the counts in all buckets of the histogram
*/
/*
 * Print the histogram five buckets per row, then the accumulated total and
 * the expected pair count n*(n-1)/2 as a sanity check.
 * Fix: d_cnt and the totals are unsigned long long, so the conversion
 * specifier must be %llu; the original's %lld mismatches the type
 * (undefined behavior per the C printf specification).
 */
void output_histogram() {
    int i;
    unsigned long long total_cnt = 0;
    for(i=0; i<num_buckets; i++) {
        if (i%5 == 0) /* we print 5 buckets in a row */
            printf("\n%02d: ", i);
        printf("%15llu ", histogram[i].d_cnt);
        total_cnt += histogram[i].d_cnt;
        /* we also want to make sure the total distance count is correct */
        if (i == num_buckets - 1) {
            printf("\n Total: %llu", total_cnt);
            printf("\n Expected total: %llu \n", num_points*(num_points - 1)/2);
        } else {
            printf("| ");
        }
    }
}
/* Print command-line usage to stream f and terminate the process with
 * status ret (0 when invoked for --help, nonzero on bad invocation). */
void usage(FILE *f, int ret) {
fprintf(f, "USAGE: %s <SAMPLES> <RESOLUTION>\n", argv0);
exit(ret);
}
// Entry point: parse <SAMPLES> <RESOLUTION>, generate uniform random points
// in the BOX_SIZE cube, run and time the GPU histogram, then print it.
int main(int argc, char **argv) {
argv0 = argv[0];
if (argc < 3)
usage(stderr, 1);
if (!strcmp(argv[1], "--help") || !strcmp(argv[1], "-h"))
usage(stdout, 0);
// Parse sample count and bucket width w with errno-based error checks.
errno = 0;
num_points = strtoull(argv[1], NULL, 10);
if (errno != 0)
EXITERROR();
errno = 0;
// NOTE(review): `resolution` is a double but strtof parses at float
// precision only -- strtod would avoid the precision loss; confirm intent.
resolution = strtof(argv[2], NULL);
if (errno != 0)
EXITERROR();
// Maximum 3-D distance inside the box is BOX_SIZE*sqrt(3) (~1.732).
num_buckets = (int)(BOX_SIZE * 1.732 / resolution) + 1;
// NOTE(review): malloc results are unchecked; very large inputs will
// crash on the writes below.  Neither buffer is freed before exit.
histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
h_points = (double4 *)malloc(sizeof(double4)*num_points);
srand(1); // fixed seed for reproducible data
/* generate data following a uniform distribution */
for(size_t i = 0; i < num_points; i++) {
h_points[i].x = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
h_points[i].y = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
h_points[i].z = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
// GPU implementation, timed with CUDA events around the whole
// (alloc + copy + kernel + copy-back) pipeline.
puts("\nRunning Reg-SHM version...");
memset(histogram, 0, sizeof(*histogram) * num_buckets);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
PDH_gpu();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsed;
cudaEventElapsedTime(&elapsed, start, stop);
printf("Running time for Reg-SHM version: %.6f sec\n", elapsed/1000.0);
cudaEventDestroy(start);
cudaEventDestroy(stop);
output_histogram();
return 0;
}
|
24,031 | // ****
// Implement the phasor method using complex data rather than two times reconstruction
// ****
#define NPOINT 1
#define STRIDE 1
/*__global__ void ThreeD_NLOS_Phasor_General( float* p_xyz, float* p_xyt_real, float* p_xyt_imag,
float* sensor_pos, float* origin, float* laser_pos, float dx, float dz, int NX, int NY, int NZ,
float c_dt, int N_pos, int Nt, float ReceiveDelay)
{
int nz = blockIdx.z * blockDim.z + threadIdx.z; // the nz-column, which is the time direction
int ny = blockIdx.y * blockDim.y + threadIdx.y; // the ny-col
int nx = blockIdx.x * blockDim.x + threadIdx.x; // the nx-row
float CF_real = 0.0, CF_imag = 0.0, CF = 0.0;
float tao = 0.0;
float voxel_int_real = 0.0, voxel_int_imag =0.0;
float voxel_real = 0.0, voxel_imag =0.0;
int Index = 0, counter = 1;
float GPS_x = origin[0] - (nx-NX/2)*dx; // x-coordiante for the voxel
float GPS_y = origin[1] + (ny-NY/2)*dx; // y-coordiante for the voxel
float GPS_z = origin[2] + (nz)*dz; // z-coordiante for the voxel
// time delay from the laser spot to the voxel
float tao_trans = sqrtf( (laser_pos[0]-GPS_x)*(laser_pos[0]-GPS_x) + (laser_pos[1]-GPS_y)*(laser_pos[1]-GPS_y) + (laser_pos[2]-GPS_z)*(laser_pos[2]-GPS_z) );
float x_pos,y_pos,z_pos, cos_theta;
float voxel_int_real_tp[NPOINT], voxel_int_imag_tp[NPOINT], voxel_sq_real_tp[NPOINT], voxel_sq_imag_tp[NPOINT];
for(int K = 0; K<NPOINT; K++)
{
voxel_int_real_tp[K] = 0.0;
voxel_int_imag_tp[K] = 0.0;
voxel_sq_real_tp[K] = 0.0;
voxel_sq_imag_tp[K] = 0.0;
}
for(int K_x = 0;K_x<N_pos; K_x++)
{
x_pos = sensor_pos[K_x];
y_pos = sensor_pos[K_x+N_pos];
z_pos = sensor_pos[K_x+2*N_pos];
tao = sqrtf( (x_pos-GPS_x) * (x_pos-GPS_x) + (y_pos-GPS_y)*(y_pos-GPS_y) + (z_pos-GPS_z)*(z_pos-GPS_z) );
cos_theta = GPS_z/(tao + 1e-6);
Index = int( floorf((tao + tao_trans)/c_dt-ReceiveDelay) );
if((Index<Nt-NPOINT*STRIDE) && (Index>0))
{
for(int P = 0; P<NPOINT; P++)
{
voxel_real = p_xyt_real[Index+K_x*(Nt)+P*STRIDE] * cos_theta;
voxel_imag = p_xyt_imag[Index+K_x*(Nt)+P*STRIDE] * cos_theta;
voxel_int_real_tp[P] = voxel_int_real_tp[P] + voxel_real; //* sqrt(Index*1.0);
voxel_int_imag_tp[P] = voxel_int_imag_tp[P] + voxel_imag; //* sqrt(Index*1.0);
voxel_sq_real_tp[P] = voxel_sq_real_tp[P] + voxel_real * voxel_real;
voxel_sq_imag_tp[P] = voxel_sq_imag_tp[P] + voxel_imag * voxel_imag;
}
counter = counter+1;
}
}
voxel_int_real = voxel_int_real_tp[0];
voxel_int_imag = voxel_int_imag_tp[0];
for (int J=0;J<NPOINT;J++)
{
if((voxel_sq_real_tp[J]>1e-6) && (voxel_sq_imag_tp[J]>1e-6))
{
CF_real = CF_real + powf(voxel_int_real_tp[J],2)/voxel_sq_real_tp[J]/counter;
CF_imag = CF_imag + powf(voxel_int_imag_tp[J],2)/voxel_sq_imag_tp[J]/counter;
}
}
CF = sqrtf( CF_real*CF_real + CF_imag*CF_imag );
p_xyz[nx+NX*ny+nz*(NX*NY)] = sqrtf( voxel_int_real * voxel_int_real + voxel_int_imag * voxel_int_imag ) * (CF);
}*/
// ****
// Implement the phasor method using complex data rather than two times reconstruction
// ****
#define NPOINT 1
#define STRIDE 1
/*
 * Phasor-field NLOS back-projection on complex (real/imag) time-domain data.
 * One thread reconstructs one voxel (nx, ny, nz): it sums the complex signal
 * over all sensor positions at each round-trip delay, then scales the
 * magnitude by a coherence factor CF derived from sum^2 / (sum-of-squares).
 *
 * Fixes vs original:
 *  - voxel_sq_real / voxel_sq_imag were accumulated without ever being
 *    initialized (read of an indeterminate value); now zero-initialized.
 *  - Threads mapped outside the NX x NY x NZ volume now return early
 *    instead of writing out of bounds when the grid overshoots the volume.
 */
__global__ void ThreeD_NLOS_Phasor_General( float* p_xyz, float* p_xyt_real, float* p_xyt_imag,
float* sensor_pos, float* origin, float* laser_pos, float dx, float dz, int NX, int NY, int NZ,
float c_dt, int N_pos, int Nt, float ReceiveDelay)
{
    int nz = blockIdx.z * blockDim.z + threadIdx.z; // the nz-column, which is the time direction
    int ny = blockIdx.y * blockDim.y + threadIdx.y; // the ny-col
    int nx = blockIdx.x * blockDim.x + threadIdx.x; // the nx-row
    if (nx >= NX || ny >= NY || nz >= NZ)
        return;
    float CF_real = 0.0, CF_imag = 0.0, CF = 0.0;
    float tao = 0.0;
    float voxel_int_real = 0.0, voxel_int_imag = 0.0;  // complex sums
    float voxel_sq_real = 0.0, voxel_sq_imag = 0.0;    // sums of squares (was uninitialized)
    float voxel_real = 0.0, voxel_imag = 0.0;
    int Index = 0, counter = 1;
    float GPS_x = origin[0] - (nx-NX/2)*dx; // x-coordiante for the voxel
    float GPS_y = origin[1] + (ny-NY/2)*dx; // y-coordiante for the voxel
    float GPS_z = origin[2] + (nz)*dz; // z-coordiante for the voxel
    // time delay from the laser spot to the voxel
    float tao_trans = sqrtf( (laser_pos[0]-GPS_x)*(laser_pos[0]-GPS_x) + (laser_pos[1]-GPS_y)*(laser_pos[1]-GPS_y) + (laser_pos[2]-GPS_z)*(laser_pos[2]-GPS_z) );
    float x_pos,y_pos,z_pos, cos_theta;
    for(int K_x = 0;K_x<N_pos; K_x++)
    {
        // Sensor coordinates in SoA layout: x block, y block, z block.
        x_pos = sensor_pos[K_x];
        y_pos = sensor_pos[K_x+N_pos];
        z_pos = sensor_pos[K_x+2*N_pos];
        tao = sqrtf( (x_pos-GPS_x) * (x_pos-GPS_x) + (y_pos-GPS_y)*(y_pos-GPS_y) + (z_pos-GPS_z)*(z_pos-GPS_z) );
        cos_theta = GPS_z/(tao + 1e-6); // obliquity weight; epsilon avoids div-by-zero
        // Time sample for the laser->voxel->sensor round trip.
        Index = int( floorf((tao + tao_trans)/c_dt-ReceiveDelay) );
        if((Index<Nt) && (Index>0))
        {
            voxel_real = p_xyt_real[Index+K_x*(Nt)] * cos_theta;
            voxel_imag = p_xyt_imag[Index+K_x*(Nt)] * cos_theta;
            voxel_int_real = voxel_int_real + voxel_real;
            voxel_int_imag = voxel_int_imag + voxel_imag;
            voxel_sq_real = voxel_sq_real + voxel_real * voxel_real;
            voxel_sq_imag = voxel_sq_imag + voxel_imag * voxel_imag;
            counter = counter+1;
        }
    }
    // Coherence factor per component: (sum)^2 / (sum of squares * count).
    if((voxel_sq_real>1e-6) && (voxel_sq_imag>1e-6))
    {
        CF_real = CF_real + powf(voxel_int_real,2)/voxel_sq_real/counter;
        CF_imag = CF_imag + powf(voxel_int_imag,2)/voxel_sq_imag/counter;
    }
    CF = sqrtf( CF_real*CF_real + CF_imag*CF_imag );
    p_xyz[nx+NX*ny+nz*(NX*NY)] = sqrtf( voxel_int_real * voxel_int_real + voxel_int_imag * voxel_int_imag ) * (CF);
}
24,032 | #define CU1DBLOCK 256
// Mirror the strictly-lower triangle of the rows x rows matrix A into its
// upper triangle (A[j][i] = A[i][j] for i > j), making A symmetric.
__global__
void _copy_low_upp(float* A, int rows, int stride) {
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (row < rows && col < row) {
    A[col * stride + row] = A[row * stride + col];
  }
}
// rows = 5, stride = 0, block = (2, 1, 1), thread = (3, 2, 2)
// (0, 0, 0) (1, 0, 1) with (0, 0, 0) (1, 0, 0)
// i = 1, j = 0, index_1 = 0, index_2 = 1; i = 1, j = 0, index_1 = 0, index_2 = 1
// (2, 1, 0) read, (1, 0, 1) write. (1 0 1) write to 1, (2 1 0) read from 1
// i = 2, j = 1, index_1 = 7, index_2 = 5
// i = 5, j = 0, index_1 = 15, index_2 = 5
// Mirror the strictly-upper triangle of the rows x rows matrix A into its
// lower triangle (A[j][i] = A[i][j] for j > i), making A symmetric.
__global__
void _copy_upp_low(float* A, int rows, int stride) {
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  int col = blockIdx.y * blockDim.y + threadIdx.y;
  if (col < rows && row < col) {
    A[col * stride + row] = A[row * stride + col];
  }
}
// Elementwise: mat[j][i] = alpha * vec[j] * mat2[j][i] + beta * mat[j][i],
// i.e. mat = alpha * diag(vec) * mat2 + beta * mat, for a rows x cols region.
__global__
void _add_diag_vec_mat(float alpha, float *mat, int stride, int rows, int cols,
                       const float *vec, const float *mat2,
                       int mat2_row_stride, int mat2_col_stride,
                       float beta) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (row < rows && col < cols) {
    int dst = row * stride + col;
    int src = row * mat2_row_stride + col * mat2_col_stride;
    mat[dst] = alpha * vec[row] * mat2[src] + beta * mat[dst];
  }
}
// Expand a packed lower-triangular matrix B (row-major packing: row j begins
// at offset j*(j+1)/2) into the full dmat_rows x dmat_cols matrix A,
// zero-filling the strictly-upper part.
__global__
void _copy_from_tp(float* A, const float* B, int dmat_cols, int dmat_rows, int dmat_stride) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= dmat_cols || row >= dmat_rows)
    return;
  float value = 0.0f;
  if (col <= row)
    value = B[(row * (row + 1) / 2) + col];
  A[row * dmat_stride + col] = value;
}
// Copy a d_out_rows x d_out_cols region from mat_in (row stride d_in_stride)
// into mat_out (row stride d_out_stride).
__global__
void _copy_from_mat(float* mat_out, const float* mat_in,
                    int d_out_stride, int d_out_rows, int d_out_cols, int d_in_stride) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= d_out_cols || row >= d_out_rows)
    return;
  mat_out[row * d_out_stride + col] = mat_in[row * d_in_stride + col];
}
/*
 * Computes sum_{i,j} A[i][j] * B[i][j] (= trace(A * B^T)) as one partial sum
 * per thread block: each thread strides down a column accumulating products,
 * the block reduces in shared memory, and ssum[0] is written to
 * value[blockIdx.y * gridDim.x + blockIdx.x].  The host must sum `value`.
 * Requires blockDim.x * blockDim.y == CU1DBLOCK.
 *
 * Fix: the final warp-level reduction relied on implicit warp-synchronous
 * execution ("Implicitly synchronized within a warp"), which is no longer
 * guaranteed with independent thread scheduling (Volta+).  The warp phase
 * now uses the register-accumulate / __syncwarp() / publish pattern.
 */
__global__
void _trace_mat_mat_trans(const float* A, const float* B, int dA_rows, int dA_cols, int dA_stride,
                          int B_stride, float* value) {
  __shared__ float ssum[CU1DBLOCK];
  // linear thread id;
  const int tid = threadIdx.y * blockDim.x + threadIdx.x;
  const int j = blockIdx.x * blockDim.x + threadIdx.x;
  const int grid_height = gridDim.y * blockDim.y;
  int i = blockIdx.y * blockDim.y + threadIdx.y;
  // Grid reduce: each thread accumulates its strided column slice.
  float tsum = 0.0;
  if (j < dA_cols) {
    while (i < dA_rows) {
      tsum += A[i * dA_stride + j] * B[i * B_stride + j];
      i += grid_height;
    }
  }
  ssum[tid] = tsum;
  __syncthreads();
  // Block reduce (shared-memory tree) down to 2*warpSize partials.
  for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
    if (tid < shift)
      ssum[tid] += ssum[tid + shift];
    __syncthreads();
  }
  // Warp reduce: read into a register, synchronize the warp, publish, and
  // synchronize again so no lane observes a half-updated shared slot.
  if (tid < warpSize) {
    float wsum = ssum[tid];
    for (int shift = warpSize; shift > 0; shift >>= 1) {
      wsum += ssum[tid + shift];
      __syncwarp();
      ssum[tid] = wsum;
      __syncwarp();
    }
  }
  // output 1 sum per thread block
  if (tid == 0) {
    value[blockIdx.y * gridDim.x + blockIdx.x] = ssum[0];
  }
}
// Frame splicing: output row j is a concatenation of input rows, where
// output column i takes input column (i % d_in_cols) of input row
// j + off[i / d_in_cols].  Source rows are clamped to [0, d_in_rows-1] so
// edge frames repeat.  Assumes d_out_cols is a multiple of d_in_cols (one
// chunk per entry of off) -- the code does not verify this.
__global__
void _splice(float* y, const float* x, const int* off,
int d_out_cols, int d_out_rows, int d_out_stride,
int d_in_cols, int d_in_rows, int d_in_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j * d_out_stride;
if (i < d_out_cols && j < d_out_rows) {
int src_col = i % d_in_cols;
int src_row = j + off[i / d_in_cols];
// Clamp out-of-range rows to the first/last input frame.
if (src_row < 0)
src_row = 0;
if (src_row >= d_in_rows)
src_row = d_in_rows - 1;
y[index] = x[src_col + src_row * d_in_stride];
}
}
24,033 | //Strided convolution output stationary
//In this program , INQ weight sharing property is used weights are quatized
//each thread computes one output element. so the matrix elements with common
//weights are added up then just multiplied once.
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#define CUDA_CALL(x) do { cudaError_t err=(x); \
if(err!=cudaSuccess) { \
printf("Error %s at %s: %d",cudaGetErrorString(err),__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define W 64 // Input DIM
#define D 4 // Input and Kernel Depth
#define T 5 // Kernel DIM
#define N 2 // Number of kernels
#define TILE_W 16 //output tile size
#define n1 3 //range for weights in INQ
#define n2 1 //n1 > n2
#define BAND 3
#define STRIDE_LENGTH 1//stride length
#define OWS (W- T + 1) // Output DIM
#define OW (((W - T)/STRIDE_LENGTH) + 1)
/* Fill a W x W x D byte volume with a deterministic test pattern. */
void fillMatrix(unsigned char *matrix){
    unsigned char (*m)[W][D] = (unsigned char (*)[W][D])matrix;
    for (int i = 0; i < W; i++) {
        for (int j = 0; j < W; j++) {
            for (int k = 0; k < D; k++) {
                int pattern = i*j + j*k + i*k + i*2 + j*3 + k*4;
                m[i][j][k] = pattern % 255;
            }
        }
    }
}
/*
 * Fill N quantized (INQ-style) T x T x D kernels.  Each weight stores two
 * ints: [0] = log2 magnitude in [n2, n2+n1), [1] = sign (+1 or -1).
 * Fix: the sign was computed as (int)pow(-1, i+j) -- a double-precision
 * transcendental call for a simple parity test; integer parity yields the
 * identical +1/-1 values without floating-point math.
 */
void fillKernel(int *kernel){
    int (*t)[T][T][D][2] = (int (*)[T][T][D][2])kernel;
    for(int i=0;i<N;i++){
        for(int j=0;j<T;j++){
            for(int k=0;k<T;k++){
                for(int l=0;l<D;l++){
                    t[i][j][k][l][0]=((i+j+T+D)%n1 + n2);
                    t[i][j][k][l][1]=(((i+j)%2 == 0) ? 1 : -1);
                }
            }
        }
    }
}
// Write the N x OW x OW output volume to the file "GPU_TAST": one OW x OW
// matrix per kernel, rows newline-terminated, matrices blank-line separated.
// NOTE(review): fopen's result is not checked; a failed open would crash
// the first fprintf.
void printtofile(float *m){
const char *fname = "GPU_TAST";
FILE *f = fopen(fname, "w");
float (*mat)[OW][OW]=(float (*)[OW][OW])m;
for(unsigned i=0; i < N; i++) {
for(unsigned j=0; j < OW; j++){
for(unsigned k=0;k<OW;k++){
fprintf(f,"%4.4f ", mat[i][j][k]);
}
fprintf(f, "\n" );
}
fprintf(f,"\n");
}
fclose(f);
}
// Strided, output-stationary convolution with INQ weight sharing.
// Each thread computes one output pixel of a (TILE_W-T+1)^2 tile for the
// kernel selected by blockIdx.z.  Inputs with the same quantized weight
// magnitude are first summed into sum[exponent-band], then each band is
// scaled once by its power-of-two weight (shift or divide).
// Kernel layout per weight: [0] = log2 magnitude (in [n2, n2+BAND)),
// [1] = sign (+1/-1).
__global__ void conv(unsigned char* Dm, int* Dk, float* Do)
{
__shared__ int ker[2*T*T*D];
__shared__ unsigned char tile[(TILE_W)*(TILE_W)*D];
int tx=blockDim.x*blockIdx.x+threadIdx.x;
int ty=blockDim.y*blockIdx.y+threadIdx.y;
int bz=blockIdx.z;
int zk=bz*T*T*D; // offset of this block's kernel in Dk (in weight pairs)
int ym,xm;
/* each thread computes one elemement in the output matrix
kernel conntains log2 of abs of weights and sign
*/
// Stage this block's kernel (first T x T threads only).
for(int d=0;d<D;d++)
{
if(threadIdx.x<T&&threadIdx.y<T){
ker[threadIdx.y*2*T*D+threadIdx.x*2*D+2*d]=Dk[2*zk+threadIdx.y*2*T*D+threadIdx.x*2*D+2*d];
ker[threadIdx.y*2*T*D+threadIdx.x*2*D+2*d+1]=Dk[2*zk+threadIdx.y*2*T*D+threadIdx.x*2*D+2*d + 1];
}
}
// NOTE(review): this commented-out barrier appears redundant because the
// __syncthreads() after the tile loads below also orders the ker writes
// before any thread reads them -- confirm no read happens earlier.
//__syncthreads();
// Stage the input tile (with right/bottom halo): each thread loads its own
// pixel plus up to three halo pixels offset by the output-tile size.
for(int d=0;d<D;d++)
{
ym=ty*W*D;
xm=tx*D;
tile[threadIdx.y*(TILE_W)*D+threadIdx.x*D+d]=Dm[ym+xm+d];
if((tx+(TILE_W - T + 1))<W&&(threadIdx.x+(TILE_W - T + 1))<(TILE_W))
{
ym=ty*W*D;
xm=(tx+(TILE_W - T + 1))*D;
tile[threadIdx.y*(TILE_W)*D+(threadIdx.x+(TILE_W - T + 1))*D+d]=Dm[ym+xm+d];
}
if((ty+(TILE_W - T + 1))<W&&(threadIdx.y+(TILE_W - T + 1))<(TILE_W))
{
ym=(ty+(TILE_W - T + 1))*W*D;
xm=(tx)*D;
tile[(threadIdx.y+(TILE_W - T + 1))*(TILE_W)*D+(threadIdx.x)*D+d]=Dm[ym+xm+d];
}
if(((ty+(TILE_W - T + 1))<W&&(threadIdx.y+(TILE_W - T + 1))<(TILE_W))&&((tx+(TILE_W - T + 1))<W&&(threadIdx.x+(TILE_W - T + 1))<(TILE_W)))
{
ym=(ty+(TILE_W - T + 1))*W*D;
xm=(tx+(TILE_W - T + 1))*D;
tile[(threadIdx.y+(TILE_W - T + 1))*(TILE_W)*D+(threadIdx.x+(TILE_W - T + 1))*D+d]=Dm[ym+xm+d];
}
}
__syncthreads();
//matirx multiplication , thread computing one element
//sum array is store in thread array which stores the sum for each weight
// Only threads on the stride grid produce an output pixel.
if(ty%STRIDE_LENGTH == 0 && tx%STRIDE_LENGTH == 0)
{
float psum=0.0;
float sum[BAND]; // one partial sum per shared weight exponent
for(int i=0; i < BAND; i++){
sum[i] = 0.0;
}
for(int i=0;i<T;i++)
{
int yk1=i*2*T*D;
int ym1=(threadIdx.y+i)*(TILE_W)*D;
for(int j=0;j<T;j++)
{
int xk1=j*2*D;
int xm1=(threadIdx.x+j)*D;
for(int d=0;d<D;d++){
// Add or subtract the input into its weight's band per the sign.
if(ker[yk1+xk1+2*d+1] > 0){
sum[ker[yk1+xk1+2*d] - n2]+=tile[ym1+xm1+d];
}
else{
sum[ker[yk1+xk1+2*d] - n2]-=tile[ym1+xm1+d];
}
}
}
}
// Apply each band's power-of-two weight exactly once.
for(int i =0;i < BAND; i++){
if(i+n2>0){
psum+=sum[i]*(1<<(i + n2));
}
else{
psum+=sum[i]/(1<<((-1)*(i + n2)));
}
}
if(tx<OWS&&ty<OWS){
Do[bz*OW*OW+(ty/STRIDE_LENGTH)*OW+(tx/STRIDE_LENGTH)]=psum;
}
}
}
/*
 * Driver: build the test image and quantized kernels, run the tiled
 * convolution on the GPU, print the kernel time, and dump the result to
 * the "GPU_TAST" file.
 * Fix: the original leaked every host/device allocation and both CUDA
 * events, and fell off the end of main; cleanup and an explicit return
 * have been added.
 */
int main()
{
    //allocating memory on the host
    unsigned char *matrix=(unsigned char*)malloc(sizeof(unsigned char)*W*W*D);
    int *kernel=(int*)malloc(sizeof(int)*2*T*T*D*N);
    float *output=(float *)malloc(sizeof(float)*N*OW*OW);
    //filling the matrix and the kernel
    fillMatrix(matrix);
    fillKernel(kernel);
    //allocating memory on the GPU
    unsigned char *Dmatrix;cudaMalloc(&Dmatrix,sizeof(unsigned char)*W*W*D);
    int *Dkernel;cudaMalloc(&Dkernel,sizeof(int)*2*N*T*T*D);
    float *Doutput;cudaMalloc(&Doutput,sizeof(float)*N*OW*OW);
    // Each block computes a (TILE_W-T+1)^2 output tile; grid.z selects the kernel.
    int blockdimx=(TILE_W - T + 1);
    int blockdimy=(TILE_W - T + 1);
    int griddimz=N;
    int griddimy=(OWS+blockdimx-1)/blockdimx;
    int griddimx=(OWS+blockdimy-1)/blockdimy;
    dim3 blocks(griddimx, griddimy, griddimz);
    dim3 thrds_per_block(blockdimx, blockdimy);
    //copying kernel and the matrix to the GPU
    cudaMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D,cudaMemcpyHostToDevice);
    cudaMemcpy(Dkernel, kernel, sizeof(int)*2*T*T*D*N,cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    cudaEventRecord(start,0);
    //cuda kernel call
    conv<<<blocks,thrds_per_block>>>(Dmatrix, Dkernel, Doutput);
    CUDA_CALL(cudaGetLastError());
    cudaDeviceSynchronize();
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("%f\n",milliseconds);
    cudaMemcpy(output, Doutput, sizeof(float)*N*OW*OW,cudaMemcpyDeviceToHost);
    //Use print_matrix_to_file function only
    printtofile(output);
    // Release events and all device/host memory.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(Dmatrix);
    cudaFree(Dkernel);
    cudaFree(Doutput);
    free(matrix);
    free(kernel);
    free(output);
    return 0;
}
|
24,034 | #include <stdlib.h>
#include <stdio.h>
#define ARRAY_LENGTH (100)
#define ARRAY_ELEMENT_SIZE (sizeof(long))
#define ARRAY_SIZE (ARRAY_ELEMENT_SIZE * ARRAY_LENGTH)
#define SAMPLE_LENGTH (5)
#define SAMPLE_SIZE (ARRAY_ELEMENT_SIZE * SAMPLE_LENGTH)
#define QUERY_LENGTH (10)
#define QUERY_SIZE (ARRAY_ELEMENT_SIZE * QUERY_LENGTH)
#define PRINT_ARRAY (1)
/*
First create a downsampled array (create an array and populate it with every Nth element from array)
Search the downsampled array for the number closest but smaller than desired. Then look in the area between that number and next number in the sample
*/
/*
 * Uniform pseudo-random integer in [min, max) using rand().
 * Fix/generalization: when max <= min the original evaluated rand() % 0
 * (undefined behavior); that degenerate range now returns min.
 */
int random(int min, int max){
    if (max <= min)
        return min;
    return (rand() % (max - min)) + min;
}
/* Store the identity ramp value i at array[i]; echo it when PRINT_ARRAY. */
void populateArray(long* array, long i){
    array[i] = i;
    if (PRINT_ARRAY) {
        printf("array[%ld] = %ld\n", i, array[i]);
    }
}
/* Downsample: sample[i] takes every (ARRAY_LENGTH/SAMPLE_LENGTH)-th
 * element of array; echo it when PRINT_ARRAY. */
void populateSample(long* array, long *sample,long i){
    long src = i * ARRAY_LENGTH / SAMPLE_LENGTH;
    sample[i] = array[src];
    if (PRINT_ARRAY) {
        printf("sample[%ld] = %ld\n", i, sample[i]);
    }
}
/* Fill query[i] with a random value in [0, 100); echo it when PRINT_ARRAY. */
void populateQuery(long* query, long i){
    long value = random(0, 100);
    query[i] = value;
    if (PRINT_ARRAY) {
        printf("query[%ld] = %ld\n", i, query[i]);
    }
}
/*
 * Binary search over a sorted ascending array: returns the index of `key`
 * if present, otherwise the index of the largest element <= key (0 when
 * every element exceeds key).
 * Fix: with number_of_elements <= 0 the loop never ran and `mid` was
 * returned uninitialized (undefined behavior); that case now returns 0.
 */
long binary_search_guess(long *array, long number_of_elements, long key)
{
    if (number_of_elements <= 0)
        return 0;
    long low = 0, high = number_of_elements - 1, mid = 0;
    while (low <= high)
    {
        mid = (low + high) / 2;
        if (array[mid] < key)
            low = mid + 1;
        else if (array[mid] == key)
            return mid;
        else
            high = mid - 1;
    }
    /* Overshot: walk back to the largest element <= key (or index 0). */
    while (mid > 0 && array[mid] > key)
        mid--;
    return mid;
}
/*
 * Classic binary search restricted to the inclusive index range [low, high]
 * of a sorted ascending array.  Returns the index of `key`, or -1 when the
 * key is absent from that range.
 */
long binary_search_precise(long *array, long key,long low, long high)
{
    while (low <= high)
    {
        long mid = (low + high) / 2;
        if (array[mid] == key)
            return mid;
        if (array[mid] < key)
            low = mid + 1;
        else
            high = mid - 1;
    }
    return -1;
}
/*
 * Look up query[index] in `array`: binary-search the downsampled `sample`
 * for the bucket that should contain the key, then binary-search precisely
 * within that bucket's span of `array`.  Writes the found index (or -1)
 * into output[index].
 *
 * Fix: the precise search's upper bound, (guess+1)*ARRAY_LENGTH/
 * SAMPLE_LENGTH, equals ARRAY_LENGTH for the last bucket, letting
 * binary_search_precise read one element past the end of `array`; the
 * bound is now clamped to ARRAY_LENGTH-1.
 */
void search(long* array, long *sample, long*output, long* query, long index){
    long guess = binary_search_guess(sample, SAMPLE_LENGTH, query[index]);
    long low = guess * ARRAY_LENGTH / SAMPLE_LENGTH;
    long high = (guess + 1) * ARRAY_LENGTH / SAMPLE_LENGTH;
    if (high > ARRAY_LENGTH - 1)
        high = ARRAY_LENGTH - 1;
    output[index] = binary_search_precise(array, query[index], low, high);
}
// Driver: build the array, its downsampled index, and random queries, then
// resolve each query via the two-level binary search and print the results.
// NOTE(review): the device_* buffers are allocated and freed but never
// written or read -- despite the "gpu" comment, every step (including
// search()) runs on the host; presumably a GPU port was planned.
int main(int argc,char* argv[]){
long *array = (long*)malloc(ARRAY_SIZE);
long *sample = (long*)malloc(SAMPLE_SIZE);
long *output = (long*)malloc(QUERY_SIZE);
long *query = (long*)malloc(QUERY_SIZE);
long *device_array;
long *device_sample;
long *device_output;
long *device_query;
cudaMalloc((void**)&device_array,ARRAY_SIZE);
cudaMalloc((void**)&device_sample,SAMPLE_SIZE);
cudaMalloc((void**)&device_output,QUERY_SIZE);
cudaMalloc((void**)&device_query,QUERY_SIZE);
// cpu
printf("Populating Array\n");
for(long i=0;i<ARRAY_LENGTH;i++)
populateArray(array,i);
// cpu
printf("Populating Query\n");
for(long i=0;i<QUERY_LENGTH;i++)
populateQuery(query,i);
// cpu
printf("Populating Sample\n");
for(long i=0;i<SAMPLE_LENGTH;i++)
populateSample(array,sample,i);
// gpu
printf("Processing Query\n");
for(long i=0;i<QUERY_LENGTH;i++)
search(array,sample,output,query,i);
// copy the results from the gpu to the cpu
//cudaMemcpy(output,device_output,QUERY_SIZE,cudaMemcpyDeviceToHost);
printf("Printing Results\n");
for(long i=0;i<QUERY_LENGTH;i++)
printf("results[%ld] = (%ld == [%ld])\n",i,output[i],query[i]);
cudaFree(device_array);
cudaFree(device_sample);
cudaFree(device_output);
cudaFree(device_query);
free(array);
free(sample);
free(output);
free(query);
return 0;
}
24,035 | #include "includes.h"
// Backpropagation pass for a fully-connected network with `layers` hidden
// layers of `height` units each, over a batch of training samples.
// Thread mapping: threadIdx.x (tix) = hidden-unit index, threadIdx.y+sample
// (tiy) = training-sample index.  Per-sample slices of the flattened device
// arrays are addressed through the *_offset locals below.
// NOTE(review): assumes blockDim.x == height (every unit has a thread) and
// that all threads reach each __syncthreads() uniformly -- confirm launch
// configuration.  The temp*(1-temp) factors have the form of a logistic-
// sigmoid derivative, so sigmoid activations are presumably used upstream.
__global__ void back(double *h_out_d, double *weights_out_d, double *weights_h_d, double *weights_in_d, double *outputs_d, double *deltas_h_d, double *deltas_h_new_d, double *deltas_o_d, double *weights_in_delta_d, double *weights_out_delta_d, double *weights_h_delta_d, int height, int inputs, int outputs, int layers, double *training_in_d, double *training_out_d, int sample){
int i, j;
int tix = threadIdx.x;
int tiy = threadIdx.y + sample;
// Per-sample offsets into the flattened buffers.
int h_offset = tiy * layers * height;
int w_o_d_offset = tiy * outputs * height;
int w_h_d_offset = tiy * (layers-1) * height * height;
int w_i_d_offset = tiy * inputs * height;
int d_h_offset = tiy * height;
double delta_sum, temp;
/*__shared__ double h_out_ds[H_LAYERS*H_HEIGHT];
__shared__ double weights_h_ds[(H_LAYERS-1)*H_HEIGHT*H_HEIGHT];
__shared__ double deltas_h_ds[H_HEIGHT];
__shared__ double deltas_h_new_ds[H_HEIGHT];
for(i=0;i<layers;i++)
h_out_ds[tix*height+i] = h_out_d[tix*height+i];
for(i=0;i<layers-1;i++){
for(j=0;j<height;j++)
weights_h_ds[i*height*height + tix*height + j] = weights_h_d[i*height*height + tix*height + j];
}
deltas_h_ds[tix] = deltas_h_d[tix];
deltas_h_new_ds[tix] = deltas_h_new_d[tix];
__syncthreads();
*/
//output layer
// Output deltas = (prediction - target); gradient w.r.t. output weights
// uses the last hidden layer's activations.
if(tix < outputs){
deltas_o_d[tiy * outputs + tix] = (outputs_d[tiy * outputs + tix] - training_out_d[tiy]);
for(i = 0; i < height; i++){
weights_out_delta_d[w_o_d_offset + (tix * height) + i] = deltas_o_d[tiy * outputs + tix] * h_out_d[h_offset + (layers-1)*height+i];
}
}
__syncthreads();
//hidden layer
//layer connected to output
// Backpropagate output deltas through weights_out into the last hidden layer.
delta_sum = 0;
for(i = 0; i < outputs; i++){
delta_sum += weights_out_d[tix + (i * height)] * deltas_o_d[tiy * outputs + i];
}
temp = h_out_d[h_offset + (layers-1)*height + tix];
deltas_h_d[d_h_offset + tix] = temp * (1 - temp) * delta_sum;
for(i = 0; i < height; i++){
weights_h_delta_d[w_h_d_offset + (layers-2)*height*height + (tix * height) + i] = deltas_h_d[d_h_offset + tix] * h_out_d[h_offset + (layers-2)*height+i];
}
__syncthreads();
//each hidden layer not connected to input/hidden output layer
// Walk the middle layers from back to front; deltas_h_new holds the
// freshly-computed layer while deltas_h holds the previous (later) one.
for(i = layers - 2; i > 0; i--){
delta_sum = 0;
for(j = 0; j < height; j++){
delta_sum += weights_h_d[i*height*height + j*height + tix] * deltas_h_d[d_h_offset + j];
}
temp = h_out_d[h_offset + i*height + tix];
deltas_h_new_d[d_h_offset + tix] = temp * (1 - temp) * delta_sum;
for(j = 0; j < height; j++){
weights_h_delta_d[w_h_d_offset + (i-1)*height*height + (tix * height) + j] = (deltas_h_new_d[d_h_offset + tix] * h_out_d[h_offset + (i-1)*height+j]);
}
__syncthreads();
//change pointers to simulate copying memory
deltas_h_d[d_h_offset + tix] = deltas_h_new_d[d_h_offset + tix];
__syncthreads();
}
//Layer connected to inputs
// First hidden layer: gradient w.r.t. input weights uses the raw inputs.
delta_sum = 0;
for(i=0; i<height; i++){
delta_sum += weights_h_d[i*height + tix] * deltas_h_d[d_h_offset + i];
}
temp = h_out_d[h_offset + tix];
deltas_h_new_d[d_h_offset + tix] = temp * (1 - temp) * delta_sum;
for(i=0; i<inputs; i++){
weights_in_delta_d[w_i_d_offset + tix*inputs+i] = (deltas_h_new_d[d_h_offset + tix] * training_in_d[tiy * inputs + i]);
}
__syncthreads();
}
24,036 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 10000000
/*
void vectorAddition(int n, float *vec1, float *vec2, float *out) {
for (int i=0; i<n; i++)
out[i] = vec1[i] + vec2[i];
}
*/
/*
 * Element-wise vector add: out[i] = vec1[i] + vec2[i] for i in [0, n).
 * Fix: rewritten as a grid-stride loop.  The original made every launched
 * thread iterate over all n elements, which only makes sense for a
 * single-thread launch; the grid-stride form is correct and non-redundant
 * for any launch configuration, including the <<<1,1>>> launch in main().
 */
__global__ void vectorAddition(int n, float *vec1, float *vec2, float *out) {
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
        out[i] = vec1[i] + vec2[i];
}
/*
 * Driver: add two N-element vectors of ones on the GPU and print the
 * elapsed time in seconds.
 * Fix: kernel launches are asynchronous, so the original's clock() pair
 * measured only the launch overhead; a cudaDeviceSynchronize() now makes
 * the timer cover the kernel's actual execution.
 */
int main() {
    float *vec1, *vec2, *out;
    float *d_vec1, *d_vec2, *d_out;
    // Host buffers, initialized to all ones.
    vec1 = (float *) malloc(sizeof(float) * N);
    vec2 = (float *) malloc(sizeof(float) * N);
    out = (float *) malloc(sizeof(float) * N);
    for (int i=0; i<N; i++) {
        vec1[i] = 1.0f;
        vec2[i] = 1.0f;
    }
    // Device buffers and input transfer.
    cudaMalloc((void**) &d_vec1, sizeof(float) * N);
    cudaMalloc((void**) &d_vec2, sizeof(float) * N);
    cudaMalloc((void**) &d_out, sizeof(float) * N);
    cudaMemcpy(d_vec1, vec1, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_vec2, vec2, sizeof(float) * N, cudaMemcpyHostToDevice);
    clock_t begin = clock();
    vectorAddition<<<1,1>>>(N, d_vec1, d_vec2, d_out);
    cudaDeviceSynchronize(); // wait for the kernel before stopping the clock
    clock_t end = clock();
    double elapsedTime = (double) (end - begin) / CLOCKS_PER_SEC;
    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
    cudaFree(d_vec1);
    cudaFree(d_vec2);
    cudaFree(d_out);
    free(vec1);
    free(vec2);
    free(out);
    printf("%f", elapsedTime);
    return 0;
}
|
24,037 | #define OFFSET(row, col, ncols) (row * ncols + col)
#define NO_BOUND -1
#define EPSILON 0.000001
#define NONBASIC_FLAG 0
#define BASIC_FLAG 1
// One thread per variable (starting at `offset`): record, via atomicMin on
// `result`, the smallest-index BASIC variable whose assignment strictly
// violates one of its finite bounds (NO_BOUND == -1 means unbounded; being
// within EPSILON of a bound counts as on-bound and is fine).
extern "C"
__global__ void check_bounds(
    const int n,
    const int offset,
    const float* const lower,
    const float* const upper,
    const float* const assigns,
    const unsigned char* const flags,
    int* const result
)
{
    const int idx = offset + blockIdx.x * blockDim.x + threadIdx.x;
    // Overshoot threads do nothing.
    if (idx >= n)
        return;
    // Nonbasic variables are skipped.
    if (flags[idx] == NONBASIC_FLAG)
        return;
    const float value = assigns[idx];
    const float lo = lower[idx];
    const float hi = upper[idx];
    // Sitting (within EPSILON) on either bound is acceptable.
    const bool atLower = fabsf(value - lo) < EPSILON;
    const bool atUpper = fabsf(value - hi) < EPSILON;
    if (atLower || atUpper)
        return;
    // Broken iff it strictly violates a finite bound.
    const bool belowLower = lo != NO_BOUND && value < lo;
    const bool aboveUpper = hi != NO_BOUND && value > hi;
    if (belowLower || aboveUpper)
        atomicMin(result, idx);   // keep the smallest broken index
}
#define NONE_FOUND -1
#define IS_INCREASABLE(low, upp, ass) (upp == NO_BOUND || ass < upp)
#define IS_DECREASABLE(low, upp, ass) (low == NO_BOUND || ass > low)
/**
* If found, returns the index of a suitable variable; otherwise, returns
* NONE_FOUND. The return value is stored in the output argument called
* suitable_idx.
*/
/**
 * If found, returns the index of a suitable variable; otherwise, returns
 * NONE_FOUND. The return value is stored in the output argument called
 * suitable_idx (smallest suitable var wins, via atomicMin).
 */
extern "C"
__global__ void find_suitable(
    const int ncols,
    const int broken_idx,
    const int offset,
    const float* const tableau,
    const float* const lower,
    const float* const upper,
    const float* const assigns,
    const unsigned char* const flags,
    const int* const varToTableau,
    const int* const colToVar,
    int* const suitable_idx
){
    // Determine column index assigned to this thread
    const int idx = offset + (blockIdx.x * blockDim.x + threadIdx.x);
    // BUG FIX: the boundary check must run BEFORE indexing colToVar; the
    // original read colToVar[idx] first, an out-of-bounds access for the
    // overshoot threads of the last block.
    if (idx >= ncols)
        return;
    const int var = colToVar[idx];
    // "basic" variables are skipped
    if (flags[var] == BASIC_FLAG)
        return;
    // Determine if the broken variable needs to be increased or decreased
    const bool increase = assigns[broken_idx] < lower[broken_idx];
    // Bounds info needed to decide if this candidate can move
    const float ass = assigns[var];
    const float low = lower[var];
    const float upp = upper[var];
    // Coefficient linking the candidate to the broken variable's row
    const float coeff = tableau[varToTableau[broken_idx] * ncols + varToTableau[var]];
    if (increase){
        if ((IS_INCREASABLE(low, upp, ass) && coeff > 0)
            || (IS_DECREASABLE(low, upp, ass) && coeff < 0)) {
            atomicMin(suitable_idx, var);
        }
    }
    else {
        if ((IS_INCREASABLE(low, upp, ass) && coeff < 0)
            || (IS_DECREASABLE(low, upp, ass) && coeff > 0)) {
            atomicMin(suitable_idx, var);
        }
    }
}
// Single-thread kernel that applies the chosen pivot pair: shifts the broken
// variable onto its violated bound (by `delta`) and compensates the suitable
// variable by `theta = delta / coeff`, with signs chosen so the row equation
// stays satisfied. Launch with any grid; only global thread 0 does work.
extern "C"
__global__ void find_suitable_complete(
const int ncols,
const int broken_idx,
const int suitable_idx,
const float* const tableau,
const float* const lower,
const float* const upper,
float* const assigns,
const int* const varToTableau
){
// Serial step: every thread except global thread 0 exits immediately.
if (blockIdx.x * blockDim.x + threadIdx.x > 0)
return;
// Read bounds information for the broken variable
float ass = assigns[broken_idx];
float low = lower[broken_idx];
float upp = upper[broken_idx];
// Determine if the broken variable needs to be increased or decreased
const bool increase = ass < low;
// Obtain coefficient value in the tableau
const float coeff = tableau[varToTableau[broken_idx] * ncols
+ varToTableau[suitable_idx]];
// Amounts to adjust assignments of suitable and broken variables
// (delta is non-negative: distance to the violated bound).
const float delta = increase ? low - ass : ass - upp;
// NOTE(review): no guard against coeff == 0 here -- presumably ruled out by
// find_suitable's selection; confirm at the call site.
const float theta = delta / coeff;
//printf("[%d] b=%d s=%d increase=%d delta=%f theta=%f\n",
// threadIdx.x, broken_idx, suitable_idx, increase, delta, theta);
// Read bounds info for the suitable variable to check if
// increaseable or decreaseable
ass = assigns[suitable_idx];
low = lower[suitable_idx];
upp = upper[suitable_idx];
if (increase) {
// Broken variable moves UP by delta; suitable variable absorbs -/+theta
// depending on the coefficient's sign.
if ((IS_INCREASABLE(low, upp, ass) && coeff > 0) ||
(IS_DECREASABLE(low, upp, ass) && coeff < 0)) {
assigns[suitable_idx] += coeff < 0 ? -theta : theta;
assigns[broken_idx] += delta;
//printf("a(%d) = %f\n", broken_idx, assigns[broken_idx]);
//printf("a(%d) = %f\n", suitable_idx, assigns[suitable_idx]);
}
}
else {
// Broken variable moves DOWN by delta; mirrored sign logic.
if ((IS_INCREASABLE(low, upp, ass) && coeff < 0) ||
(IS_DECREASABLE(low, upp, ass) && coeff > 0)) {
assigns[suitable_idx] -= coeff < 0 ? theta : -theta;
assigns[broken_idx] -= delta;
//printf("a(%d) = %f\n", broken_idx, assigns[broken_idx]);
//printf("a(%d) = %f\n", suitable_idx, assigns[suitable_idx]);
}
}
}
// Pivot update for all tableau cells off the pivot row/column:
// t[r][c] -= t[pivot_row][c] * t[r][pivot_col] / alpha.
// 2D launch: x indexes columns, y indexes rows.
extern "C"
__global__ void pivot_update_inner(
    const float alpha,
    const int pivot_row,
    const int pivot_col,
    const int nrows,
    const int ncols,
    float* const tableau
){
    const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
    const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
    // Skip grid overshoot and the pivot row/column themselves.
    if (col >= ncols || row >= nrows || row == pivot_row || col == pivot_col)
        return;
    const unsigned int row_base = OFFSET(row, 0, ncols);
    const float delta = tableau[row_base + col];                 // current cell
    const float beta  = tableau[OFFSET(pivot_row, col, ncols)];  // pivot-row cell, same column
    const float gamma = tableau[row_base + pivot_col];           // pivot-column cell, same row
    tableau[row_base + col] = delta - (beta * gamma) / alpha;
}
// Scale the pivot row: every entry of `row` becomes -entry / alpha.
// One thread per column.
extern "C"
__global__ void pivot_update_row(
    const float alpha,
    const int row,
    const int ncols,
    float* const tableau
){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < ncols) {
        float* const cell = tableau + row * ncols + col;
        *cell = -(*cell) / alpha;
    }
}
// Scale the pivot column: every entry of `col` becomes entry / alpha.
// One thread per row.
extern "C"
__global__ void pivot_update_column(
    const float alpha,
    const int col,
    const int nrows,
    const int ncols,
    float* const tableau
){
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < nrows) {
        float* const cell = tableau + row * ncols + col;
        *cell = (*cell) / alpha;
    }
}
// Per-block partial dot product:
//   output[blockIdx.x] = sum over the block's gids of
//                        assigns[colToVar[gid]] * input[gid].
// Requires blockDim.x * sizeof(float) bytes of dynamic shared memory.
extern "C"
__global__ void update_assignment_1(
    const int n,
    const float* const input,
    const float* const assigns,
    const int* const colToVar,
    float* const output
){
    extern __shared__ float partial_sums[];
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    const int lid = threadIdx.x;
    // BUG FIX: the original returned early for gid >= n, so in the last
    // (partial) block some threads skipped the __syncthreads() barriers
    // below (undefined behavior) and the reduction read uninitialized
    // shared memory. Zero-pad instead so every thread participates.
    partial_sums[lid] = (gid < n) ? assigns[colToVar[gid]] * input[gid] : 0.0f;
    __syncthreads();
    // Tree reduction over the block's shared buffer; zero-padding makes the
    // original's extra `lid + stride < n` guard unnecessary.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (lid < stride) {
            partial_sums[lid] += partial_sums[lid + stride];
        }
        __syncthreads();
    }
    // Write the result for this block to global memory
    if (lid == 0) {
        output[blockIdx.x] = partial_sums[0];
    }
}
// Reduction pass over the per-block partials produced by update_assignment_1:
// data[blockIdx.x] = sum of this block's slice of data[0..n).
// Requires blockDim.x * sizeof(float) bytes of dynamic shared memory.
extern "C"
__global__ void update_assignment_2(
    const int n,
    float* const data
){
    extern __shared__ float partial_sums[];
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    const int lid = threadIdx.x;
    // BUG FIX: no early return for gid >= n -- the original skipped the
    // __syncthreads() barriers in the last block (divergent barrier) and
    // left shared slots uninitialized. Zero-pad instead.
    partial_sums[lid] = (gid < n) ? data[gid] : 0.0f;
    __syncthreads();
    // Tree reduction; zero-padding removes the need for per-step n checks.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (lid < stride) {
            partial_sums[lid] += partial_sums[lid + stride];
        }
        __syncthreads();
    }
    // Write the result for this block to global memory
    if (lid == 0) {
        data[blockIdx.x] = partial_sums[0];
    }
}
// Final single-block reduction: sums data[0..n) into data[0].
// Expects a one-block launch with blockDim.x >= n and
// blockDim.x * sizeof(float) bytes of dynamic shared memory.
extern "C"
__global__ void update_assignment_complete(
    const int n,
    float* const data
){
    extern __shared__ float partial_sums[];
    const int lid = threadIdx.x;
    // BUG FIX: the original prefetched data[lid] unconditionally -- an
    // out-of-bounds read whenever blockDim.x > n. Zero-pad out-of-range
    // lanes; this also lets the reduction drop its per-step n check.
    partial_sums[lid] = (lid < n) ? data[lid] : 0.0f;
    __syncthreads();
    // Tree reduction within the single block.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (lid < stride) {
            partial_sums[lid] += partial_sums[lid + stride];
        }
        __syncthreads();
    }
    // Write the result to the assignments array
    if (lid == 0) {
        data[0] = partial_sums[0];
    }
} |
24,038 | #include "compaction.cuh"
#include <iostream>
int maxThreadsPerBlock = 128;
cudaEvent_t beginEvent;
cudaEvent_t endEvent;
// global calls
// One-time setup: create the CUDA events used for kernel timing.
// The size parameter is currently unused.
void initCuda (int N) {
    (void)N;   // reserved for future sizing decisions
    cudaEventCreate(&beginEvent);
    cudaEventCreate(&endEvent);
}
// One step (at `depth`, 1-based) of a naive Hillis-Steele scan over floats:
// each element adds its partner 2^(depth-1) positions to the left; on the
// first pass indices are shifted by one so the result ends up exclusive.
__global__ void naive_scan (float* in_arr, float* scan_arr, int size, int depth) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // BUG FIX: the accumulator was declared `int`, silently truncating the
    // float input values on every pass; it must be float.
    float val = 0.0f;
    int in_index = index;
    if (depth == 1) {
        in_index--;   // shift by one on the first pass
    }
    if (in_index >= 0 && index < size) {
        int exp_2 = 1 << (depth - 1);   // partner offset 2^(depth-1)
        val = in_arr[in_index];
        if (in_index >= exp_2) {
            val += in_arr[in_index - exp_2];
        }
    }
    if (index < size) {
        scan_arr[index] = val;
    }
}
// One Hillis-Steele scan step, same semantics as naive_scan.
// BUG FIX: the original staged data in a ONE-element __shared__ array indexed
// by the GLOBAL thread index -- an out-of-bounds shared write/read -- and a
// per-block shared copy cannot hold the cross-block neighbor this step needs
// anyway (in_index - exp_2 may live in another block). This fixed version
// reads straight from global memory, matching naive_scan.
__global__ void shared_scan (float* in_arr, float* scan_arr, int size, int depth) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int exp_2 = 1;
    for (int i = 1; i < depth; i++) {
        exp_2 *= 2;   // 2^(depth-1)
    }
    float sValue = 0;
    int in_index = index;
    if (depth == 1) {
        in_index--;   // first pass shifts by one (exclusive result)
    }
    if (in_index >= 0 && index < size) {
        sValue += in_arr[in_index];
        if (in_index >= exp_2) {
            sValue += in_arr[in_index - exp_2];
        }
    }
    if (index < size) {
        scan_arr[index] = sValue;
    }
}
// Scan driver: ping-pongs two device buffers through ceil(log2(size)) kernel
// passes, prints the elapsed GPU time, and copies the result into out_arr.
void cudaScan (float* in_arr, float* out_arr, int size) {
    int numBlocks = ceil(size/(float)maxThreadsPerBlock);
    float* arr1, * arr2;
    cudaMalloc((void**)&arr1, size*sizeof(float));
    cudaMalloc((void**)&arr2, size*sizeof(float));
    float time;
    int max_depth = ceil(log2((float)size));
    cudaMemcpy(arr1, in_arr, size*sizeof(float), cudaMemcpyHostToDevice);
    cudaEventRecord(beginEvent, 0);
    for (int i = 1; i <= max_depth; i++) {
        shared_scan<<<numBlocks, maxThreadsPerBlock>>>(arr1, arr2, size, i);
        // Swap roles: this pass's output is the next pass's input.
        float* temp = arr1;
        arr1 = arr2;
        arr2 = temp;
    }
    cudaEventRecord(endEvent, 0);
    cudaEventSynchronize(endEvent);
    cudaEventElapsedTime(&time, beginEvent, endEvent);
    std::cout << "cudaGPUTime for size " << size << " was " << time << "ms" << std::endl;
    cudaMemcpy(out_arr, arr1, size*sizeof(float), cudaMemcpyDeviceToHost);
    // BUG FIX: the two device buffers were never released (memory leak on
    // every call); also removed the unused threadsPerBlock local.
    cudaFree(arr1);
    cudaFree(arr2);
}
// Stream-compaction scatter: threads whose predicate (temp_arr[i] == 1) copy
// their element to the destination slot named by the scan result.
__global__ void scatter (float* in_arr, float* temp_arr, float* scan_arr, float* out_arr, int size) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= size)
        return;
    if (temp_arr[index] == 1) {
        int dest = (int)scan_arr[index];
        out_arr[dest] = in_arr[index];
    }
}
// Predicate kernel: out_arr[i] = i % 2 (marks odd indices for compaction).
__global__ void compute (float* in_arr, float* out_arr, int size) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // BUG FIX: guard against grid overshoot -- the original wrote out of
    // bounds whenever size was not a multiple of the block size.
    if (index < size) {
        out_arr[index] = index % 2;
    }
}
// Stream compaction: keep in_arr elements whose predicate (compute) is 1,
// packed densely into out_arr via scan + scatter.
void cudaStreamCompaction (float* in_arr, float* out_arr, int size) {
    int numBlocks = ceil(size/(float)maxThreadsPerBlock);
    int threadsPerBlock = min(size, maxThreadsPerBlock);
    float* temp_arr, *scan_arr;
    float* arr, *compact_arr;
    // BUG FIX: these are float arrays -- allocate sizeof(float), not sizeof(int).
    cudaMalloc((void**)&temp_arr, size*sizeof(float));
    cudaMalloc((void**)&scan_arr, size*sizeof(float));
    cudaMalloc((void**)&arr, size*sizeof(float));
    cudaMalloc((void**)&compact_arr, size*sizeof(float));
    cudaMemcpy(arr, in_arr, size*sizeof(float), cudaMemcpyHostToDevice);
    compute<<<numBlocks, threadsPerBlock>>>(arr, temp_arr, size);
    // BUG FIX: scan the PREDICATE array to obtain destination indices; the
    // original scanned the data values.
    cudaScan(temp_arr, scan_arr, size);
    // BUG FIX: scatter must write the DEVICE buffer; the original passed the
    // host pointer out_arr to the kernel and then copied from compact_arr,
    // which nothing had ever written.
    scatter<<<numBlocks, threadsPerBlock>>>(arr, temp_arr, scan_arr, compact_arr, size);
    cudaMemcpy(out_arr, compact_arr, size*sizeof(float), cudaMemcpyDeviceToHost);
    // BUG FIX: release the temporaries (they were leaked).
    cudaFree(temp_arr);
    cudaFree(scan_arr);
    cudaFree(arr);
    cudaFree(compact_arr);
} |
24,039 | __global__ void bgr2gray(float *g_odata, float *g_idata, int width, int height) {
    // 3-channel interleaved -> grayscale with ITU-R BT.601 weights
    // (0.299/0.587/0.114 applied to channel offsets +0/+1/+2).
    // NOTE(review): the kernel name says BGR but the 0.299 weight lands on
    // offset 0 (named src_r_id, i.e. red-first) -- confirm channel order upstream.
    int des_x = blockIdx.x * blockDim.x + threadIdx.x;
    int des_y = blockIdx.y * blockDim.y + threadIdx.y;
    if (des_x >= width || des_y >= height)
        return;
    int des_id = des_y * width + des_x;
    int src_r_id = des_id * 3;   // 3 interleaved floats per source pixel
    // FIX: float literals keep the arithmetic in single precision; the bare
    // 0.299/0.587/0.114 promoted every pixel to double.
    g_odata[des_id] = 0.299f * g_idata[src_r_id] + 0.587f * g_idata[src_r_id+1] + 0.114f * g_idata[src_r_id+2];
}
// 3x3 Sobel gradient-magnitude approximation on a single-channel image:
// writes |Gx| + |Gy| per pixel. Border handling truncates the stencil: the
// component is zeroed on its own border (left/right for Gx, top/bottom for
// Gy), and the row/column outside the image is simply not accumulated.
__global__ void sobel_abs(float *g_odata, float *g_idata, int width, int height) {
int des_x = blockIdx.x * blockDim.x + threadIdx.x;
int des_y = blockIdx.y * blockDim.y + threadIdx.y;
// Grid-overshoot threads do nothing.
if (des_x >= width || des_y >= height)
return;
int index = des_y * width + des_x;
float value_x = 0;
float value_y = 0;
// Horizontal gradient Gx: zero on the left/right image borders.
if (des_x == 0 || des_x == width - 1) {
value_x = 0;
}
else {
// Center row of the Sobel-x stencil: [-2, 0, +2].
value_x = -2 * g_idata[index - 1] + 2 * g_idata[index + 1];
// Top row [-1, 0, +1], only when a row above exists.
if (des_y != 0) {
value_x += -1 * g_idata[index - width - 1] + 1 * g_idata[index - width + 1];
}
// Bottom row [-1, 0, +1], only when a row below exists.
if (des_y != height - 1) {
value_x += -1 * g_idata[index + width - 1] + 1 * g_idata[index + width + 1];
}
}
// Vertical gradient Gy: zero on the top/bottom image borders.
if (des_y == 0 || des_y == height - 1) {
value_y = 0;
}
else {
// Center column of the Sobel-y stencil: [-2, 0, +2].
value_y = -2 * g_idata[index - width] + 2 * g_idata[index + width];
// Left column, only when a column to the left exists.
if (des_x != 0) {
value_y += -1 * g_idata[index - width - 1] + 1 * g_idata[index + width - 1];
}
// Right column, only when a column to the right exists.
if (des_x != width - 1) {
value_y += -1 * g_idata[index - width + 1] + 1 * g_idata[index + width + 1];
}
}
// g_odata[index] = sqrt(value_x * value_x + value_y * value_y);
// L1 approximation of the gradient magnitude (cheaper than the L2 form above).
g_odata[index] = fabsf(value_x) + fabsf(value_y);
}
// Linear scan for the offset of the smallest element in arr[0..size).
// Ties keep the earliest index. Assumes size >= 1 (reads arr[0]).
__device__ int arg_min(float *arr, int size) {
    int best = 0;
    float best_val = arr[0];
    for (int i = 1; i < size; i++) {
        if (arr[i] < best_val) {
            best = i;
            best_val = arr[i];
        }
    }
    return best;
}
// Flatten a (col, row) coordinate into a row-major linear index.
__device__ int get_array_index(int col, int row, int width) {
    return width * row + col;
}
// One dynamic-programming step of seam carving: for each column of `row`,
// add the cheapest of the three neighbors in row-1 into energy_m and record
// which column it came from in backtrack_m. Assumes row >= 1 (reads row-1)
// and width >= 3 for the border shifts -- TODO confirm at the call site.
__global__ void min_energy_at_row(float *energy_m, int *backtrack_m, int width, int row) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
if (col >= width) {
return;
}
// Shift the 3-wide window [col+shift .. col+shift+2] so it stays inside the
// row: leftmost column looks at 0..2, rightmost at width-3..width-1.
int shift_col;
if (col == 0) {
shift_col = 0;
} else if (col == width - 1) {
shift_col = -2;
} else {
shift_col = -1;
}
int head = get_array_index(col + shift_col, row - 1, width);
int min_offset = arg_min(energy_m + head, 3);
int min_col = col + shift_col + min_offset;
int min_index = get_array_index(min_col, row - 1, width);
int current_index = get_array_index(col, row, width);
// Accumulate the cheapest path cost; remember the predecessor column.
energy_m[current_index] += energy_m[min_index];
backtrack_m[current_index] = min_col;
}
// Single-thread kernel: write the argmin over the LAST row of energy_m into
// *index (the starting column of the cheapest seam).
__global__ void get_min_index(float *energy_m, int *index, int width, int height) {
    float *last_row = energy_m + width * (height - 1);
    *index = arg_min(last_row, width);
}
// Element-wise energy_m += mask * factor over a width x height grid
// (used to bias the seam cost toward/away from masked regions).
__global__ void add_mask_by_factor(float *energy_m, float *mask, float factor, int width, int height) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) {
        int idx = y * width + x;
        energy_m[idx] += mask[idx] * factor;
    }
} |
24,040 | #include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <math.h>
#include <chrono>
#include <random>
#include <bits/stdc++.h>
using namespace std;
// Uniform random integer in the closed range [minimum, maximum], using a
// per-thread engine seeded once from the system clock.
int random_in_range( int minimum, int maximum )
{
    thread_local std::ranlux48 rng(
        std::chrono::system_clock::now().time_since_epoch().count() );
    std::uniform_int_distribution<int> dist( minimum, maximum );
    return dist( rng );
}
// One-element-per-block addition: intended for an <<<n,1>>> launch, so the
// block index alone selects the element (no per-thread indexing).
__global__ void add(int *i, int *j, int *k) {
    int idx = blockIdx.x;
    k[idx] = i[idx] + j[idx];
}
// Fill arr[0..n) with random integers in [100, 400].
void random_init(int *arr, int n) {
    for (int i = 0; i < n; ++i)
        arr[i] = random_in_range(100, 400);
}
// Serial reference implementation: k[p] = i[p] + j[p] for p in [0, n).
void add_cpu(int *i, int *j, int *k, int n) {
    for (int p = 0; p < n; ++p)
        k[p] = i[p] + j[p];
}
// Driver: adds two random 20000-element vectors on GPU and CPU, printing
// both results and elapsed times.
int main() {
    int n = 20000;
    int *a, *b, *c;        // host operands and result
    int *i, *j, *k;        // device copies
    int size = n * sizeof(int);
    a = new int[n];
    b = new int[n];
    // BUG FIX: `int c[n]` was a variable-length array -- not standard C++
    // and ~80 KB of stack; allocate it on the heap like a and b.
    c = new int[n];
    random_init(a,n);
    random_init(b,n);
    cout<<"First: ";
    for(int i = 0 ; i < n ; i++) {
        cout<<a[i]<<" ";
    }
    cout<<endl;
    cout<<"Second: ";
    for(int i = 0 ; i < n ; i++) {
        cout<<b[i]<<" ";
    }
    cout<<endl;
    cudaMalloc((void **)&i,size);
    cudaMalloc((void **)&j,size);
    cudaMalloc((void **)&k,size);
    cudaMemcpy(i,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(j,b,size,cudaMemcpyHostToDevice);
    // Time the kernel with CUDA events (one element per block: <<<n,1>>>).
    float gpu_elapsed_time;
    cudaEvent_t gpu_start,gpu_stop;
    cudaEventCreate(&gpu_start);
    cudaEventCreate(&gpu_stop);
    cudaEventRecord(gpu_start,0);
    add<<<n,1>>>(i,j,k);
    cudaEventRecord(gpu_stop, 0);
    cudaEventSynchronize(gpu_stop);
    cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
    cudaEventDestroy(gpu_start);
    cudaEventDestroy(gpu_stop);
    cudaMemcpy(c,k,size,cudaMemcpyDeviceToHost);
    cout<<endl;
    cout<<"GPU Elapsed time is: "<<gpu_elapsed_time<<" milliseconds";
    cout<<endl;
    cout<<"Parallel Result: ";
    for(int i = 0 ; i < n ; i++) {
        cout<<c[i]<<" ";
    }
    cout<<endl;
    cout<<endl;
    // NOTE(review): CUDA events time the GPU stream; bracketing a host-only
    // loop with them works only incidentally. Kept for comparability.
    cudaEventCreate(&gpu_start);
    cudaEventCreate(&gpu_stop);
    cudaEventRecord(gpu_start,0);
    add_cpu(a,b,c,n);
    cudaEventRecord(gpu_stop, 0);
    cudaEventSynchronize(gpu_stop);
    cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
    cudaEventDestroy(gpu_start);
    cudaEventDestroy(gpu_stop);
    cout<<"CPU Elapsed time is: "<<gpu_elapsed_time<<" milliseconds";
    cout<<endl;
    cout<<"Serial Result: ";
    for(int i = 0 ; i < n ; i++) {
        cout<<c[i]<<" ";
    }
    cout<<endl;
    cudaFree(i);
    cudaFree(j);
    cudaFree(k);
    // BUG FIX: the new[]-allocated host buffers were leaked.
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
} |
24,041 | #include "stdio.h"
#include "cuda_runtime.h"
// output given cudaDeviceProp
// Print the key properties of a CUDA device.
// BUG FIX: totalGlobalMem, sharedMemPerBlock, memPitch, totalConstMem and
// textureAlignment are size_t fields; printing them with %d is undefined
// behavior on 64-bit builds -- use %zu.
void OutputSpec( const cudaDeviceProp sDevProp )
{
    printf( "Device name: %s\n", sDevProp.name );
    printf( "Device memory: %zu\n", sDevProp.totalGlobalMem );
    printf( " Memory per-block: %zu\n", sDevProp.sharedMemPerBlock );
    printf( " Register per-block: %d\n", sDevProp.regsPerBlock );
    printf( " Warp size: %d\n", sDevProp.warpSize );
    printf( " Memory pitch: %zu\n", sDevProp.memPitch );
    printf( " Constant Memory: %zu\n", sDevProp.totalConstMem );
    printf( "Max thread per-block: %d\n", sDevProp.maxThreadsPerBlock );
    printf( "Max thread dim: ( %d, %d, %d )\n", sDevProp.maxThreadsDim[0], sDevProp.maxThreadsDim[1], sDevProp.maxThreadsDim[2] );
    printf( "Max grid size: ( %d, %d, %d )\n", sDevProp.maxGridSize[0], sDevProp.maxGridSize[1], sDevProp.maxGridSize[2] );
    printf( "Ver: %d.%d\n", sDevProp.major, sDevProp.minor );
    printf( "Clock: %d\n", sDevProp.clockRate );
    printf( "textureAlignment: %zu\n", sDevProp.textureAlignment );
}
// Enumerate all visible CUDA devices and print each one's properties.
// NOTE(review): the cudaGetDeviceCount / cudaGetDeviceProperties return
// codes are ignored -- consider checking them.
int main()
{
// part1, check the number of device
int iDeviceCount = 0;
cudaGetDeviceCount( &iDeviceCount );
printf( "Number of GPU: %d\n\n", iDeviceCount );
if( iDeviceCount == 0 )
{
printf( "No supported GPU\n" );
return 0;
}
// part2, output information of each device
for( int i = 0; i < iDeviceCount; ++ i )
{
printf( "\n=== Device %i ===\n", i );
cudaDeviceProp sDeviceProp;
cudaGetDeviceProperties( &sDeviceProp, i );
OutputSpec( sDeviceProp );
}
} |
24,042 | #include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define SIZE 1000
#define TILE 16
// Naive GEMM: c[m x k] = a[m x n] * b[n x k] (row-major),
// one thread per output cell.
__global__ void matMultiplyOnDevice(int* a, int* b, int* c, int m, int n, int k) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= m || col >= k)
        return;
    int acc = 0;
    for (int h = 0; h < n; h++)
        acc += a[row * n + h] * b[h * k + col];
    c[row * k + col] = acc;
}
// Tiled GEMM: c[m x k] = a[m x n] * b[n x k] using TILE x TILE shared-memory
// tiles. Requires blockDim == (TILE, TILE).
__global__ void matMultiplyTiled(int* a, int* b, int* c, int m, int n, int k) {
    // declare shared memory for tiles
    __shared__ int shared_a[TILE * TILE];
    __shared__ int shared_b[TILE * TILE];
    int row = blockIdx.y * TILE + threadIdx.y;
    int col = blockIdx.x * TILE + threadIdx.x;
    int tempSum = 0;
    // sweep tile over matrix (same calculation as grid dim)
    for (int i = 0; i < (TILE + n - 1) / TILE; i++) {
        // Stage one tile of a and b; out-of-range cells are zero-filled so
        // the inner product below needs no bounds checks.
        if ((i * TILE + threadIdx.x) < n && row < m) {
            shared_a[(threadIdx.y * TILE) + threadIdx.x] = a[row * n + i * TILE + threadIdx.x];
        }
        else {
            shared_a[(threadIdx.y * TILE) + threadIdx.x] = 0;
        }
        if ((i * TILE + threadIdx.y) < n && col < k) {
            shared_b[(threadIdx.y * TILE) + threadIdx.x] = b[(i * TILE + threadIdx.y) * k + col];
        }
        else {
            shared_b[(threadIdx.y * TILE) + threadIdx.x] = 0;
        }
        __syncthreads();   // tiles fully staged before use
        // FIX: the inner loop previously redeclared `i`, shadowing the
        // tile-sweep index above -- legal but a maintenance hazard; renamed.
        for (int t = 0; t < TILE; t++) {
            tempSum += shared_a[(threadIdx.y * TILE) + t] * shared_b[(t * TILE) + threadIdx.x];
        }
        __syncthreads();   // done reading before the next iteration overwrites
    }
    // make sure result isn't outside of c
    if (row < m && col < k) {
        c[row * k + col] = tempSum;
    }
}
// CPU reference GEMM: c[m x k] = a[m x n] * b[n x k], row-major.
void matMultiplyOnHost(int* a, int* b, int* c, int m, int n, int k) {
    for (int row = 0; row < m; row++) {
        for (int col = 0; col < k; col++) {
            int acc = 0;
            for (int h = 0; h < n; h++)
                acc += a[row * n + h] * b[h * k + col];
            c[row * k + col] = acc;
        }
    }
}
// Driver: generates two random integer matrices, multiplies them on the GPU
// (naive kernel; tiled variant and CPU check are commented out), and prints
// the elapsed time.
int main() {
    int* h_a;
    int* h_b;
    int* h_c;
    int* h_check;
    int* d_a;
    int* d_b;
    int* d_c;
    int m = 1000;   // a is m x n, b is n x k, c is m x k
    int n = 2000;
    int k = 1500;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    clock_t cpu_startTime, cpu_endTime;
    double cpu_ElapsedTime = 0.0;
    // allocate host memory for operand and resultant matrices
    cudaMallocHost((void**)&h_a, sizeof(int) * m * n);
    cudaMallocHost((void**)&h_b, sizeof(int) * n * k);
    cudaMallocHost((void**)&h_c, sizeof(int) * m * k);
    cudaMallocHost((void**)&h_check, sizeof(int) * m * k);
    printf("Generating Matrices\n");
    // initialize operand matrices
    srand(5);
    for (int i = 0; i < m * n; i++) {
        h_a[i] = rand() % 1000;
    }
    for (int i = 0; i < n * k; i++) {
        h_b[i] = rand() % 1000;
    }
    // allocate device memory for operand and resultant matrices
    cudaMalloc((void**)&d_a, sizeof(int) * m * n);
    cudaMalloc((void**)&d_b, sizeof(int) * n * k);
    cudaMalloc((void**)&d_c, sizeof(int) * m * k);
    // copy matrices to device
    cudaMemcpy(d_a, h_a, sizeof(int) * m * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int) * n * k, cudaMemcpyHostToDevice);
    // define block and grid size (ceil-div so partial tiles are covered)
    int blockSize = 32;
    unsigned int grid_rows = (m + blockSize - 1) / blockSize;
    unsigned int grid_cols = (k + blockSize - 1) / blockSize;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(blockSize, blockSize);
    cudaEventRecord(start);
    matMultiplyOnDevice<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);
    //matMultiplyTiled<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);
    cudaEventRecord(stop);
    cudaMemcpy(h_c, d_c, sizeof(int)* m * k, cudaMemcpyDeviceToHost);
    // FIX: cudaThreadSynchronize() has been deprecated for years; use
    // cudaDeviceSynchronize() (same semantics).
    cudaDeviceSynchronize();
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("GPU CUDA Time: %.3f ms\n", milliseconds);
    /*
    cpu_startTime = clock();
    matMultiplyOnHost(h_a, h_b, h_check, m, n, k);
    cpu_endTime = clock();
    cpu_ElapsedTime = ((cpu_endTime - cpu_startTime)/CLOCKS_PER_SEC);
    printf("CPU Time: %f ms\n", cpu_ElapsedTime);
    for (int j = 0; j < m * k; j++) {
    //printf("h_c[%d] = %d\n", j, h_c[j]);
    //printf("h_check[%d] = %d\n", j, h_check[j]);
    if (h_c[j] != h_check[j]) {
    printf("Some results are wrong\n");
    break;
    }
    }*/
    // free up device and host memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_check);
    printf("Done\n");
    return 0;
}
|
24,043 | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-08-03
* $Update by: Lin Ye (email: linye2015@outlook.com) 2019-07-06 float16/int added
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "../shape/IsSameShaped.h"
#include "Clip.h"
#include "Clip.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
set each entry to its clip value (CUDA Kernel)
>> a - pointer to input data array
>> b - pointer to output data array
>> lower - the lower border
>> upper - the upper border
>> size - size of the data array
*/
/*
set each entry of b to the value of a clipped into [lower, upper] (CUDA Kernel)
>> a - pointer to input data array
>> b - pointer to output data array
>> lower - the lower border
>> upper - the upper border
>> size - size of the data array
*/
template <class T>
__global__
void KernelClip(T * a, T * b, T lower, T upper, int size)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size)
        return;
    // Same comparison order as before: upper bound checked first.
    const T v = a[i];
    if (v > upper)
        b[i] = upper;
    else if (v < lower)
        b[i] = lower;
    else
        b[i] = v;
}
/*
set each entry to its clip value
>> a - input tensor we are processing
>> b - output tensor we are processing
>> lower - the lower border
>> upper - the upper border
*/
// Host wrapper: dispatch KernelClip over tensor a into tensor b, selecting
// the kernel instantiation by a's element type (float, float16, int).
void _CudaClip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
{
CheckNTErrors((_IsSameShaped(a, b)), "Input tensors should have the same type!");
CheckNTErrors((a->isSparse == false), "TODO!");
// Pick a 1-D launch configuration for a->unitNum elements on a's device.
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
// Make a's device current; restored by BacktoCudaDev below.
int devIDBackup;
ProtectCudaDev(a->devID, devIDBackup);
if (a->dataType == DEFAULT_DTYPE) {
KernelClip << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, lower, upper, a->unitNum);
}
else if (a->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
// Convert the float borders to half before launching the half kernel.
half lower2 = __float2half(lower);
half upper2 = __float2half(upper);
KernelClip << <blocks, threads >> >((__half*)a->data, (__half*)b->data, lower2, upper2, a->unitNum);
#else
ShowNTErrors("Recompile the code with HALF_PRECISION!");
#endif
}
else if (a->dataType == X_INT) {
// Truncate the float borders for the integer kernel.
int lower2 = (int)lower;
int upper2 = (int)upper;
KernelClip << <blocks, threads >> >((int *)a->data, (int *)b->data, lower2, upper2, a->unitNum);
}
else if (a->dataType == X_INT8) {
ShowNTErrors("TODO!");
}
else {
ShowNTErrors("TODO!");
}
BacktoCudaDev(a->devID, devIDBackup);
}
/*
clip backward computation of dE/dx (Cuda kernel)
dy/dx = 1 if lower <= x <= upper
0 otherwise
>> dedy - dE/dy
>> dedx - dE/dx
>> y - y of the function
>> x - x of the function
>> lower
>> upper
*/
// Clip backward (CUDA kernel): dE/dx = dE/dy where lower <= x <= upper,
// else 0 (clip has unit slope in-range and is flat outside).
__global__
void KernelClipBackward(DTYPE * dedy, DTYPE * dedx, DTYPE * y, DTYPE * x, DTYPE lower, DTYPE upper, int size)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size)
        return;
    // Same predicate as the original (out-of-range test), so any NaN input
    // passes the gradient through unchanged.
    const DTYPE s = x[i];
    dedx[i] = (s > upper || s < lower) ? 0 : dedy[i];
}
/*
backward computation (Cuda version)
dE/dx = dE/dy * dy/dx
hard tanh: y = upper if x > upper
x if lower <= x <= upper
lower if x< lower
and dy/dx = 1 if lower <= x <= upper
0 otherwise
>> gold - gold standard to measure error (or loss)
>> y - output of the function
>> x - input of the function
>> dedy - dE/dy
>> dedx - dE/dx
>> lossName - type of loss function, e.g., cross entropy
*/
// Host wrapper for KernelClipBackward: computes dE/dx = dE/dy * dy/dx for
// the clip function. Only the default (float) dtype path is implemented.
void _CudaClipBackward(XTensor * y, XTensor * x, XTensor * dedy, XTensor * dedx, DTYPE lower, DTYPE upper)
{
if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
// 1-D launch configuration sized for x->unitNum elements on x's device.
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
// Make x's device current; restored by BacktoCudaDev below.
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
/* dE/dx = dE/dy * dy/dx */
KernelClipBackward <<<dim3(gridSize[0]), dim3(blockSize[0])>>>
((DTYPE*)dedy->data,
(DTYPE*)dedx->data,
(DTYPE*)y->data, (DTYPE*)x->data,
lower, upper,
x->unitNum);
BacktoCudaDev(x->devID, devIDBackup);
}
else
ShowNTErrors("TODO!");
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
24,044 | #include "includes.h"
// Grid-stride loop over spikes: for each spike pick the eligible filter
// (iW true) with the largest cmax score; spikes whose best score falls
// below Th^2 are marked unassigned (-1).
// Params layout (as read here): [0]=Nspikes, [2]=Nfilters, [7]=Th.
__global__ void bestFilter(const double *Params, const bool *iW, const float *cmax, int *id){
    const int Nspikes  = (int) Params[0];
    const int Nfilters = (int) Params[2];
    const float Th     = (float) Params[7];
    const int stride = blockDim.x * gridDim.x;
    for (int tind = threadIdx.x + blockIdx.x * blockDim.x; tind < Nspikes; tind += stride) {
        float best = 0.0f;
        int bestInd = 0;
        for (int ind = 0; ind < Nfilters; ind++) {
            // Column-major score matrix: entry (tind, ind) at tind + ind*Nspikes.
            if (iW[tind + ind*Nspikes] && cmax[tind + ind*Nspikes] > best) {
                bestInd = ind;
                best = cmax[tind + ind*Nspikes];
            }
        }
        id[tind] = (best < Th*Th) ? -1 : bestInd;
    }
} |
24,045 | #include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <math.h>
#include <cuda.h>
#include <cuComplex.h>
#include <cufft.h>
// In-place fftshift of a width x height complex image: swap quadrant 1 with
// 3 and quadrant 2 with 4 so the zero-frequency term moves to the center.
// Uses integer halves, so odd dimensions leave the middle row/column untouched.
extern "C" int fftshift(cuComplex *target, unsigned int width, unsigned int height){
    const unsigned int halfw = width / 2;
    const unsigned int halfh = height / 2;
    for (unsigned int x = 0; x < halfw; x++){
        for (unsigned int y = 0; y < halfh; y++){
            // Quadrant 1 <-> quadrant 3
            unsigned int p = y * width + x;
            unsigned int q = (y + halfh) * width + (x + halfw);
            cuComplex tmp = target[p];
            target[p] = target[q];
            target[q] = tmp;
            // Quadrant 2 <-> quadrant 4
            p = (y + halfh) * width + x;
            q = y * width + (x + halfw);
            tmp = target[p];
            target[p] = target[q];
            target[q] = tmp;
        }
    }
    return(0);
}
// Forward 2-D FFT on the GPU: copy `original` to the device, run a C2C
// transform, copy the spectrum back into `transform`, then fftshift it so
// DC is centered. Returns 0.
extern "C" int gpufouriertransform(cuComplex *original, cuComplex *transform, unsigned int width, unsigned int height){
    cufftHandle plan;
    cufftPlan2d(&plan,height, width, CUFFT_C2C);
    cuComplex *gpuoriginal, *gputransform;
    cudaMalloc(&gpuoriginal, sizeof(cuComplex) * width * height);
    cudaMalloc(&gputransform, sizeof(cuComplex) * width * height);
    cudaDeviceSynchronize();
    cudaMemcpy(gpuoriginal, original, sizeof(cuComplex) * width * height, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    // FIX: use the named cuFFT direction constant instead of the magic -1;
    // also removed the unused threadsPerBlock/numBlock locals.
    cufftExecC2C(plan, gpuoriginal, gputransform, CUFFT_FORWARD);
    cudaMemcpy(transform, gputransform, sizeof(cuComplex) * width * height, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    fftshift(transform, width, height);
    cudaFree(gpuoriginal);
    cudaFree(gputransform);
    cufftDestroy(plan);
    return(0);
}
// Inverse 2-D FFT on the GPU. Returns 0.
// NOTE(review): unlike the forward path, no fftshift is applied here, and
// cuFFT's inverse is unnormalized (results are scaled by width*height) --
// confirm callers account for both.
extern "C" int gpuifouriertransform(cuComplex *original, cuComplex *transform, unsigned int width, unsigned int height){
    cufftHandle plan;
    cufftPlan2d(&plan,height, width, CUFFT_C2C);
    cuComplex *gpuoriginal, *gputransform;
    cudaMalloc(&gpuoriginal, sizeof(cuComplex) * width * height);
    cudaMalloc(&gputransform, sizeof(cuComplex) * width * height);
    cudaDeviceSynchronize();
    cudaMemcpy(gpuoriginal, original, sizeof(cuComplex) * width * height, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    // FIX: use the named cuFFT direction constant instead of the magic 1;
    // also removed the unused threadsPerBlock/numBlock locals.
    cufftExecC2C(plan, gpuoriginal, gputransform, CUFFT_INVERSE);
    cudaMemcpy(transform, gputransform, sizeof(cuComplex) * width * height, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaFree(gpuoriginal);
    cudaFree(gputransform);
    cufftDestroy(plan);
    return(0);
}
|
24,046 | // double indirection, ie float **, in kernel parameter
// this test cuts all gpu buffers from one single gpu buffer
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <cuda.h>
// Fixed-capacity table of device-buffer pointers.  Passed to kernels BY
// VALUE, so the pointer table itself travels in kernel parameter space and
// only the pointed-to buffers need to be device-resident.
struct BoundedArray {
float *bounded_array[8];
};
// Fill `buffer` with -1 using one 32-thread warp: each thread walks its own
// stride-32 column.  `length` is a count of 32-bit words; any tail beyond a
// multiple of 32 words is intentionally left untouched (same as before).
__global__ void wipe(int *buffer, int length) {
    const int lane = threadIdx.x;
    const int chunks = length >> 5;   // number of 32-word groups
    for (int chunk = 0; chunk < chunks; ++chunk) {
        buffer[chunk * 32 + lane] = -1;
    }
}
// Every launched thread redundantly writes the same values: element j of
// buffer i becomes 123 + (i+1) + j.  All threads race on the same addresses
// with identical data, exactly as in the original.
__global__ void run_bounded_array(struct BoundedArray boundedArray, int numBuffers, int N) {
    for (int buf = 0; buf < numBuffers; ++buf) {
        float *dst = boundedArray.bounded_array[buf];
        for (int elem = 0; elem < N; ++elem) {
            dst[elem] = 123.0f + buf + 1 + elem;
        }
    }
}
// Carves three N-float buffers out of one device arena (payload starts at
// offset 256, buffers packed back-to-back), fills them via a kernel that
// receives the pointer table by value, and verifies element (i,j) ==
// 123 + 1 + i + j.
// NOTE(review): cuStreamCreate (driver API) is the first CUDA call — relies
// on implicit context creation; confirm on the target runtime.
void test1() {
int N = 1024;
CUstream stream;
cuStreamCreate(&stream, 0);
const int numBuffers = 3;
char *gpuArena;
// 256 bytes of leading slack + 1024 bytes of trailing slack around the payload.
int mallocSize = numBuffers * N * sizeof(float) + 256 + 1024;
std::cout << "mallocSize=" << mallocSize << std::endl;
cudaMalloc((void **)&gpuArena, mallocSize);
// Pre-poison the whole arena with -1 words (>>2 converts bytes to words).
wipe<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>((int *)gpuArena, mallocSize >> 2);
struct BoundedArray boundedArray;
float *hostFloats[numBuffers];
for(int i = 0; i < numBuffers; i++) {
boundedArray.bounded_array[i] = (float *)(gpuArena + 256 + i * N * sizeof(float));
std::cout << "bounded_array[" << i << "]=" << (long)boundedArray.bounded_array[i] << std::endl;
hostFloats[i] = new float[N];
}
run_bounded_array<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(boundedArray, numBuffers, N);
// cudaMemcpy runs on the legacy default stream, which (for blocking streams)
// synchronises with the kernel's stream — presumably why this is ordered
// before cuStreamSynchronize; confirm on the target runtime.
for(int i = 0; i < numBuffers; i++) {
cudaMemcpy(hostFloats[i], boundedArray.bounded_array[i], N * sizeof(float), cudaMemcpyDeviceToHost);
}
cuStreamSynchronize(stream);
std::cout << std::endl;
for(int i = 0; i < numBuffers; i++) {
for(int j=0; j < 4; j++) {
cout << hostFloats[i][j] << " ";
}
cout << endl;
}
for(int i = 0; i < numBuffers; i++) {
for(int j=0; j < N; j++) {
float expected = 123.0f + 1 + i + j;
float actual = hostFloats[i][j];
if(actual != expected) {
std::cout << "mismatch for i=" << i << " j=" << j << " expected=" << expected << " actual=" << actual << std::endl;
assert(false);
}
}
}
for(int i=0; i < numBuffers; i++) {
delete[] hostFloats[i];
}
cudaFree(gpuArena);
cuStreamDestroy(stream);
std::cout << "test1 finished ok" << std::endl;
}
// Pointer table with two access paths: a fixed-size member array (travels
// by value in kernel parameter space) and a float** indirection that must
// point at device-resident memory.  Despite the name, this is a struct, not
// a union — both members coexist.
struct BoundedArrayUnion {
float *bounded_array[8];
float **unbounded_array;
};
// Writes through either the fixed-size pointer table (kernel-parameter
// space) or the float** indirection (device memory), selected by
// `useUnbounded`; element j of buffer i gets 123 + (i+1) + j.
__global__ void run_bounded_array_two(struct BoundedArrayUnion mystruct, int useUnbounded, int numBuffers, int N) {
    float **table = (useUnbounded != 0) ? mystruct.unbounded_array : mystruct.bounded_array;
    for (int buf = 0; buf < numBuffers; ++buf) {
        for (int elem = 0; elem < N; ++elem) {
            table[buf][elem] = 123.0f + buf + 1 + elem;
        }
    }
}
// Same arena-carving scheme as test1, but passes the pointer table inside
// struct BoundedArrayUnion with useUnbounded=0 — i.e. exercises the bounded
// fixed-size member.  Verifies element (i,j) == 123 + 1 + i + j.
void test2_bounded() {
int N = 1024;
CUstream stream;
cuStreamCreate(&stream, 0);
const int numBuffers = 3;
char *gpuArena;
int mallocSize = numBuffers * N * sizeof(float) + 256 + 1024;
std::cout << "mallocSize=" << mallocSize << std::endl;
cudaMalloc((void **)&gpuArena, mallocSize);
wipe<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>((int *)gpuArena, mallocSize >> 2);
struct BoundedArrayUnion boundedArray;
// NOTE(review): unbounded_array is left uninitialised here — fine for
// useUnbounded=0, since the kernel never reads it on that path.
float *hostFloats[numBuffers];
for(int i = 0; i < numBuffers; i++) {
boundedArray.bounded_array[i] = (float *)(gpuArena + 256 + i * N * sizeof(float));
std::cout << "bounded_array[" << i << "]=" << (long)boundedArray.bounded_array[i] << std::endl;
hostFloats[i] = new float[N];
}
run_bounded_array_two<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(boundedArray, 0, numBuffers, N);
for(int i = 0; i < numBuffers; i++) {
cudaMemcpy(hostFloats[i], boundedArray.bounded_array[i], N * sizeof(float), cudaMemcpyDeviceToHost);
}
cuStreamSynchronize(stream);
std::cout << std::endl;
for(int i = 0; i < numBuffers; i++) {
for(int j=0; j < 4; j++) {
cout << hostFloats[i][j] << " ";
}
cout << endl;
}
for(int i = 0; i < numBuffers; i++) {
for(int j=0; j < N; j++) {
float expected = 123.0f + 1 + i + j;
float actual = hostFloats[i][j];
if(actual != expected) {
std::cout << "mismatch for i=" << i << " j=" << j << " expected=" << expected << " actual=" << actual << std::endl;
assert(false);
}
}
}
for(int i=0; i < numBuffers; i++) {
delete[] hostFloats[i];
}
cudaFree(gpuArena);
cuStreamDestroy(stream);
std::cout << "finished test2_bounded" << std::endl;
}
/* Same as test2_bounded but exercises the float** ("unbounded") path.
 * Fix: the original launched the kernel with useUnbounded=1 while
 * mystruct.unbounded_array was never initialised, so the kernel chased an
 * indeterminate pointer (which is why main() has this test commented out).
 * We now build a device-resident copy of the pointer table and point
 * unbounded_array at it before the launch. */
void test3_unbounded() {
int N = 1024;
CUstream stream;
cuStreamCreate(&stream, 0);
const int numBuffers = 3;
char *gpuArena;
int mallocSize = numBuffers * N * sizeof(float) + 256 + 1024;
std::cout << "mallocSize=" << mallocSize << std::endl;
cudaMalloc((void **)&gpuArena, mallocSize);
wipe<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>((int *)gpuArena, mallocSize >> 2);
struct BoundedArrayUnion boundedArray;
float *hostFloats[numBuffers];
for(int i = 0; i < numBuffers; i++) {
boundedArray.bounded_array[i] = (float *)(gpuArena + 256 + i * N * sizeof(float));
std::cout << "bounded_array[" << i << "]=" << (long)boundedArray.bounded_array[i] << std::endl;
hostFloats[i] = new float[N];
}
// The float** path dereferences the table on the device, so the table
// itself must live in device memory — copy the host-side pointers over.
float **gpuPtrTable;
cudaMalloc((void **)&gpuPtrTable, numBuffers * sizeof(float *));
cudaMemcpy(gpuPtrTable, boundedArray.bounded_array, numBuffers * sizeof(float *), cudaMemcpyHostToDevice);
boundedArray.unbounded_array = gpuPtrTable;
run_bounded_array_two<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(boundedArray, 1, numBuffers, N);
for(int i = 0; i < numBuffers; i++) {
cudaMemcpy(hostFloats[i], boundedArray.bounded_array[i], N * sizeof(float), cudaMemcpyDeviceToHost);
}
cuStreamSynchronize(stream);
std::cout << std::endl;
for(int i = 0; i < numBuffers; i++) {
for(int j=0; j < 4; j++) {
cout << hostFloats[i][j] << " ";
}
cout << endl;
}
for(int i = 0; i < numBuffers; i++) {
for(int j=0; j < N; j++) {
float expected = 123.0f + 1 + i + j;
float actual = hostFloats[i][j];
if(actual != expected) {
std::cout << "mismatch for i=" << i << " j=" << j << " expected=" << expected << " actual=" << actual << std::endl;
assert(false);
}
}
}
for(int i=0; i < numBuffers; i++) {
delete[] hostFloats[i];
}
cudaFree(gpuPtrTable);
cudaFree(gpuArena);
cuStreamDestroy(stream);
std::cout << "finished test3_unbounded" << std::endl;
}
int main(int argc, char *argv[]) {
test1();
test2_bounded();
// test3_unbounded();  // disabled: mystruct.unbounded_array is never initialised in test3, so the useUnbounded=1 launch dereferences garbage
return 0;
}
|
24,047 | #include <stdio.h>
// Prints a greeting ("Hola, soy el hilo...") from every thread, echoing the
// float argument `e`.  Device printf output order across threads is
// unspecified.
__global__ void holaCUDA(float e){
printf("Hola, soy el hilo %d del bloque %d con valor pi -> %f\n", threadIdx.x, blockIdx.x,e);
}
/* Launch 3 blocks x 4 threads; each prints a greeting carrying pi.
 * Fix: cudaDeviceSynchronize() must precede cudaDeviceReset() — device-side
 * printf output is only flushed at a synchronisation point, so without it
 * the messages can be lost when the context is torn down. */
int main (int argc, char **argv)
{
    holaCUDA<<<3,4>>>(3.1416f);   /* float literal: the kernel parameter is float */
    cudaDeviceSynchronize();      /* flush device-side printf before teardown */
    cudaDeviceReset();            /* re-initialise the device */
    return 0;
}
|
24,048 | #include "includes.h"
// Broadcast the scalar v into both lanes of each double2 element of buf.
// One thread per element; the caller must launch at least elem_count threads.
__global__ void set_with_value_util_kernel( double2 * __restrict buf, double v, int elem_count)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= elem_count)
        return;
    buf[idx] = make_double2(v, v);
}
24,049 | #include <cuda.h>
#include <float.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// Point set: parallel x/y coordinate arrays, each of length num_node.
typedef struct Data {
float* x;
float* y;
int num_node;
} data;
/* Read a point file: first line is the node count (up to 5 digits), then
 * one "x y" pair per line.  Returns a freshly malloc'd data struct (caller
 * owns it; see free_data), or NULL on open/parse failure.
 * Fixes: NULL check on fopen (was a crash on a missing file); the FILE is
 * now closed on the early-error path (was leaked); fscanf success is tested
 * with != 1 so EOF (-1) is caught too, and the per-line reads are checked. */
data* read_data(const char* file) {
FILE* f = fopen (file, "r");
if (f == NULL) {
printf("cannot open inputfile %s.\n", file);
return NULL;
}
int num_node;
if (fscanf(f, "%5d\n", &num_node) != 1) {  /* fscanf returns items matched */
printf("read inputfile failed.\n");
fclose(f);
return NULL;
}
printf("num_node = %d\n", num_node);
data* d = (data*) malloc(sizeof(data));
d->num_node = num_node;
d->x = (float*)malloc(num_node * sizeof(float));
d->y = (float*)malloc(num_node * sizeof(float));
for (int i = 0; i < num_node; i++) {
if (fscanf(f, "%f %f\n", &(d->x[i]), &(d->y[i])) != 2) {
printf("read inputfile failed.\n");
break;  /* stop on a malformed line; points read so far are kept */
}
}
for (int i = 0; i < d->num_node; i++) {
printf("%d: x = %f, y = %f\n", i, d->x[i], d->y[i]);
}
fclose(f);
return d;
}
/*
__device__ float dist(const float x, const float y, const float cx, const float cy) {
return sqrt(pow(x - cx, 2.0) + pow(y - cy, 2.0));
}
*/
/* Euclidean distance between (x,y) and (cx,cy).
 * Fix: hypotf replaces sqrtf(powf(..,2.0)+powf(..,2.0)) — drops the
 * double-precision 2.0 literals and is robust against intermediate
 * overflow/underflow. */
__device__ float dist(const float x, const float y, const float cx, const float cy) {
    return hypotf(x - cx, y - cy);
}
// One thread per node, single block: iteratively label each node with a
// centroid, rebuild centroids, and stop when centroid movement is small.
// NOTE(review): the __shared__ POINTERS below (tmp_cx, tmp_cy, tmp_label,
// count, dist_c) are never pointed at any allocation — every dereference is
// undefined behaviour.  There is also no __syncthreads() between the phases
// (label / reset / accumulate / divide), so threads race on cx/cy/count.
__global__ void kmeans_kernel(const float* x, const float* y, float* cx, float* cy, int num_nodes, int num_cent, int iter, float* cx_result, float* cy_result, int* result) {
__shared__ bool done;
__shared__ float* tmp_cx;
__shared__ float* tmp_cy;
__shared__ int* tmp_label;
__shared__ int* count;
__shared__ float* dist_c;
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < iter; i++) {
// calculate the closest centroid
done = true;
for (int jj = 0; jj < num_cent; jj++) {
count[jj] = 0;
}
float tmp_min = 9999;
// NOTE(review): tmp_min is never updated inside this loop, so EVERY
// centroid closer than 9999 overwrites the label and bumps its count —
// the node ends up labelled with the LAST such centroid, not the nearest.
for (int jj = 0; jj < num_cent; jj++) {
if (tmp_min > dist(x[id], y[id], cx[jj], cy[jj])) {
tmp_label[id] = jj;
atomicAdd(count + jj, 1);
}
}
// update centroid
// calculate new centroids
// add all x, all y of the same label, then divide by the nubmer of same label nodes.
for (int jj = 0; jj < num_cent; jj++) {
tmp_cx[jj] = cx[jj];
tmp_cy[jj] = cy[jj];
cx[jj] = 0;
cy[jj] = 0;
}
for (int jj = 0; jj < num_cent; jj++) {
if (tmp_label[id] == jj) {
atomicAdd(&(cx[jj]), x[id]);
atomicAdd(&(cy[jj]), y[id]);
}
}
for (int jj = 0; jj < num_cent; jj++) {
cx[jj] /= count[jj];
cy[jj] /= count[jj];
}
// calculate distance from prev_centroid to cur_centroid
// NOTE(review): dist takes (x, y, cx, cy); passing (cx, tmp_cx, cy, tmp_cy)
// measures the wrong pairing — the intended call is presumably
// dist(cx[jj], cy[jj], tmp_cx[jj], tmp_cy[jj]).  Confirm before fixing.
for (int jj = 0; jj < num_cent; jj++) {
dist_c[jj] = dist(cx[jj], tmp_cx[jj], cy[jj], tmp_cy[jj]);
if (dist_c[jj] > 0.001) {
done = false;
}
}
// if the change of dist < 0.01, then break
// cx_result = tmp_cx
// cy_result = tmp_cy
// result = tmp_label;
//
if (done) {
for (int jj = 0; jj < num_cent; jj++) {
cx_result[jj] = tmp_cx[jj];
cy_result[jj] = tmp_cy[jj];
result[id] = tmp_label[id];
}
break;
}
}
}
// Host wrapper: copies nodes and initial centroids to the device, runs the
// single-block kmeans kernel, and copies labels/centroids back.
// NOTE(review): launches <<<1, num_nodes>>> — this exceeds the per-block
// thread limit for large inputs, and none of the CUDA calls are checked.
void kmeans(const float* x, const float* y, const float* cx, const float* cy, int num_nodes, int num_cent, int iter, float* cx_result, float* cy_result, int* result) { // memory alloc and hac execute
int Dim = num_nodes; // TODO
float* d_in_x; // for cudamalloc in 'nodes data'
float* d_in_y;
float* d_in_cx;
float* d_in_cy;
float* d_out_cx_result; // result centroid
float* d_out_cy_result;
int* d_out_label_result;
cudaMalloc((void**) &d_in_x, num_nodes * sizeof(float));
cudaMalloc((void**) &d_in_y, num_nodes * sizeof(float));
cudaMalloc((void**) &d_in_cx, num_cent * sizeof(float));
cudaMalloc((void**) &d_in_cy, num_cent * sizeof(float));
cudaMalloc((void**) &d_out_cx_result, num_cent * sizeof(float));
cudaMalloc((void**) &d_out_cy_result, num_cent * sizeof(float));
cudaMalloc((void**) &d_out_label_result, num_nodes * sizeof(int));
cudaMemcpy(d_in_x, x, num_nodes * sizeof(float), cudaMemcpyHostToDevice); // node memcpy
cudaMemcpy(d_in_y, y, num_nodes * sizeof(float), cudaMemcpyHostToDevice); // centroid memcpy
cudaMemcpy(d_in_cx, cx, num_cent * sizeof(float), cudaMemcpyHostToDevice); // node memcpy
cudaMemcpy(d_in_cy, cy, num_cent * sizeof(float), cudaMemcpyHostToDevice); // centroid memcpy
kmeans_kernel<<<1, Dim>>>(d_in_x, d_in_y, d_in_cx, d_in_cy, num_nodes, num_cent, iter, d_out_cx_result, d_out_cy_result, d_out_label_result);
// The blocking cudaMemcpy calls below double as synchronisation points.
cudaMemcpy(result, d_out_label_result, num_nodes * sizeof(int), cudaMemcpyDeviceToHost); // cuda memcopy d to h
cudaMemcpy(cx_result, d_out_cx_result, num_cent * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(cy_result, d_out_cy_result, num_cent * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_in_x);
cudaFree(d_in_y);
cudaFree(d_in_cx);
cudaFree(d_in_cy);
cudaFree(d_out_cx_result);
cudaFree(d_out_cy_result);
cudaFree(d_out_label_result);
return;
}
// TODO
/* Release a data struct produced by read_data (tolerates NULL).
 * Fix: was an empty TODO stub, so the coordinate arrays and the struct
 * itself leaked. */
void free_data(data* d) {
    if (d == NULL) {
        return;
    }
    free(d->x);
    free(d->y);
    free(d);
}
// TODO
// Dump node labels and the final centroids to stdout.
// NOTE(review): "\5" in the printf formats is the octal escape for control
// character 0x05 — almost certainly a typo for "\t" or a "%5d" width;
// confirm the intended column format before changing it.
void print_result (const int num_node, const int num_cent, const float* cx, const float* cy, const int* label) {
printf("\t");
for (int i = 0; i < num_node; i++) {
printf("\5 %d", i);
}
printf("\nLabel : ");
for (int i = 0; i < num_node; i++) {
printf("\5 %d", label[i]);
}
for (int i = 0; i < num_cent; i++) {
printf("\ncentroid %d\t : %5f %5f", i, cx[i], cy[i]);
}
printf("\n");
}
// Usage: prog <nodes file> <centroids file>.  Reads both point sets, runs
// kmeans (result arrays sized by the centroid count), prints the outcome.
// NOTE(review): read_data may return NULL, which would crash at
// c->num_node below; d and c are never released (free_data is a stub and
// the free() calls are commented out).
int main (int argc, char** argv) {
if (argc != 3) {
printf("Usage: %s <nodes file> <centroids file>\n", argv[0]);
return 0;
} else {
data* d = read_data(argv[1]);
data* c = read_data(argv[2]);
float* cx_result = (float*)malloc(c->num_node * sizeof(float));;
float* cy_result = (float*)malloc(c->num_node * sizeof(float));;
int* result = (int*) malloc(d->num_node * sizeof(int));
int iter = 100;
if (d->num_node && c->num_node) {
printf("Let's do CLUSTER!\n");
kmeans(d->x, d->y, c->x, c->y, d->num_node, c->num_node, iter, cx_result, cy_result, result); // cuda
printf("print the result!\n");
print_result(d->num_node, c->num_node, cx_result, cy_result, result);
// free(d);
// free(c);
free(cx_result);
free(cy_result);
free(result);
} else {
printf("empty file.\n");
}
}
return 0;
}
|
24,050 | #include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
using namespace std;
// Accumulate a 6-point star stencil of half-width k/2 around (x,y,z) into
// d_r (volume X*Y*Z, x-fastest layout).  lk starts at 0, so the centre
// value is added six extra times — this matches the CPU reference in main.
// NOTE(review): no boundary clamping here, unlike the mirrored CPU
// reference; callers must keep (x,y,z) far enough inside the volume.
__device__ void _3Dstencil_(float *d_e,float *d_r,int X,int Y,int Z,int k, int x, int y,int z)
{
int h_r_i = x + ( y * (X) ) + ( z* (X*Y) );
int h_e_i = h_r_i;
d_r[h_r_i] = d_e[h_e_i];
for(int lk =0;lk<(k/2);lk++)
{
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
d_r[h_r_i] += d_e[h_e_i];
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
d_r[h_r_i] += d_e[h_e_i];
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
d_r[h_r_i] += d_e[h_e_i];
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
d_r[h_r_i] += d_e[h_e_i];
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
d_r[h_r_i] += d_e[h_e_i];
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
d_r[h_r_i] += d_e[h_e_i];
}
}
// Intended shared-memory variant: each (x,y) thread stages its z-column of
// the volume into the dynamic shared tile `cube`, then runs the star
// stencil over the interior z slices.
// NOTE(review): `cube` is written but never read — _3Dstencil_ reads d_e
// directly — and there is no __syncthreads() after the staging loop, so the
// shared tile is currently dead weight.
__global__ void _3Dstencil_SharedMemory_Cube(float *d_e,float *d_r,int X,int Y,int Z,int k,int t)
{
int x,y,globalidx,localIdx;
x = threadIdx.x + (blockIdx.x*blockDim.x);
y = threadIdx.y + (blockIdx.y*blockDim.y);
int k2=k/2;
//conta para tamanho do cubo em cada dimensão DIM+t*k2
extern __shared__ float cube[];
for(int z=0;z<Z;z++)
{
globalidx = x + y*X + z*(X*Y);
localIdx = threadIdx.x + threadIdx.y*blockDim.x + z*(blockDim.x*blockDim.y);
cube[localIdx] = d_e[globalidx];
}
for(int z=k2;z<Z-k2;z++)
{
_3Dstencil_(d_e,d_r,X,Y,Z,k,x+k2,y+k2,z);
}
}
/*
*argumentos
*1 - n_elementos
*2 - threads por bloco
*3 - n_blocos
*4 - print
*/
/*
 * Driver: builds an (X+k)x(Y+k)x(Z+k) volume, computes `times` iterations
 * of a border-mirrored 6-point stencil on the CPU as the reference, runs
 * the shared-memory kernel once, and compares the two.
 * argv: [1]=X [2]=Y [3]=Z [4]=k; X/Y above 32 are tiled into 32-wide blocks.
 * NOTE(review): Ticks[0] is never assigned before Ticks[1]-Ticks[0] is
 * computed, so `Tempo` is garbage; also the CPU reference mirrors indices
 * at the borders while the GPU path does not, so borders can differ.
 */
int main(int argc, char* argv[]) {
float *h_e,*h_r,*h_r_test;
float *d_e, *d_r;
int size,tam,times,sharedSize;
clock_t Ticks[2];
times = 1;
int X=8;
int Y=8;
int BX=8;
int BY=8;
int Z=4;
int k=2;
int GX=1;
int GY=1;
if(argc > 1)
{
X = atoi(argv[1]);
BX=X;
}
if(argc > 2)
{
Y = atoi(argv[2]);
BY = Y;
}
if(argc > 3)
Z = atoi(argv[3]);
if(argc > 4)
k = atoi(argv[4]);
if(X>32)
{
GX = ceil((float)X/(float)32);
BX = 32;
}
if(Y>32)
{
GY = ceil((float)Y/(float)32);
BY = 32;
}
dim3 block_dim(BX,BY,1);
dim3 grid_dim(GX,GY,1);
sharedSize = ((block_dim.x+k)*(block_dim.y+k)*(Z+k))*sizeof(float);
size = (X+k) * (Y+k) * (Z+k) * sizeof(float);
tam = X * Y * Z;
// From here on X/Y/Z include the k-wide halo.
X=X+k;
Y=Y+k;
Z=Z+k;
h_e = (float*) malloc(size);
h_r = (float*) malloc(size);
h_r_test = (float*) malloc(size);
cudaMalloc(&d_e, size);
cudaMalloc(&d_r, size);
for (int i = 0; i < size/sizeof(float); i++) {
h_e[i] = (float)(rand()%9000)/100.0;
h_r[i] = 0;
}
/* Copy vectors from host memory to device memory */
cudaMemcpy(d_e, h_e, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_r, h_r, size, cudaMemcpyHostToDevice);
int k2 = k/2;
// CPU reference: same star stencil, but indices are mirrored at each border.
for(int t =0; t<times; t++)
{
for(int z=k2; z<Z-k2; z++)
{
for(int y=k2; y<Y-k2; y++)
{
for(int x=k2; x<X-k2; x++)
{
int h_r_i = x + ( y * (X) ) + ( z* (X*Y) );
int h_e_i = h_r_i;
h_r_test[h_r_i] = h_e[h_e_i];
for(int lk =0;lk<(k/2);lk++)
{
if(x+lk >= X)
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(x-lk < 0)
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(y+lk >= Y)
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(y-lk < 0)
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(z+lk >= Z)
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
if(z-lk < 0)
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
h_r_test[h_r_i] += h_e[h_e_i];
}
}
}
}
for (int i = 0; i < tam; i++)
{
h_e[i] = h_r_test[i];
}
}
cudaEvent_t start, stop;
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0);
/******************
*** Kernel Call ***
*******************/
//_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z);
_3Dstencil_SharedMemory_Cube<<<grid_dim,block_dim,sharedSize>>>(d_e,d_r,X,Y,Z,k,times);
cudaError_t err = cudaSuccess;
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
}
/******************
*** Kernel Call ***
*******************/
cudaDeviceSynchronize();
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
float elapsedTime;
cudaEventElapsedTime (&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
Ticks[1] = clock();
double Tempo = (Ticks[1] - Ticks[0]) * 1000.0 / CLOCKS_PER_SEC;
printf("X %d || Y %d \nBX %d || BY %d\nZ %d \n",X,Y,BX,BY,Z);
printf ("[%d,%.5f,%lf],\n", tam,elapsedTime,Tempo/1000.0);
cudaMemcpy(h_r, d_r, size, cudaMemcpyDeviceToHost);
bool certo=true;
//printf("threads/blk %d -- blocks %d\n",th_p_blk,blks);
for (int i = 0; i < tam; i++){
//printf("%d - %d\n",h_z_res[i],h_z[i]);
if(h_r_test[i] != h_r[i])
certo=false;
}
// Only reports when a mismatch was found.
if(!certo)
printf("\n*****\n certo = %s\n*****\n", certo ? "true" : "false");
cudaFree(d_e);
cudaFree(d_r);
std::free(h_e);
std::free(h_r);
std::free(h_r_test);
return 0;
} /* main */
|
24,051 | #include "includes.h"
/* Replace every NaN or +/-Inf element of `input` with 0.
 * One thread per element; caller must launch at least `size` threads.
 * Fix: the index is now size_t — the original compared a signed int against
 * the size_t `size` (signed/unsigned mismatch) and would overflow for
 * buffers larger than INT_MAX elements. */
__global__ void reset_nan_and_inf_kernel(float *input, size_t size)
{
    const size_t index = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (index < size) {
        const float val = input[index];
        if (isnan(val) || isinf(val)) {
            input[index] = 0;
        }
    }
}
24,052 | //xfail:REPAIR_ERROR
//--blockDim=2 --gridDim=1 --no-inline
#include <cuda.h>
// Intentional data-race fixture (see the xfail header above): the atomicAdd
// of 0 is a plain read of *i, so every thread computes the same j and all
// of them race writing their tid to A[j].  Do NOT "fix" the race — the
// verifier is expected to report it.
__global__ void race_test (unsigned int* i, int* A)
{
int tid = threadIdx.x;
int j = atomicAdd(i,0);
A[j] = tid;
}
|
24,053 |
#include <cuda.h>
#include <iostream>
/* Print the compute capability of device 0 as "<major><minor>".
 * Fixes: cuInit(0) must precede any other driver-API call (cuDeviceGet);
 * added the missing return value.  Passing the CUdevice ordinal to the
 * runtime call is kept from the original. */
int main() {
  cuInit(0);                 /* required before cuDeviceGet */
  CUdevice dev;
  cuDeviceGet(&dev, 0);
  cudaDeviceProp deviceProp;
  cudaGetDeviceProperties(&deviceProp, dev);
  printf("%d%d\n", deviceProp.major, deviceProp.minor);
  return 0;
}
24,054 | #include <stdio.h>
// Enumerate all CUDA devices and print their main properties.
// NOTE(review): "deviceCoint" is a typo for deviceCount (renaming touches
// code, so it is only flagged here); API return codes are unchecked.
int main(void)
{
int deviceCoint;
cudaDeviceProp devProp;
cudaGetDeviceCount(&deviceCoint);
printf("Found %d devices\n", deviceCoint);
for (int device = 0; device < deviceCoint; ++device)
{
cudaGetDeviceProperties(&devProp, device);
printf("Device: %d\n", device);
printf("Compute capability: %d.%d\n", devProp.major, devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %li\n", devProp.totalGlobalMem);
printf("Shared memory per block: %li\n", devProp.sharedMemPerBlock);
printf("Registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Max threads per block: %d\n", devProp.maxThreadsPerBlock);
printf("Total constant memory: %li\n", devProp.totalConstMem);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Texture alignment: %lu\n", devProp.textureAlignment);
printf("Device overlap: %d\n", devProp.deviceOverlap);
printf("Multiprocessor count: %d\n", devProp.multiProcessorCount);
printf("Max threads dim: %d %d %d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
printf("Max grid size: %d %d %d\n", devProp.maxGridSize[0], devProp.maxGridSize[1], devProp.maxGridSize[2]);
}
return 0;
}
|
24,055 | /**
* Author:易培淮
* Mail:yiph@ihep.ac.cn
* Function:Accelerate simulation with Single GPU
* 2018/11/27
*/
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <math_constants.h>
#include <assert.h>
// Per-PMT append-only view into the shared hit-time buffer
// (bound by init_res_arr, written by append_res_arr).
typedef struct res_arr
{
double *arr;      // flat storage shared by all PMTs
int *pmt_list;    // per-PMT used-slot counters
int index;        // this PMT's start offset into arr (pmtid * pmt_mem)
int id;           // PMT id (index into pmt_list)
int max;          // per-PMT capacity (pmt_mem)
} Res_Arr;
__device__ double generateRandom(curandState *state);
__device__ void generateRandomInit(curandState *state,int seed);
__device__ int sampling(curandState *state,double *histo,int max,int id);
__device__ int binarySearch(double *histo,double target,int max,int id);
__device__ double calculateAngle(double x,double y,double z,double a,double b,double c);
__device__ void generateHits(double r,double theta, double ratio,double start_time,\
double *hittime_histo,double *npe,curandState *state,Res_Arr *p_r_arr);
__device__ int get_hittime(double r, double theta, int mode, double *hittime_histo, curandState *state);
__device__ int get_hittime_bin(int binx, int biny, int mode, double *hittime_histo, curandState *state);
__device__ int get_hittime_all(int binx, int biny,double *hittime_histo, curandState *state);
__device__ int get_bin_x(double r);
__device__ int get_bin_y(double theta);
__device__ int r_findBin(double r);
__device__ int get_npe(double r,double theta,double *npe,curandState *state);
__device__ int r3_findBin(double r3);
__device__ int theta_findBin(double theta);
__device__ int get_npe_num(int binx,int biny,double *npe,curandState *state);
__device__ int generateRandomInt(curandState *state,int begin,int end);
__global__ void pmt_calculate(double r,double pos_x,double pos_y,double pos_z,\
double *pmt_x,double *pmt_y,double *pmt_z,double intPart,double fractionPart,\
double start_time,int numElements,double *hittime_histo,double *npe,int *seed,\
double *result,int *pmt_res_list,int size);
__device__ void calculate_by_step(double r,double pos_x,double pos_y,double pos_z,\
double pmt_x,double pmt_y,double pmt_z,double intPart,double fractionPart,\
double start_time,double *hittime_histo,double *npe,curandState *state,Res_Arr *p_pmt_arr);
__global__ void step_calculate_every_pmt(double *r,double *pos_x,double *pos_y,\
double *pos_z,double *pmt_x,double *pmt_y,double *pmt_z,double *intPart,\
double *fractionPart,double *start_time, int numElements,double *hittime_histo,\
double *npe,int *seed,double *result,int *pmt_res_list,int size);
__device__ void append_res_arr(Res_Arr *p, double val);
__device__ void init_res_arr(Res_Arr *p,double *result,int *pmt_res_list,int pmtid,int size);
// float CDF_Sampling_Wrapping(double *h_pmt,double *h_hit,double *h_result, int *seed,int total_num, int nBytes,int max_n,int max_time);
#define CHECK(call) \
{\
const cudaError_t error = call;\
if (error != cudaSuccess)\
{\
printf("Error:%s:%d, ", __FILE__, __LINE__);\
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error));\
exit(1);\
}\
}
#define pmt_num 17746
#define pmt_mem 10000
#define CUDART_PI_F 3.141592654f
// One thread per PMT for a single deposition: seed the per-thread RNG,
// compute the angle between the PMT position and the vertex, then generate
// `intPart` full-weight hit batches plus one batch down-weighted by
// `fractionPart`.  Hits are appended to this PMT's slice of `result`.
__global__ void
pmt_calculate(double r,double pos_x,double pos_y,double pos_z,double *pmt_x,double *pmt_y,\
double *pmt_z,double intPart,double fractionPart,double start_time,int numElements,\
double *hittime_histo,double *npe,int *seed,double *result,int *pmt_res_list,int size)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
//numElements = pmt numbers
if (id < numElements){
curandState state;
generateRandomInit(&state, seed[id]);
Res_Arr pmt_arr;
init_res_arr(&pmt_arr,result,pmt_res_list,id,size);
double theta = calculateAngle(pmt_x[id],pmt_y[id],pmt_z[id],pos_x,pos_y,pos_z);
// printf("theta = %lf\n",theta);
for(int j = 0; j < intPart; ++j){
// r in metres
generateHits(r,theta,1,start_time,hittime_histo,npe,&state,&pmt_arr);
}
generateHits(r,theta,fractionPart,start_time,hittime_histo,npe,&state,&pmt_arr);
}
}
// One thread per PMT, looping over all `size` deposition steps.  Step data
// is laid out step-major (index = step*numElements + pmtid); each step
// delegates to calculate_by_step with this PMT's fixed coordinates.
__global__ void
step_calculate_every_pmt(double *r,double *pos_x,double *pos_y,double *pos_z,double *pmt_x,\
double *pmt_y,double *pmt_z,double *intPart,double *fractionPart,double *start_time, \
int numElements,double *hittime_histo,double *npe,int *seed,double *result, \
int *pmt_res_list,int size)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
//numElements = pmt numbers
if (id < numElements){
curandState state;
generateRandomInit(&state,seed[id]);
Res_Arr pmt_arr;
init_res_arr(&pmt_arr,result,pmt_res_list,id,size);
for(int i=0;i<size;i++)
{
int index = i*numElements+id;
calculate_by_step(r[index],pos_x[index],pos_y[index],pos_z[index],pmt_x[id],pmt_y[id],\
pmt_z[id],intPart[index],fractionPart[index],start_time[index],hittime_histo,\
npe,&state,&pmt_arr);
}
}
}
// One deposition step toward a single PMT: emit `intPart` full-weight hit
// batches plus one final batch down-weighted by `fractionPart`.
__device__ void
calculate_by_step(double r,double pos_x,double pos_y,double pos_z,double pmt_x,double pmt_y,\
double pmt_z,double intPart,double fractionPart,double start_time,double *hittime_histo,\
double *npe,curandState *state,Res_Arr *p_pmt_arr)
{
    const double theta = calculateAngle(pmt_x, pmt_y, pmt_z, pos_x, pos_y, pos_z);
    int batch = 0;
    while (batch < intPart) {   // r in metres
        generateHits(r, theta, 1, start_time, hittime_histo, npe, state, p_pmt_arr);
        ++batch;
    }
    generateHits(r, theta, fractionPart, start_time, hittime_histo, npe, state, p_pmt_arr);
}
/* Opening angle (radians) between vectors (x,y,z) and (a,b,c) as seen from
 * the origin; returns 0 when (a,b,c) is the zero vector, as before.
 * Fix: the cosine is clamped into [-1,1] before acos — floating-point
 * rounding in the dot-product / norm quotient can push it infinitesimally
 * outside the domain, which would make acos return NaN.
 * NOTE(review): a zero (x,y,z) still yields 0/0 -> NaN, matching the
 * original; confirm callers never pass it. */
__device__ double
calculateAngle(double x,double y,double z,double a,double b,double c)
{
    if (a == 0 && b == 0 && c == 0) {
        return 0.0;
    }
    double cosang = (a*x + b*y + c*z) / (norm3d(x, y, z) * norm3d(a, b, c));
    cosang = fmin(1.0, fmax(-1.0, cosang));   /* guard acos domain */
    return acos(cosang);
}
// For one (r, theta) deposition: sample a photoelectron count, then for
// each photoelectron optionally reject it with probability (1-ratio) and
// append start_time + a sampled transit time to the PMT's result slice.
// NOTE(review): the device printf on every hit serialises output and will
// flood stdout in production runs — presumably debug-only; confirm.
__device__ void
generateHits(double r,double theta, double ratio,double start_time,double *hittime_histo,\
double *npe,curandState *state,Res_Arr *p_r_arr)
{
int npe_histo_id = get_npe(r,theta,npe,state);
if (npe_histo_id>0)
{
// printf("npe_histo_id = %d,r = %lf,theta = %lf\n",npe_histo_id,r,theta);
for (int hitj = 0; hitj < npe_histo_id; ++hitj)
{
// printf("ratio=%lf\n",ratio);
// skip the photon according to the energy deposit
if (ratio<1 and generateRandom(state)>ratio)
{
continue;
}
double hittime_single = start_time;
// (m_flag_time)
hittime_single += (double)get_hittime(r, theta, 0, hittime_histo, state);
printf("hittime = %lf\n",hittime_single);
// generated hit
// (m_flag_savehits)
append_res_arr(p_r_arr,hittime_single);
// save_hits(pmtid, hittime_single,result);
}
}
}
// __device__ void
// save_hits(Res_Arr *p,double val){
// append_res_arr(p,val);
// }
// Sample a hit time for radius r / angle theta: map both coordinates to
// histogram bins and delegate to the bin-level sampler.
__device__ int
get_hittime(double r, double theta, int mode, double *hittime_histo, curandState *state) {
    return get_hittime_bin(get_bin_x(r), get_bin_y(theta), mode, hittime_histo, state);
}
// Dispatch on `mode`: only mode 0 (combined tmean + tres sampling) is
// implemented; any other mode yields 0, matching the original behaviour.
__device__ int
get_hittime_bin(int binx, int biny, int mode, double *hittime_histo, curandState *state) {
    if (mode != 0) {
        return 0;
    }
    return get_hittime_all(binx, biny, hittime_histo, state);
}
// Clamp (binx,biny) into the 200x180 histogram grid, flatten to a
// row-major row index, and draw one sample from that row's 3000-entry CDF.
__device__ int
get_hittime_all(int binx, int biny,double *hittime_histo, curandState *state) {
    const int xbinnum = 200;
    const int ybinnum = 180;
    binx = (binx < 1) ? 1 : ((binx > xbinnum) ? xbinnum : binx);
    biny = (biny < 1) ? 1 : ((biny > ybinnum) ? ybinnum : biny);
    const int idx = (binx - 1) * ybinnum + (biny - 1);
    return sampling(state, hittime_histo, 3000, idx);
}
// Radius -> x bin.  xmode is hard-wired to 2 (KR binning), so this always
// delegates to r_findBin; the fallback of bin 1 is kept for parity.
__device__ int
get_bin_x(double r)
{
    const int xmode = 2;
    return (xmode == 2) ? r_findBin(r) : 1;
}
// Angle -> y bin.  ymode is hard-wired to 4, so this always delegates to
// theta_findBin; the fallback of bin 1 is kept for parity.
__device__ int
get_bin_y(double theta) {
    const int ymode = 4;
    return (ymode == 4) ? theta_findBin(theta) : 1;
}
// Map radius r onto 1..200 uniform bins over [0, 17.7]; r == 0 maps to
// bin 1 explicitly (ceil alone would give 0).
__device__ int
r_findBin(double r)
{
    const int binnum = 200;
    const double begin = 0;
    const double end = 17.7;
    return (r == 0) ? 1 : (int)ceil((r - begin) / (end - begin) * binnum);
}
// Sample a photoelectron count for (r, theta): bin r^3 and theta, then
// draw from the corresponding npe CDF row.
__device__ int
get_npe(double r,double theta,double *npe,curandState *state)
{
    const int binx = r3_findBin(pow(r, 3));
    const int biny = theta_findBin(theta);
    return get_npe_num(binx, biny, npe, state);
}
// Map r^3 onto 1..100 uniform bins over [0, 5600]; r3 == 0 maps to bin 1
// explicitly (ceil alone would give 0).
__device__ int
r3_findBin(double r3)
{
    const int binnum = 100;
    const double begin = 0;
    const double end = 5600;
    return (r3 == 0) ? 1 : (int)ceil((r3 - begin) / (end - begin) * binnum);
}
// Map an angle (radians) onto 1..180 uniform bins over
// [0, 180.01*pi/180]; theta == 0 maps to bin 1 explicitly.
__device__ int
theta_findBin(double theta)
{
    const int binnum = 180;
    const double begin = 0;
    const double end = 180.01*CUDART_PI_F/180.0;
    return (theta == 0) ? 1 : (int)ceil((theta - begin) / (end - begin) * binnum);
}
// Sample a photoelectron count from the 100x180 npe CDF table (33 entries
// per (binx,biny) bin).  Out-of-range theta bins are re-rolled uniformly
// when binx==1 and clamped when binx>1.
// NOTE(review): `static long warning` in __device__ code is one
// device-global counter shared by ALL threads and incremented without
// atomics — counts and the <10 gate are racy.
// NOTE(review): binx > 100 combined with an out-of-range biny takes the
// third branch and indexes row binx-1 >= 100, past the 100-row table.
__device__ int
get_npe_num(int binx,int biny,double *npe,curandState *state)
{
int npe_from_single = 0;
if (1 <= binx and binx <= 100 and 1 <= biny and biny <= 180) {
npe_from_single = sampling(state,npe,33,(binx-1)*180+(biny-1));
} else if (binx==1 and (biny<1 or biny>180)) {
biny = generateRandomInt(state,1,180);
npe_from_single = sampling(state,npe,33,(binx-1)*180+(biny-1));
} else if (binx>1 and (biny<1 or biny>180)) {
if (biny>180) { biny = 180; }
else if (biny<1){ biny = 1; }
npe_from_single = sampling(state,npe,33,(binx-1)*180+(biny-1));
} else {
static long warning = 0;
++warning;
if (warning < 10) {
printf("npe lost: %d/%d\n", binx,biny);
} else if (warning == 10) {
printf("too many npe lost complains.\n");
}
}
return npe_from_single;
}
// Uniform double in (0, 1].  The abs() is retained for exact parity with
// the original, although curand_uniform_double is already positive.
__device__ double
generateRandom(curandState *state)
{
    return abs(curand_uniform_double(state));
}
// Integer in (begin, end]: ceil(u*(end-begin)) is 0 only when u is exactly
// 0, so `begin` itself is returned with (near-)zero probability.
// NOTE(review): confirm whether an inclusive [begin, end] range was the
// intent before relying on this distribution.
__device__ int
generateRandomInt(curandState *state,int begin,int end)
{
int result = begin+int(ceil(abs(curand_uniform_double(state))*(end-begin)));
return result;
}
// Seed the per-thread curand state (sequence 0, offset 0).
__device__ void
generateRandomInit(curandState *state,int seed)
{
    curand_init(seed, /*subsequence=*/0, /*offset=*/0, state);
}
// Inverse-transform sampling: draw u in (0,1] and look it up in row `id`
// of the CDF table `histo` (row length `max`).
__device__ int
sampling(curandState *state,double *histo,int max,int id)
{
    return binarySearch(histo, generateRandom(state), max, id);
}
// Inverse-CDF lookup: in row `id` of `histo` (row length `max`), find the
// first slot whose cumulative value is >= target.  The [start,end] window
// shrinks until start+1 == end, then the answer is chosen by a final
// comparison against histo[id*max+start].
// NOTE(review): the trailing `return -1` is unreachable — the two final
// comparisons (target <= h vs h < target) are exhaustive.
__device__ int
binarySearch(double *histo,double target,int max,int id)
{
// int result_for = -1;
// int result_bin = 0;
// for (int i=0;i<max;i++){
// if (target<=histo[id*max+i]){
// // printf("[debug]histo = %lf,%lf\n",histo[id*max],histo[id*max+i]);
// // printf("[debug]target=%lf,max=%d,id =%d,i=%d\n",target,max,id,i);
// return i;
// }
// }
// return -1;
int start = 0;
int end = max-1;
int mid;
while(start+1<end){
mid = start+(end-start)/2;
if (histo[id*max+mid]==target){
end = mid;
}
else if (histo[id*max+mid] < target){
start = mid;
}
else if (histo[id*max+mid] > target){
end = mid;
}
}
if (target <= histo[id*max+start]){
return start;
}
else if (histo[id*max+start] < target){
return end;
}
return -1;
}
/* Append one hit time to this PMT's slice of the shared result buffer.
 * Fix: the capacity is now checked BEFORE the write — the original wrote
 * first and asserted afterwards, so an overflowing append corrupted the
 * next PMT's slot before trapping.
 * NOTE(review): not atomic — assumes a single writer per PMT id, which
 * holds for the current kernels (one thread per id). */
__device__ void
append_res_arr(Res_Arr *p, double val)
{
    assert(p->pmt_list[p->id] < p->max);           /* room for one more slot */
    p->arr[p->index + p->pmt_list[p->id]] = val;
    p->pmt_list[p->id] += 1;
    return;
}
// Bind a Res_Arr view onto PMT `pmtid`'s slice of the shared result
// buffer.  The `size` parameter is currently unused.
__device__ void
init_res_arr(Res_Arr *p,double *result,int *pmt_res_list,int pmtid,int size){
p->arr = result;// backing storage shared by all PMTs
p->pmt_list = pmt_res_list;// per-PMT used-slot counters
p->index = pmtid*pmt_mem;// this PMT's starting offset within the buffer
p->id = pmtid;
p->max = pmt_mem;// per-PMT capacity
// p->begin = begin;
// p->len = len;
return;
}
extern "C"
{
// Host wrapper for the per-PMT sampling kernel.
// Uploads event and PMT data, launches step_calculate_every_pmt, copies the
// packed per-PMT results back into h_result, and prints timing breakdowns.
// The size[] entries are BYTE counts (size[0] is later divided by
// sizeof(double) to recover the element count): size[0]=event arrays,
// size[1]=PMT coordinate arrays, size[2]=hit data, size[3]=npe data,
// size[4]=seed array.
// Returns the total elapsed time (ms) measured with CUDA events.
float GPU_Sampling_wrapper(double *r,double *pos_x,double *pos_y,double *pos_z, \
double *intPart, double *fractionPart,double *start_time,double *pmt_x,double *pmt_y,\
double *pmt_z,double *data_hit,double *data_npe,int *seed,int *size,double* h_result)
{
// GPU timing: create start/stop events (overall, kernel-only, upload-only)
cudaEvent_t start, stop;
cudaEvent_t gpu_start,gpu_stop,data_start,data_stop;
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_stop);
cudaEventCreate(&data_start);
cudaEventCreate(&data_stop);
cudaEventRecord(start);
cudaEventRecord(data_start);
// allocate device memory
double *d_r, *d_pos_x,*d_pos_y,*d_pos_z,*d_intPart,*d_fractionPart,*d_start_time;
double *d_pmt_x,*d_pmt_y,*d_pmt_z,*d_data_hit,*d_data_npe;
double *d_result;
int *d_seed,*d_pmt_res_list;
CHECK(cudaMalloc((double**)&d_r,size[0]));
CHECK(cudaMalloc((double**)&d_pos_x,size[0]));
CHECK(cudaMalloc((double**)&d_pos_y,size[0]));
CHECK(cudaMalloc((double**)&d_pos_z,size[0]));
CHECK(cudaMalloc((double**)&d_intPart,size[0]));
CHECK(cudaMalloc((double**)&d_fractionPart,size[0]));
CHECK(cudaMalloc((double**)&d_start_time,size[0]));
CHECK(cudaMalloc((double**)&d_pmt_x,size[1]));
CHECK(cudaMalloc((double**)&d_pmt_y,size[1]));
CHECK(cudaMalloc((double**)&d_pmt_z,size[1]));
CHECK(cudaMalloc((double**)&d_data_hit,size[2]));
CHECK(cudaMalloc((double**)&d_data_npe,size[3]));
CHECK(cudaMalloc((int**)&d_seed,size[4]));
CHECK(cudaMalloc((double**)&d_result,pmt_num*pmt_mem*sizeof(double)));
CHECK(cudaMalloc((int**)&d_pmt_res_list,pmt_num*sizeof(int)));
// zero the result buffers and per-PMT usage counters
CHECK(cudaMemset(d_pmt_res_list,0,pmt_num*sizeof(int)));
CHECK(cudaMemset(d_result,0,pmt_num*pmt_mem*sizeof(double)));
// copy host data to the device
CHECK(cudaMemcpy(d_r, r, size[0], cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_pos_x, pos_x, size[0], cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_pos_y, pos_y, size[0], cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_pos_z, pos_z, size[0], cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_intPart, intPart, size[0], cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_fractionPart, fractionPart, size[0], cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_start_time, start_time, size[0], cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_pmt_x, pmt_x, size[1], cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_pmt_y, pmt_y, size[1], cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_pmt_z, pmt_z, size[1], cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_data_hit, data_hit, size[2], cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_data_npe, data_npe, size[3], cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_seed, seed, size[4], cudaMemcpyHostToDevice));
cudaEventRecord(data_stop);
cudaEventSynchronize(data_stop);
// printf("[GPU]GPU数据拷贝完成\n");
// select GPU 0
// NOTE(review): cudaSetDevice is called AFTER the allocations/copies above,
// so those went to the previously-current device — confirm this is intended.
CHECK(cudaSetDevice(0));
// // launch configuration
int threadPerBlock= 1024;
int blocksPerGrid = 18;
dim3 block(threadPerBlock);
// number of blocks
dim3 grid(blocksPerGrid);//blocksPerGrid
// launch the kernel
cudaEventRecord(gpu_start);
// for(int i = 0;i<size[0]/8;i++) {
// CHECK(cudaDeviceSynchronize());
// // printf("[GPU]核函数开始运行[%d]\n",i);
// pmt_calculate<<<grid, block>>>(r[i],pos_x[i],pos_y[i],pos_z[i],d_pmt_x,d_pmt_y,\
// d_pmt_z,intPart[i],fractionPart[i],start_time[i],17746,d_data_hit,d_data_npe,\
// (int*)(d_seed+i*pmt_num),d_result,d_pmt_res_list,(int)size[0]/8);
// }
step_calculate_every_pmt<<<grid, block>>>(d_r,d_pos_x,d_pos_y,d_pos_z,d_pmt_x,d_pmt_y,\
d_pmt_z,d_intPart,d_fractionPart,d_start_time,pmt_num,d_data_hit,d_data_npe,d_seed,\
d_result,d_pmt_res_list,(int)size[0]/sizeof(double));
cudaEventRecord(gpu_stop);
cudaEventSynchronize(gpu_stop);
// printf("[GPU]核函数运行完成\n");
// CHECK(cudaDeviceSynchronize());
CHECK(cudaMemcpy(h_result, d_result, pmt_num*pmt_mem*sizeof(double), cudaMemcpyDeviceToHost));
// printf("threadPerBlock:%d\n",threadPerBlock);
// printf("blocksPerGrid;%d\n",blocksPerGrid);
// free device memory
// NOTE(review): d_r, d_pos_x/y/z, d_intPart, d_fractionPart, d_start_time and
// d_pmt_res_list are never cudaFree'd here; they are only reclaimed by the
// cudaDeviceReset() below — confirm that is acceptable for callers.
CHECK(cudaFree(d_data_hit));
CHECK(cudaFree(d_data_npe));
CHECK(cudaFree(d_pmt_x));
CHECK(cudaFree(d_pmt_y));
CHECK(cudaFree(d_pmt_z));
CHECK(cudaFree(d_seed));
CHECK(cudaFree(d_result));
// printf("[GPU]GPU运行完成\n");
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float total_time,gputime,datatime;
// elapsed times in ms (event resolution ~0.5 us)
cudaEventElapsedTime(&datatime, data_start, data_stop);
cudaEventElapsedTime(&gputime, gpu_start, gpu_stop);
cudaEventElapsedTime(&total_time, start, stop);
cudaEventDestroy(gpu_start);
cudaEventDestroy(gpu_stop);
cudaEventDestroy(data_start);
cudaEventDestroy(data_stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("total use time %f ms\n", total_time);
printf("gpu use time %f ms\n",gputime);
printf("data use time %f ms\n",datatime);
printf("data transport back use time %f ms\n",total_time - datatime - gputime);
CHECK(cudaDeviceReset());
return total_time;
// return 0.0;
}
}
24,056 | #include "includes.h"
// Empty no-op kernel; useful only for measuring launch overhead or
// verifying that the toolchain/device can launch a kernel at all.
__global__ void mykernel(void)
{
}
24,057 | #include "includes.h"
// Unpacks the 32 bits of x into a device-heap-allocated float[32]:
// array[i] is bit i of x as 0.0f or 1.0f. The caller owns the returned
// buffer and must delete[] it (see deconcatenate_rows_kernel).
__device__ float* deconcatenate(unsigned int x)
{
float * array = new float[32];
for (int i = 0; i < 32; i++)
{
array[i] = (x & ( 1 << i )) >> i;
}
return array;
}
// Packs the signs of 32 floats into one word: bit i is set iff array[i] >= 0.
// For 0/1-valued inputs this is the inverse of deconcatenate.
__device__ unsigned int concatenate(float* array)
{
unsigned int rvalue=0;
unsigned int sign;
for (int i = 0; i < 32; i++)
{
sign = (array[i]>=0);
rvalue = rvalue | (sign<<i);
}
return rvalue;
}
// Expands each packed word a[i/32] into 32 float bits written to b.
// `size` is the number of OUTPUT floats and must be a multiple of 32.
// Fixed: the original loop ignored threadIdx/blockIdx entirely, so every
// thread of every block redundantly processed the whole input, racing on
// (identical) writes to b and repeatedly allocating device-heap arrays.
// Work is now distributed with a grid-stride loop, one 32-bit word per
// iteration, producing the same output with each element written once.
__global__ void deconcatenate_rows_kernel(unsigned int *a, float *b, int size)
{
    int stride = 32 * blockDim.x * gridDim.x;
    for (int i = 32 * (blockIdx.x * blockDim.x + threadIdx.x); i < size; i += stride)
    {
        float *array = deconcatenate(a[i / 32]);
        for (int k = 0; k < 32; k++) b[i + k] = array[k];
        delete[] array;
    }
}
24,058 | //STL
#include <iostream>
#include <string>
#include <vector>
#include <time.h>
using namespace std;
///*OCTAVE M-script*/ xfp=single(0.0:0.1:6.3);Xfp=single(dct(xfp));clc;length(Xfp);Xfp(1:5)
// Problem size and launch configuration shared by all DCT kernels below.
unsigned i;
const unsigned N = 2048;
unsigned gpuThr = 256; // threads per block
unsigned gpuBl = N / gpuThr; // blocks; assumes N is a multiple of gpuThr
vector < float > inputVec( N ); // host-side input signal
string letter, subFp; const string sep( "_" );
//=========================== gpu ===========================
// Statically allocated device-global buffers: input, float/double DCT
// coefficients, and the inverse-DCT reconstruction.
__device__ float d_x[ N ], d_Xfp32[ N ], d_ix[ N ];
__device__ double d_Xfp64[ N ]; //double size per dimension in comparision to floats array in global memory; for 2D results in quadratic size
__constant__ unsigned d_N[ 1 ];
// Debug kernel (launch with <<<1,1>>>): prints the first few inputs,
// float/double DCT coefficients and inverse-DCT samples, then the RMS
// difference between the float and double coefficient arrays.
__global__ void printKernel()
{
unsigned resNo = 10;
for ( unsigned i = 0; i < resNo; i++ )
printf( "x[%i]: %f\n", i, d_x[ i ] );
for ( unsigned i = 0; i < resNo; i++ )
printf( "d_Xfp32[%i]: %.6f; d_Xfp64[%i]: %.6f\n", i, d_Xfp32[ i ], i, d_Xfp64[ i ] );
for ( unsigned i = 0; i < resNo; i++ )
printf( "ix[%i]: %f\n", i, d_ix[ i ] );
// accumulate squared differences in double to avoid losing precision
double acc = 0.0f;
for( unsigned i = 0; i < N; i++ )
acc += ( d_Xfp32[ i ] - d_Xfp64[ i ] ) * ( d_Xfp32[ i ] - d_Xfp64[ i ] );
acc /= N;
printf( "mean difference in dct float vs double accumulators: %.6f\n", sqrtf( acc ) );
}
// Inverse DCT-II, one output sample per thread: thread `ind` reconstructs
// d_ix[ind] from the float coefficients d_Xfp32 using the fast __cosf
// intrinsic. Launch with gridDim.x * blockDim.x == N; no bounds check.
// Fixed: removed the accDC accumulator the original computed but never used
// (a full extra N adds per thread).
__global__ void idctKernelFloat()
{
    unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
    float constVal = ( float( ind ) + 0.5f ) * 3.14159265f / float( N );
    float sqrConst = sqrtf( 2.0f / float( N ) );
    // DC term carries the 1/sqrt(N) normalisation; AC terms sqrt(2/N).
    float tmpX = sqrtf( 1.0f / float( N ) ) * d_Xfp32[ 0 ];
    for ( unsigned k = 1; k < N; k++ )
        tmpX += d_Xfp32[ k ] * sqrConst * __cosf( constVal * float( k ) );
    d_ix[ ind ] = tmpX;
}
// Forward DCT-II with float accumulators, one coefficient per thread.
// Launch with gridDim.x * blockDim.x == N; no bounds check.
// Fixed: in the original EVERY thread stored its own DC value into
// d_Xfp32[0] after writing d_Xfp32[ind], producing cross-thread racing
// stores (all with the same value, but redundant and order-dependent with
// thread 0's AC write). Thread 0 now writes only the DC coefficient and the
// others only their own AC coefficient — the final array contents are
// identical.
__global__ void dctKernelFloat()
{
    unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
    float constVal = float( ind ) * 3.14159265f / float( N );
    float sqrConst = sqrtf( 2.0f / float( N ) );
    float tmpX = 0.0f, accDC = 0.0f, tmpx = 0.0f;
    for ( unsigned i = 0; i < N; i++ )
    {
        tmpx = d_x[ i ];
        tmpX += sqrConst * tmpx * __cosf( constVal * ( float( i ) + 0.5f ) );
        accDC += tmpx;
    }
    if ( ind == 0 )
        d_Xfp32[ 0 ] = accDC / sqrtf( float( N ) );
    else
        d_Xfp32[ ind ] = tmpX;
}
// Forward DCT-II with DOUBLE accumulators (reference for precision
// comparison against dctKernelFloat); one coefficient per thread.
// Launch with gridDim.x * blockDim.x == N; no bounds check.
// Fixed: same defect as dctKernelFloat — every thread redundantly stored the
// DC value into d_Xfp64[0]; only thread 0 writes it now. Final array
// contents are unchanged.
__global__ void dctKernelDouble()
{
    unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
    float constVal = float( ind ) * 3.14159265f / float( N );
    float sqrConst = sqrtf( 2.0f / float( N ) );
    double tmpX = 0.0f, accDC = 0.0f, tmpx = 0.0f;
    for ( unsigned i = 0; i < N; i++ )
    {
        tmpx = d_x[ i ];
        tmpX += sqrConst * tmpx * __cosf( constVal * ( float( i ) + 0.5f ) );
        accDC += tmpx;
    }
    if ( ind == 0 )
        d_Xfp64[ 0 ] = accDC / sqrtf( float( N ) );
    else
        d_Xfp64[ ind ] = tmpX;
}
// Fills the test signal x[i] = 0.1*i, runs float/double forward DCT and the
// float inverse DCT, timing each with host clock(), then prints results.
// Fixed: the original called cudaFree() on d_x, d_ix, d_Xfp32, d_Xfp64 and
// d_N — those are statically allocated __device__/__constant__ symbols, not
// cudaMalloc'd pointers, so cudaFree on them is invalid. They need no
// explicit release; the calls were removed.
int main( int argc, char* argv[] )
{
    for(i=0;i<(unsigned)inputVec.size();i++)inputVec[i]=0.1f*i;
    // upload the input and N to the device symbols
    cudaMemcpyToSymbol( d_x, &inputVec[ 0 ], sizeof( float ) * ( unsigned )inputVec.size() );
    cudaMemcpyToSymbol( d_N, &N, sizeof( unsigned ) );
    clock_t t = clock();
    dctKernelFloat<<< gpuBl, gpuThr >>>();
    cudaDeviceSynchronize();
    cout << "CPU clocks float accumulator: " << double( clock() - t ) << endl;
    t = clock();
    dctKernelDouble<<< gpuBl, gpuThr >>>();
    cudaDeviceSynchronize();
    cout << "CPU clocks double accumulator: " << double( clock() - t ) << endl;
    t = clock();
    idctKernelFloat<<< gpuBl, gpuThr >>>();
    cudaDeviceSynchronize();
    cout << "CPU clocks idct float accumulator: " << double( clock() - t ) << endl;
    printKernel<<< 1, 1 >>>();
    // wait for printKernel's device-side printf before resetting the device
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
//P.S. Please note, that streaming data from GPU to RAM is costly in both directions - keep computations in GPU.
|
24,059 | #include <iostream>
#include <cstdio>
using namespace std;
// Prints one greeting per thread via device-side printf. Output only
// appears after a synchronizing call on the host (see commented test below).
extern "C" __global__ void helloFromGPU() {
printf("Hello World from GPU thread %d!\n", threadIdx.x);
}
/*int __declspec(dllexport) test(const unsigned int n) {
for (unsigned int i=0; i<n; i++) {
cout << "Hello World from CPU!\n";
helloFromGPU <<<1, n >>> ();
}
// cudaDeviceReset();
cudaDeviceSynchronize();
// Without cudaDeviceReset() or cudaDeviceSynchronize() the kernel messages are not printed.
// In addition, the .exe file handle is still held by malwarebytes... sometimes.
// Maybe only after Malwarebytes has been running a long time.
// Restarting Malwarebytes fixes things.
return 42;
}
*/ |
24,060 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
// Just for testing
// code from https://stackoverflow.com/questions/13320321/printf-in-my-cuda-kernel-doesnt-result-produce-any-output
// Writes 1 into t[threadIdx.x]; launched as <<<1, blockSize>>> so the block
// covers t[0..blockSize-1] exactly (no bounds check needed).
__global__
void set1(int *t) {
t[threadIdx.x] = 1;
}
// Reports a CUDA runtime failure on stderr.
// Returns true when `error` signals a failure, false for cudaSuccess.
inline bool failed(cudaError_t error) {
    const bool isError = (error != cudaSuccess);
    if (isError)
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(error));
    return isError;
}
// Probes the largest block size the device accepts by launching set1 with
// increasing blockSize until a launch fails, verifying each run's output.
// Fixed: the zero-initialisation loop wrote t[0] on every iteration instead
// of t[i], leaving t[1..] uninitialised before the kernel ran (masked only
// because cudaMallocManaged memory happens to start zeroed on first touch).
int main() {
    int blockSize;
    for (blockSize = 1; blockSize < 1 << 12; blockSize++) {
        printf("Testing block size of %d\n", blockSize);
        int *t;
        if (failed(cudaMallocManaged(&t, blockSize * sizeof(int)))) {
            failed(cudaFree(t));
            break;
        }
        // clear the whole buffer so stale values can't fake a pass
        for (int i = 0; i < blockSize; i++)
            t[i] = 0;
        set1 <<<1, blockSize>>>(t);
        if (failed(cudaPeekAtLastError())) {   // catches invalid launch config
            failed(cudaFree(t));
            break;
        }
        if (failed(cudaDeviceSynchronize())) { // catches async execution errors
            failed(cudaFree(t));
            break;
        }
        bool hasError = false;
        for (int i = 0; i < blockSize; i++)
            if (1 != t[i]) {
                printf("CUDA error: t[%d] = %d but not 1\n", i, t[i]);
                hasError = true;
                break;
            }
        if (hasError) {
            failed(cudaFree(t));
            break;
        }
        failed(cudaFree(t));
    }
    blockSize--;   // last size that succeeded
    if (blockSize <= 0) {
        printf("CUDA error: block size cannot be 0\n");
        return 1;
    }
    printf("Block maximum size is %d", blockSize);
    return 0;
}
24,061 | #include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
using namespace std;
// 3D symmetric stencil of half-width k/2 over an X*Y*Z float volume.
// Each (x, y) thread sweeps the full z extent; for every offset lk in
// [0, k/2) it adds the +/-lk neighbour along each axis, mirroring back
// inside the volume when an offset would fall out of bounds.
// Launch must cover X*Y exactly (no x/y bounds check is performed).
__global__ void _3Dstencil_global(float *d_e,float *d_r,int X,int Y,int Z,int k){
//int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
//printf("sou id %d || threadIdx.x %d || blockIdx.x %d || blockDim.x %d \n",thread_id,threadIdx.x ,blockIdx.x,blockDim.x);
//int thread_id = threadIdx.x + threadIdx.y*blockDim.x + (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
//printf("sou id %d || threadIdx.x %d || blockIdx.x %d|| blockIdx.y %d || blockDim.x %d|| blockDim.y %d \n",thread_id,threadIdx.x ,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
int x,y;
x = threadIdx.x + (blockIdx.x*blockDim.x);
y = threadIdx.y + (blockIdx.y*blockDim.y);
//printf("X = %d || Y = %d\n",x,y);
for(int z=0; z<Z; z++)
{
// flat index of the cell this thread updates at depth z
int h_r_i = x + ( y * (X) ) + ( z* (X*Y) );
int h_e_i = h_r_i;
d_r[h_r_i] = d_e[h_e_i];
for(int lk =0;lk<(k/2);lk++)
{
// +/-lk along x, mirrored at the boundary
if(x+lk >= X)
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
d_r[h_r_i] += d_e[h_e_i];
if(x-lk < 0)
h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
d_r[h_r_i] += d_e[h_e_i];
// +/-lk along y, mirrored at the boundary
if(y+lk >= Y)
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
d_r[h_r_i] += d_e[h_e_i];
if(y-lk < 0)
h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
else
h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
d_r[h_r_i] += d_e[h_e_i];
// +/-lk along z, mirrored at the boundary
if(z+lk >= Z)
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
d_r[h_r_i] += d_e[h_e_i];
if(z-lk < 0)
h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
else
h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
d_r[h_r_i] += d_e[h_e_i];
}
}
}
/*
*argumentos
*1 - n_elementos
*2 - threads por bloco
*3 - n_blocos
*4 - print
*/
// Runs the 3D stencil on the GPU, computes the same result on the CPU, and
// compares them. argv: [1]=X (also block x), [2]=Y (also block y), [3]=Z,
// [4]=stencil width k. Blocks are capped at 32x32 with the grid scaled up.
// Fixed: Ticks[0] was never assigned, so the wall-clock Tempo printed at the
// end was computed from an uninitialised value; it is now recorded just
// before the kernel launch.
int main(int argc, char* argv[]) {
    float *h_e,*h_r,*h_r_test;
    float *d_e, *d_r;
    int size,tam,times;
    clock_t Ticks[2];
    times = 1;
    int X=8;
    int Y=8;
    int BX=8;
    int BY=8;
    int Z=4;
    int k=2;
    int GX=1;
    int GY=1;
    if(argc > 1)
    {
        X = atoi(argv[1]);
        BX=X;
    }
    if(argc > 2)
    {
        Y = atoi(argv[2]);
        BY = Y;
    }
    if(argc > 3)
        Z = atoi(argv[3]);
    if(argc > 4)
        k = atoi(argv[4]);
    // cap block dims at 32 and grow the grid instead
    if(X>32)
    {
        GX = ceil((float)X/(float)32);
        BX = 32;
    }
    if(Y>32)
    {
        GY = ceil((float)Y/(float)32);
        BY = 32;
    }
    dim3 block_dim(BX,BY,1);
    dim3 grid_dim(GX,GY,1);
    size = X * Y * Z * sizeof(float);
    tam = X * Y * Z;
    h_e = (float*) malloc(size);
    h_r = (float*) malloc(size);
    h_r_test = (float*) malloc(size);
    cudaMalloc(&d_e, size);
    cudaMalloc(&d_r, size);
    for (int i = 0; i < tam; i++) {
        h_e[i] = (float)(rand()%9000)/100.0;
        h_r[i] = 0;
    }
    /* Copy vectors from host memory to device memory */
    cudaMemcpy(d_e, h_e, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_r, h_r, size, cudaMemcpyHostToDevice);
    // CPU reference computation (same mirrored-boundary stencil)
    for(int t =0; t<times; t++)
    {
        for(int z=0; z<Z; z++)
        {
            for(int y=0; y<Y; y++)
            {
                for(int x=0; x<X; x++)
                {
                    int h_r_i = x + ( y * (X) ) + ( z* (X*Y) );
                    int h_e_i = h_r_i;
                    h_r_test[h_r_i] = h_e[h_e_i];
                    for(int lk =0;lk<(k/2);lk++)
                    {
                        if(x+lk >= X)
                            h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
                        else
                            h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
                        h_r_test[h_r_i] += h_e[h_e_i];
                        if(x-lk < 0)
                            h_e_i = (x+lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
                        else
                            h_e_i = (x-lk) + ( (y) * (X) ) + ( (z) * (X*Y) );
                        h_r_test[h_r_i] += h_e[h_e_i];
                        if(y+lk >= Y)
                            h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
                        else
                            h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
                        h_r_test[h_r_i] += h_e[h_e_i];
                        if(y-lk < 0)
                            h_e_i = (x) + ( (y+lk) * (X) ) + ( (z) * (X*Y) );
                        else
                            h_e_i = (x) + ( (y-lk) * (X) ) + ( (z) * (X*Y) );
                        h_r_test[h_r_i] += h_e[h_e_i];
                        if(z+lk >= Z)
                            h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
                        else
                            h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
                        h_r_test[h_r_i] += h_e[h_e_i];
                        if(z-lk < 0)
                            h_e_i = (x) + ( (y) * (X) ) + ( (z+lk) * (X*Y) );
                        else
                            h_e_i = (x) + ( (y) * (X) ) + ( (z-lk) * (X*Y) );
                        h_r_test[h_r_i] += h_e[h_e_i];
                    }
                }
            }
        }
        for (int i = 0; i < tam; i++)
        {
            h_e[i] = h_r_test[i];
        }
    }
    cudaEvent_t start, stop;
    cudaEventCreate (&start);
    cudaEventCreate (&stop);
    Ticks[0] = clock();          // wall-clock start (was missing)
    cudaEventRecord (start, 0);
    /******************
    *** Kernel Call ***
    *******************/
    _3Dstencil_global<<<grid_dim,block_dim>>>(d_e,d_r,X,Y,Z,k);
    cudaError_t err = cudaSuccess;
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
    }
    /******************
    *** Kernel Call ***
    *******************/
    cudaDeviceSynchronize();
    cudaEventRecord (stop, 0);
    cudaEventSynchronize (stop);
    float elapsedTime;
    cudaEventElapsedTime (&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    Ticks[1] = clock();
    double Tempo = (Ticks[1] - Ticks[0]) * 1000.0 / CLOCKS_PER_SEC;
    printf("X %d || Y %d \nBX %d || BY %d\nZ %d \n",X,Y,BX,BY,Z);
    printf ("[%d,%.5f,%lf],\n", tam,elapsedTime,Tempo/1000.0);
    cudaMemcpy(h_r, d_r, size, cudaMemcpyDeviceToHost);
    bool certo=true;
    // NOTE(review): only the first 256 elements are compared — confirm that
    // is intentional for larger volumes.
    for (int i = 0; i < 256; i++){
        if(h_r_test[i] != h_r[i])
            certo=false;
    }
    if(!certo)
        printf("\n*****\n certo = %s\n*****\n", certo ? "true" : "false");
    cudaFree(d_e);
    cudaFree(d_r);
    std::free(h_e);
    std::free(h_r);
    std::free(h_r_test);
    return 0;
} /* main */
|
24,062 | #include <stdlib.h>
#include <stdio.h>
#include <cstdlib>
#include <math.h>
#include <random>
#include <iostream>
#include <curand_kernel.h>
#include <ctime>
// Simple particle: position and velocity as float3.
// Default-constructed particles sit at the origin with velocity (1,1,1);
// the float3 constructor overrides only the velocity.
class Particle
{
public:
float3 pos = make_float3(0,0,0);
float3 vel = make_float3(1,1,1);
Particle() {}
Particle(float3 velocity){
vel = velocity;
}
// Host-side debug print (not callable from device code).
void print_particle() {
printf("position (%f,%f,%f) \n", pos.x, pos.y, pos.z);
printf("velocity (%f,%f,%f) \n", vel.x, vel.y, vel.z);
}
};
// Device-side component-wise float3 addition (used by timestep_update).
__device__ float3 operator+(const float3 &a, const float3 &b)
{
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
// Host-side component-wise float3 addition (CPU reference path).
float3 add_float3(const float3 &a, const float3 &b)
{
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
// Host-side component-wise float3 subtraction.
float3 sub_float3(const float3 &a, const float3 &b)
{
return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
// Returns the L1 norm (sum of absolute components), NOT the Euclidean
// magnitude the name suggests — confirm which is intended before reuse.
float mag_float3(const float3 &a)
{
return abs(a.x) + abs(a.y) + abs(a.z);
}
// Advances each particle one step: pos += vel. One thread per particle;
// threads beyond n_particles exit early, so any grid size that covers the
// array is valid.
__global__
void timestep_update(Particle *particles, int n_particles)
{
int thread = blockIdx.x*blockDim.x + threadIdx.x;
// Avoid index out of bounds
if(thread > n_particles - 1){
return;
}
// particles[thread].vel = particles[thread].vel + particles[thread].vel;
// Update position
particles[thread].pos = particles[thread].pos + particles[thread].vel;
/*printf("Thread %d Coordinate X %f \n", thread, particles[thread].pos.x);
printf("Thread %d Coordinate Y %f \n", thread, particles[thread].pos.y);
printf("Thread %d Coordinate Z %f \n", thread, particles[thread].pos.z);*/
}
// Busy-work kernel used only to occupy a stream: every thread overwrites the
// whole d_out[0..N-1] range with its own random draws, so the final contents
// are whichever thread wrote last (a benign-by-design data race; the values
// are never read back). The clock()-based seed makes each launch differ.
__global__
void dummy_kernel(float *d_out, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
curandState state;
curand_init((unsigned long long)clock() + i, 0, 0, &state);
for(int j = 0; j < N; j++){
d_out[j] = curand_uniform_double(&state);
}
}
// Draws an integer velocity uniformly from [-10, 10] on each axis.
// A fresh hardware-seeded Mersenne Twister is created per call.
float3 random_velocity()
{
    std::random_device seed_source;
    std::mt19937 engine(seed_source());
    std::uniform_int_distribution<int> component(-10, 10);
    const int vx = component(engine);
    const int vy = component(engine);
    const int vz = component(engine);
    return make_float3(vx, vy, vz);
}
// CPU reference for timestep_update: pos += vel for every particle.
void timestep_update_cpu(Particle *particles, int n_particles){
for(int i = 0; i < n_particles; i++){
//particles[i].vel = add_float3(particles[i].vel, particles[i].vel);
particles[i].pos = add_float3(particles[i].pos, particles[i].vel);
}
}
// Streams demo: splits the particle array into num_streams batches and, per
// iteration, overlaps H2D copy / update kernel / D2H copy across streams.
// Fixes applied to the original:
//  - gpu_particles (the transfer buffer) was uploaded without ever being
//    initialised; it is now seeded from the initialised particles array.
//  - the kernel was passed the FULL n_particles although each batch buffer
//    holds only batch_stride particles, so threads read/wrote past the end
//    of the batch allocation; it now receives batch_stride.
//  - grid_size was hard-coded to 10 blocks (2560 threads), leaving most of
//    each 250000-particle batch untouched; it is now sized to cover a batch.
//  - d_out, the batch buffers and the pinned host arrays were leaked.
int main(int argc, char** argv)
{
    int n_particles, n_iterations, n_threads;
    n_iterations = 1000;
    n_particles = 1000000;
    n_threads = 256;
    int bytes = sizeof(Particle) * n_particles;

    // Pinned host arrays so cudaMemcpyAsync can overlap with kernels.
    Particle *particles, *gpu_particles;
    cudaMallocHost(&particles, sizeof(Particle) * n_particles);
    cudaMallocHost(&gpu_particles, sizeof(Particle) * n_particles);

    // Initiate particles with random velocities.
    for(int i = 0; i < n_particles; i++)
    {
        float3 random_vel = random_velocity();
        particles[i] = Particle(random_vel);
    }
    // Seed the transfer buffer from the initialised particles.
    for(int i = 0; i < n_particles; i++)
    {
        gpu_particles[i] = particles[i];
    }

    int num_streams = 4;
    Particle *batches[num_streams];
    cudaStream_t streams[num_streams];
    int batch_size = bytes / num_streams;
    int batch_stride = n_particles / num_streams;
    // Enough blocks to cover one batch.
    int grid_size = (batch_stride + n_threads - 1) / n_threads;

    // Create streams and their device-side batch buffers.
    for(int i = 0; i < num_streams; i++){
        cudaStreamCreate(&streams[i]);
        cudaMalloc(&batches[i], batch_size);
    }

    // Scratch output for the dummy busy-work kernel.
    float *d_out;
    int N = 1000;
    cudaMalloc((void**)&d_out, N * sizeof(float));

    for(int j = 0; j < n_iterations; j++){
        for (int i = 0; i < num_streams; i++) {
            int batch_number = batch_stride * i;
            cudaMemcpyAsync(batches[i], &gpu_particles[batch_number], batch_size, cudaMemcpyHostToDevice, streams[i]);
            timestep_update<<<grid_size, n_threads, 0, streams[i]>>>(batches[i], batch_stride);
            dummy_kernel<<<grid_size, n_threads, 0, streams[i]>>>(d_out, N);
            cudaMemcpyAsync(&gpu_particles[batch_number], batches[i], batch_size, cudaMemcpyDeviceToHost, streams[i]);
        }
        cudaDeviceSynchronize();
    }

    // Release everything (streams, device buffers, pinned host arrays).
    for(int i = 0; i < num_streams; i++){
        cudaStreamDestroy(streams[i]);
        cudaFree(batches[i]);
    }
    cudaFree(d_out);
    cudaFreeHost(particles);
    cudaFreeHost(gpu_particles);
}
|
24,063 | #include<iostream>
#include<algorithm>
#include<iomanip>
#include<time.h>
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/sort.h>
#include <thrust/iterator/permutation_iterator.h>
#define N (8<<27)
#define M N/10
// Functor returning its argument plus one; callable from both host and
// device code (usable with thrust transforms).
template<class T>
class plusOne{
public:
__device__ __host__ T operator() (T a){
return a+1;
}
};
// Returns a pseudo-random int in [0, 1000).
// Fixed: the original called srand(time(NULL)) on every invocation, which
// reseeds the generator with the same second-resolution value and makes all
// calls within the same second return the SAME number (so the map generated
// in main was nearly constant). Seeding is the caller's job — main already
// calls srand(time(NULL)) once.
int f()
{
    return rand() % 1000;
}
// Benchmarks a gather-reduce: sums dsource[dmap[i]] for i in [0, M) on the
// device via a thrust permutation iterator, timing it with CUDA events.
// NOTE(review): N = 8<<27 elements (~2^30 ints) is a very large allocation —
// confirm the target machine can hold both host and device copies.
int main(){
printf("size %d \n",N);
srand(time(NULL));
thrust::host_vector<int> source(N);
std::generate(source.begin(),source.end(),rand);
thrust::device_vector<int> dsource=source;
// gather indices into source, produced by f()
thrust::host_vector<int> map(M);
/*
map[0] = 3;
map[1] = 1;
map[2] = 0;
map[3] = 5;
*/
std::generate(map.begin(),map.end(),f);
thrust::device_vector<int> dmap=map;
cudaEvent_t start,stop;
float elapsed;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// reduce over the permuted view source[map[0..M)]
int sum = thrust::reduce(thrust::make_permutation_iterator(dsource.begin(), dmap.begin()), thrust::make_permutation_iterator(dsource.begin(), dmap.end()));
std::cout << "sum :" << sum << std::endl;
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed,start,stop);
std::cout<<"gpu :"<<elapsed<<"ms"<<std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
24,064 | //device code
//device code
// Element-wise vector addition c = a + b; one thread per element with a
// bounds guard so any covering grid size is valid.
__global__ void VecAdd(float* a, float* b, float* c, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
c[i] = a[i] + b[i];
}
}
//host code
//host code
// Allocates host/device vectors, runs VecAdd on N=1024 elements, copies the
// result back, and frees everything.
// NOTE(review): no CUDA call is error-checked, and both inputs are zero, so
// the result is all zeros — the program exercises the plumbing only.
int main()
{
int N = 1024;
size_t size = N*sizeof(float);
//allocate input vectors in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
//initialize input vectors
for (int i = 0; i < N; i++)
{
h_A[i] = 0;
h_B[i] = 0;
}
//allocate vectors in device memory
float* d_A;
cudaMalloc(&d_A, size);
float* d_B;
cudaMalloc(&d_B, size);
float* d_C;
cudaMalloc(&d_C, size);
//copy from host to device
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
//Invoke kernel (ceil-div so the grid covers all N elements)
int threadsPerBlock = 256;
int blocksPerGrid = (N+threadsPerBlock - 1)/threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
//Copy result from device to host. h_C is the result.
//(the blocking cudaMemcpy also synchronizes with the kernel)
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
//free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//free host memory
free(h_A);
free(h_B);
free(h_C);
}
|
24,065 | #include "includes.h"
// Outer-product step of backprop: weight-gradient element (blockIdx.x,
// threadIdx.x) is activation[threadIdx.x] * bias-gradient[blockIdx.x].
// Launch with one block per output neuron and one thread per input neuron;
// w_off/b_off are the layer's offsets into the flat gradient arrays.
__global__ void kernelBackprop1(float *delta_nabla_w,int w_off,float *activations,float *delta_nabla_b,int b_off) {
delta_nabla_w[w_off+(blockIdx.x*blockDim.x)+threadIdx.x]=activations[threadIdx.x]*delta_nabla_b[b_off+blockIdx.x];
//delta_nabla_w[w_off+(threadIdx.x*gridDim.x)+blockIdx.x]=activations[threadIdx.x]*delta_nabla_b[b_off+blockIdx.x];
}
24,066 | // https://www.jianshu.com/p/a0184e73a460
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define w 2000
// Row-major matrix: elements[row * width + col], with `width` columns and
// `height` rows. Allocated with cudaMallocManaged so both host and device
// code can dereference it.
struct Matrix
{
int width;
int height;
float *elements;
};
// Reads A[row][col] from the row-major elements array (no bounds check).
__device__ float getElement(Matrix *A, int row, int col)
{
return A->elements[row * A->width + col];
}
// Writes A[row][col] into the row-major elements array (no bounds check).
__device__ void setElement(Matrix *A, int row, int col, float value)
{
A->elements[row * A->width + col] = value;
}
// C = A * B with an extra floating-point op per iteration; one thread per
// output element (row, col). This kernel is a profiling micro-benchmark (the
// original carried the nvprof branch-efficiency command in a comment).
// Fixed: removed ~150 lines of dead, commented-out divergent-branch ladder
// and an empty brace block; every live statement is preserved unchanged.
// Launch must cover width x height exactly (no bounds check).
__global__ void matMulKernel(Matrix *A, Matrix *B, Matrix *C)
{
    float Cvalue = 0.0;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    for (int i = 0; i < A->width; ++i)
    {
        // NOTE(review): `index` is unused by live code; it is kept because it
        // forces an extra global load of B, which the benchmark presumably
        // measures — confirm before removing.
        int index = (int)getElement(B, i, col) % 23;
        (void)index;
        Cvalue += getElement(A, row, i) * getElement(B, i, col);
        // deliberate double-precision division (FP64 cost in the profile)
        Cvalue += Cvalue / (double)0.3456789;
    }
    setElement(C, row, col, Cvalue);
}
// Builds two w x w random matrices in managed memory, multiplies them with
// matMulKernel, and prints the wall-clock time.
// Fixed: the original leaked all six cudaMallocManaged allocations (three
// Matrix structs and three element arrays); they are now freed on exit.
void GPU_Test()
{
    int width = w;
    int height = w;
    Matrix *A, *B, *C;
    cudaMallocManaged((void**)&A, sizeof(Matrix));
    cudaMallocManaged((void**)&B, sizeof(Matrix));
    cudaMallocManaged((void**)&C, sizeof(Matrix));
    int nBytes = width * height * sizeof(float);
    cudaMallocManaged((void**)&A->elements, nBytes);
    cudaMallocManaged((void**)&B->elements, nBytes);
    cudaMallocManaged((void**)&C->elements, nBytes);
    A->height = height;
    A->width = width;
    B->height = height;
    B->width = width;
    C->height = height;
    C->width = width;
    // random inputs; values are raw rand() ints stored as floats
    for (int i = 0; i < width * height; ++i)
    {
        A->elements[i] = rand();// + 1.0;
        B->elements[i] = rand();// + 2.0;
    }
    dim3 blockSize(32, 32);
    dim3 gridSize((width + blockSize.x - 1) / blockSize.x, (height + blockSize.y - 1) / blockSize.y);
    struct timeval t1,t2;
    gettimeofday(&t1,NULL);
    double timeuse;
    matMulKernel<<<gridSize, blockSize>>>(A, B, C);
    cudaDeviceSynchronize();
    gettimeofday(&t2,NULL);
    timeuse = t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec)/1000000.0;
    printf("GPU Use Time:%fs\n", timeuse);
    // release the managed allocations (element arrays before their structs)
    cudaFree(A->elements);
    cudaFree(B->elements);
    cudaFree(C->elements);
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
}
// Entry point: runs the GPU matrix-multiply benchmark only (the CPU variant
// is compiled out).
int main()
{
//CPU_Test();
GPU_Test();
return 0;
}
|
// Device helper with C linkage: stores 1 through `result` (likely a
// link/PTX smoke test — note it is __device__, not a launchable __global__).
extern "C" __device__ void kernel(int* result) { *result = 1; }
|
24,068 | /* Utility program. Convert input into a format that the spiking-visualizer can
* easily use. */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
/* Reads one number per line from stdin and prints "[time, value]," rows,
 * where time is the 0-based line index. Always returns 0. */
int format(void)
{
    char buf[LINE_MAX];
    int t;

    for (t = 0; fgets(buf, LINE_MAX, stdin) != NULL; t++) {
        float v = strtof(buf, NULL);
        printf("[%d, %10f],\n", t, v);
    }
    return 0;
}
// Entry point: stream-convert stdin for the spiking-visualizer.
int main(void)
{
format();
return 0;
}
|
24,069 |
/* Implementation of a XOR neural network in CUDA */
#include <stdio.h>
// weights for the hidden layer
// weights for the hidden layer (per neuron: bias, w_in0, w_in1)
float weights_h[] = { 0.5f, -1.0f, -1.0f,
-1.5f, 1.0f, 1.0f };
// weights for the output layer (bias, w_h0, w_h1)
float weights_o[] = { 0.5f, -1.0f, -1.0f };
// weight arrays for the device
float *dev_hw;
float *dev_ow;
// device input
float *dev_in;
// device output
float *dev_out;
// the four XOR input patterns
float inputs[4][2] = { { 0.0f, 0.0f }, { 0.0f, 1.0f },
{ 1.0f, 0.0f }, { 1.0f, 1.0f }};
// expected XOR outputs (not compared anywhere; reference only)
float outputs[] = { 0.0f, 1.0f, 1.0f, 0.0f };
// a forward propagation pass, calculating outputs
__global__ void calculate_output(float *dev_hw, float *dev_ow, float *input, float *output)
{
int tid = threadIdx.x;
__shared__ float hidden_out[2];
__shared__ float o;
// hidden layer
if (tid < 2) {
hidden_out[tid] = dev_hw[tid * 3] * 1.0f +
dev_hw[tid * 3 + 1] * input[0] +
dev_hw[tid * 3 + 2] * input[1];
// threshold
if (hidden_out[tid] > 0.0f)
hidden_out[tid] = 1.0f;
else
hidden_out[tid] = 0.0;
}
__syncthreads();
if (tid < 1) {
o = dev_ow[0] * 1.0f +
dev_ow[1] * hidden_out[0] +
dev_ow[2] * hidden_out[1];
// threshold
if (o > 0.0f)
*output = 1.0f;
else
*output = 0.0f;
}
}
// Runs the XOR network forward pass on all four input patterns and prints
// each result. Improvement: the original copy-pasted the upload/launch/
// download block four times; it is now a single loop producing identical
// output ("%g" prints the 0/1 inputs exactly as the hard-coded literals did).
int main(int argc, char **argv)
{
    float out;
    printf("### XOR test (forward propagation)\n");
    cudaMalloc((void**) &dev_hw, 6 * sizeof(float));
    cudaMalloc((void**) &dev_ow, 3 * sizeof(float));
    cudaMalloc((void**) &dev_in, 2 * sizeof(float));
    cudaMalloc((void**) &dev_out, sizeof(float));
    cudaMemcpy(dev_hw, weights_h, 6 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_ow, weights_o, 3 * sizeof(float), cudaMemcpyHostToDevice);
    // evaluate each of the four input patterns
    for (int i = 0; i < 4; i++) {
        cudaMemcpy(dev_in, inputs[i], 2 * sizeof(float), cudaMemcpyHostToDevice);
        calculate_output<<<1, 2>>>(dev_hw, dev_ow, dev_in, dev_out);
        // blocking copy also synchronizes with the kernel
        cudaMemcpy(&out, dev_out, sizeof(float), cudaMemcpyDeviceToHost);
        printf("Output for (%g, %g) = %f\n", inputs[i][0], inputs[i][1], out);
    }
    cudaFree(dev_hw);
    cudaFree(dev_ow);
    cudaFree(dev_in);
    cudaFree(dev_out);
    return 0;
}
|
24,070 | #include <thrust/set_operations.h>
#include <thrust/host_vector.h>
#include <stdio.h>
int main()
{
int A1[7] = {13, 1, 5, 3, 1, 1, 0};
int A2[7] = {13, 8, 5, 3, 2, 1, 1};
thrust::host_vector<int> result(7);
thrust::host_vector<int>::iterator result_end;
result_end = thrust::set_intersection(A1, A1 + 7, A2, A2 + 7, result.begin());
//result.erase(result_end, result.end());
for (int i = 0; i < result.size(); i++)
printf("%d\n", result[i]);
}
|
24,071 | #include "sample.cuh"
// Draws a uniform double from the per-thread curand state.
// NOTE(review): curand_uniform_double already returns values in (0, 1], so
// the abs() appears redundant — confirm before removing.
__device__ double
generateRandom(curandState *state)
{
double result = abs(curand_uniform_double(state));
return result;
}
// Draws a random integer from the interval (begin, end] via ceil of a
// scaled uniform draw (begin itself is effectively never returned because
// the uniform draw is nonzero — confirm this boundary is intended).
__device__ int
generateRandomInt(curandState *state,int begin,int end)
{
int result = begin+int(ceil(abs(curand_uniform_double(state))*(end-begin)));
return result;
}
// Initializes a curand state from `seed` (subsequence 0, offset 0).
// Identical seeds produce identical random streams, so callers must pass a
// distinct seed per thread for independent sequences.
__device__ void
generateRandomInit(curandState *state,int seed)
{
curand_init(seed, 0, 0, state);
}
// Inverse-CDF sampling: draws a uniform probability and locates its bin in
// row `id` of the cumulative histogram `histo` (rows of length `max`)
// via binarySearch. Returns the sampled bin index.
__device__ int
sampling(curandState *state,double *histo,int max,int id)
{
double prob;
prob = generateRandom(state);
return binarySearch(histo,prob,max,id);
}
// Lower-bound search in row `id` of the flattened array `histo` (rows of
// length `max`, assumed sorted ascending, as a cumulative histogram is):
// returns the index of the first element >= target, or `end` when target
// falls past histo[id*max+start]. The loop keeps a two-element window
// [start, end] and narrows until start+1 == end.
__device__ int
binarySearch(double *histo,double target,int max,int id)
{
int start = 0;
int end = max-1;
int mid;
while(start+1<end){
mid = start+(end-start)/2;
if (histo[id*max+mid]==target){
end = mid;
}
else if (histo[id*max+mid] < target){
start = mid;
}
else if (histo[id*max+mid] > target){
end = mid;
}
}
// resolve the final two candidates
if (target <= histo[id*max+start]){
return start;
}
else if (histo[id*max+start] < target){
return end;
}
return -1;
}
24,072 | #include<stdio.h>
#define CHECK(res) if(res!=cudaSuccess){exit(-1);}
const int width=5;
const int height=22;
const int size=width*height*sizeof(int );
//const int size=sizeof(int)*width;
/*__global__ void kerneltest(int **b,size_t pitch)
{
printf("(%d,%d)\n",threadIdx.x,threadIdx.y);
int *c=(int *)((char *)b+threadIdx.x*pitch);
printf("%d, ",c[threadIdx.y]);
}
int main(int argc,char **argv)
{
int i,j;
int a[height][width];
int c[height][width];
int **b;
size_t pitch;
cudaError_t res;
for(i=0;i<height;i++)
{
for(j=0;j<width;j++)
{
a[i][j]=j+i*width;
c[i][j]=0;
printf("%d ",a[i][j]);
}
printf("\n");
}
res=cudaMallocPitch((void **)&b,&pitch,size,height);CHECK(res);printf("1");
res=cudaMemcpy2D(b,pitch,a,size,size,height,cudaMemcpyHostToDevice);CHECK(res);printf("2");
dim3 threads(5,10);
dim3 blocks(1,1);
kerneltest<<<blocks,threads>>>(b,pitch);
printf("3");
res=cudaMemcpy2D(c,size,b,pitch,size,height,cudaMemcpyDeviceToHost);CHECK(res);printf("4\n");
for(i=0;i<height;i++)
{
for(j=0;j<width;j++)
{
printf("%d ",c[i][j]);
}
printf("\n");
}
cudaFree(b);
return 0;
}
*/
// Each global thread sums `dmul` consecutive rows of the dheight x dwidth
// int matrix `b` (treated as a flat row-major buffer despite the int**
// signature — the double cast below flattens it) and prints one
// "(thread,row):sum" line per row. Rows past dheight are skipped.
__global__ void testkernel(int **b,int dheight,int dwidth,int dmul)
{
//printf("%d\n",add[threadIdx.x+blockIdx.x*blockDim.x]);
/*if(threadIdx.x+blockIdx.x*blockDim.x<dwidth){
int i,idx,idy;
int num=0;
for(i=0;i<dheight;i++)
{
idx=(threadIdx.x+i*dwidth)/blockDim.x;
idy=(threadIdx.x+i*dwidth)%blockDim.x;
num+=add[idy+idx*blockDim.x];
}
printf("%d ",num);}*/
int i,j,num;
int idx=threadIdx.x+blockIdx.x*blockDim.x;
int idy;
printf("(%d,%d)\n",threadIdx.x,idx);
// reinterpret the int** parameter as the flat int buffer it really is
int *add=(int *)((int *)b);//,pointarr[threadIdx.y][threadIdx.x]);
for(i=0;i<dmul;i++)
{
idy=dmul*idx+i; // row handled in this iteration
num=0;
if(idy<dheight){
for(j=0;j<dwidth;j++)
{
num+=add[idy*dwidth+j];
}
printf("(%d,%d):%d\n",idx,idy,num);
}
}
}
/* Host driver: fills a height x width matrix, uploads it, launches
 * testkernel to print per-row sums, then downloads the (unmodified) data
 * and prints it again.
 * Fixes vs. original: every CUDA call is now verified with the CHECK macro
 * defined above (it was defined but unused), the kernel launch is checked
 * via cudaGetLastError()/cudaDeviceSynchronize(), and the device buffer is
 * freed (the original leaked it). */
int main()
{
    int a[height][width];
    int c[height][width];
    int **b;                 /* device buffer: flat int array, typed int** to match the kernel */
    int thread_size=10;
    int block_size=1;
    int mul;                 /* rows handled per thread (ceil of height / total threads) */
    mul=(height/(thread_size*block_size))+1;
    for(int i=0;i<height;i++)
    {
        for(int j=0;j<width;j++)
        {
            a[i][j]=j+i*width;
            c[i][j]=0;
            printf("%d ",a[i][j]);
        }
        printf("\n");
    }
    CHECK(cudaMalloc((void **)&b,size));
    CHECK(cudaMemcpy(b,a,size,cudaMemcpyHostToDevice));
    dim3 threads(thread_size,1);
    dim3 blocks(block_size,1);
    testkernel<<<blocks,threads>>>(b,height,width,mul);
    CHECK(cudaGetLastError());        /* bad launch configuration */
    CHECK(cudaDeviceSynchronize());   /* asynchronous kernel faults */
    CHECK(cudaMemcpy(c,b,size,cudaMemcpyDeviceToHost));
    for(int i=0;i<height;i++)
    {
        for(int j=0;j<width;j++)
        {
            printf("%d ",c[i][j]);
        }
        printf("\n");
    }
    cudaFree(b);                      /* original leaked this allocation */
    return 0;
}
|
24,073 | #include "includes.h"
/* Block-level max reduction: each block reduces 2*blockDim.x elements of
 * g_iarr and writes its partial maximum to g_maxarr[blockIdx.x].
 * Requires blockDim.x * sizeof(int) dynamic shared memory; assumes
 * blockDim.x is a power of two — TODO confirm at the launch site. */
__global__ void entrySearch_max_int_kernel(int *g_iarr, int *g_maxarr, int size)
{
// create shared memory
extern __shared__ int sarr_int[];
// load shared mem: the first reduction level happens during the load —
// each thread takes the max of two elements a blockDim.x stride apart.
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
if(i + blockDim.x < size) {
if(g_iarr[i] > g_iarr[i + blockDim.x]) {
sarr_int[tid] = g_iarr[i];
} else {
sarr_int[tid] = g_iarr[i + blockDim.x];
}
} else if (i < size) {
sarr_int[tid] = g_iarr[i];
} else {
sarr_int[tid] = INT_MIN;   // identity element for max (padding lanes)
}
__syncthreads();
// do comparison in shared mem: tree reduction, halving the stride each
// pass; the barrier sits outside the divergent branch so all threads
// reach it.
for(unsigned int s=blockDim.x/2; s>0; s>>=1) {
if(tid < s) {
if(sarr_int[tid] < sarr_int[tid + s]) {
sarr_int[tid] = sarr_int[tid + s];
}
}
__syncthreads();
}
// write result for this block to global mem
if(tid == 0) {
g_maxarr[blockIdx.x] = sarr_int[0];
}
}
24,074 | #include <iostream>
#include <math.h>
/* Matrix dimensions for mul(): x is I x J, y is J x K, z is I x K (row-major). */
int I = 500;
int J = 500;
int K = 500;
// Grid-stride matrix multiply: z (I x K) = x (I x J) * y (J x K), all
// row-major. Each thread computes one or more output cells, so the kernel
// is correct for any grid/block configuration (including <<<1,1>>>).
__global__
void mul(int I, int J, int K, float *x, float *y, float *z)
{
    const int step = blockDim.x * gridDim.x;
    for (int cell = blockIdx.x * blockDim.x + threadIdx.x; cell < I * K; cell += step) {
        const int row = cell / K;
        const int col = cell % K;
        float acc = 0.0f;                      // accumulate locally, store once
        for (int jj = 0; jj < J; ++jj) {
            acc += x[row * J + jj] * y[jj * K + col];
        }
        z[cell] = acc;
    }
}
/* Host driver for the grid-stride matmul kernel: fills x with 1s and y
 * with 2s in managed memory, multiplies, and prints progress markers.
 * Fixes vs. original:
 *  - the launch was hard-coded to <<<1,1>>>, making a single GPU thread do
 *    the entire 500x500x500 multiply; restored the intended multi-block
 *    configuration (see the values commented out in the original);
 *  - the managed allocations are now checked before use. */
int main(void)
{
    setbuf(stdout, NULL);
    printf("Start\n");
    float *x, *y, *z;
    if (cudaMallocManaged(&x, I*J*sizeof(float)) != cudaSuccess ||
        cudaMallocManaged(&y, J*K*sizeof(float)) != cudaSuccess ||
        cudaMallocManaged(&z, I*K*sizeof(float)) != cudaSuccess)
    {
        printf("cudaMallocManaged failed\n");
        return 1;
    }
    for(int i = 0; i < I*J; i++)
    {
        x[i] = 1.0f;
    }
    for(int i = 0; i < J*K; i++)
    {
        y[i] = 2.0f;
    }
    /* The grid-stride kernel is correct for any launch shape; size the grid
     * by ceil-division and cap it at the 1D grid limit. */
    int blockSize = 512;
    int numBlocks = (I*K + blockSize - 1) / blockSize;
    if (numBlocks > 65535) numBlocks = 65535;
    printf("Number %f\n", x[0]);
    mul<<<numBlocks, blockSize>>>(I, J, K, x, y, z);
    printf("Number %f\n", x[0]);
    cudaError_t cudaerr = cudaDeviceSynchronize();
    if (cudaerr != cudaSuccess)
        printf("kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(cudaerr));
    printf("Number %f\n", x[0]);
    cudaFree(x);
    cudaFree(y);
    cudaFree(z);
    return 0;
}
|
24,075 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
// Step 1: Query Device for maximum block sizes and thread sizes
// (not really sure what we care about)
// Step 2: Take in user input to specify data dimensions
// Step 3: Check to make sure user inputs match with specified program device queries
/* Device limits filled in by getHardwareConstraints(); MAX_THREADS_DIM
 * points at a 3-element array (max block dimensions in x, y, z). */
int TOTAL_GLOBAL_MEM, REGS_PER_BLOCK, WARP_SIZE, MAX_THREADS_PER_BLOCK, *MAX_THREADS_DIM;
// Element-wise multiply-add over unsigned ints: D[i] = c*A[i] + B[i].
// One thread per element; threads beyond num_elements do nothing.
__global__
void vec_mult_add(const unsigned int * A, const unsigned int * B, const unsigned int c, unsigned int * D, int num_elements)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= num_elements)
        return;                       // guard the grid tail
    D[idx] = B[idx] + A[idx] * c;
}
// Device Query Information:
/* Queries every CUDA device and records its limits in the module globals,
 * printing a summary for each device.
 * Fixes vs. original:
 *  - MAX_THREADS_DIM was assigned deviceProp.maxThreadsDim, a field of a
 *    STACK-LOCAL struct, so the global dangled once this function returned;
 *    the values are now copied into static storage.
 *  - total global memory was printed with %zu from an int global; the
 *    size_t field is printed directly. (TOTAL_GLOBAL_MEM itself is still an
 *    int and may truncate on devices with more than 2 GB.) */
void getHardwareConstraints() {
    static int maxThreadsDim[3];   /* outlives this call, unlike deviceProp */
    //=============================Gets number of cuda devices===========================================
    int deviceCount = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
    if (error_id != cudaSuccess)
    {
        printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
        printf("Result = FAIL\n");
        exit(EXIT_FAILURE);
    }
    // cudaGetDeviceCount reports 0 when no CUDA-capable device exists.
    if (deviceCount == 0)
    {
        printf("There are no available device(s) that support CUDA\n");
    }
    else
    {
        printf("Detected %d CUDA Capable device(s)\n", deviceCount);
    }
    // Record the limits of each device in turn (globals keep the last one).
    for (int device = 0; device < deviceCount; device++) {
        cudaSetDevice(device);
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, device);
        printf("\nDevice: %d \"%s\"\n===========================================\n", device, deviceProp.name);
        TOTAL_GLOBAL_MEM = (int)deviceProp.totalGlobalMem;   /* may truncate >2GB */
        REGS_PER_BLOCK = deviceProp.regsPerBlock;
        WARP_SIZE = deviceProp.warpSize;
        MAX_THREADS_PER_BLOCK = deviceProp.maxThreadsPerBlock;
        for (int d = 0; d < 3; d++) maxThreadsDim[d] = deviceProp.maxThreadsDim[d];
        MAX_THREADS_DIM = maxThreadsDim;   /* points at static storage now */
        printf("The %s has:\n\t-%zu total bytes of global memory\n\t-%d registers per block\n\t-%d threads per warp\n\t-A maximum of %d threads per block\n\t-And a maximum thread dimension of %d x %d x %d\n", deviceProp.name, (size_t)deviceProp.totalGlobalMem, REGS_PER_BLOCK, WARP_SIZE, MAX_THREADS_PER_BLOCK, MAX_THREADS_DIM[0], MAX_THREADS_DIM[1], MAX_THREADS_DIM[2]);
    }
}
/* Prints the first numElements entries of A, one per line, in brackets.
 * Fix: unsigned values were printed with %d (signed); now %u. */
void printVector(unsigned int * A, int numElements) {
    printf("\n[");
    for(int i = 0; i < numElements; i++){
        printf("\n %u",A[i]);
    }
    printf("\n]\n");
}
/* Verifies A[i] == 2*i + i for every element (the driver uses c == 2 and
 * A[i] == B[i] == i) and reports each mismatch, or success if none.
 * Fix: unsigned values were printed with %d (signed); now %u. */
void checkOutput(unsigned int * A, int numElements) {
    bool incorrect = false;
    for(int i = 0; i < numElements; i++) {
        if(A[i] != (unsigned int)(2*i + i)) {
            incorrect = true;
            printf("\nIncorrect value of %u at index %d... should be %d\n", A[i], i, (2*i + i));
        }
    }
    if(!incorrect) {
        printf("\nOutput is Correct!\n");
    }
}
/* Unused placeholder: the element-wise multiply-add ended up implemented
 * directly in main(); kept to preserve the original file's interface. */
void elementWiseMult_Add() {
}
/* Aborts with a diagnostic when a CUDA call fails. Factors out the
 * copy-pasted error blocks of the original main(). */
static void checkCuda(cudaError_t err, const char *what) {
    if (err != cudaSuccess) {
        fprintf(stderr, "%s failed (error code %s)!\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
/* Driver: computes D = c*A + B over 1024 unsigned ints on the GPU.
 * argv[1] = total thread count, argv[2] = threads per block.
 * Fixes vs. original:
 *  - argc is validated before argv is dereferenced;
 *  - host mallocs are checked;
 *  - the grid is sized by ceil-division (numThreads/threadsPerBlock
 *    previously truncated, which could launch too few blocks and silently
 *    leave the tail of the vector unprocessed);
 *  - the dozen hand-rolled error blocks are collapsed into checkCuda(). */
int main(int argc, char* argv[]) {
    if (argc < 3) {
        fprintf(stderr, "usage: %s <numThreads> <blockSize>\n", argv[0]);
        return EXIT_FAILURE;
    }
    int numThreads = atoi(argv[1]);
    int blockSize = atoi(argv[2]);
    getHardwareConstraints();
    printf("numThreads: %d and blockSize: %d\n", numThreads, blockSize);
    int numElements = 1024;
    size_t size = numElements * sizeof(int);
    /* Host buffers. */
    unsigned int *h_A = (unsigned int *)malloc(size);
    unsigned int *h_B = (unsigned int *)malloc(size);
    unsigned int *h_D = (unsigned int *)malloc(size);
    if (!h_A || !h_B || !h_D) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }
    int c = 2;
    for (int i = 0; i < numElements; i++) {
        h_A[i] = i;
        h_B[i] = i;
    }
    /* Device buffers. */
    unsigned int *d_A = NULL, *d_B = NULL, *d_D = NULL;
    checkCuda(cudaMalloc((void **)&d_A, size), "cudaMalloc d_A");
    checkCuda(cudaMalloc((void **)&d_B, size), "cudaMalloc d_B");
    checkCuda(cudaMalloc((void **)&d_D, size), "cudaMalloc d_D");
    checkCuda(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice), "copy A to device");
    checkCuda(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice), "copy B to device");
    /* Launch: ceil-divide so the requested thread count is fully covered. */
    int threadsPerBlock = blockSize;
    int blocksPerGrid = (numThreads + threadsPerBlock - 1) / threadsPerBlock;
    if (blocksPerGrid < 1) blocksPerGrid = 1;
    vec_mult_add<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, c, d_D, numElements);
    checkCuda(cudaGetLastError(), "kernel launch");
    checkCuda(cudaMemcpy(h_D, d_D, size, cudaMemcpyDeviceToHost), "copy D to host");
    checkOutput(h_D, numElements);
    checkCuda(cudaFree(d_A), "cudaFree d_A");
    checkCuda(cudaFree(d_B), "cudaFree d_B");
    checkCuda(cudaFree(d_D), "cudaFree d_D");
    free(h_A);
    free(h_B);
    free(h_D);
    /* cudaDeviceReset flushes profiling data and tears down all state. */
    checkCuda(cudaDeviceReset(), "cudaDeviceReset");
    printf("Done\n");
    return EXIT_SUCCESS;
}
|
24,076 | #include "includes.h"
// For each of the n in/out particles, copies its current velocity+density
// float4 (velrhop) into the previous-step buffer (velrhopm1); inoutpart
// maps the thread index to the particle index being updated.
__global__ void KerInOutUpdateVelrhopM1(unsigned n,const int *inoutpart ,const float4 *velrhop,float4 *velrhopm1)
{
    const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= n)
        return;                          // grid tail guard
    const unsigned particle = inoutpart[tid];
    velrhopm1[particle] = velrhop[particle];
}
24,077 | /*-----------
*
* matrixMulGlobal.cu
*
* This is the source file for matrix multiplication with global memory only.
*
* This kernel is from NVIDIA CUDA samples. reduction_kernel.cu.
*
* streamsOptBenchmark/reduction_kernel.cu
*
* By Hao Li
*
*------------
*/
/*
Parallel reduction kernels
*/
#include <stdio.h>
// #include "structs.h"
// #include "functions.h"
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
/* Names the dynamically-sized shared-memory block for the reduction
 * kernels: the extern __shared__ array must be declared identically
 * everywhere it is used, so this utility wraps it behind conversion
 * operators (float-only here; the commented-out template specialization
 * below originally handled double). */
struct SharedMemory
{
__device__ inline operator float *()
{
extern __shared__ float __smem[];
return (float *)__smem;
}
__device__ inline operator const float *() const
{
extern __shared__ float __smem[];
return (float *)__smem;
}
};
// // specialize for double to avoid unaligned memory
// // access compile errors
// template<>
// struct SharedMemory<double>
// {
// __device__ inline operator double *()
// {
// extern __shared__ double __smem_d[];
// return (double *)__smem_d;
// }
// __device__ inline operator const double *() const
// {
// extern __shared__ double __smem_d[];
// return (double *)__smem_d;
// }
// };
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
/* Reduction v0 — interleaved addressing with modulo: sums this block's
 * slice of g_idata into g_odata[blockIdx.x] via dynamic shared memory.
 * The (tid % (2*s)) test leaves no whole warp active, which is the
 * inefficiency this version exists to demonstrate. The outer
 * 100000-iteration loop just repeats identical work for benchmarking.
 * Requires blockDim.x*sizeof(float) dynamic shared memory; assumes
 * power-of-two blockDim.x — TODO confirm at the launch site. */
__global__ void reduce0(float *g_idata, float *g_odata, unsigned int n)
{
for(int l = 0; l < 100000; l++)
{
float *sdata = SharedMemory();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;   // pad out-of-range lanes with the additive identity
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2)
{
// modulo arithmetic is slow!
if ((tid % (2*s)) == 0)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
}
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts.
*/
/* Reduction v1 — contiguous threads, interleaved addressing: the
 * index = 2*s*tid mapping keeps active threads contiguous (no warp
 * divergence from modulo), but the strided shared-memory access causes
 * bank conflicts. Outer loop repeats the work for benchmarking.
 * Same shared-memory/power-of-two-blockDim assumptions as reduce0. */
__global__ void reduce1(float *g_idata, float *g_odata, unsigned int n)
{
for(int l = 0; l < 100000; l++)
{
float *sdata = SharedMemory();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2)
{
int index = 2 * s * tid;          // contiguous active threads, strided addresses
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
/* Reduction v2 — sequential addressing: the halving stride keeps active
 * threads contiguous AND their shared-memory accesses conflict-free.
 * Outer loop repeats the work for benchmarking. Same shared-memory /
 * power-of-two-blockDim assumptions as reduce0. */
__global__ void reduce2(float *g_idata, float *g_odata, unsigned int n)
{
for(int l = 0; l < 100000; l++)
{
float *sdata = SharedMemory();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem: stride halves each pass
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
/* Reduction v3 — first add during global load: each block covers
 * 2*blockDim.x inputs, halving the thread count needed, then reduces with
 * sequential addressing. Outer loop repeats the work for benchmarking.
 * Same shared-memory / power-of-two-blockDim assumptions as reduce0. */
__global__ void reduce3(float *g_idata, float *g_odata, unsigned int n)
{
for(int l = 0; l < 100000; l++)
{
float *sdata = SharedMemory();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
int mySum = (i < n) ? g_idata[i] : 0;
if (i + blockDim.x < n)
mySum += g_idata[i+blockDim.x];
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] = mySum = mySum + sdata[tid + s];   // keep running sum in a register too
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
// void reduce(int size, int threads, int blocks,
// int whichKernel, int *d_idata, int *d_odata)
// {
// dim3 dimBlock(threads, 1, 1);
// dim3 dimGrid(blocks, 1, 1);
// // when there is only one warp per block, we need to allocate two warps
// // worth of shared memory so that we don't index shared memory out of bounds
// int smemSize = (threads <= 32) ? 2 * threads * sizeof(int) : threads * sizeof(int);
// // choose which of the optimized versions of reduction to launch
// switch (whichKernel)
// {
// case 0:
// reduce0<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
// break;
// case 1:
// reduce1<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
// break;
// case 2:
// reduce2<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
// break;
// case 3:
// reduce3<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
// break;
// }
// }
// int main(int argc, char **argv){
// int matrixDataSize = sizeof(int) * MATRIX_SIZE * MATRIX_SIZE;
// Matrix h_A, h_C;
// Matrix d_A, d_C;
// initMatrix(h_A, matrixDataSize, onHOST);
// initMatrix(h_C, matrixDataSize, onHOST);
// initMatrix(d_A, matrixDataSize, onDEVICE);
// initMatrix(d_C, matrixDataSize, onDEVICE);
// cudaMemcpy(d_A.elements, h_A.elements, matrixDataSize, cudaMemcpyHostToDevice);
// // Invoke kernel
// // dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// // dim3 dimGrid(h_B.width / dimBlock.x, h_A.height / dimBlock.y);
// // execute the kernel
// for(int i =0; i < 4; i++){
// reduce(matrixDataSize, h_A.width / BLOCK_SIZE * h_A.height / BLOCK_SIZE,
// BLOCK_SIZE*BLOCK_SIZE, i, d_A.elements, d_C.elements);
// }
// cudaMemcpy(h_C.elements, d_C.elements, matrixDataSize, cudaMemcpyDeviceToHost);
// free(h_A.elements);
// free(h_C.elements);
// cudaFree(d_A.elements);
// cudaFree(d_C.elements);
// return 0;
// }
|
24,078 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
/* Solves a*x^2 + b*x - n[i] = 0 for the positive root via the quadratic
 * formula; with a = 1, b = 0 this reduces to sqrt(n[i]). One thread per
 * element (threadIdx.x indexes within a single block).
 * Fix: the formula was written ".../ 2 * a", which by operator precedence
 * divides by 2 and then MULTIPLIES by a. Harmless while a == 1, wrong for
 * any other a; corrected to "/ (2.0 * a)". Also replaced pow(b, 2) with
 * the cheaper b * b. */
__global__ void adicionarKernel(double* resultado, const double* n) {
    int i = threadIdx.x;
    double a = 1, b = 0;
    double delta = b * b - (4 * a * (n[i] * -1));
    resultado[i] = ((b * -1) + sqrt(delta)) / (2.0 * a);
}
/* Allocates device buffers, launches adicionarKernel over tam elements,
 * and copies the roots back into resultado. Returns the first CUDA error
 * encountered (cudaSuccess on a clean run).
 * Fix: gpu_n and gpu_resultado were uninitialized, so an early
 * "goto Falha" handed garbage pointers to cudaFree (undefined behavior);
 * both now start as NULL, which cudaFree treats as a no-op. */
cudaError_t calcularRaizes(double* resultado, const double* n, unsigned int tam) {
    double* gpu_n = NULL;          /* NULL so the cleanup path is always safe */
    double* gpu_resultado = NULL;
    cudaError_t status;
    status = cudaSetDevice(0);
    if (status != cudaSuccess) {
        printf("cudaSetDevice falhou!");
        goto Falha;
    }
    status = cudaMalloc((void**)&gpu_resultado, tam * sizeof(double));
    if (status != cudaSuccess) {
        printf("cudaMalloc falhou!");
        goto Falha;
    }
    status = cudaMalloc((void**)&gpu_n, tam * sizeof(double));
    if (status != cudaSuccess) {
        printf("cudaMalloc falhou!");
        goto Falha;
    }
    status = cudaMemcpy(gpu_n, n, tam * sizeof(double), cudaMemcpyHostToDevice);
    if (status != cudaSuccess) {
        printf("cudaMemcpy falhou!");
        goto Falha;
    }
    /* One block of tam threads; adicionarKernel indexes by threadIdx.x only. */
    adicionarKernel<<<1, tam>>>(gpu_resultado, gpu_n);
    status = cudaGetLastError();
    if (status != cudaSuccess) {
        printf("adicionarKernel falhou: %s", cudaGetErrorString(status));
        goto Falha;
    }
    status = cudaDeviceSynchronize();
    if (status != cudaSuccess) {
        printf("cudaDeviceSynchronize falhou: %d", status);
        goto Falha;
    }
    status = cudaMemcpy(resultado, gpu_resultado, tam * sizeof(double), cudaMemcpyDeviceToHost);
    if (status != cudaSuccess) {
        printf("cudaMemcpy falhou!");
        goto Falha;
    }
Falha:
    cudaFree(gpu_resultado);
    cudaFree(gpu_n);
    return status;
}
// Host driver: computes the square roots of ten perfect squares on the GPU
// via the quadratic-formula kernel, prints them, and resets the device.
int main()
{
    const int tam = 10;
    const double entradas[tam] = { 1, 4, 9, 16, 25, 36, 49, 64, 81, 100 };
    double raizes[tam] = { 0 };
    if (calcularRaizes(raizes, entradas, tam) != cudaSuccess) {
        printf("calcularRaizes falhou!");
        return 1;
    }
    printf("RESULTADO\n");
    for (int idx = 0; idx < tam; ++idx)
        printf("Raiz de %f = %f\n", entradas[idx], raizes[idx]);
    getchar();   // keep the console window open
    if (cudaDeviceReset() != cudaSuccess) {
        printf("cudaDeviceReset falhou!");
        return 1;
    }
    return 0;
}
24,079 | /*#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define TILE_SIZE 4
#define INPUT_SIZE 12
#define MASK_WIDTH 5
__constant__ float M[MASK_WIDTH];
__global__ void convolution_shared_memory(float* N, float* P) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float N_s[TILE_SIZE];
N_s[threadIdx.x] = N[i];
__syncthreads();
int this_title_start_point = blockIdx.x * blockDim.x;
int next_tile_start_point = (blockIdx.x + 1) * blockDim.x;
int n_start_point = i - (MASK_WIDTH / 2);
float Pvalue = 0;
for (int j = 0; j < MASK_WIDTH; j++) {
int N_index = n_start_point + j;
if (N_index >= 0 && N_index < INPUT_SIZE) {
if ((N_index >= this_title_start_point) && (N_index < next_tile_start_point)) {
Pvalue += N_s[threadIdx.x + j - (MASK_WIDTH / 2)] * M[j];
}
else {
Pvalue += N[N_index] * M[j];
}
}
}
P[i] = Pvalue;
}
__global__ void convolution_constant_memory(float* N, float* P, int Width) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
float Pvalue = 0;
int n_start_point = i - (MASK_WIDTH / 2);
for (int j = 0; j < MASK_WIDTH; j++) {
if (n_start_point + j >= 0 && n_start_point + j < Width) {
Pvalue += N[n_start_point + j] * M[j];
}
}
P[i] = Pvalue;
}
__global__ void convolution_global_memory(float* N, float* M, float* P, int Width) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
float Pvalue = 0;
int n_start_point = i - (MASK_WIDTH / 2);
for (int j = 0; j < MASK_WIDTH; j++) {
if (n_start_point + j >= 0 && n_start_point + j < Width) {
Pvalue += N[n_start_point + j] * M[j];
}
}
P[i] = Pvalue;
}
int mafghjkin() {
//device input and output
float* d_N = 0;
float* d_P = 0;
cudaMalloc(&d_N, INPUT_SIZE * sizeof(float));
cudaMalloc(&d_P, INPUT_SIZE * sizeof(float));
//host input and output
float* h_N = (float*)malloc(INPUT_SIZE * sizeof(float));
float* h_P = (float*)malloc(INPUT_SIZE * sizeof(float));
float* h_M = (float*)malloc(MASK_WIDTH * sizeof(float));
//initialize input on host
for (int i = 0; i < INPUT_SIZE; ++i) {
h_N[i] = (float)i;
}
//transfer input to device
cudaMemcpy(d_N, h_N, INPUT_SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_P, h_P, INPUT_SIZE * sizeof(float), cudaMemcpyHostToDevice);
//initialize mask on host
for (int j = 0; j < MASK_WIDTH; ++j) {
h_M[j] = (float)j;
}
//transfer mask to constant memory
cudaMemcpyToSymbol(M, h_M, MASK_WIDTH * sizeof(float));
//call convolution kernel
convolution_shared_memory << <(INPUT_SIZE + TILE_SIZE - 1) / TILE_SIZE, TILE_SIZE >> > (d_N, d_P);
//retrieve result from device
cudaMemcpy(h_P, d_P, INPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < INPUT_SIZE; ++i) {
printf("%f\n", h_P[i]);
}
cudaFree(d_N);
cudaFree(d_P);
cudaFree(M);
free(h_N);
free(h_P);
free(h_M);
}*/ |
24,080 | #include<stdio.h>
#include<stdlib.h>
#include<curand_kernel.h>
#include<curand.h>
#include<sys/time.h>
/* Simulation parameters; NUM_PARTICLES and BLOCK_SIZE are overwritten from
 * the command line in main(). NOTE(review): GRID_SIZE is evaluated once at
 * static-init time from the DEFAULTS (1000/512, truncating division) and
 * is not automatically recomputed after main() changes the inputs —
 * verify before relying on it for large particle counts. */
unsigned int NUM_PARTICLES = 1000;
unsigned int NUM_ITERATIONS = 100;
unsigned int BLOCK_SIZE = 512;
unsigned int GRID_SIZE = ((NUM_PARTICLES)/BLOCK_SIZE);
/* 3D position of one particle. */
typedef struct {
float posX;
float posY;
float posZ;
}position;
/* 3D velocity of one particle. */
typedef struct {
float velX;
float velY;
float velZ;
}velocity;
/* Full particle state (array-of-structures layout). */
typedef struct {
position pos;
velocity vel;
}Particle;
/*Only a one-time step to fill data to the array of structure(i.e. Particle)*/
/* One-time initialization: random positions in [0,10) and velocities in
 * [0,100) for every particle.
 * Fix: the loop body wrote through p-> (i.e. always particle 0), leaving
 * the other NUM_PARTICLES-1 entries uninitialized; it now indexes p[i]. */
void fill_data(Particle *p)
{
    for(int i=0; i< NUM_PARTICLES; i++)
    {
        p[i].pos.posX = 10*((float)rand()/RAND_MAX);
        p[i].pos.posY = 10*((float)rand()/RAND_MAX);
        p[i].pos.posZ = 10*((float)rand()/RAND_MAX);
        p[i].vel.velX = 100*((float)rand()/RAND_MAX);
        p[i].vel.velY = 100*((float)rand()/RAND_MAX);
        p[i].vel.velZ = 100*((float)rand()/RAND_MAX);
    }
}
/*update velocity w.r.t to common pattern () and then
update position w.r.t formula given: p.x = p.x + v.x.dt where dt=1 */
/* CPU baseline with a deterministic velocity pattern: on each iteration j,
 * particle i's velocity components are nudged by fixed (i+j)-dependent
 * amounts (sign alternating with (i+j) parity), then the position advances
 * one time step (p += v, dt = 1). Prints wall-clock time for the run. */
void update_velocity_position_in_cpu(Particle *p)
{
struct timeval start_time;
struct timeval stop_time;
gettimeofday(&start_time, NULL);
for(int j=0; j<NUM_ITERATIONS; j++)
{
for(int i=0; i<NUM_PARTICLES; i++)
{
/* parity of (i+j) picks add vs. subtract for each component */
(i+j)%2 ? (p[i].vel.velX += (5*(i+j))%100) : (p[i].vel.velX -= (5*(i+j))%100);
(i+j)%2 ? (p[i].vel.velY += (3*(i+j))%100) : (p[i].vel.velY -= (3*(i+j))%100);
(i+j)%2 ? (p[i].vel.velZ += (7*(i+j))%100) : (p[i].vel.velZ -= (7*(i+j))%100);
p[i].pos.posX = p[i].pos.posX + p[i].vel.velX;
p[i].pos.posY = p[i].pos.posY + p[i].vel.velY;
p[i].pos.posZ = p[i].pos.posZ + p[i].vel.velZ;
}
}
gettimeofday(&stop_time, NULL);
printf("Total time of Execution in CPU: %ld usec\n\n",
(stop_time.tv_sec*1000000 + stop_time.tv_usec)-(start_time.tv_sec*1000000 + start_time.tv_usec));
}
/* CPU baseline with random velocities: every iteration assigns each
 * particle a fresh random velocity in [0,100) per axis, then advances the
 * position by one time step (p += v, dt = 1). Prints wall-clock time. */
void update_velocity_position_rand_in_cpu(Particle *p)
{
    struct timeval t0, t1;
    gettimeofday(&t0, NULL);
    for (int iter = 0; iter < NUM_ITERATIONS; iter++)
    {
        for (int idx = 0; idx < NUM_PARTICLES; idx++)
        {
            Particle *cur = &p[idx];
            cur->vel.velX = 100*((float)rand()/RAND_MAX);
            cur->vel.velY = 100*((float)rand()/RAND_MAX);
            cur->vel.velZ = 100*((float)rand()/RAND_MAX);
            cur->pos.posX += cur->vel.velX;
            cur->pos.posY += cur->vel.velY;
            cur->pos.posZ += cur->vel.velZ;
        }
    }
    gettimeofday(&t1, NULL);
    printf("Total time of Execution in CPU: %ld usec\n\n",
    (t1.tv_sec*1000000 + t1.tv_usec)-(t0.tv_sec*1000000 + t0.tv_usec));
}
/* GPU variant: one thread per particle runs all Niterations time steps,
 * using the same deterministic (i+j)-parity velocity pattern as the CPU
 * version, then advancing the position (p += v, dt = 1).
 * Fix: removed the trailing __syncthreads() — the kernel uses no shared
 * memory and no thread reads another thread's data, so the barrier was
 * pure overhead. */
__global__ void particle_kernel(Particle *p, int Nparticles, int Niterations)
{
    int i = (blockIdx.x*blockDim.x)+threadIdx.x;
    if(i < Nparticles) {
        for(int j=0; j<Niterations; j++)
        {
            /* parity of (i+j) picks add vs. subtract per component */
            (i+j)%2 ? (p[i].vel.velX += (5*(i+j))%100) : (p[i].vel.velX -= (5*(i+j))%100);
            (i+j)%2 ? (p[i].vel.velY += (3*(i+j))%100) : (p[i].vel.velY -= (3*(i+j))%100);
            (i+j)%2 ? (p[i].vel.velZ += (7*(i+j))%100) : (p[i].vel.velZ -= (7*(i+j))%100);
            p[i].pos.posX = p[i].pos.posX + p[i].vel.velX;
            p[i].pos.posY = p[i].pos.posY + p[i].vel.velY;
            p[i].pos.posZ = p[i].pos.posZ + p[i].vel.velZ;
        }
    }
}
/* Single-time-step kernel: advances each particle's position by its
 * current velocity (dt = 1); the host loops over iterations.
 * Fix: removed the trailing __syncthreads() — no shared memory and no
 * cross-thread reads, so the barrier served no purpose. */
__global__ void particle_kernel_per_iteration(Particle *p, int Nparticles)
{
    int i = (blockIdx.x*blockDim.x)+threadIdx.x;
    if(i < Nparticles) {
        p[i].pos.posX = p[i].pos.posX + p[i].vel.velX;
        p[i].pos.posY = p[i].pos.posY + p[i].vel.velY;
        p[i].pos.posZ = p[i].pos.posZ + p[i].vel.velZ;
    }
}
/* Copies particles to the device, runs all iterations in a single kernel
 * launch, and copies the results back, timing the full round trip.
 * Fix: the launch used the global GRID_SIZE, computed once at static-init
 * time from the DEFAULT particle count and therefore stale after main()
 * parses the real NUM_PARTICLES/BLOCK_SIZE; the grid is now sized locally
 * with ceil-division so every particle is covered. */
void update_velocity_position_in_gpu(Particle *p)
{
    struct timeval start_time;
    struct timeval stop_time;
    Particle *gPar = NULL;
    unsigned int grid = (NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE;  /* cover the tail */
    cudaMalloc(&gPar, NUM_PARTICLES*sizeof(Particle));
    gettimeofday(&start_time, NULL);
    cudaMemcpy(gPar, p, NUM_PARTICLES*sizeof(Particle), cudaMemcpyHostToDevice);
    particle_kernel<<<grid, BLOCK_SIZE>>>(gPar, NUM_PARTICLES, NUM_ITERATIONS);
    cudaDeviceSynchronize();
    cudaMemcpy(p, gPar, NUM_PARTICLES*sizeof(Particle), cudaMemcpyDeviceToHost);
    gettimeofday(&stop_time, NULL);
    printf("Total time of Execution in GPU: %ld usec\n\n",
    (stop_time.tv_sec*1000000 + stop_time.tv_usec)-(start_time.tv_sec*1000000 + start_time.tv_usec));
    cudaFree(gPar);
}
/* "GPU with CPU dependency" variant: each iteration uploads the whole
 * particle array, runs one position-update step on the device, and
 * downloads everything back — the per-iteration round trip is the point
 * of this benchmark.
 * NOTE(review): the host-side velocity randomization touches only p[i]
 * where i is the ITERATION index, and does so AFTER the upload (so that
 * iteration's kernel never sees it). Looks unintended — confirm whether
 * all particles should be re-randomized before each upload.
 * NOTE(review): the launch uses the global GRID_SIZE, which is computed
 * at static-init time from the default particle count — verify it matches
 * the command-line NUM_PARTICLES/BLOCK_SIZE. */
void update_velocity_position_rand_in_gpu(Particle *p)
{
struct timeval start_time;
struct timeval stop_time;
Particle *gPar = NULL;
cudaMalloc(&gPar, NUM_PARTICLES*sizeof(Particle));
gettimeofday(&start_time, NULL);
for(int i=0; i<NUM_ITERATIONS; i++)
{
cudaMemcpy(gPar, p, NUM_PARTICLES*sizeof(Particle), cudaMemcpyHostToDevice);
p[i].vel.velX = 100*((float)rand()/RAND_MAX);
p[i].vel.velY = 100*((float)rand()/RAND_MAX);
p[i].vel.velZ = 100*((float)rand()/RAND_MAX);
particle_kernel_per_iteration<<<GRID_SIZE, BLOCK_SIZE>>>(gPar, NUM_PARTICLES);
cudaDeviceSynchronize();
cudaMemcpy(p, gPar, NUM_PARTICLES*sizeof(Particle), cudaMemcpyDeviceToHost);
}
gettimeofday(&stop_time, NULL);
printf("Total time of Execution in GPU: %ld usec\n\n",
(stop_time.tv_sec*1000000 + stop_time.tv_usec)-(start_time.tv_sec*1000000 + start_time.tv_usec));
cudaFree(gPar);
}
/* Entry point: parses NUM_PARTICLES (argv[1]) and BLOCK_SIZE (argv[2]) and
 * dispatches to one of the CPU/GPU benchmark variants chosen on stdin.
 * Fixes vs. original:
 *  - GRID_SIZE is recomputed (ceil-division) after the command-line values
 *    overwrite the defaults; it previously kept the value derived from
 *    NUM_PARTICLES=1000/BLOCK_SIZE=512 at static-init time, so larger
 *    particle counts were silently left unprocessed by the kernels;
 *  - the host allocation is checked before use. */
int main(int argc, char *argv[])
{
    int input = 0;
    if(argc != 3)
    {
        printf("No. of arguments to be passed should be 2\n");
        exit(1);
    }
    NUM_PARTICLES = atoi(argv[1]);
    BLOCK_SIZE = atoi(argv[2]);
    GRID_SIZE = (NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE;  /* cover the tail */
    Particle *par = (Particle*)malloc(NUM_PARTICLES*sizeof(Particle));
    if(par == NULL)
    {
        printf("Host allocation failed\n");
        exit(1);
    }
    fill_data(par);
    printf("Enter 1 for CPU(with rand), 2 for CPU(with pattern), 3 for GPU(with rand)/CPU dependency, 4 for GPU(with pattern)\n");
    fflush(stdout);
    scanf("%d",&input);
    switch(input)
    {
    case 1: update_velocity_position_rand_in_cpu(par);
        break;
    case 2: update_velocity_position_in_cpu(par);
        break;
    case 3: update_velocity_position_rand_in_gpu(par);
        break;
    case 4: update_velocity_position_in_gpu(par);
        break;
    default: printf("Wrong Input\n");
        break;
    }
    free(par);
    return 0;
}
|
extern "C"
/* Monte Carlo estimate of category probabilities, one thread per row i of
 * n rows. For each of M draws it adds alphas[i,k] to pre-generated noise
 * rands[m,k] (k = 0..K-1), tallies the argmax category, and finally
 * normalizes the tallies into proportions in probs (n x K, row-major).
 * Ties keep the LAST index (loop uses strict > against w[K-1] start).
 * Fix: the per-thread scratch buffer was allocated with new[] but released
 * with free() — mismatched allocator, undefined behavior; it is now
 * released with delete[]. */
__global__ void compute_probs(double* alphas, double* rands, double* probs, int n, int K, int M) {
    // assign overall id/index of the thread = id of row
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n) {
        double maxval;
        int m, k;
        int maxind;
        double M_d = (double) M;
        double* w = new double[K];   // per-thread scratch (device heap)
        for(k = 0; k < K; ++k){ // initialize probs (though already done on CPU)
            probs[i*K + k] = 0.0;
        }
        // core computations
        for(m = 0; m < M; ++m){ // loop over Monte Carlo iterations
            for(k = 0; k < K; ++k){ // generate W ~ N(alpha, 1)
                w[k] = alphas[i*K + k] + rands[m*K + k];
            }
            // determine which category has max W
            maxind = K-1;
            maxval = w[K-1];
            for(k = 0; k < (K-1); ++k){
                if(w[k] > maxval){
                    maxind = k;
                    maxval = w[k];
                }
            }
            probs[i*K + maxind] += 1.0;
        }
        // compute final proportions
        for(k = 0; k < K; ++k) {
            probs[i*K + k] /= M_d;
        }
        delete[] w;   // matches new[] above
    }
}
|
24,082 | #include <stdio.h>
#include <iostream>
#include <iomanip>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cassert>
#include <algorithm>
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
/* Reports the failing expression text, file, and line for any non-success
 * CUDA status, then exits. Use via the checkCudaErrors() macro above. */
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
  if (err != cudaSuccess) {
    std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
    std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
    exit(1);
  }
}
#include <stdio.h>
/* 2D convolution of one 8-bit channel with a filterWidth x filterWidth
 * kernel, using clamp-to-edge addressing at the image borders. One thread
 * per output pixel on a 2D grid; out-of-range threads exit early.
 * NOTE(review): the float accumulator is stored straight into an unsigned
 * char with no rounding or clamping — fine for a normalized blur kernel,
 * but values outside [0,255] would wrap. Confirm the filter sums to 1. */
__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  const int2 p = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
                            blockIdx.y * blockDim.y + threadIdx.y);
  const int m = p.y * numCols + p.x;
  if(p.x >= numCols || p.y >= numRows)
    return;
  float color = 0.0f;
  //filterWidth = 18;
  for(int f_y = 0; f_y < filterWidth; f_y++) {
    for(int f_x = 0; f_x < filterWidth; f_x++) {
      int c_x = p.x + f_x - filterWidth/2;
      int c_y = p.y + f_y - filterWidth/2;
      /* clamp-to-edge: out-of-image taps reuse the border pixel */
      c_x = min(max(c_x, 0), numCols - 1);
      c_y = min(max(c_y, 0), numRows - 1);
      float filter_value = filter[f_y*filterWidth + f_x];
      color += filter_value*static_cast<float>(inputChannel[c_y*numCols + c_x]);
    }
  }
  outputChannel[m] = color;
}
// Splits an interleaved RGBA image into three planar channel buffers
// (alpha is dropped). Launch: 2D grid covering numCols x numRows; threads
// outside the image exit early.
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  const uchar4 pixel = inputImageRGBA[idx];
  redChannel[idx] = pixel.x;
  greenChannel[idx] = pixel.y;
  blueChannel[idx] = pixel.z;
}
// Reassembles three planar channels into an interleaved RGBA image with
// alpha forced to 255 (fully opaque). Launch: 2D grid covering
// numCols x numRows; threads outside the image exit early.
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows)
    return;
  const int idx = row * numCols + col;
  outputImageRGBA[idx] = make_uchar4(redChannel[idx],
                                     greenChannel[idx],
                                     blueChannel[idx],
                                     255);
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
// Allocates the per-channel device buffers and the device-side filter,
// then uploads the host filter coefficients. Pairs with cleanup().
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
                                const float* const h_filter, const size_t filterWidth)
{
  const size_t channelBytes = sizeof(unsigned char) * numRowsImage * numColsImage;
  const size_t filterBytes = sizeof(float) * filterWidth * filterWidth;
  checkCudaErrors(cudaMalloc(&d_red, channelBytes));
  checkCudaErrors(cudaMalloc(&d_green, channelBytes));
  checkCudaErrors(cudaMalloc(&d_blue, channelBytes));
  checkCudaErrors(cudaMalloc(&d_filter, filterBytes));
  checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterBytes, cudaMemcpyHostToDevice));
}
// Orchestrates the blur: split RGBA into per-channel planes, blur each plane
// with the gaussian_blur kernel (presumably the __global__ function whose tail
// appears earlier in this file — TODO confirm), then recombine the blurred
// planes into the output image.
// NOTE(review): h_inputImageRGBA is never referenced in this function.
void gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
// 32x32 blocks; the +1 block covers the ragged edge when the image size is
// not a multiple of 32 (kernels bounds-check internally).
const dim3 blockSize(32, 32);
const dim3 gridSize(numCols/blockSize.x + 1, numRows/blockSize.y + 1);
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Blur each channel plane independently.
gaussian_blur<<<gridSize, blockSize>>>(
d_red,
d_redBlurred,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize>>>(
d_blue,
d_blueBlurred,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize>>>(
d_green,
d_greenBlurred,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Merge the three blurred planes back into interleaved RGBA.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
// Release everything allocateMemoryAndCopyToGPU() acquired.
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
// Bug fix: d_filter is allocated in allocateMemoryAndCopyToGPU but was never
// freed here, leaking the filter buffer on every run.
checkCudaErrors(cudaFree(d_filter));
}
|
24,083 | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "g_updatePrimalVar.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark harness: sweeps matrix sizes and launch configurations for the
// g_updatePrimalVar kernel and prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
cudaSetDevice(0);
// Number of matrix sizes to sweep, from the command line.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// Bug fix: the original allocations omitted sizeof(element), so only
// XSIZE*YSIZE BYTES were allocated while the kernel indexes XSIZE*YSIZE
// ELEMENTS, reading/writing far out of bounds.
float *u = NULL;
cudaMalloc(&u, XSIZE*YSIZE*sizeof(float));
float *u_bar = NULL;
cudaMalloc(&u_bar, XSIZE*YSIZE*sizeof(float));
float *u_diff = NULL;
cudaMalloc(&u_diff, XSIZE*YSIZE*sizeof(float));
const float *p_dev = NULL;
cudaMalloc(&p_dev, XSIZE*YSIZE*sizeof(float));
const float *f = NULL;
cudaMalloc(&f, XSIZE*YSIZE*sizeof(float));
const float *scalar_op = NULL;
cudaMalloc(&scalar_op, XSIZE*YSIZE*sizeof(float));
float tau = 1;
float theta = 1;
size_t width = XSIZE;
size_t height = YSIZE;
size_t channels = 1;
const bool *mask = NULL;
cudaMalloc(&mask, XSIZE*YSIZE*sizeof(bool));
// Round the problem size up to a multiple of the block dimensions.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
// One warm-up launch, then a short warm loop, then the timed loop.
g_updatePrimalVar<<<gridBlock,threadBlock>>>(u,u_bar,u_diff,p_dev,f,scalar_op,tau,theta,width,height,channels,mask);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
g_updatePrimalVar<<<gridBlock,threadBlock>>>(u,u_bar,u_diff,p_dev,f,scalar_op,tau,theta,width,height,channels,mask);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
g_updatePrimalVar<<<gridBlock,threadBlock>>>(u,u_bar,u_diff,p_dev,f,scalar_op,tau,theta,width,height,channels,mask);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
24,084 | #include "includes.h"
// For the first n_elem entries of pos, set arr[pos[i]*step] = val.
__global__ void BaseNeuronSetFloatPtArray(float *arr, int *pos, int n_elem, int step, float val)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n_elem)
return;  // guard the ragged final block
arr[pos[i] * step] = val;
} |
24,085 | #include <cmath>
#include <iostream>
#include <functional>
#include <algorithm>
#include <vector>
#include <random>
#include <chrono>
#include <stdio.h>
#include <fstream>
#define SQRT2PI 2.50662827463100050241
#define BLOCK_SIZE 512
#define SHARED_MEM_SIZE 2048
// Standard normal density evaluated at x: exp(-x^2/2) / sqrt(2*pi).
__device__ float gauss_kernel(float x){
float half_sq = x * x / 2;
return exp(-half_sq) / SQRT2PI;
}
// Gaussian KDE: y[g] = (1/(n*h)) * sum_j K((x[g]-x[j])/h), one thread per
// sample, staging x through shared memory one block-sized tile at a time.
__global__
void kernel(int n, float h, float* x, float* y){
__shared__ float x_buf[SHARED_MEM_SIZE];
int gidx = blockIdx.x * blockDim.x + threadIdx.x;
// All threads stay in the tile loop so __syncthreads() is reached uniformly;
// out-of-range threads compute a dummy sum they never write back.
float xr = (gidx < n) ? x[gidx] : 0.0f;
// Bug fix: the original accumulated into y_buf (shared) without ever
// initializing it, producing garbage-dependent results.
float acc = 0.0f;
for (int i = 0; i < gridDim.x; i++){
int base = blockDim.x * ((blockIdx.x + i) % gridDim.x);
int j = base + threadIdx.x;
// Bug fix: guard the load — the last tile may extend past n.
x_buf[threadIdx.x] = (j < n) ? x[j] : 0.0f;
__syncthreads();
// Only the first `valid` entries of this tile are real samples.
int valid = min((int)blockDim.x, n - base);
for (int k = 0; k < valid; k++){
acc += gauss_kernel((xr - x_buf[k]) / h);
}
// Bug fix: barrier before the next iteration overwrites x_buf while
// other threads may still be reading it.
__syncthreads();
}
if (gidx < n)
y[gidx] = acc / (n*h);
}
// Host wrapper: ships x (and y) to the device, runs the KDE kernel, and
// copies the density estimates back into y.
__host__
void gaussian_kde(int n, float h, const std::vector<float>& x, std::vector<float>& y){
const int bytes = n * sizeof(float);
float* d_x = NULL;
float* d_y = NULL;
cudaMalloc(&d_x, bytes);
cudaMalloc(&d_y, bytes);
cudaMemcpy(d_x, x.data(), bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y.data(), bytes, cudaMemcpyHostToDevice);
const int numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;  // ceil-div launch
std::cout << numBlocks << std::endl ;
kernel<<<numBlocks, BLOCK_SIZE>>>(n, h, d_x, d_y);
cudaDeviceSynchronize();
cudaMemcpy(y.data(), d_y, bytes, cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_y);
}
// Driver: draw n lognormal samples, estimate their density on the GPU,
// report wall-clock time, and dump samples/densities to CSV.
int main(int argc, char const *argv[])
{
int n = 1000000;
// KDE bandwidth.
float h = 0.1;
std::vector<float> x(n);
std::vector<float> y(n, 0.0);
// Seeded from hardware entropy, so runs are not reproducible.
std::random_device rd;
std::mt19937 gen(rd());
std::lognormal_distribution<float> N(0.0, 1.0);
std::generate(std::begin(x), std::end(x), std::bind(N, gen));
// now running your awesome code from a3.hpp
auto t0 = std::chrono::system_clock::now();
gaussian_kde(n, h, x, y);
auto t1 = std::chrono::system_clock::now();
auto elapsed_par = std::chrono::duration<double>(t1 - t0);
std::cout << "Tp: " << elapsed_par.count() << "s" << std::endl;
// Write the samples and their estimated densities as comma-separated rows.
std::ofstream x_file;
std::ofstream y_file;
x_file.open("x.csv");
y_file.open("y.csv");
for (auto i: x) x_file << i << ",";
for (auto i: y) y_file << i << ",";
x_file.close();
y_file.close();
return 0;
}
|
24,086 | #include<bits/stdc++.h>
using namespace std;
#define pi (2.0*acos(0.0))
#define eps 1e-6
#define ll long long
#define inf (1<<29)
#define vi vector<int>
#define vll vector<ll>
#define sc(x) scanf("%d",&x)
#define scl(x) scanf("%lld",&x)
#define all(v) v.begin() , v.end()
#define me(a,val) memset( a , val ,sizeof(a) )
#define pb(x) push_back(x)
#define pii pair<int,int>
#define mp(a,b) make_pair(a,b)
#define Q(x) (x) * (x)
#define L(x) ((x<<1) + 1)
#define R(x) ((x<<1) + 2)
#define M(x,y) ((x+y)>>1)
#define fi first
#define se second
#define MOD 1000000007
#define ios ios::sync_with_stdio(0)
#define N 100000
// Increment the pointed-to value on the device and print the new value.
__global__ void f(int *need){
// Bug fix: the original "*need++" parses as *(need++) — it advanced the
// POINTER, left the value untouched, then printed the element past it.
++(*need);
printf("%d\n",*need);
}
int main(){
int r = 5;
int *need = NULL;
// Bug fix: the original passed the address of a HOST stack variable to the
// kernel; dereferencing a host pointer on the device is invalid. Use a
// device allocation and copy the value across instead.
cudaMalloc(&need, sizeof(int));
cudaMemcpy(need, &r, sizeof(int), cudaMemcpyHostToDevice);
f<<< 1 , 1 >>>( need );
cudaDeviceSynchronize();  // finish the increment and flush device printf
cudaMemcpy(&r, need, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d\n", r);
cudaFree(need);
return 0;
}
|
24,087 |
#include <iostream>
using namespace std;
// Record each block's index in b and the threads-per-block count in *t.
__global__ void kernel( int* b, int* t)
{
b[blockIdx.x] = blockIdx.x;  // one slot per block in the grid
*t = blockDim.x;             // every block writes the same value, so the race is benign
}
// Launch a 4-block, 1-thread grid, read back the per-block indices and the
// thread count, and print them.
int main()
{
int* b;
int* d_b;
int t;
int* d_t;
int numblocks = 4;
b = new int[numblocks];
// store in d_b the address of a memory
// location on the device
cudaMalloc( (void**)&d_b, numblocks*sizeof(int));
cudaMalloc( (void**)&d_t, sizeof(int));
kernel<<<numblocks,1>>>(d_b,d_t);
// Blocking copies double as synchronization with the kernel.
cudaMemcpy( b, d_b, numblocks*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy( &t, d_t, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_b);
cudaFree(d_t);
int block;
for( block=0; block<numblocks; block++)
{
cout << "blockIdx " << b[block]
<< ": " << t
<< " threads per block"
<< endl;
}
delete [] b;
return 0;
}
|
24,088 | #include "includes.h"
// Out-of-place transpose b = a^T for an n x n row-major matrix.
__global__ void transpose_v1(float* a,float* b, int n){
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
if (col < n && row < n) {
b[n*row + col] = a[n*col + row];
}
} |
24,089 | #include "includes.h"
// Gram-Schmidt: orthogonalize columns 4..5 of each block's Qi_gdof against
// columns 3..j-1, then normalize them. One thread per block (index i).
__global__ void orthogonalize23( float *Qi_gdof, int *blocksizes, int numblocks, int largestblock ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Bug fix: the grid may be larger than numblocks; without this guard the
// extra threads read blocksizes[i] and Qi_gdof out of bounds (numblocks
// was previously an unused parameter).
if( i >= numblocks ) return;
for( int j = 4; j < 6; j++ ) {
for( int k = 3; k < j; k++ ) { // <-- vectors we're orthognalizing against
float dot_prod = 0.0f;
for( int l = 0; l < blocksizes[i]; l++ ) {
dot_prod += Qi_gdof[i * 6 * largestblock + l * 6 + k] * Qi_gdof[i * 6 * largestblock + l * 6 + j];
}
// Subtract the projection onto column k.
for( int l = 0; l < blocksizes[i]; l++ ) {
Qi_gdof[i * 6 * largestblock + l * 6 + j] -= Qi_gdof[i * 6 * largestblock + l * 6 + k] * dot_prod;
}
}
// Normalize column j.
float rotnorm = 0.0f;
for( int l = 0; l < blocksizes[i]; l++ ) {
rotnorm += Qi_gdof[i * 6 * largestblock + l * 6 + j] * Qi_gdof[i * 6 * largestblock + l * 6 + j];
}
// Float math (sqrtf, float literals) to avoid double promotion on device.
rotnorm = 1.0f / sqrtf( rotnorm );
for( int l = 0; l < blocksizes[i]; l++ ) {
Qi_gdof[i * 6 * largestblock + l * 6 + j] *= rotnorm;
}
}
} |
24,090 | #include<stdio.h>
// Each thread binary-searches its own `size`-element chunk of `array` for
// `goal` and sets *flag when found. Assumes each chunk is sorted ascending.
__global__ void kernel(int *array,int goal,bool *flag,int size)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int first = index * size ;
// Bug fix: the original used last = first + size, one past the chunk, so
// the search inspected a neighbor's element (and read out of bounds in the
// final chunk).
int last = first + size - 1;
while (first <= last) {
int middle = first + (last - first) / 2;
if (array[middle] < goal)
first = middle + 1;
else if (array[middle] == goal) {
*flag = true;
return;
}
else
last = middle - 1;
}
// (The original also tested array[threadIdx.x] == goal here, which compared
// an unrelated element; that spurious check has been removed.)
}
// Interactive driver: build array[i] = i in managed memory and run a
// parallel chunked binary search for the requested value.
int main()
{
int BlockNumber;
int ThreadNumber;
int Goal;
int N ;
int *array;
bool *flag ;
printf("Enter The array size: ");
scanf("%d", &N);
printf("Enter Block number: ");
scanf("%d", &BlockNumber);
printf("Enter Thread number: ");
scanf("%d", &ThreadNumber);
printf("Enter the number to find: ");
scanf("%d", &Goal);
cudaMallocManaged(&array, N*sizeof(int));
cudaMallocManaged(&flag, sizeof(bool));
for(int i = 0; i < N; i++){
array[i] = i ;
}
// Bug fix: the result flag was never initialized; managed memory contents
// are not guaranteed, so "found" could be reported spuriously.
*flag = false;
// NOTE: assumes N is divisible by BlockNumber*ThreadNumber.
kernel<<<BlockNumber, ThreadNumber>>>(array, Goal, flag, N/(BlockNumber*ThreadNumber));
cudaDeviceSynchronize();
if(*flag == true){
printf("goal is found \n");
}else printf("goal not found\n");
// Bug fix: release the managed allocations.
cudaFree(array);
cudaFree(flag);
}
|
24,091 | #include "includes.h"
// Element-wise vector addition: out[i] = in1[i] + in2[i] for i in [0, len).
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
// Fix: the body was an empty TODO — implement the addition, one element
// per thread, with a bounds guard for the ragged final block.
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
out[i] = in1[i] + in2[i];
}
} |
24,092 | // This example demonstrates parallel floating point vector
// addition with a simple __global__ function.
#include <stdlib.h>
#include <stdio.h>
// this kernel computes the vector sum c = a + b
// each thread performs one pair-wise addition
// c = a + b, one pair-wise addition per thread; threads past n do nothing.
__global__ void vector_add(const float *a,
const float *b,
float *c,
const size_t n)
{
const unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= n)
return;  // guard the ragged final block
c[idx] = a[idx] + b[idx];
}
// Demo driver: allocate 1M-element host/device arrays, add them on the GPU,
// and print the first ten results.
int main(void)
{
// create arrays of 1M elements
const int num_elements = 1<<20;
// compute the size of the arrays in bytes
const int num_bytes = num_elements * sizeof(float);
// points to host & device arrays
float *device_array_a = 0;
float *device_array_b = 0;
float *device_array_c = 0;
float *host_array_a = 0;
float *host_array_b = 0;
float *host_array_c = 0;
// malloc the host arrays
host_array_a = (float*)malloc(num_bytes);
host_array_b = (float*)malloc(num_bytes);
host_array_c = (float*)malloc(num_bytes);
// cudaMalloc the device arrays
cudaMalloc((void**)&device_array_a, num_bytes);
cudaMalloc((void**)&device_array_b, num_bytes);
cudaMalloc((void**)&device_array_c, num_bytes);
// if any memory allocation failed, report an error message
if(host_array_a == 0 || host_array_b == 0 || host_array_c == 0 ||
device_array_a == 0 || device_array_b == 0 || device_array_c == 0)
{
printf("couldn't allocate memory\n");
return 1;
}
// initialize host_array_a & host_array_b
for(int i = 0; i < num_elements; ++i)
{
// make array a a linear ramp
host_array_a[i] = (float)i;
// make array b random
host_array_b[i] = (float)rand() / RAND_MAX;
}
// copy arrays a & b to the device memory space
cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(device_array_b, host_array_b, num_bytes, cudaMemcpyHostToDevice);
// compute c = a + b on the device
const size_t block_size = 256;
size_t grid_size = num_elements / block_size;
// deal with a possible partial final block
if(num_elements % block_size) ++grid_size;
// launch the kernel
// NOTE(review): the launch result is never checked (cudaGetLastError).
vector_add<<<grid_size, block_size>>>(device_array_a, device_array_b, device_array_c, num_elements);
// copy the result back to the host memory space
// (this blocking copy also synchronizes with the kernel)
cudaMemcpy(host_array_c, device_array_c, num_bytes, cudaMemcpyDeviceToHost);
// print out the first 10 results
for(int i = 0; i < 10; ++i)
{
printf("result %d: %1.1f + %7.1f = %7.1f\n", i, host_array_a[i], host_array_b[i], host_array_c[i]);
}
// deallocate memory
free(host_array_a);
free(host_array_b);
free(host_array_c);
cudaFree(device_array_a);
cudaFree(device_array_b);
cudaFree(device_array_c);
}
|
extern "C" {
// Adds two 100-element int vectors: c = a + b. The element index comes from
// blockIdx.x (one element per BLOCK), so launch with >= 100 blocks.
__global__ void Vector_Addition(int *a, int *b, int *c)
{
const int idx = blockIdx.x;
if (idx < 100)
{
c[idx] = a[idx] + b[idx];
}
}
}
|
24,094 | #include <math.h>
#include <iostream>
#include "device_launch_parameters.h"
#include "cuda_runtime.h"
#include "chebyshev.cuh"
#include "kernel.cu"
#include <string>
#include <stdlib.h>
#include <math.h>
#define Im1 0 // i - 1
#define I 1 // i
#define Ip1 2 // i + 1
using namespace std;
// Chebyshev semi-iterative solver for A x = b, where Ab is the N x (N+1)
// augmented matrix [A | b]. s = restart length, max_iter = iteration cap.
// Returns a malloc'd solution vector of length N; the caller must free it.
double* chebyshev(int matrix_size, double** Ab, int s, int max_iter) {
int N = matrix_size;
// One thread per vector element, capped at 512 threads per block.
dim3 threadsPerBlock(N);
dim3 blocksPerGrid(1);
if (N > 512) {
threadsPerBlock.x = 512;
blocksPerGrid.x = ceil(double(N) / double(threadsPerBlock.x));
}
double delta, x_2_norm, a, w_0, c, L, B, scalar_1, scalar_2;
double *x_start, *A_vector, *b_vector, *w, *norm_vector; //HOST
double *d_x, *d_scalar_1, *d_scalar_2, *d_temp_vector, *d_x_start, *d_A_vector, *d_b_vector; //DEVICE
int iteration, k;
delta = 0.000001; // accuracy
x_2_norm = 0; // second norm for stop criteria
a = 100; // alpha
bool stop = false; // stop criteria bool
//ALLOCATE MEMORY ON HOST
// (Fix: the original also malloc'd an unused 3*N "rows" buffer that was
// never read, written, or freed — a leak on every call; it is removed.)
x_start = (double *)malloc(N * sizeof(double));
A_vector = (double *)malloc(N * N * sizeof(double));
b_vector = (double *)malloc(N * sizeof(double));
w = (double *)malloc(N * 2 * sizeof(double));
norm_vector = (double *)malloc(N * sizeof(double));
//ALLOCATE MEMORY ON DEVICE
cudaMalloc((void **)&d_x, N * 3 * sizeof(double)); // rows: x(i-1), x(i), x(i+1)
cudaMalloc((void **)&d_scalar_1, sizeof(double));
cudaMalloc((void **)&d_scalar_2, sizeof(double));
cudaMalloc((void **)&d_temp_vector, N * sizeof(double));
cudaMalloc((void **)&d_x_start, N * sizeof(double));
cudaMalloc((void **)&d_A_vector, N * N * sizeof(double));
cudaMalloc((void **)&d_b_vector, N * sizeof(double));
B = Ab[0][0];
//init x_start, find Beta (largest diagonal entry), flatten Ab into vectors
for (int i = 0; i < N; i++) {
x_start[i] = 0;
b_vector[i] = Ab[i][N];
if (Ab[i][i] > B) {
B = Ab[i][i];
}
for (int j = 0; j < N; j++) {
A_vector[i * N + j] = Ab[i][j];
}
}
B = 2 * B;
//COPY FROM HOST TO DEVICE
cudaMemcpy(d_x_start, x_start, N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_A_vector, A_vector, N * N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_b_vector, b_vector, N * sizeof(double), cudaMemcpyHostToDevice);
//Step 0: Chebyshev parameters from the eigenvalue bounds [a, B].
iteration = 0;
w_0 = (B - a) / (B + a);
c = 2 / (B + a);
L = 2 * (B + a) / (B - a);
copyVectorToMatRowKernel<<<blocksPerGrid, threadsPerBlock>>>(d_x_start, d_x, Im1, N);
while (iteration < max_iter && stop == false) {
//Step 1: restart the three-term recurrence from the current x_start.
k = 0;
copyVectorToMatRowKernel<<<blocksPerGrid, threadsPerBlock>>>(d_x_start, d_x, I, N);
w[Im1] = 0;
w[I] = w_0;
while (iteration < max_iter) {
//Step 2: recurrence scalars for this iteration.
scalar_1 = w[I] * w[Im1];
scalar_2 = c * (1 + w[I] * w[Im1]);
cudaMemcpy(d_scalar_1, &scalar_1, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_scalar_2, &scalar_2, sizeof(double), cudaMemcpyHostToDevice);
//copies x(i) to temp_vector
copyMatRowToVectorKernel<<<blocksPerGrid, threadsPerBlock>>>(d_x, d_temp_vector, I, N);
//multiples A matrix in vector form with x(i) stored in temp_vector
multMatrixVectorKernel<<<blocksPerGrid, threadsPerBlock>>>(d_A_vector, d_temp_vector, d_temp_vector, N);
//calculates x(i+1) and sets x(i-1) and x(i)
calculateXplus1Kernel<<<blocksPerGrid, threadsPerBlock>>>(d_x, d_temp_vector, d_b_vector, d_scalar_1, d_scalar_2, N);
w[Im1] = w[I];
w[I] = 1 / (L - w[Im1]);
//Convergence check on the 2-norm produced by normKernel.
x_2_norm = 0;
normKernel<<<blocksPerGrid, threadsPerBlock>>>(d_x, d_temp_vector, N);
cudaMemcpy(norm_vector, d_temp_vector, N * sizeof(double), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
x_2_norm += norm_vector[i];
}
x_2_norm = sqrt(x_2_norm);
if (x_2_norm < delta) {
stop = true;
break;
}
// Step 3: count the iteration; restart after s inner steps.
iteration++;
k++;
if (k >= s) {
copyMatRowToVectorKernel<<<blocksPerGrid, threadsPerBlock>>>(d_x, d_x_start, I, N);
break;
}
}
}
// Copy the final iterate back into the (reused) x_start host buffer.
copyMatRowToVectorKernel<<<blocksPerGrid, threadsPerBlock>>>(d_x, d_temp_vector, I, N);
cudaMemcpy(x_start, d_temp_vector, N * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_scalar_1);
cudaFree(d_scalar_2);
cudaFree(d_temp_vector);
cudaFree(d_x_start);
cudaFree(d_A_vector);
cudaFree(d_b_vector);
free(A_vector);
free(b_vector);
free(w);
free(norm_vector);
cout << "iters: " << iteration << endl;
return x_start;
}
|
24,095 | #include "includes.h"
// Divide each pixel of every component plane by the per-pixel norm:
// N[c*npix_per_component + i] /= norm[i].
__global__ void normalize_N(float* N, float* norm, int npix_per_component) {
const int i = blockIdx.x*blockDim.x + threadIdx.x;  // pixel within a component
const int c = blockIdx.y*blockDim.y + threadIdx.y;  // component index (assumed in range by launch config — TODO confirm)
if (i >= npix_per_component)
return;
N[c*npix_per_component + i] /= norm[i];
} |
24,096 | #include "vector.cuh"
// Append `val` to this PMT's slice of the shared result array.
// (Original comment: "append — may succeed or fail". NOTE(review): there is
// no capacity check here, so overflowing the per-PMT region is not detected
// — TODO confirm intended behavior.)
__device__ void
append_res_arr(Res_Arr *p, double val)
{
p->arr[p->index+p->pmt_list[p->id]] = val;  // write at base offset + current count
p->pmt_list[p->id] += 1;                    // bump this PMT's element count
return;
}
// Initialize a Res_Arr view for PMT `pmtid` over the shared result buffer.
// NOTE(review): the `size` parameter is unused here — TODO confirm intent.
__device__ void
init_res_arr(Res_Arr *p,double *result,int *pmt_res_list,int pmtid,int size){
p->arr = result;// backing storage shared by all PMTs
p->pmt_list = pmt_res_list;// per-PMT counts of slots already used
p->index = pmtid*pmt_mem;// this PMT's starting offset (pmt_mem slots per PMT)
p->id = pmtid;
// p->begin = begin;
// p->len = len;
return;
} |
24,097 | #include "stdio.h"
#include "cuda_runtime.h"
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#define THREADS 1024
#define gpu_error_check(ans) { gpu_assert((ans), __FILE__, __LINE__); }
// Print a readable diagnostic when a CUDA call fails, optionally aborting.
inline void gpu_assert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code == cudaSuccess)
return;
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
// Exchange array[first] and array[second].
void swap(int* array, int first, int second)
{
int held = array[second];
array[second] = array[first];
array[first] = held;
}
// Device-side exchange of array[first] and array[second].
__device__
void swap_gpu(int* array, int first, int second)
{
int held = array[second];
array[second] = array[first];
array[first] = held;
}
// One butterfly stage of bitonic sort: thread i compare-and-swaps with its
// partner i+depth, sorting ascending or descending depending on which half
// of the `step`-wide bitonic subsequence the pair lies in.
__global__
void bitonic_exchange_gpu(int* dev_values, int depth, unsigned long step)
{
/* Sorting partners: i and pair_for_i */
unsigned int i, pair_for_i;
/* Orient tells in which part of bitonic (sub-)sequence elements are (descending or ascending) */
unsigned int orient_i, orient_pair_for_i;
i = threadIdx.x + blockDim.x * blockIdx.x;
pair_for_i = i + depth;
orient_pair_for_i = pair_for_i & step;
orient_i = i & step;
/*
If current array[i] is the second for other array[j] (i<j) so we just do nothing
It can be seen if for some a[i], a[pair_for_i] located in other bitonic (sub-)sequence
For example, a[i] in ascending part, a[pair_for_i] in descending
*/
if (orient_i != 0 && orient_pair_for_i == 0
|| orient_i == 0 && orient_pair_for_i != 0)
{
return;
}
if (orient_i == 0)
{
/* Sort ascending */
if (dev_values[i]>dev_values[pair_for_i])
{
swap_gpu(dev_values, i, pair_for_i);
}
}
else
{
/* Sort descending */
if (dev_values[i]<dev_values[pair_for_i])
{
swap_gpu(dev_values, i, pair_for_i);
}
}
}
// Sort a host array of (power-of-two) length `size` ascending on the GPU
// using bitonic sort; one thread per element.
void bitonic_sort_gpu(int* array, unsigned long size)
{
size_t size_mem_array = size * sizeof(int);
int* array_gpu;
gpu_error_check(cudaMalloc(&array_gpu, size_mem_array));
gpu_error_check(cudaMemcpy(array_gpu, array, size_mem_array, cudaMemcpyHostToDevice));
// Small inputs fall back to one thread per block.
dim3 blocks = (size < THREADS) ? size : size / THREADS;
dim3 threadsPerBlock = (size < THREADS) ? 1 : THREADS;
for (int step = 2; step <= size; step <<= 1)
{
for (int depth = step >> 1; depth >= 1; depth >>= 1)
{
bitonic_exchange_gpu<<<blocks, threadsPerBlock>>>(array_gpu, depth , step);
}
}
gpu_error_check(cudaMemcpy(array, array_gpu, size_mem_array, cudaMemcpyDeviceToHost));
// Bug fix: the original called cudaFree(&array_gpu) — the address of the
// local pointer variable, not the device pointer — which failed and leaked
// the device buffer.
gpu_error_check(cudaFree(array_gpu));
}
// CPU reference for one butterfly stage of bitonic sort: every index i whose
// partner i+depth lies in the same ascending/descending half of a step-wide
// bitonic subsequence is compare-and-swapped into the required order.
void bitonic_exchange(int* array, int depth, int step, unsigned long size)
{
for (unsigned long i = 0; i < size; i++)
{
unsigned int partner = i + depth;
unsigned int dir_i = i & step;            // 0 => ascending half
unsigned int dir_partner = partner & step;
// Skip when the pair straddles two subsequences; the partner's own
// iteration (or no one) handles that element.
if (dir_i != 0 && dir_partner == 0
|| dir_i == 0 && dir_partner != 0)
{
continue;
}
if (dir_i == 0)
{
// Ascending half: larger value moves to the partner slot.
if (array[i] > array[partner])
{
int held = array[i];
array[i] = array[partner];
array[partner] = held;
}
}
else
{
// Descending half: smaller value moves to the partner slot.
if (array[i] < array[partner])
{
int held = array[i];
array[i] = array[partner];
array[partner] = held;
}
}
}
}
// Full CPU bitonic sort (ascending); size must be a power of two.
void bitonic_sort(int* array, unsigned long size)
{
// Grow bitonic subsequences of width `step`, then run butterfly
// exchanges within each with a shrinking stride.
for (int step = 2; step <= size; step <<= 1)
for (int stride = step >> 1; stride >= 1; stride >>= 1)
bitonic_exchange(array, stride, step, size);
} |
24,098 | /******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define TILE_SIZE 30
// 7-point stencil over an nx x ny x nz volume: Anext = sum of the six face
// neighbors of A0 minus 6*center, marching along z with a register pipeline
// (bottom/center/top) and an x-y shared-memory tile for in-plane neighbors.
__global__ void kernel(int *A0, int *Anext, int nx, int ny, int nz) {
#define A0(i, j, k) A0[((k)*ny + (j))*nx + (i)]
#define Anext(i, j, k) Anext[((k)*ny + (j))*nx + (i)]
int tx = threadIdx.x, ty = threadIdx.y;
int dx = blockDim.x, dy = blockDim.y;
int i = tx + dx * blockIdx.x;
int j = ty + dy * blockIdx.y;
// NOTE(review): threads outside the bounds skip the __syncthreads() calls
// below; in a partially out-of-range block the barrier is reached by only
// some threads — TODO confirm this is safe for the launch configs used.
if ((i < nx) && (j < ny)){//check we are within bounds
//Load this thread's top, bottom, and center z values
int bottom = A0(i,j,0);
int center = A0(i,j,1);
int top = A0(i,j,2);
//create shared memory tile
__shared__ int ds_A[TILE_SIZE][TILE_SIZE];
//loop through all z slices
for (int k=1; k<nz-1; ++k){
//load up current z-axis slice
ds_A[ty][tx] = center;
__syncthreads(); //wait for all other threads to do their thing
// In-plane neighbors come from the tile when available, from global
// memory at tile borders, and are zero at the volume boundary.
Anext(i,j,k) = bottom + top +
((tx > 0)? ds_A[ty][tx-1]: (i==0)? 0: A0(i-1,j,k)) +
((tx < dx-1)? ds_A[ty][tx+1]: (i==nx-1)? 0: A0(i+1,j,k)) +
((ty > 0)? ds_A[ty-1][tx]: (j==0)? 0: A0(i,j-1,k)) +
((ty < dy-1)? ds_A[ty+1][tx]: (j==ny-1)? 0: A0(i,j+1,k)) -
6 * center;
//shift z-values (register pipeline: reuse loads across k)
bottom = center; center = top;
__syncthreads(); // don't overwrite ds_A while neighbors still read it
//load new top value
top = A0(i,j,k+2);
}
}
#undef A0
#undef Anext
}
// Launch the 7-point stencil kernel over an nx x ny x nz volume with
// TILE_SIZE x TILE_SIZE thread blocks tiling the x-y plane.
void launchStencil(int* A0, int* Anext, int nx, int ny, int nz) {
const dim3 block(TILE_SIZE, TILE_SIZE, 1);
const dim3 grid(ceil(nx / double(TILE_SIZE)), ceil(ny / double(TILE_SIZE)), 1);
kernel<<<grid, block>>>(A0, Anext, nx, ny, nz);
}
|
24,099 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
// Query device 0 and print its key hardware properties to stdout.
void query_device()
{
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
printf("No CUDA Support device found!");
}
// Only device 0 is reported, even if several are present.
int devNo = 0;
cudaDeviceProp iProp;
cudaGetDeviceProperties(&iProp, devNo);
printf("Device %d : %s\n", devNo, iProp.name);
printf("Number of multiprocessors: %d\n", iProp.multiProcessorCount);
printf("clock rate : %d\n", iProp.clockRate);
printf("Compute capability : %d.%d\n", iProp.major, iProp.minor);
// Memory sizes are reported in KB.
printf("Total amount of global memory : %4.2f KB\n", iProp.totalGlobalMem /
1024.0);
printf("Total amount of constant memory : %4.2f KB\n", iProp.totalConstMem
/1024.0);
printf("Total amount of shared memory per block : %4.2f KB\n",
iProp.sharedMemPerBlock / 1024.0);
printf("Total amount of shared memory per MP : %4.2f KB\n",
iProp.sharedMemPerMultiprocessor / 1024.0);
printf("Warp size : %d\n", iProp.warpSize);
printf("Maximum number of threads per block: %d\n",
iProp.maxThreadsPerBlock);
printf("Maximum number of threads per multiprocessor: %d\n",
iProp.maxThreadsPerMultiProcessor);
printf("Maximum number of warps per multiprocessor: %d\n",
iProp.maxThreadsPerMultiProcessor / 32);
printf("Maximum Grid size : (%d, %d, %d)\n", iProp.maxGridSize[0],
iProp.maxGridSize[1], iProp.maxGridSize[2]);
printf("Maximum block dimension : (%d, %d, %d)\n", iProp.maxThreadsDim[0],
iProp.maxThreadsDim[1], iProp.maxThreadsDim[2]);
}
// Entry point: print the properties of CUDA device 0.
int main(void)
{
query_device();
return 0;
}
|
24,100 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstring>
#include <stdio.h>
#include <string>
__global__ void checkPointer(const int* c, const size_t pitch, const size_t num, const size_t nrows, const size_t ncols);
// Touch-test kernel: each in-bounds thread reads one element of a pitched 2D
// int array. `idx` and `local` are written but never read — the kernel
// produces no output and exists only to exercise the memory accesses.
// NOTE(review): the `num` parameter is unused.
__global__ void checkPointer(const int *c, const size_t pitch, const size_t num, const size_t nrows, const size_t ncols)
{
const int icol = blockIdx.x * blockDim.x + threadIdx.x;
const int irow = blockIdx.y * blockDim.y + threadIdx.y;
int idx = 0;
int local = 0;
if (icol < ncols && irow < nrows)
{
// pitch is in bytes; divide by sizeof(int) to get the row stride in elements
idx = irow * (pitch / sizeof(int)) + icol;
local = c[idx];
}
}
// Demo: build a 5x15 int matrix, stage it through pinned host memory into a
// pitched device allocation, and run a touch-test kernel over it.
int main()
{
const size_t nrows = 5;
const size_t ncols = 15;
cudaError_t allocError;
cudaError_t cpyError;
cudaError_t status;
int BLOCKX = 16;
int BLOCKY = 16;
int* a = new int[nrows * ncols];
// NOTE(review): b is written but never read afterwards.
int b[5 * 15];
// Fill both buffers with their linear index.
for (auto irow = 0; irow < nrows; irow++)
{
for (auto icol = 0; icol < ncols; icol++)
{
int idx = irow * ncols + icol;
a[idx] = idx;
b[idx] = idx;
}
}
status = cudaSetDevice(0);
// Pinned host staging buffer for the 2D copy.
int* a_h;
allocError = cudaMallocHost((void**)&a_h, nrows * ncols * sizeof(int));
memcpy(a_h, a, nrows * ncols * sizeof(int));
for (auto i = 0; i < nrows * ncols; i++)
{
printf("%d\n", a_h[i]);
}
// Pitched device allocation; pitch (bytes) is the aligned row stride.
int* a_d;
size_t pitch = 0;
allocError = cudaMallocPitch((void**)&a_d, &pitch, ncols * sizeof(int), nrows);
cpyError = cudaMemcpy2D((void*)a_d, pitch, (void*)a_h, ncols * sizeof(int), ncols * sizeof(int), nrows, cudaMemcpyHostToDevice);
dim3 block(BLOCKX, BLOCKY, 1);
dim3 grid((ncols + block.x - 1) / block.x, (nrows + block.y - 1) / block.y, 1);
// Bug fix: the launch arguments were reversed (<<<block, grid>>>), which
// made the 16x16 dim3 the GRID and the computed grid the BLOCK.
checkPointer<<<grid, block>>>(a_d, pitch, nrows * ncols, nrows, ncols);
status = cudaGetLastError();
status = cudaDeviceSynchronize();
// Bug fix: free before resetting — the original called cudaDeviceReset()
// first, so the subsequent frees targeted a destroyed context.
cudaFreeHost(a_h);
cudaFree(a_d);
status = cudaDeviceReset();
delete[] a;
return 0;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.