serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
2,201 | #include<stdlib.h>
#include<stdio.h>
#include<time.h>
#include<unistd.h>
/*
 * Element-wise vector add: C[idx] = A[idx] + B[idx] for idx < N.
 * One thread per element; threads past N do nothing.
 * Fix: the debug printf was outside the bounds guard, so tail threads
 * (idx >= N) read A/B/C out of bounds. It is now guarded too.
 */
__global__ void sumArraysOnGPUN(float *A,float *B,float *C,const int N){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx<N){
C[idx] = A[idx] + B[idx];
/* Debug trace only -- device printf serializes and is slow. */
printf(" %f + %f = %f On GPU:block %d thread %d\n",A[idx],B[idx],C[idx],blockIdx.x,threadIdx.x);
}
}
/*
 * Fill ip[0..size-1] with pseudo-random floats in [0, 100).
 * Fix: the PRNG is now seeded only once per process. The original called
 * srand(time(...)) on every invocation; time() has 1-second resolution,
 * so back-to-back calls produced identical data (forcing callers to
 * sleep() between calls).
 */
void initialData(float *ip,const int size){
static int seeded = 0;
if(!seeded){
srand((unsigned int)time(NULL));
seeded = 1;
}
for(int i=0;i<size;i++){
ip[i] = (float)(rand()%100);  /* dropped the no-op "/1.0f" */
}
}
/* Dump `size` floats on one line, each preceded by a space, then newline. */
void print(float *array,const int size){
int i = 0;
while(i < size){
printf(" %f",array[i]);
++i;
}
printf("\n");
}
int main(){
// Read the vector length n from stdin, generate two random vectors,
// add them on the GPU, and print inputs and result.
int n;
scanf("%d",&n);
int nBytes = n*sizeof(float);
float *h_A,*h_B,*h_C;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
h_C = (float *)malloc(nBytes);
// sleep(1) ensures the two fills do not share a time()-based seed
// (time() only advances once per second).
initialData(h_A,n);
sleep(1);
initialData(h_B,n);
print(h_A,n);
print(h_B,n);
float *d_A,*d_B,*d_C;
cudaMalloc((float **)&d_A,nBytes);
cudaMalloc((float **)&d_B,nBytes);
cudaMalloc((float **)&d_C,nBytes);
cudaMemcpy(d_A,h_A,nBytes,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,h_B,nBytes,cudaMemcpyHostToDevice);
// Single block of n threads: NOTE(review): n is not validated against the
// device limit (1024 threads/block on current GPUs) -- larger n silently
// fails to launch since no error checking is done.
dim3 block(1);
dim3 thread(n);
sumArraysOnGPUN<<<block,thread>>>(d_A,d_B,d_C,n);
// The blocking device-to-host copy also waits for the kernel to finish.
cudaMemcpy(h_C,d_C,nBytes,cudaMemcpyDeviceToHost);
print(h_C,n);
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
|
2,202 | #include "includes.h"
// Each thread bumps exactly one element of g_data by inc_value. The caller
// must launch no more threads than there are elements (no bound parameter
// is available here to guard with).
__global__ void increment_kernel(int *g_data, int inc_value) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    g_data[idx] += inc_value;
}
2,203 | /**
* Multiply 2 matrices using CUDA.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <string.h>
typedef struct {
int width;
int height;
float* elements;
} Matrix;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN( value ) { \
cudaError_t err = value; \
if( err != cudaSuccess ) { \
fprintf( stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(err), __LINE__, __FILE__ ); \
exit( 1 ); \
} }
#define BLOCK_SIZE (2u)
#define MATRIX_1_WIDTH (6u)
#define MATRIX_1_HEIGHT (4u)
#define MATRIX_2_WIDTH (4u)
#define MATRIX_2_HEIGHT (6u)
/* Fill the matrix row-major with the values 1..width*height, one element
 * per thread; excess threads fall through the guard and do nothing. */
__global__ void matrix_fill( Matrix * data )
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const int total = data->width * data->height;
    if( idx < total )
        data->elements[idx] = idx + 1;
}
/*
 * Naive matrix multiply: each (row, col) thread computes one element of
 * matrix_result = matrix_1 * matrix_2.
 * Fix: added a bounds guard. The launch rounds the grid to whole blocks,
 * so if the result dimensions are not multiples of the block size, edge
 * blocks overscan and the original unguarded version read and wrote out
 * of bounds.
 */
__global__ void matrix_mul( Matrix * matrix_1, Matrix * matrix_2, Matrix * matrix_result )
{
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= matrix_result->height || col >= matrix_result->width)
        return;
    float value = 0;
    for (int i = 0; i < matrix_1->width; ++i) {
        value += matrix_1->elements[row * matrix_1->width + i] * matrix_2->elements[i * matrix_2->width + col];
    }
    matrix_result->elements[row * matrix_result->width + col] = value;
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(int argc, char **argv) {
/* Builds two matrices on the device (filled 1..N), multiplies them, and
 * prints the (MATRIX_1_HEIGHT x MATRIX_2_WIDTH) result. Because Matrix
 * holds a pointer, the device structs are assembled field-by-field with
 * cudaMemcpy into struct members. */
/* Allocate data buffer in host memory for result matrix */
Matrix *h_matrix_result = (Matrix*) malloc( sizeof(Matrix) );
h_matrix_result->width = MATRIX_2_WIDTH;
h_matrix_result->height = MATRIX_1_HEIGHT;
h_matrix_result->elements = (float*) malloc( MATRIX_2_WIDTH * MATRIX_1_HEIGHT * sizeof(float) );
memset( h_matrix_result->elements, 0, MATRIX_2_WIDTH * MATRIX_1_HEIGHT * sizeof(float) );
/* Allocate data buffer in device memory for 3 matrices */
Matrix *d_matrix_1, *d_matrix_2, *d_matrix_result = NULL;
float *d_matrix_1_elements, *d_matrix_2_elements, *d_matrix_result_elements;
int w, h;
CUDA_CHECK_RETURN( cudaMalloc( &d_matrix_1, sizeof(Matrix) ) );
// Cannot allocate in struct in device directly, allocate separately and than link
CUDA_CHECK_RETURN( cudaMalloc( &d_matrix_1_elements, MATRIX_1_WIDTH * MATRIX_1_HEIGHT * sizeof(float) ) );
/* Patch the device-side struct's `elements` field to point at the buffer. */
CUDA_CHECK_RETURN( cudaMemcpy( &(d_matrix_1->elements), &d_matrix_1_elements, sizeof(float *), cudaMemcpyHostToDevice) );
w = MATRIX_1_WIDTH;
h = MATRIX_1_HEIGHT;
CUDA_CHECK_RETURN( cudaMemcpy( &(d_matrix_1->width), &w, sizeof(int), cudaMemcpyHostToDevice ) );
CUDA_CHECK_RETURN( cudaMemcpy( &(d_matrix_1->height), &h, sizeof(int), cudaMemcpyHostToDevice ) );
CUDA_CHECK_RETURN( cudaMalloc( &d_matrix_2, sizeof(Matrix) ) );
// Cannot allocate in struct in device directly, allocate separately and than link
CUDA_CHECK_RETURN( cudaMalloc( &d_matrix_2_elements, MATRIX_2_WIDTH * MATRIX_2_HEIGHT * sizeof(float) ) );
CUDA_CHECK_RETURN( cudaMemcpy( &(d_matrix_2->elements), &d_matrix_2_elements, sizeof(float *), cudaMemcpyHostToDevice) );
w = MATRIX_2_WIDTH;
h = MATRIX_2_HEIGHT;
CUDA_CHECK_RETURN( cudaMemcpy( &(d_matrix_2->width), &w, sizeof(int), cudaMemcpyHostToDevice ) );
CUDA_CHECK_RETURN( cudaMemcpy( &(d_matrix_2->height), &h, sizeof(int), cudaMemcpyHostToDevice ) );
CUDA_CHECK_RETURN( cudaMalloc( &d_matrix_result, sizeof(Matrix) ) );
// Cannot allocate in struct in device directly, allocate separately and than link
CUDA_CHECK_RETURN( cudaMalloc( &d_matrix_result_elements, MATRIX_2_WIDTH * MATRIX_1_HEIGHT * sizeof(float) ) );
CUDA_CHECK_RETURN( cudaMemcpy( &(d_matrix_result->elements), &d_matrix_result_elements, sizeof(float *), cudaMemcpyHostToDevice) );
w = MATRIX_2_WIDTH;
h = MATRIX_1_HEIGHT;
CUDA_CHECK_RETURN( cudaMemcpy( &(d_matrix_result->width), &w, sizeof(int), cudaMemcpyHostToDevice ) );
CUDA_CHECK_RETURN( cudaMemcpy( &(d_matrix_result->height), &h, sizeof(int), cudaMemcpyHostToDevice ) );
/* Configure kernel */
int blockSize = BLOCK_SIZE;
int gridSizeMatrix1 = (MATRIX_1_WIDTH * MATRIX_1_HEIGHT + BLOCK_SIZE - 1) / BLOCK_SIZE;
int gridSizeMatrix2 = (MATRIX_2_WIDTH * MATRIX_2_HEIGHT + BLOCK_SIZE - 1) / BLOCK_SIZE;
/* Run kernels to fill 2 matrices */
matrix_fill<<< gridSizeMatrix1, blockSize >>>( d_matrix_1 );
matrix_fill<<< gridSizeMatrix2, blockSize >>>( d_matrix_2 );
/* Wait until the kernel finishes its work */
CUDA_CHECK_RETURN( cudaDeviceSynchronize() );
/* Configure kernel.
 * NOTE(review): this integer division truncates; it only covers the whole
 * result because the 4x4 result dims are exact multiples of BLOCK_SIZE. */
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(h_matrix_result->width/dimBlock.x, h_matrix_result->height/dimBlock.y);
/* Multiply 2 matrices */
matrix_mul<<< dimGrid, dimBlock >>>( d_matrix_1, d_matrix_2, d_matrix_result );
/* Wait until the kernel finishes its work */
CUDA_CHECK_RETURN( cudaDeviceSynchronize() );
/* Copy back to host and print result matrix, one row per line */
CUDA_CHECK_RETURN( cudaMemcpy( h_matrix_result->elements, d_matrix_result_elements, MATRIX_2_WIDTH * MATRIX_1_HEIGHT * sizeof(float), cudaMemcpyDeviceToHost) );
for( int i = 0; i < h_matrix_result->width * h_matrix_result->height; ++i ) {
std::cout << h_matrix_result->elements[i] << " ";
if ((i+1) % (h_matrix_result->width) == 0) {
std::cout << std::endl;
}
}
CUDA_CHECK_RETURN( cudaFree(d_matrix_1_elements) );
CUDA_CHECK_RETURN( cudaFree(d_matrix_1) );
CUDA_CHECK_RETURN( cudaFree(d_matrix_2_elements) );
CUDA_CHECK_RETURN( cudaFree(d_matrix_2) );
CUDA_CHECK_RETURN( cudaFree(d_matrix_result_elements) );
CUDA_CHECK_RETURN( cudaFree(d_matrix_result) );
free( h_matrix_result->elements );
free( h_matrix_result );
return 0;
}
|
2,204 | #include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h> // CURAND lib header file
#define TRIALS_PER_THREAD 2048
#define BLOCKS 256
#define THREADS 256
#define PI 3.1415926535 // known value of pi
// Monte-Carlo pi: each thread throws TRIALS_PER_THREAD points at the unit
// square and stores 4 * (fraction inside the quarter circle) as its own
// estimate of pi. One curand stream per thread, seeded with the thread id.
__global__ void pi_mc(float *estimate, curandState *states) {
    const unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
    curand_init(tid, 0, 0, &states[tid]);
    int hits = 0;
    for (int trial = 0; trial < TRIALS_PER_THREAD; ++trial) {
        const float px = curand_uniform(&states[tid]);
        const float py = curand_uniform(&states[tid]);
        if (px * px + py * py <= 1.0f)
            ++hits;
    }
    estimate[tid] = 4.0f * hits / (float) TRIALS_PER_THREAD;
}
int main(int argc, char *argv[]) {
// Launch one pi estimate per thread, then average them on the host.
// NOTE(review): host[] is BLOCKS*THREADS floats (256 KB) of stack space.
float host[BLOCKS * THREADS];
float *dev;                 // per-thread estimates (device)
curandState *devStates;     // per-thread RNG state (device)
cudaMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float));
cudaMalloc( (void **)&devStates, BLOCKS*THREADS*sizeof(curandState) );
pi_mc<<<BLOCKS, THREADS>>>(dev, devStates);
// Blocking copy: also synchronizes with the kernel above.
cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(float),
cudaMemcpyDeviceToHost);
// Average the per-thread estimates into a single value.
float pi_gpu=0.0;
for(int i = 0; i < BLOCKS * THREADS; i++) pi_gpu += host[i];
pi_gpu /= (BLOCKS * THREADS);
printf("CUDA estimate of PI = %f [error of %f ]\n",
pi_gpu, pi_gpu - PI);
cudaFree(dev);
cudaFree(devStates);
return 0;
}
|
2,205 | #include "includes.h"
// 2-D (i = radial, j = azimuthal) pass over an nrad x nsec polar grid.
// Where the velocity difference between adjacent cells is negative
// (compression), writes CVNR^2 * Dens * dv^2 into DensInt (radial
// direction) and TemperInt (azimuthal direction); zero otherwise.
// CVNR comes from includes.h -- NOTE(review): looks like an artificial
// viscosity coefficient; confirm against the including project.
// NOTE(review): many parameters (invdiffRmed, invdiffRsup, Adiabatic,
// Rmed, dt, VradNew, VthetaNew, Energy, EnergyInt) are unused here.
// VradInt is read at (i+1), so it must have at least (nrad+1)*nsec rows'
// worth of data; the azimuthal neighbour wraps with (j+1)%nsec.
__global__ void Substep2Kernel (double *Dens, double *VradInt, double *VthetaInt, double *TemperInt, int nrad, int nsec, double *invdiffRmed, double *invdiffRsup, double *DensInt, int Adiabatic, double *Rmed, double dt, double *VradNew, double *VthetaNew, double *Energy, double *EnergyInt)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double dv;
if (i<nrad && j<nsec){
// radial velocity jump between cell i and i+1
dv = VradInt[(i+1)*nsec + j] - VradInt[i*nsec + j];
if (dv < 0.0)
DensInt[i*nsec + j] = CVNR*CVNR*Dens[i*nsec+j]*dv*dv;
else
DensInt[i*nsec + j] = 0.0;
// azimuthal velocity jump, wrapping around at j = nsec-1
dv = VthetaInt[i*nsec + (j+1)%nsec] - VthetaInt[i*nsec + j];
if (dv < 0.0)
TemperInt[i*nsec + j] = CVNR*CVNR*Dens[i*nsec+j]*dv*dv;
else
TemperInt[i*nsec + j] = 0.0;
}
}
2,206 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// For a 2-D grid of 1-D blocks: print each thread's global id, its warp
// index within the block (32 threads per warp), and its global block id.
__global__ void print_details_of_wraps()
{
	const int gbid = blockIdx.y * gridDim.x + blockIdx.x;       // global block id
	const int gid = gbid * blockDim.x + threadIdx.x;            // global thread id
	const int warp_id = threadIdx.x / 32;
	printf("tid: %d, bid.x : %d, bid.y : %d, gid: %d, warp_id : %d, gbid : %d \n", threadIdx.x, blockIdx.x, blockIdx.y, gid, warp_id, gbid);
}
int main(int argc , char ** argv)
{
	// 2x2 grid of 42-thread blocks: a non-multiple-of-32 size, so the
	// second warp of every block is only partially populated.
	const dim3 block(42);
	const dim3 grid(2, 2);
	print_details_of_wraps<<<grid, block>>>();
	cudaDeviceSynchronize();  // flush device printf before resetting
	cudaDeviceReset();
	return 0;
}
|
2,207 | //============================================================================
// Name : cudaProg.cpp
// Author : Pratil
// Version :
// Copyright : Your copyright notice
// Description : Hello World in C++, Ansi-style
//============================================================================
#include <iostream>
#include <stdio.h>
using namespace std;
// Single-block launch, one thread per element: d_out[i] = d_in[i]^2.
__global__ void squareFunc(unsigned int *d_in, unsigned int *d_out)
{
	const int i = threadIdx.x;
	const unsigned int v = d_in[i];
	d_out[i] = v * v;
}
// Squares 0..63 on the GPU and prints inputs and outputs.
// Fixes: loops and the launch now use arr_len instead of repeating the
// literal 64, and the stray cudaDeviceSynchronize() that ran AFTER the
// results had already been read is gone -- the blocking cudaMemcpy
// already synchronizes with the kernel.
int main()
{
	const unsigned int arr_len = 64;
	const unsigned int arr_size = arr_len * sizeof(unsigned int);
	unsigned int arr_in[arr_len];
	unsigned int arr_out[arr_len];
	// Fill and echo the input: arr_in[i] = i.
	for (unsigned int i = 0; i < arr_len; i++)
	{
		arr_in[i] = i;
		cout << i << " : " << arr_in[i] << endl;
	}
	unsigned int *d_in;
	unsigned int *d_out;
	cudaMalloc((void**) &d_in, arr_size);
	cudaMalloc((void**) &d_out, arr_size);
	cudaMemcpy(d_in, arr_in, arr_size, cudaMemcpyHostToDevice);
	squareFunc<<<1, arr_len>>>(d_in, d_out);
	// Blocking copy: waits for the kernel, so arr_out is valid below.
	cudaMemcpy(arr_out, d_out, arr_size, cudaMemcpyDeviceToHost);
	for (unsigned int i = 0; i < arr_len; i++)
	{
		cout << i <<" : " << arr_out[i] << endl;
	}
	cudaFree(d_out);
	cudaFree(d_in);
	return 0;
}
|
2,208 | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "RoeStep.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Benchmark driver: for each requested matrix size and each block shape,
 * allocate every RoeStep argument buffer, warm up, then time 1000 launches.
 * Fixes vs. the original:
 *  - every cudaMalloc passed XSIZE*YSIZE BYTES instead of
 *    XSIZE*YSIZE*sizeof(double) (resp. sizeof(int)), an 8x (4x)
 *    under-allocation that the kernel would overrun;
 *  - all 49 device buffers were leaked on every loop iteration.
 * NOTE(review): the timed loop measures launch/queueing cost only -- there
 * is no synchronize before `end` -- preserved from the original.
 */
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int nbrOfGrids = 1;
const size_t nElems = (size_t)XSIZE * YSIZE;
/* All double-typed device buffers, allocated uniformly below. */
double *d_u1, *d_u2, *d_u3, *d_vol, *d_f1, *d_f2, *d_f3, *d_tau, *d_h, *d_gama,
       *w1, *w2, *w3, *w4, *fc1, *fc2, *fc3, *fr1, *fr2, *fr3, *fl1, *fl2, *fl3,
       *fludif1, *fludif2, *fludif3, *rsumr, *utilde, *htilde, *uvdif, *absvt,
       *ssc, *vsc, *eiglam1, *eiglam2, *eiglam3, *sgn1, *sgn2, *sgn3,
       *a1, *a2, *a3, *ac11, *ac12, *ac13, *ac21, *ac22, *ac23;
double **dbl_bufs[] = {
    &d_u1, &d_u2, &d_u3, &d_vol, &d_f1, &d_f2, &d_f3, &d_tau, &d_h, &d_gama,
    &w1, &w2, &w3, &w4, &fc1, &fc2, &fc3, &fr1, &fr2, &fr3, &fl1, &fl2, &fl3,
    &fludif1, &fludif2, &fludif3, &rsumr, &utilde, &htilde, &uvdif, &absvt,
    &ssc, &vsc, &eiglam1, &eiglam2, &eiglam3, &sgn1, &sgn2, &sgn3,
    &a1, &a2, &a3, &ac11, &ac12, &ac13, &ac21, &ac22, &ac23 };
const size_t nDbl = sizeof(dbl_bufs) / sizeof(dbl_bufs[0]);
for (size_t b = 0; b < nDbl; ++b)
    cudaMalloc(dbl_bufs[b], nElems * sizeof(double));
int *isb1, *isb2, *isb3;
int **int_bufs[] = { &isb1, &isb2, &isb3 };
for (size_t b = 0; b < 3; ++b)
    cudaMalloc(int_bufs[b], nElems * sizeof(int));
/* Round the problem size up to a whole number of blocks. */
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);  /* force CUDA context creation before timing */
RoeStep<<<gridBlock,threadBlock>>>(nbrOfGrids,d_u1,d_u2,d_u3,d_vol,d_f1,d_f2,d_f3,d_tau,d_h,d_gama,w1,w2,w3,w4,fc1,fc2,fc3,fr1,fr2,fr3,fl1,fl2,fl3,fludif1,fludif2,fludif3,rsumr,utilde,htilde,uvdif,absvt,ssc,vsc,eiglam1,eiglam2,eiglam3,sgn1,sgn2,sgn3,isb1,isb2,isb3,a1,a2,a3,ac11,ac12,ac13,ac21,ac22,ac23);
cudaDeviceSynchronize();
/* Warm-up launches (not timed). */
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
RoeStep<<<gridBlock,threadBlock>>>(nbrOfGrids,d_u1,d_u2,d_u3,d_vol,d_f1,d_f2,d_f3,d_tau,d_h,d_gama,w1,w2,w3,w4,fc1,fc2,fc3,fr1,fr2,fr3,fl1,fl2,fl3,fludif1,fludif2,fludif3,rsumr,utilde,htilde,uvdif,absvt,ssc,vsc,eiglam1,eiglam2,eiglam3,sgn1,sgn2,sgn3,isb1,isb2,isb3,a1,a2,a3,ac11,ac12,ac13,ac21,ac22,ac23);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
RoeStep<<<gridBlock,threadBlock>>>(nbrOfGrids,d_u1,d_u2,d_u3,d_vol,d_f1,d_f2,d_f3,d_tau,d_h,d_gama,w1,w2,w3,w4,fc1,fc2,fc3,fr1,fr2,fr3,fl1,fl2,fl3,fludif1,fludif2,fludif3,rsumr,utilde,htilde,uvdif,absvt,ssc,vsc,eiglam1,eiglam2,eiglam3,sgn1,sgn2,sgn3,isb1,isb2,isb3,a1,a2,a3,ac11,ac12,ac13,ac21,ac22,ac23);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
/* Release every buffer before the next configuration. */
for (size_t b = 0; b < nDbl; ++b)
    cudaFree(*dbl_bufs[b]);
for (size_t b = 0; b < 3; ++b)
    cudaFree(*int_bufs[b]);
}
}}
2,209 | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define CHECK(call){ \
const cudaError_t error = call; \
if (error != cudaSuccess){ \
printf("Error: %s: %d, ",__FILE__,__LINE__);\
printf("code:%d, reason: %s\n",error,cudaGetErrorString(error));\
exit(-10*error);\
}\
}
/* Wall-clock time in seconds (microsecond resolution), for timing spans.
 * Fix: dropped the unused `int i` return capture and the obsolete
 * timezone struct (gettimeofday accepts NULL for it). */
double seconds(){
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
/* Identity fill: ip[k] = k for every k in [0, size). */
void initialInt(int *ip, int size){
int k = 0;
while (k < size){
ip[k] = k;
++k;
}
}
/* Print an ny-row by nx-column row-major matrix, 3-wide columns, one row
 * per output line, framed by blank lines. */
void printMatrix(int *C,const int nx,const int ny){
printf("\n Matrix: (%d.%d)\n",nx,ny);
for (int iy=0;iy<ny;iy++){
int *row = C + iy*nx;
for (int ix=0;ix<nx;ix++){
printf("%3d",row[ix]);
}
printf("\n");
}
printf("\n");
}
// Debug kernel: each thread maps its 2-D coordinate (ix, iy) to the
// row-major linear index iy*nx + ix and prints both along with the matrix
// value stored there. NOTE(review): there is no ix<nx / iy<ny guard, so
// grids that overscan the matrix read A out of bounds -- the caller's
// 8x6 / (4,2)-block config happens to fit exactly.
__global__ void printThreadIndex(int *A,const int nx,const int ny){
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy*nx + ix;
printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d) "
"global index %2d ival %2d \n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,
ix,iy,idx,A[idx]);
}
/* Builds an 8x6 int matrix (values 0..47), prints it on the host, then has
 * each GPU thread print its coordinates and the element it maps to.
 * Fixes: nBytes now uses sizeof(int) -- the data is int; sizeof(float)
 * only worked because the sizes coincide -- and the CUDA calls are wrapped
 * in the CHECK macro the file already defines. */
int main(int argc,char **argv){
printf("%s Starting...\n",argv[0]);
// get device information
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp,dev));
printf("Using dev %d: %s\n",dev,deviceProp.name);
CHECK(cudaSetDevice(dev));
// set mat dim
int nx = 8;
int ny = 6;
int nxy = nx * ny;
int nBytes = nxy * sizeof(int);
// malloc host mem
int *h_A;
h_A = (int *)malloc(nBytes);
// initialize host mat with integer
initialInt(h_A,nxy);
printMatrix(h_A,nx,ny);
// malloc device mem
int *d_MatA;
CHECK(cudaMalloc((void **) &d_MatA,nBytes));
// transfer data from host to dev
CHECK(cudaMemcpy(d_MatA,h_A,nBytes,cudaMemcpyHostToDevice));
// set up execution config: (4,2) blocks tile the 8x6 matrix exactly
dim3 block(4,2);
dim3 grid((nx+block.x-1)/block.x,(ny+block.y-1)/block.y);
// invoke the kernel; the sync below flushes device printf output
printThreadIndex <<< grid,block >>>(d_MatA,nx,ny);
CHECK(cudaDeviceSynchronize());
// free mem on host & dev
CHECK(cudaFree(d_MatA));
free(h_A);
// reset device
CHECK(cudaDeviceReset());
return (0);
}
|
2,210 | #include "cuda.h"
/*---------------------------------------------------------------------
______ ______ _____ ______ _____
| ____|___ // ____| ____/ ____|
| |__ / /| (___ | |__ | | __
| __| / / \___ \| __|| | |_ |
| |____ / /__ ____) | |___| |__| |
|______/_____|_____/|______\_____|
GPU-enabled version using CUDA
Version 1.01
EZSEG: Routine to segment an image using a two-threshold
variable-connectivity region growing method utilizing
GPU acceleration through CUDA.
void ezseg_cuda(float *IMG, float *SEG, int nt, int np,
float thresh1, float thresh2, int nc, int* iters)
INPUT/OUTPUT:
IMG: Input image.
SEG:
ON INPUT:
Matrix of size (nt,np) which contain
1's where there is valid IMG data, and
non-zero values for areas with invalid/no IMG data.
ON OUTPUT:
Segmentation map (0:detection ,same as input o.w.).
nt,np: Dimensions of image.
thresh1: Seeding threshold value.
thresh2: Growing threshold value.
nc: # of consecutive pixels needed for connectivity.
iters:
ON INPUT:
maximum limit on number of iterations.
ON OUTPUT:
number of iterations performed.
----------------------------------------------------------------------
Copyright (c) 2015 Predictive Science Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
----------------------------------------------------------------------
*/
/*Define block size.*/
const int BLOCK_SIZE = 16;
/*** Kernel for ezseg iteration ***/
/* One region-growing iteration over the interior of the (nt, np) image.
 * A pixel still marked 1 in CHM_TMP is set to 0 in CHM if either its EUV
 * value is below the seeding threshold thresh1, or it is below the growing
 * threshold thresh2 AND some run of nc consecutive neighbours (circular,
 * over the 8-neighbourhood) is already all zero in CHM_TMP.
 * *val_modded is raised to signal "something changed this pass".
 * Precondition: nc <= 8 -- local_vec holds the 8 neighbours plus a 7-entry
 * wrap-around copy (15 slots), so ii+jj <= 7+nc-1 must stay below 15.
 * The check-then-atomicAdd on val_modded is racy but benign: the flag only
 * ever needs to become nonzero. */
__global__ void ezseg_kernel(float *EUV, float *CHM, float *CHM_TMP, int nt, int np,
float thresh1, float thresh2, int nc, int *val_modded)
{
int fillit,ij,ii,jj;
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
float local_vec[15],tmp_sum;
/* Interior pixels only: the 8-neighbourhood below never leaves the image. */
if((i>0 && i<nt-1) && (j>0 && j<np-1))
{
ij = nt*j+i; /*Location of i,j in 1D global arrays*/
fillit = 0;
if(CHM_TMP[ij] == 1 ){ /*Good data not marked yet*/
if(EUV[ij] <= thresh1){
fillit = 1;
}
else if(EUV[ij] <= thresh2){
/* Gather the 8 neighbours clockwise, then repeat the first 7 so a
 * run of nc consecutive neighbours can wrap past index 7. */
local_vec[ 0] = CHM_TMP[nt*(j+1)+(i-1)];
local_vec[ 1] = CHM_TMP[nt*(j+1)+(i )];
local_vec[ 2] = CHM_TMP[nt*(j+1)+(i+1)];
local_vec[ 3] = CHM_TMP[nt*(j )+(i+1)];
local_vec[ 4] = CHM_TMP[nt*(j-1)+(i+1)];
local_vec[ 5] = CHM_TMP[nt*(j-1)+(i )];
local_vec[ 6] = CHM_TMP[nt*(j-1)+(i-1)];
local_vec[ 7] = CHM_TMP[nt*(j )+(i-1)];
local_vec[ 8] = local_vec[0];
local_vec[ 9] = local_vec[1];
local_vec[10] = local_vec[2];
local_vec[11] = local_vec[3];
local_vec[12] = local_vec[4];
local_vec[13] = local_vec[5];
local_vec[14] = local_vec[6];
/* Connected if any nc-long window of neighbours sums to zero
 * (i.e. all already belong to the detection). */
for(ii=0;ii<8;ii++){
tmp_sum = 0.0f;
for(jj=0;jj<nc;jj++){
tmp_sum = tmp_sum + local_vec[ii+jj];
}
if(tmp_sum == 0){
fillit = 1;
break;
}
}
} /*euv<thresh2*/
if (fillit == 1) {
CHM[ij] = 0.0f;
if(*val_modded == 0) {
atomicAdd(val_modded, 1);
}
}
} /*good data no mark*/
} /*valid point*/
}
/*********************************************************************/
/*********************************************************************/
/*********************************************************************/
/* Host driver for the EZSEG segmentation (see file header for the full
 * contract). Copies EUV/CHM to the GPU, repeatedly runs ezseg_kernel until
 * either an iteration changes nothing or the caller's iteration limit is
 * hit, then copies the segmentation back into CHM. On input *iters is the
 * iteration cap; on output it is the number of iterations performed.
 * NOTE(review): CUDA return codes are not checked anywhere here. */
extern "C" void ezseg_cuda(float *EUV, float *CHM, int nt, int np,
float thresh1, float thresh2, int nc, int* iters )
{
int val_modded,max_iters,k;
/*GPU variables:*/
float *EUVgpu,*CHMgpu,*CHM_TMPgpu;
int *val_modded_gpu;
/*Allocate GPU arrays:*/
cudaMalloc((void **) &EUVgpu, sizeof(float)*nt*np);
cudaMalloc((void **) &CHMgpu, sizeof(float)*nt*np);
cudaMalloc((void **) &CHM_TMPgpu, sizeof(float)*nt*np);
cudaMalloc((void **) &val_modded_gpu, sizeof(int));
/*Copy euv and chm to GPU*/
cudaMemcpy(EUVgpu, EUV, sizeof(float)*nt*np, cudaMemcpyHostToDevice);
cudaMemcpy(CHMgpu, CHM, sizeof(float)*nt*np, cudaMemcpyHostToDevice);
/* CHM_TMP is the previous iterate the kernel reads; CHM is written. */
cudaMemcpy(CHM_TMPgpu, CHMgpu, sizeof(float)*nt*np, cudaMemcpyDeviceToDevice);
/*Set up CUDA grid: ceil-divide so the whole image is covered.*/
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid((int)ceil((nt+0.0)/dimBlock.x),
(int)ceil((np+0.0)/dimBlock.y));
/*Start main loop*/
max_iters = *iters;
*iters = 0;
for(k=0;k<max_iters;k++){
/*Reset val_modded*/
val_modded = 0;
cudaMemcpy(val_modded_gpu, &val_modded, sizeof(int), cudaMemcpyHostToDevice);
/*Perform iteration:*/
ezseg_kernel<<<dimGrid,dimBlock>>>(EUVgpu,CHMgpu,CHM_TMPgpu,nt,np,thresh1,
thresh2,nc,val_modded_gpu);
*iters = *iters + 1;
/*Make sure everything is done*/
cudaDeviceSynchronize();
/*Get data mod flag*/
cudaMemcpy(&val_modded, val_modded_gpu, sizeof(int), cudaMemcpyDeviceToHost);
/*If no new CH points, break out of iterations*/
if(val_modded == 0){
break;
}
/*Reset tmp to be new map iterate for next iteration:*/
cudaMemcpy(CHM_TMPgpu,CHMgpu,sizeof(float)*nt*np,cudaMemcpyDeviceToDevice);
}
/*Copy result from GPU back to CPU*/
cudaMemcpy(CHM,CHMgpu,sizeof(float)*nt*np,cudaMemcpyDeviceToHost);
/*Free up GPU memory*/
cudaFree(EUVgpu);
cudaFree(CHMgpu);
cudaFree(CHM_TMPgpu);
cudaFree(val_modded_gpu);
}
|
2,211 | #include<stdio.h>
#include<stdlib.h>
int M,N;
double *A, *AT;
double *d_A, *d_AT;
// Transpose kernel: thread `idx` writes one full row of the m x n output
// AT, gathering the m-strided elements that form it in A.
__global__ void MT(double *A, double *AT, int m, int n){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= m) return;   // grid may be rounded up past m
for(int c=0; c<n; c++){
AT[idx * n + c] = A[c * m + idx];
}
}
/* Usage: prog M N -- times 5 launches of the MT transpose kernel and
 * prints the average per-launch time in ms.
 * Fixes: missing argument-count check (previously segfaulted on argv[1]),
 * the comment claiming 100 runs when the loop does 5, and the two CUDA
 * events being leaked. */
int main(int argc, char *argv[]){
if(argc < 3){
fprintf(stderr, "usage: %s M N\n", argv[0]);
return 1;
}
M = atoi(argv[1]);
N = atoi(argv[2]);
A = (double *)malloc(M*N*sizeof(double));
AT = (double *)malloc(M*N*sizeof(double));
for(int i=0;i<M*N;i++) A[i] = i;
for(int i=0;i<M*N;i++) AT[i] = 0;
cudaMalloc((void**) &d_A, M*N*sizeof(double));
cudaMalloc((void**) &d_AT, M*N*sizeof(double));
cudaMemcpy(d_A, A, M*N*sizeof(double), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 block(256);
dim3 grid((255+M)/256);   // ceil(M / 256)
cudaEventRecord(start);
for(int i=0;i<5;i++) MT<<<grid,block>>>(d_A,d_AT,M,N);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// average runtime of a single kernel launch over the 5 timed launches
printf("runtime [ms]: %f\n", milliseconds/ 5.0 );
cudaMemcpy(AT, d_AT, M*N*sizeof(double), cudaMemcpyDeviceToHost);
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(A);
free(AT);
cudaFree(d_A);
cudaFree(d_AT);
}
|
2,212 | #include "includes.h"
// Dense matrix multiply, output = m1 (m1_rows x m1_columns) * m2
// (m1_columns x m2_columns), flattened row-major. Grid-stride loop: each
// thread accumulates the dot product for every output index it owns, so
// any launch configuration covers all nThreads (= m1_rows*m2_columns)
// elements.
__global__ void kDot(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows, const int m1_columns, const int m2_columns) {
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nThreads; idx += stride)
    {
        const int row = idx / m2_columns;
        const int col = idx % m2_columns;
        float acc = 0.f;
        for (int k = 0; k < m1_columns; ++k)
            acc += m1[row * m1_columns + k] * m2[k * m2_columns + col];
        output[idx] = acc;
    }
}
2,213 | #include <fstream>
#include <sstream>
#include <vector>
#include "math.h"
#include <limits>
#include <float.h>
__device__
/*
 * Minimum of column `col` of a row-major rows x columns matrix.
 * Fix: the scan previously started at index 0 regardless of `col`, so
 * every column reported column 0's minimum. Starting at `col` and
 * stepping by `columns` visits exactly that column; initializing to
 * DBL_MAX subsumes the old first-iteration special case.
 */
double calculateMin(double *data, int col, int rows, int columns) {
    double calculatedMin = DBL_MAX;
    for (int i = col; i < rows * columns; i += columns) {
        if (data[i] < calculatedMin) {
            calculatedMin = data[i];
        }
    }
    return calculatedMin;
}
__device__
/*
 * Maximum of column `col` of a row-major rows x columns matrix.
 * Fixes two bugs: the scan ignored `col` (it always walked column 0), and
 * the accumulator started at DBL_MIN -- the smallest POSITIVE double --
 * which is wrong whenever the column is entirely negative; -DBL_MAX is
 * the correct identity for max.
 */
double calculateMax(double *data, int col, int rows, int columns) {
    double calculatedMax = -DBL_MAX;
    for (int i = col; i < rows * columns; i += columns) {
        if (data[i] > calculatedMax) {
            calculatedMax = data[i];
        }
    }
    return calculatedMax;
}
__global__
/*
 * In-place min-max normalization, x -> (x - min) / (max - min) per column.
 * The last column is deliberately left untouched (it carries the label in
 * the wine-quality CSV this program reads) -- this matches the original
 * running-counter logic, but the column is now derived as i % columns,
 * which stays correct for multi-thread launches where the old counter
 * desynchronized. Also fixes the leak of the two scratch arrays and
 * guards against division by zero for constant columns.
 * NOTE(review): every thread recomputes all column mins/maxes -- fine for
 * the current <<<1,1>>> launch, wasteful for larger grids.
 */
void runMinMaxNormalization(double* data, int rows, int columns) {
    double *mins = new double[columns];
    double *maxs = new double[columns];
    for(int col = 0; col < columns; col++) {
        maxs[col] = calculateMax(data, col, rows, columns);
        mins[col] = calculateMin(data, col, rows, columns);
    }
    int stride = blockDim.x * gridDim.x;
    for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < rows * columns; i += stride) {
        int col = i % columns;
        double range = maxs[col] - mins[col];
        if (col != columns - 1 && range != 0.0) {
            data[i] = (data[i] - mins[col]) / range;
        }
    }
    delete[] mins;
    delete[] maxs;
}
__host__
int countWords(const std::string& text, char delimiter) {
std::stringstream stream(text);
std::string temp;
int counter = 0;
while(getline(stream, temp, delimiter)) {
counter++;
}
return counter;
}
__host__
void countRowsAndColumns(std::string filename, int* rows, int* columns) {
std::ifstream featuresFile(filename);
std::string line;
std::getline(featuresFile, line);
*columns = countWords(line, ',');
*rows = 1;
while (std::getline(featuresFile, line))
(*rows)++;
featuresFile.close();
}
__host__
void readFeaturesFromCsv(std::string filename, double* result, int rows, int columns) {
std::ifstream featuresFile(filename);
std::string line;
int i = 0;
while(std::getline(featuresFile, line))
{
std::stringstream ss(line);
double value;
while(ss >> value){
result[i] = value;
if(ss.peek() == ',') ss.ignore();
i++;
}
}
featuresFile.close();
}
__host__
void writeFeaturesToCsv(double *output, int rows, int columns){
std::ofstream file("data.csv");
int size = rows * columns;
int columnsCounter = 0;
for(int i = 0; i < size; ++i) {
double value = output[i];
file << value;
if (columnsCounter + 1 != columns) {
file << ",";
columnsCounter++;
} else {
file << "\n";
columnsCounter = 0;
}
}
file.close();
}
/* Load a CSV, min-max normalize it on the GPU, report the kernel time,
 * and write the normalized data to data.csv.
 * Fixes: the `input` buffer and both CUDA events were leaked. */
int main(int argc, char* argv[]) {
    int rows, columns;
    std::string fileName("./winequality-white.csv");
    countRowsAndColumns(fileName, &rows, &columns);
    double *input = new double[rows * columns];
    readFeaturesFromCsv(fileName, input, rows, columns);
    double *normalized;
    cudaMalloc((void **) &normalized, rows * columns * sizeof(double));
    cudaMemcpy(normalized, input, rows * columns * sizeof(double), cudaMemcpyHostToDevice);
    cudaEvent_t startTime, stopTime;
    cudaEventCreate(&startTime);
    cudaEventCreate(&stopTime);
    cudaEventRecord(startTime);
    // Single-thread launch, as configured originally.
    runMinMaxNormalization<<<1, 1>>>(normalized, rows, columns);
    float resultTime;
    cudaEventRecord(stopTime);
    cudaEventSynchronize(stopTime);
    cudaEventElapsedTime(&resultTime, startTime, stopTime);
    printf("Algorithm took: %f (ms)\n", resultTime);
    cudaMemcpy(input, normalized, rows * columns * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(normalized);
    writeFeaturesToCsv(input, rows, columns);
    cudaEventDestroy(startTime);
    cudaEventDestroy(stopTime);
    delete[] input;
    return 0;
}
2,214 | #include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
const int PARTITION_SIZE = 32;
#define AT(mtx, width, row, column) \
mtx[(row) * (width) + (column)]
// Current wall-clock time in seconds (gettimeofday resolution), used to
// time the kernel launch below.
inline double nowSec()
{
    struct timeval now;
    gettimeofday(&now, NULL);   // NULL: the timezone argument is obsolete
    return now.tv_sec + now.tv_usec * 1e-6;
}
/*
 * Tiled matrix multiply C = A*B on N x N int matrices using shared-memory
 * tiles. Requires blockDim == (PARTITION_SIZE, PARTITION_SIZE) and N a
 * multiple of PARTITION_SIZE (the caller checks the latter). The row
 * index i is mirrored (counts from the bottom of the grid) -- preserved
 * from the original.
 * Fix: added the second __syncthreads() at the end of the tile loop.
 * Without it, threads that finish the inner product early overwrite
 * Apart/Bpart with the next tile while slower threads are still reading
 * the current one -- a shared-memory data race producing garbage results.
 */
__global__ void global_mmul (int *A, int *B, int *C, int N)
{
int i = N-1 - (blockIdx.y * blockDim.y + threadIdx.y);
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i_part = i % PARTITION_SIZE;
int j_part = j % PARTITION_SIZE;
int rowPerPart = N/PARTITION_SIZE;
__shared__ int Apart[PARTITION_SIZE][PARTITION_SIZE];
__shared__ int Bpart[PARTITION_SIZE][PARTITION_SIZE];
AT(C, N, i, j) = 0;
for (int n = 0; n < rowPerPart; n++)
{
Apart[i_part][j_part] = AT(A, N, i, n*PARTITION_SIZE + j_part);
Bpart[i_part][j_part] = AT(B, N, n*PARTITION_SIZE + i_part, j);
__syncthreads();   /* tiles fully loaded before anyone reads them */
for (int k=0; k<PARTITION_SIZE; k++)
AT(C, N, i, j) += Apart[i_part][k]*Bpart[k][j_part];
__syncthreads();   /* everyone done reading before tiles are reloaded */
}
}
#ifdef PRINT
// Dump an N x N matrix to stderr, tab-separated, one row per line
// (no trailing newline after the last row -- matches original output).
void printMtx (int *m, int N)
{
    for (int i=0; i<N*N; i++)
    {
        if (i>0 && i%N == 0)
            fprintf(stderr, "\n");
        fprintf(stderr, "%d\t", m[i]);
    }
}
#endif
// Usage: Matrix_mult N (N must be a multiple of PARTITION_SIZE).
// Multiplies identity * B in unified memory and reports elapsed time;
// with -DPRINT the matrices are dumped to stderr.
int main(int argc, char **argv)
{
if (argc != 2)
{
puts("Usage: Matrix_mult [N]\n");
return -1;
}
int N=atoi(argv[1]);
if (N % PARTITION_SIZE)
{
printf ("error: N must be a multiple of %d\n", PARTITION_SIZE);
return -1;
}
unsigned NN=N*N;
int Nblocks = N/PARTITION_SIZE;
// Unified (managed) memory: same pointers usable on host and device.
int *A, *B, *C;
cudaMallocManaged(&A, NN*sizeof(int));
cudaMallocManaged(&B, NN*sizeof(int));
cudaMallocManaged(&C, NN*sizeof(int));
cudaDeviceSynchronize();
// A = identity, B[i][j] = i*N + j, so the expected product C equals B.
for (int i=0; i<N; i++)
{
for (int j=0; j<N; j++)
{
AT(A, N, i, j) = (i == j) ? 1 : 0;
AT(B, N, i, j) = i*N + j;
}
}
#ifdef PRINT
fprintf(stderr,"A=\n");
printMtx(A, N);
fprintf(stderr,"\n\nB=\n");
printMtx(B, N);
fprintf(stderr,"\n\n");
#endif
// One PARTITION_SIZE^2 block per output tile; exact cover since N is a
// multiple of PARTITION_SIZE (checked above).
dim3 blockPerGrid(Nblocks,Nblocks);
dim3 threadPerBlock(PARTITION_SIZE,PARTITION_SIZE);
double t_begin = nowSec();
global_mmul <<< blockPerGrid, threadPerBlock >>> (A,B,C,N);
// Required before timing and before the host touches C again.
cudaDeviceSynchronize();
double t_end = nowSec();
#ifdef PRINT
fprintf(stderr,"\n\nC=\n");
printMtx(C, N);
fprintf(stderr,"\n");
#endif
printf("Elapsed time: %f sec\n", t_end - t_begin);
cudaFree(A);
cudaFree(B);
cudaFree(C);
return 0;
}
|
2,215 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Launched as <<<m, n>>> for an m x n input `a`: the block index is the
// source row, the thread index the source column. Writes the n x m
// transpose into `t`.
__global__ void transpose(int *a, int *t){
	const int row = blockIdx.x;
	const int col = threadIdx.x;
	t[col * gridDim.x + row] = a[row * blockDim.x + col];
}
/* Read an m x n matrix from stdin, transpose it on the GPU (one block per
 * row, one thread per column), and print the n x m result.
 * Fix: the host buffers a and t were never freed. */
int main(){
	int *a, *t, m, n;
	int *d_a, *d_t;
	printf("Enter the value of m: "); scanf("%d",&m);
	printf("Enter the value of n: "); scanf("%d",&n);
	int size = sizeof(int)*m*n;
	a=(int*)malloc(size);
	t=(int*)malloc(size);
	printf("Enter input matrix: \n");
	for(int i=0; i<m*n; i++)
		scanf("%d",&a[i]);
	cudaMalloc((void**)&d_a,size);
	cudaMalloc((void**)&d_t,size);
	cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
	// n threads per block: n must not exceed the device limit (1024).
	transpose<<<m,n>>>(d_a,d_t);
	// Blocking copy -- also synchronizes with the kernel.
	cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost);
	printf("Resultant matrix:\n");
	for(int i=0; i<n; i++){
		for(int j=0; j<m; j++){
			printf("%d ",t[i*m+j]);
		}
		printf("\n");
	}
	cudaFree(d_a);
	cudaFree(d_t);
	free(a);
	free(t);
	return 0;
}
|
2,216 | #include "includes.h"
// Flags vertices whose greedy colouring conflicts with a neighbour.
// One thread per vertex i (n vertices). The graph is CSR: vertexArray[i] ..
// vertexArray[i+1] index into neighbourArray; neighbour ids appear to be
// 1-based (note the repeated -1 shifts) -- TODO confirm against the builder.
// degreeCount[v] holds the colour currently assigned to v; for each
// same-coloured edge the lower-numbered endpoint is marked in detectConflict.
// NOTE(review): detectConflict is written by multiple threads without
// atomics; this looks benign because the only value ever stored is 1.
__global__ void conflictDetection (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m, int *detectConflict){
int i= blockDim.x * blockIdx.x + threadIdx.x;
// Threads beyond the vertex count have no work.
if (i>=n){
return;
}
int myColour = degreeCount[i];
int start = -1, stop = -1;
start = vertexArray[i];
stop = vertexArray[i+1];
// Scan i's adjacency list for neighbours sharing i's colour.
for (int j=start; j<stop; j++){
if (degreeCount[neighbourArray[j]-1] == myColour){
// detectConflict[i]=1;
// break;
// Mark only the smaller-id endpoint so each conflicting edge is
// flagged once (and only one endpoint gets recoloured later).
if (i < neighbourArray[j]-1){
if (detectConflict[i]!=1){
detectConflict[i]=1;
}
}
else if (detectConflict[neighbourArray[j]-1]!=1){
detectConflict[neighbourArray[j]-1]=1;
}
// if (detectConflict[i]!=1){
// detectConflict[i]=1;
// }
//
// if (detectConflict[neighbourArray[j]-1]!=1){
// detectConflict[neighbourArray[j]-1]=1;
// }
}
}
}
2,217 | #include "includes.h"
// One Floyd-Warshall relaxation step for pivot k over the nverts x nverts
// row-major distance matrix M: M[i][j] = min(M[i][j], M[i][k] + M[k][j]).
// Launched on a 2D grid: x covers columns (j), y covers rows (i).
__global__ void floyd2DKernel(int * M, const int nverts, const int k){
int j = blockIdx.x * blockDim.x + threadIdx.x; // column index
int i = blockIdx.y * blockDim.y + threadIdx.y; // row index
// Fix: guard each coordinate directly. The original folded (i,j) into a
// flat tid = i*nverts + j and re-derived i/j from it, so an out-of-range
// column (j >= nverts) wrapped into a valid cell of the NEXT row, passed
// the bounds check, and updated that cell a second time -- racing with the
// cell's own thread.
if(i < nverts && j < nverts){
if (i!=j && i!=k && j!=k) {
int ik = (i*nverts) + k;
int kj = (k*nverts) + j;
int ij = (i*nverts) + j;
int aux = M[ik]+M[kj];
M[ij] = min(aux, M[ij]);
}
}
}
2,218 | #include "cuda_MP5.cuh"
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Driver for the MP5 reduction exercise: fills (or loads) NUM_ELEMENTS
// floats, sums them on the CPU (computeGold_MP5) and on the GPU
// (computeOnDevice_MP5), and reports PASSED/FAILED on exact equality
// (valid because inputs are whole numbers in [0, 1000]).
// Optional argv[1] names a file of whitespace-separated floats.
int cuda_MP5(int argc, char* argv[])
{
int num_elements = NUM_ELEMENTS;
int errorM = 0;
const unsigned int array_mem_size = sizeof(float) * num_elements;
// allocate host memory to store the input data
float* h_data = (float*)malloc(array_mem_size);
// * No arguments: Randomly generate input data and compare against the
// host's result.
// * One argument: Read the input data array from the given file.
switch (argc - 1)
{
case 1: // One Argument
// errorM = ReadFile(h_data, argv[1]);
errorM = ReadFileData_MP5(h_data, argv[1]);
// A return of 1 is treated as success here -- keep in sync with
// ReadFileData_MP5's return convention.
if (errorM != 1)
{
printf("Error reading input file!\n");
exit(1);
}
break;
default:
// No Arguments or one argument
// initialize the input data on the host to be integer values
// between 0 and 1000
for (unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = floorf(1000 * (rand() / (float)RAND_MAX));
}
break;
}
// compute reference solution
float reference = 0.0f;
computeGold_MP5(&reference, h_data, num_elements);
// **===-------- Modify the body of this function -----------===**
float result = computeOnDevice_MP5(h_data, num_elements);
// **===-----------------------------------------------------------===**
// We can use an epsilon of 0 since values are integral and in a range
// that can be exactly represented
float epsilon = 0.0f;
unsigned int result_regtest = (abs(result - reference) <= epsilon);
printf("Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
printf("device: %f host: %f\n", result, reference);
// cleanup memory
free(h_data);
return 0;
}
// Load exactly NUM_ELEMENTS whitespace-separated floats from file_name
// into M. Returns 1 on success, 0 on failure.
int ReadFileData_MP5(float* M, char* file_name)
{
unsigned int data_read = NUM_ELEMENTS;
ifstream iFile(file_name);
unsigned i = 0;
if (iFile) {
float data;
// Fix: stop at the buffer capacity -- the original kept writing past M
// when the file held more than NUM_ELEMENTS values.
while (i < data_read && iFile >> data) {
M[i++] = data;
}
}
// Fix: return 1 on success to match the caller (cuda_MP5 treats
// `errorM != 1` as a read failure); the original returned 0 on success,
// making every successful read abort the program.
return (i == data_read);
}
////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set
//! Each element is the sum of the elements before it in the array.
//! @param reference reference data, computed but preallocated
//! @param idata input data as provided to device
//! @param len number of elements in reference / idata
////////////////////////////////////////////////////////////////////////////////
// Host reference result: despite the scan-style banner above in the file,
// this computes the plain sum of idata[0..len-1], accumulated in double
// for accuracy, and stores it into *reference.
void computeGold_MP5(float* reference, float* idata, const unsigned int len)
{
    reference[0] = 0;
    double acc = 0;
    for (unsigned int k = 0; k < len; ++k)
        acc += idata[k];
    *reference = acc;
}
// **===----------------- Modify this function ---------------------===**
// Take h_data from host, copies it to device, setup grid and thread
// dimensions, excutes kernel function, and copy result of scan back
// to h_data.
// Note: float* h_data is both the input and the output of this function.
// Sums h_data[0..num_elements-1] on the GPU: the block-scan kernel writes
// one partial total per block, which are then added on the host.
// Side effect (unchanged from the original): h_data is overwritten with
// the kernel's per-block scan output.
float computeOnDevice_MP5(float* h_data, int num_elements)
{
float* d_data;
cudaMalloc((void**)&d_data, sizeof(float)*num_elements);
cudaMemcpy(d_data, h_data, sizeof(float)*num_elements,
cudaMemcpyHostToDevice);
// Each block of RD_BLOCK_SIZE threads consumes 2*RD_BLOCK_SIZE elements.
int num_block = ceil((float)num_elements / (2 * RD_BLOCK_SIZE));
float* d_block_sum;
cudaMalloc((void**)&d_block_sum, sizeof(float)*num_block);
dim3 dimGrid, dimBlock;
dimGrid.x = num_block;
dimGrid.y = dimGrid.z = 1;
dimBlock.x = RD_BLOCK_SIZE;
dimBlock.y = dimBlock.z = 1;
reduction_kernel_MP5 << <dimGrid, dimBlock >> > (d_data, d_block_sum, num_elements);
cudaDeviceSynchronize();
float* h_block_sum = (float*)malloc(sizeof(float)*num_block);
cudaMemcpy(h_block_sum, d_block_sum, sizeof(float)*num_block,
cudaMemcpyDeviceToHost);
cudaMemcpy(h_data, d_data, sizeof(float)*num_elements,
cudaMemcpyDeviceToHost);
cudaFree(d_block_sum);
cudaFree(d_data);
// Reduce the per-block partial sums on the host.
float sum = 0;
for (int i = 0; i < num_block; i++) {
sum += h_block_sum[i];
}
// Fix: release the host staging buffer (the original leaked h_block_sum).
free(h_block_sum);
return sum;
}
// **===----------------- MP4.1 - Modify this function --------------------===**
//! @param g_idata input data in global memory
// result is expected in index 0 of g_idata
//! @param n input number of elements to scan from input data
// **===------------------------------------------------------------------===**
// Block-level inclusive scan (Kogge/Brent-style up-sweep) of 2*blockDim.x
// elements in shared memory. Writes the scanned tile back to g_data and the
// tile total into blocksum[blockIdx.x]. Launch with RD_BLOCK_SIZE threads.
__global__ void reduction_kernel_MP5(float *g_data, float *blocksum, int n)
{
__shared__ float ds_data[2 * RD_BLOCK_SIZE];
unsigned int tx = threadIdx.x;
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
// Fix: guard and zero-fill each element independently. The original only
// loaded when BOTH of a thread's elements were in range, so a partially
// filled last block folded uninitialized shared memory into its total.
ds_data[2 * tx]     = (2 * id     < n) ? g_data[2 * id]     : 0.0f;
ds_data[2 * tx + 1] = (2 * id + 1 < n) ? g_data[2 * id + 1] : 0.0f;
for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) {
__syncthreads();
unsigned int index = (threadIdx.x + 1) * 2 * stride - 1;
if (index < 2 * blockDim.x)
ds_data[index] += ds_data[index - stride];
}
__syncthreads();
// Last slot now holds the tile total.
if (threadIdx.x == 0)
blocksum[blockIdx.x] = ds_data[2 * blockDim.x - 1];
// Write back only in-range elements (the padding stays on-chip).
if (2 * id < n)
g_data[2 * id] = ds_data[2 * tx];
if (2 * id + 1 < n)
g_data[2 * id + 1] = ds_data[2 * tx + 1];
}
2,219 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated stress kernel (the file header says "Do not modify"):
// chains float comparisons and transcendental ops on the scalar `comp`,
// optionally looping var_1 times over var_26, then prints the final value.
// NOTE(review): divisions by +0.0f / -0.0f (e.g. tmp_2 / tmp_1 below) look
// deliberate -- the generator exercises inf/NaN propagation; do not "fix".
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float* var_26,float var_27,float var_28,float var_29) {
if (comp >= +1.2098E-44f - +1.3675E36f + var_2 * (var_3 + +1.8233E-42f)) {
if (comp == (var_4 / -1.6941E-36f / var_5)) {
if (comp <= (-0.0f / var_6 + expf(var_7 * var_8 - +1.1490E-44f))) {
float tmp_1 = +0.0f;
float tmp_2 = var_9 * var_10;
// Intentional divide-by-zero: tmp_1 is +0.0f.
comp += tmp_2 / tmp_1 - (+1.6737E-37f / ceilf((var_11 / var_12 - (-1.9241E-43f + var_13 - (var_14 + var_15)))));
if (comp == fabsf(acosf(-1.0522E10f + (-0.0f + sinf((-1.3195E-20f / (var_16 * var_17))))))) {
float tmp_3 = (-1.7483E34f - (var_18 * (+1.9849E-35f / -0.0f - var_19 + var_20)));
comp = tmp_3 - (var_21 - cosf(-1.1098E36f));
}
if (comp <= (var_22 - (-1.6237E-16f - var_23 - (var_24 * -1.0733E28f)))) {
comp = (var_25 + -1.2367E34f - -1.3208E-42f + (-1.8013E34f - +1.4686E-43f));
}
// var_26 must have at least var_1 elements (driver allocates 10).
for (int i=0; i < var_1; ++i) {
var_26[i] = +1.1237E-44f;
comp += var_26[i] - atanf((var_27 * -1.7977E12f));
float tmp_4 = +1.5908E-42f;
comp = tmp_4 + (-1.5652E-23f / var_28 - var_29);
}
}
}
}
printf("%.17g\n", comp);
}
// Allocate a 10-element host float array with every slot set to v.
// Caller owns the returned buffer.
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float)*10);
    int i = 0;
    while (i < 10) {
        buf[i] = v;
        ++i;
    }
    return buf;
}
// Generated driver: parses 30 command-line values (argv[2] is an int loop
// bound, argv[27] seeds a 10-element array, the rest are floats), launches
// the single-thread `compute` kernel, and waits for it to finish.
// NOTE(review): argv is indexed up to [30] without an argc check -- the
// generator presumably always supplies all arguments.
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
// Host-allocated array passed to the kernel -- note this is host memory,
// so dereferencing it on the device relies on unified addressing.
float* tmp_27 = initPointer( atof(argv[27]) );
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30);
cudaDeviceSynchronize();
return 0;
}
|
2,220 | /*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
#include <cuda_fp16.h>
namespace chainer_trt {
namespace plugin {
// Elementwise leaky ReLU: dst[i] = max(src[i], slope * src[i]).
// Works for float and __half (computation is done in float).
template <typename T>
__global__ void leaky_relu_kernel(const T* src_gpu, T* dst_gpu, int n_in,
float slope) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(n_in <= idx)
return;
float val = src_gpu[idx];
dst_gpu[idx] = max(val, slope * val);
}
// Host launcher: applies leaky ReLU to n_in * batch_size elements on the
// given stream.
// NOTE(review): the grid has batch_size rows (grid.y) but the kernel only
// uses blockIdx.x, and grid_size already covers all n_in*batch_size
// elements -- each element appears to be written batch_size times
// (idempotent, so results are unaffected). Verify before changing.
template <typename T>
void apply_leaky_relu(const T* src_gpu, T* dst_gpu, int n_in, float slope,
int batch_size, cudaStream_t stream) {
int block_size = 1024;
int grid_size = (int)std::ceil(1.0 * n_in * batch_size / block_size);
dim3 grid(grid_size, batch_size);
leaky_relu_kernel<T><<<grid, block_size, 0, stream>>>(
src_gpu, dst_gpu, n_in * batch_size, slope);
}
// explicit instantiation (without this, link error will happen)
template void apply_leaky_relu(const float*, float*, int, float, int,
cudaStream_t);
template void apply_leaky_relu(const __half*, __half*, int, float, int,
cudaStream_t);
}
}
|
2,221 | #include "includes.h"
//#define _SIZE_T_DEFINED
extern "C"
{
}
// Scatter interleaved RGB (RGBRGB...) into planar layout: all R values,
// then all G, then all B. `size` is the total element count (assumed to be
// a multiple of 3 -- TODO confirm with callers).
__global__ void ShuffleRGB(float* input, float* output, int size)
{
    int gid = blockDim.x * blockIdx.y * gridDim.x
            + blockDim.x * blockIdx.x
            + threadIdx.x;
    if (gid >= size)
        return;
    int pixel = gid / 3;     // pixel index within the image
    int channel = gid % 3;   // 0 = R, 1 = G, 2 = B
    output[pixel + channel * (size / 3)] = input[gid];
}
2,222 |
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
unsigned char *input_gpu;
unsigned char *output_gpu;
// Monotonic wall-clock time in milliseconds (CLOCK_MONOTONIC).
double CLOCK() {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000.0 + ts.tv_nsec * 1e-6;
}
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
// Pass-through CUDA status check: in DEBUG builds, print the error string
// to stderr and abort on any failure; in release builds this compiles down
// to a plain return of the status.
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// Add GPU kernel and functions
// Debug pattern kernel: ignores `input` and writes (x % 255) to each output
// pixel. The image width is gridDim.x * TILE_SIZE, so consecutive rows are
// that many bytes apart.
__global__ void kernel(unsigned char *input,
                       unsigned char *output){
    int col = blockIdx.x*TILE_SIZE + threadIdx.x;
    int row = blockIdx.y*TILE_SIZE + threadIdx.y;
    int idx = row * (TILE_SIZE * gridDim.x) + col;
    output[idx] = col % 255;
}
// Runs the debug kernel over a height x width image (padded up to TILE_SIZE
// multiples), timing the kernel with CUDA events and overwriting `data`
// with the result. Uses the file-scope input_gpu/output_gpu pointers.
// NOTE(review): the kernel never reads input_gpu, so the host-to-device
// copy below is dead work kept for symmetry with a real pipeline.
void histogram_gpu(unsigned char *data,
unsigned int height,
unsigned int width){
// Pad the grid up to whole tiles (ceiling division).
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(cudaMemcpy(input_gpu,
data,
size*sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
// Kernel Call
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
kernel<<<dimGrid, dimBlock>>>(input_gpu,
output_gpu);
checkCuda(cudaDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(cudaMemcpy(data,
output_gpu,
size*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
}
// Warm-up pass: identical to histogram_gpu but without the event timing,
// run once so the timed call doesn't pay context/JIT initialization costs.
void histogram_gpu_warmup(unsigned char *data,
unsigned int height,
unsigned int width){
// Pad the grid up to whole tiles (ceiling division).
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(cudaMemcpy(input_gpu,
data,
size*sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
kernel<<<dimGrid, dimBlock>>>(input_gpu,
output_gpu);
checkCuda(cudaDeviceSynchronize());
// Retrieve results from the GPU
checkCuda(cudaMemcpy(data,
output_gpu,
size*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
}
|
2,223 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
// Exclusive (Blelloch) scan of one 1024-element tile per block. Launch with
// 512 threads per block: each thread loads/scans/stores two elements,
// zero-filling out-of-range slots. g_odata gets the tile's exclusive prefix
// sums; block_sums[blockIdx.x] gets the tile total (last exclusive value +
// last input) for the recursive scan-and-add pass in scan().
__global__ void block_scan(
unsigned long long *g_odata,
unsigned long long *g_idata,
unsigned long long n,
unsigned long long *block_sums){
// Fix: the scratch tile must hold 64-bit values. The original declared
// `__shared__ unsigned int temp[1024]`, silently truncating inputs and
// partial sums at 2^32.
__shared__ unsigned long long temp[1024];
int tid = threadIdx.x;
int offset = 1;
int block_offset = blockIdx.x * 1024;
//Parallel Load into shared memory, each thread load 2 elements
if(block_offset + 2*tid<n){
temp[2*tid] = g_idata[block_offset + 2*tid];
}
else{
temp[2*tid] = 0;
}
if(block_offset + 2*tid+1<n){
temp[2*tid+1] = g_idata[block_offset + 2*tid+1];
}
else{
temp[2*tid+1]=0;
}
// Up-sweep (reduce) phase.
for (int d = 1024>>1;d>0;d>>=1)
{
__syncthreads();
if(tid < d)
{
//Compute indices of 2 elements to be handled
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
// Clear the root before the down-sweep (makes the scan exclusive).
if (tid == 0){
temp[1023]=0;
}
// Down-sweep phase.
for (int d=1;d<1024;d*=2)
{
offset >>= 1;
__syncthreads();
if(tid<d)
{
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
// Fix: the swap temporary is 64-bit too (was a truncating int).
unsigned long long swap = temp[ai];
temp[ai] = temp[bi];
temp[bi]+= swap;
}
}
__syncthreads();
if(block_offset + 2*tid<n){
g_odata[block_offset + 2*tid] = temp[2*tid];
}
if(block_offset + 2*tid+1<n){
g_odata[block_offset + 2*tid + 1] = temp[2*tid+1];
}
// Tile total = last exclusive prefix + last in-range input element.
if(tid==0){
int bid = blockIdx.x;
if(1024*bid+1023<n){
block_sums[bid] = temp[1023]+g_idata[1024*bid+1023];
}
else{
block_sums[bid] = temp[1023];
}
}
}
// Adds the scanned block offset block_sums[blockIdx.x] to every element of
// this block's 1024-element tile of A_gpu (two elements per thread, 512
// threads per block). `a` is unused (kept for signature compatibility).
__global__ void add_block_sums(
unsigned long long *A_gpu,
unsigned long long *a,
unsigned long long N,
unsigned long long *block_sums){
int tid = threadIdx.x;
int block_offset = blockIdx.x * 1024;
// Fix: read the offset into a register. The original broadcast it through
// a `__shared__ long long` that every thread wrote, with no
// __syncthreads() before use and a signed type for unsigned data.
unsigned long long blocksum = block_sums[blockIdx.x];
if(block_offset + 2*tid<N){
A_gpu[block_offset + 2*tid] += blocksum;
}
if(block_offset + 2*tid+1<N){
A_gpu[block_offset + 2*tid+1] += blocksum;
}
}
// Exclusive prefix scan of array_in[0..N-1] into array_out (CUDA managed
// memory), recursing on the per-tile totals. Returns true on completion;
// CUDA call errors are not checked (unchanged from original).
bool scan(
unsigned long long *array_out,
unsigned long long *array_in,
unsigned long long N){
// Number of 1024-element tiles (ceiling division).
unsigned long long numOfBlocks;
if(N>N/1024*1024){numOfBlocks = N/1024+1;}
else{numOfBlocks = N/1024;}
unsigned long long *block_sums;
cudaMallocManaged(&block_sums,numOfBlocks*sizeof(unsigned long long));
// Prefetch managed memory to the device to avoid page faults (Pascal+).
int device = -1;
cudaGetDevice(&device);
cudaMemPrefetchAsync(array_in,N*sizeof(unsigned long long),device, NULL);
cudaMemPrefetchAsync(array_out,N*sizeof(unsigned long long),device, NULL);
cudaMemPrefetchAsync(block_sums,numOfBlocks*sizeof(unsigned long long),device, NULL);
// Scan each 1024-element tile independently.
block_scan<<<numOfBlocks,512>>>(array_out,array_in,N,block_sums);
cudaDeviceSynchronize();
// Scan the per-tile totals to obtain each tile's global offset.
unsigned long long *block_sums_out;
cudaMallocManaged(&block_sums_out,numOfBlocks*sizeof(unsigned long long));
cudaMemPrefetchAsync(block_sums_out,numOfBlocks*sizeof(unsigned long long),device, NULL);
if(numOfBlocks>1){
scan(block_sums_out,block_sums,numOfBlocks);
}
else{
// Fix: with a single tile the recursion is skipped, but add_block_sums
// still reads block_sums_out[0]; the original left it uninitialized.
// A lone tile's offset is 0.
block_sums_out[0] = 0;
}
// Add each tile's offset to its elements.
add_block_sums<<<numOfBlocks,512>>>(array_out,array_in,N,block_sums_out);
cudaDeviceSynchronize();
cudaFree(block_sums);
cudaFree(block_sums_out);
return true;
}
// Test driver: builds a random array, scans it on both CPU and GPU, and
// compares the results. Exit status 0 on success, 1 on mismatch.
int main(int argc, char **argv)
{
// 1) Take a positive integer N as an argument
unsigned long long N;
if(argc>1){
N = std::stoll(argv[1]);
}
else{
std::cerr << "Usage: ./homework4 N" << std::endl
<< "Testing with N=1000000" << std::endl;
N=1000000;
}
// 2) Create an input integer array a[N] of size N
unsigned long long *a;
cudaMallocManaged(&a,N*sizeof(unsigned long long));
// 3) Populate the array with random integers from the range [1,1000]
for(unsigned long long i=0;i<N;i++){
a[i]=rand()%1000+1;
}
// 4) Compute the scan output array A_cpu in sequential on the CPU
unsigned long long *A_cpu;
A_cpu = new unsigned long long[N];
A_cpu[0]=0;
for(unsigned long long i =1;i<N;i++){
A_cpu[i]=A_cpu[i-1]+a[i-1];
}
// 5) Compute the scan output array A_gpu on the GPU
unsigned long long *A_gpu;
cudaMallocManaged(&A_gpu,N*sizeof(unsigned long long));
scan(A_gpu,a,N);
// 6) Compare A_cpu and A_gpu
// Fix: use a 64-bit index (the original `int i` overflowed for N > 2^31)
// and report the first mismatch via the Failed branch. The original
// called exit(0) before error++, which skipped "Scan Failed!" and
// returned a success status on mismatch.
int error(0);
for(unsigned long long i=0;i<N;i++){
if(A_cpu[i]!=A_gpu[i]){
std::cout << "Not equal at " << i << ": "<< A_cpu[i] << " " << A_gpu[i] << std::endl;
error++;
break;
}
}
if(error){
printf("Scan Failed!\n");
}
else{
printf("Scan Successful!\n");
}
// Fix: release the CPU reference array (leaked in the original).
delete[] A_cpu;
cudaFree(A_gpu);
cudaFree(a);
return error ? 1 : 0;
}
|
2,224 | #include "includes.h"
// Bucket-assignment kernel for a sample-sort style partition: for each input
// element, binary-search the sorted pivot array (pivotsLength must be a
// power of two -- the search starts at the midpoint and halves the jump),
// record the chosen bucket in r_sublist, and reserve a slot in that bucket
// via an atomic counter, storing the slot index in r_indices.
__global__ void d_count_kernel(unsigned int * d_pivots, int * r_buckets, int pivotsLength, unsigned int * r_indices, unsigned int * r_sublist, unsigned int * d_in, int itemCount) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < itemCount) {
unsigned int element = d_in[idx];
// Implicit binary search over the pivot table.
unsigned int index = pivotsLength/2 - 1;
unsigned int jump = pivotsLength/4;
int pivot = d_pivots[index];
while(jump >= 1) {
index = (element < pivot) ? (index - jump) : (index + jump);
pivot = d_pivots[index];
jump /= 2;
}
// Land on the final bucket: elements >= the last probed pivot go right.
index = (element < pivot) ? index : index + 1;
r_sublist[idx] = index;
// atomicAdd returns the element's position within its bucket.
r_indices[idx] = atomicAdd(&r_buckets[index], 1);
}
}
2,225 | #include <stdio.h>
#include <stdlib.h>
#define N 256
// Elementwise c = a + b for one N-length tile, staged through shared
// memory. Launch with a single block of N threads.
__global__ void kernel(int *a, int *b, int *c){
    __shared__ int sa[N];
    __shared__ int sb[N];
    int i = threadIdx.x;
    sb[i] = b[i];
    sa[i] = a[i];
    __syncthreads();
    c[i] = sa[i] + sb[i];
}
// Adds a[i] = i and b[i] = N-1-i on the GPU; every element of the result
// must therefore equal N-1.
int main (void)
{
int a[N],c[N],b[N],i;
for(i=0;i<N;i++)
{
a[i]=i;
b[i]=N-i-1;
c[i]=0;
}
int *a_d,*b_d, *c_d;
cudaMalloc(&a_d, N * sizeof(int));
cudaMalloc(&b_d, N * sizeof(int));
cudaMalloc(&c_d, N * sizeof(int));
cudaMemcpy(a_d, a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, N*sizeof(int), cudaMemcpyHostToDevice);
kernel<<<1,N>>>(a_d, b_d,c_d);
cudaMemcpy(c, c_d, N*sizeof(int), cudaMemcpyDeviceToHost);
// Fix: check every element against the expected value N-1. The original
// only compared adjacent elements, so a uniformly wrong result (e.g. all
// zeros after a failed launch) passed silently; it also skipped c[0].
for (i = 0; i < N; i++)
if (c[i] != N-1)
printf("Verificar- Hay un error\n");
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
}
|
2,226 | #include <stdio.h>
#include <stdlib.h>
// Demonstrates cudaMalloc/cudaMemset/cudaMemcpy: zeroes a device array and
// prints the 16 zeros copied back to the host.
int main(void) {
  const int count = 16;
  const int bytes = count * sizeof(int);
  int *dev_buf = 0;
  int *host_buf = 0;
  // host-side staging buffer
  host_buf = (int *)malloc(bytes);
  // device-side buffer
  cudaMalloc((void **)&dev_buf, bytes);
  // clear the device buffer
  cudaMemset(dev_buf, 0, bytes);
  // pull the zeroed contents back to the host
  cudaMemcpy(host_buf, dev_buf, bytes, cudaMemcpyDeviceToHost);
  // show each element
  for (int i = 0; i < count; ++i) printf("%d ", host_buf[i]);
  printf("\n");
  // release both buffers
  free(host_buf);
  cudaFree(dev_buf);
  return 0;
}
|
2,227 | /*
#include <omp.h>
#include <stdio.h>
main(int argc, char *argv[]) {
int nthreads, tid;
#pragma omp parallel private(tid)
{
tid = omp_get_thread_num();
printf("Hello World from thread = %d\n", tid);
if (tid == 0)
{
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
}
}
*/ |
2,228 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
//Thread block size
#define BLOCK_SIZE 3
#define WA 10
// Matrix A width
#define HA 10
// Matrix A height
#define WB 10
// Matrix B width
#define HB WA
// Matrix B height
#define WC WB
// Matrix C width
#define HC HA
// Matrix C height
//Allocates a matrix with random float entries.
// Fills data[0..size-1]. The true random initializer is commented out
// upstream; each slot currently receives its own index, which keeps the
// printed matrices easy to verify by eye.
void randomInit(float * data ,int size)
{
    int i = 0;
    while (i < size) {
        //data[i] = rand() / (float) RAND_MAX;
        data[i] = i;
        ++i;
    }
}
// CUDA Kernel
__global__ void matrixMul(float* C,float* A,float* B,int hA, int wA,int wB)
{
// 2D Thread ID
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
// value stores the element that is computed by the thread
float value = 0;
if(row < hA && col < wB)
{
for(int i = 0; i < wA; ++i)
{
float elementA = A[row * wA + i];
float elementB = B[wA * i + col];
value += elementA * elementB;
}
// Write the matrix to device memory each
// thread writes one element
C[row * wA + col] = value;
}
}
// Program main
int main(int argc ,char** argv)
{
// set seed for rand()
srand(2006);
// 1. allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A =sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B =sizeof(float) * size_B;
float * h_B = (float*) malloc(mem_size_B);
// 2. initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// 3. print out A and B
printf("\n\nMatrix A\n");
for(int i = 0; i < size_A; i++)
{
printf("%6.0f ", h_A[i]);
if(((i + 1) % WA) == 0)
printf("\n");
}
printf("\n\nMatrix B\n");
for(int i = 0; i < size_B; i++)
{
printf("%6.0f ", h_B[i]);
if(((i + 1) % WB) == 0)
printf("\n");
}
// 4. allocate host memory for the result C
unsigned int size_C = WC * HC;
unsigned int mem_size_C =sizeof(float) * size_C;
float * h_C = (float *) malloc(mem_size_C);
// 8. allocate device memory
float* d_A;
float* d_B;
cudaMalloc((void**) &d_A, mem_size_A);
cudaMalloc((void**) &d_B, mem_size_B);
//9. copy host memory to device
cudaMemcpy(d_A, h_A,mem_size_A ,cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B,mem_size_B ,cudaMemcpyHostToDevice);
// 10. allocate device memory for the result
float* d_C;
cudaMalloc((void**) &d_C, mem_size_C);
// 5. perform the calculation
// setup execution parameters
dim3 threads(BLOCK_SIZE , BLOCK_SIZE);
dim3 grid((int)ceil((float)WC / threads.x), (int)ceil((float)HC / threads.y));
// execute the kernel
matrixMul<<< grid , threads >>>(d_C, d_A,d_B, HA, WA, WB);
// 11. copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C ,cudaMemcpyDeviceToHost);
// 6. print out the results
printf("\n\n Matrix C ( Results ) \n");
for(int i = 0;i<size_C; i ++){
printf("%6.0f ",h_C[i]);
if(((i+ 1) % WC) == 0)
printf("\n");
}
printf("\n");
// 7.clean up memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
}
|
2,229 | #include <fstream>
#include <iostream>
#include <chrono>
# include <mutex>
#include <stdio.h>
#include <string.h>
using namespace std;
# define NO_OF_CHARS 256
char* getFileContents(const char*);
// preprocessing function
// Builds the Boyer-Moore bad-character table: badchar[c] = index of the
// last occurrence of byte c in str, or -1 if absent.
__device__ void badCharHeuristic(char* str, int size,
int badchar[NO_OF_CHARS]) {
int i;
// Initialize all occurrences as -1
for (i = 0; i < NO_OF_CHARS; i++)
badchar[i] = -1;
// Fill the actual value of last occurrence.
// Fix: index with the unsigned byte value (0..255). Casting a possibly
// signed char straight to int yields negative indices -- out-of-bounds
// writes -- for bytes >= 0x80.
for (i = 0; i < size; i++)
badchar[(unsigned char)str[i]] = i;
}
//implementation Boyer Moore Algorithm
// Boyer-Moore search (bad-character rule only) of `pat` within this
// thread's chunk of `txt`: each thread scans chunk_size bytes starting at
// blockIdx.x*threads_per_block*chunk_size + threadIdx.x*chunk_size and
// atomically counts matches into *t. Matches spanning a chunk boundary are
// not found (inherited limitation of the chunking scheme).
__global__ void search(const char* txt, char* pat, int chunk_size, int pat_size, int * t, int threads_per_block) {
int offset = blockIdx.x* threads_per_block* chunk_size;
int i = threadIdx.x;
int start = offset+(i * chunk_size);
int m = pat_size;
int n = chunk_size;
int badchar[NO_OF_CHARS];
badCharHeuristic(pat, m, badchar);
int s = 0; // s = shift of pattern
while (s <= (n - m)) {
int j = m - 1;
while (j >= 0 && pat[j] == txt[s + j + start]) {
j--;
}
if (j < 0) {
atomicAdd(t, 1);
// Fix: index the table with the unsigned byte value; a signed char
// >= 0x80 produced a negative (out-of-bounds) index. Same fix below.
s += (s + m < n) ? m - badchar[(unsigned char)txt[s + m + start]] : 1;
}
else {
int a = j - badchar[(unsigned char)txt[s + j + start]];
if (a < 1) {
s += 1;
}
else {
s += a;
}
}
}
}
/* Driver code */
/* Driver code */
// Usage: <file to search> <search pattern> <num thread blocks> <num threads>
// Loads the file into managed memory, splits it into one chunk per GPU
// thread, runs the Boyer-Moore kernel, and prints the elapsed microseconds.
int main(int argc, char* argv[]) {
/*
input format: <file to search> <search pattern> <num thread blocks> <num threads>
*/
if (argc != 5) {
cout << "input format: <file to search> <search pattern> <num threads> <num thread blocks>";
return -1;
}
char* file = argv[1];
char* pat_ = argv[2];
int pat_len = strlen(pat_);
char* pat;
cudaMallocManaged(&pat, pat_len);
//copy input pattern from normal memory to shared CPU/GPU memory
for (int i = 0; i < pat_len; i++) {
pat[i] = pat_[i];
}
//shared count variable - needed by cpu and gpu
int* total;
cudaMallocManaged(&total, sizeof(int));
//should check all these to prevent errors but naa. my project my rules
char * contents = getFileContents(file);
// NOTE(review): argv[3] is documented as the block count and argv[4] as
// the thread count; strlen(contents) assumes getFileContents returns a
// NUL-terminated buffer -- verify.
int num_threads = atoi(argv[4]);
int num_blocks = atoi(argv[3]);
int partitionLength = strlen(contents) / num_threads;
auto start = chrono::high_resolution_clock::now();
int threads_per_block = num_threads / num_blocks;
//make num blocks * threads per block gpu threads
search <<<num_blocks, threads_per_block >>> (contents, pat, partitionLength, pat_len,total, threads_per_block );
//wait for GPU operations to terminate
cudaDeviceSynchronize();
auto stop = chrono::high_resolution_clock::now();
auto duration = chrono::duration_cast<chrono::microseconds>(stop - start);
cout << duration.count() << endl;
//free shared variables
cudaFree(contents);
cudaFree(pat);
cudaFree(total);
return 0;
}
// Reads the whole file into CUDA managed memory and returns it as a
// NUL-terminated C string (the caller measures it with strlen).
// Throws errno (as an int) if the file cannot be opened.
char* getFileContents(const char* filename) {
ifstream in(filename, ios::in | ios::binary);
if (in) {
in.seekg(0, ios::end);
int len = in.tellg();
char* contents;
// Fix: allocate one extra byte and NUL-terminate. The original
// returned an unterminated buffer that main() then ran strlen() over,
// reading past the allocation.
cudaMallocManaged(&contents, (len + 1) * sizeof(char));
in.seekg(0, ios::beg);
in.read(&contents[0], len);
contents[len] = '\0';
in.close();
return(contents);
}
throw(errno);
}
2,230 | #include "includes.h"
// Fills a dimx x dimy int grid: each element gets blockIdx.x + blockIdx.y.
__global__ void kernel2( int *a, int dimx, int dimy )
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
int idx = iy*dimx + ix;
// Fix: guard against grids that overshoot the array; the original wrote
// out of bounds whenever dimx/dimy were not multiples of the block
// dimensions. Exact-fit launches behave identically.
if (ix < dimx && iy < dimy)
{
a[idx] = (blockIdx.x + blockIdx.y);
}
}
2,231 | #include<stdio.h>
#include<cuda_runtime.h>
// Queries the CUDA device count and prints device 0's name and global
// memory size in GB.
int main(void){
    int count;
    cudaDeviceProp prop;
    cudaGetDeviceCount(&count);
    cudaGetDeviceProperties(&prop, 0);
    printf("There are %d gpu devices\n", count);
    printf("Device %s has %f GB of global memory\n",
           prop.name,
           prop.totalGlobalMem / pow(1024.0, 3));
}
|
2,232 | #include "includes.h"
// Elementwise product with a cyclically repeated second operand:
// out[i] = in1[i] * in2[i % in2ScalarCount] for i in [0, in1ScalarCount).
// (Despite the name, no subtraction is performed.) Grid-stride loop, so any
// launch configuration covers all elements.
__global__ void mul_sub(float* in1, float* in2, float* out, int in1ScalarCount, int in2ScalarCount) {
    int step = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < in1ScalarCount;
         i += step) {
        out[i] = in1[i] * in2[i % in2ScalarCount];
    }
}
// Repacks a tightly packed 3-floats-per-atom array into OpenMM's
// 4-floats-per-atom layout: target[4*atom + c] = source[3*atom + c].
// N is the number of source elements (3 * atom count).
__global__ void copyToOpenMM( float *target, float *source, int N ) {
int elementNum = blockIdx.x * blockDim.x + threadIdx.x;
int atom = elementNum / 3;
// Fix: reject elementNum == N as well; the original's `> N` let one
// thread read/write one element past the end of the arrays.
if( elementNum >= N ) {
return;
}
else {
target[4 * atom + elementNum % 3] = source[elementNum];
}
}
|
2,234 | //%%cu
#include <iostream>
#include <cuda.h>
#include<bits/stdc++.h>
using namespace std;
float device_time_taken;
struct edgepairs{
int x;
int y;
int wt;
};
// Strict-weak-order comparator for edgepairs: sort by x, then y, then wt.
bool compareTwoEdgePairs(edgepairs a, edgepairs b)
{
if (a.x != b.x)
return a.x < b.x;
if (a.y != b.y)
return a.y < b.y;
// Fix: the original returned true for equal (x, y) keys, so
// comp(a, b) and comp(b, a) could both be true -- a strict-weak-ordering
// violation that is undefined behavior with std::sort. Tie-break on
// weight and return false for fully equal elements.
return a.wt < b.wt;
}
// CSR-style weighted graph container.
// OA: offset array (size nodes+1) indexing into CA/weight.
// CA: adjacency targets; weight: per-edge weights. Both are sized for
// 2*edges entries, consistent with each edge being stored in both
// directions -- TODO confirm against the graph builder.
// NOTE(review): the arrays are raw new[] with no destructor; they are
// only reclaimed at process exit.
struct Graph{
int nodes ;
int edges;
int *OA ;
int *CA ;
int *weight ;
// Allocates the arrays for n nodes and e edges; contents are left unset.
Graph(int n,int e){
nodes = n ;
edges = e ;
OA = new int[nodes +1];
CA = new int[2 * edges +1];
weight = new int[2 * edges];
}
};
// Device-side debug helper: prints node_count floats from bcW on one line.
__device__ void print(float* bcW,int node_count)
{
    int idx = 0;
    while (idx < node_count)
    {
        printf("%f ", bcW[idx]);
        ++idx;
    }
    printf("\n");
}
// delta[v] = weight of the lightest edge leaving vertex v (INT_MAX when v
// has no edges). One thread per vertex; launched with a single block, so
// threadIdx.x is the vertex id.
__global__ void cal_delta(Graph *graph , int *delta)
{
    int v = threadIdx.x;
    int best = INT_MAX;
    for (int e = graph->OA[v]; e < graph->OA[v + 1]; e++)
    {
        best = min(best, graph->weight[e]);
    }
    delta[v] = best;
}
// Betweenness-centrality kernel in the style of Brandes' algorithm for
// weighted graphs. Launch: one block per source vertex s (= blockIdx.x),
// one thread per vertex v (= threadIdx.x); requires node_count threads per
// block and dynamic shared memory large enough for the arrays partitioned
// below. delta_node[v] must hold v's minimum outgoing edge weight (see
// cal_delta). BC[w] accumulates dependency[w]/2 per source -- presumably
// the halving folds the two directions of an undirected graph; verify.
__global__ void kernel(Graph *graph , float *BC , int *delta_node , int node_count)
{
int idx = threadIdx.x;
// Carve per-block working arrays out of dynamic shared memory:
// U = unsettled flag, F = frontier flag, d = tentative distance,
// sigma = shortest-path counts, dependency = Brandes' dependency,
// lock = per-vertex spinlock, ends = prefix offsets of settled waves in
// S, S = settle-order stack of vertices.
extern __shared__ int array[];
int *U = (int*)array ;
int *F = (int*)&U[node_count] ;
int *d = (int*)&F[node_count] ;
int *sigma = (int*)&d[node_count] ;
float *dependency = (float*)&sigma[ node_count ];
int *lock = (int*)&dependency[node_count] ;
int *ends = (int*)&lock[node_count] ;
int *S = (int*)&ends[node_count] ;
int v = threadIdx.x;
__shared__ int s;
__shared__ int ends_len ;
__shared__ int s_len ;
__shared__ int delta ;
s = blockIdx.x ; // Source vertex
__syncthreads();
// Initialisation
if(v == s)
{
d[v] = 0 ;
sigma[v] = 1 ;
U[v] = 0 ;
F[v] = 1 ;
S[0] = s ;
s_len = 1 ;
ends[0] = 0 ;
ends[1] = 1 ;
ends_len = 2 ;
}
else
{
U[v] = 1 ;
F[v] = 0 ;
d[v] = INT_MAX ;
sigma[v] = 0 ;
}
dependency[v] = 0.0;
lock[v] = 0 ;
delta = 0 ;
__syncthreads();
// shortest path algorithm
// Delta-stepping-style SSSP: repeatedly relax edges out of the current
// frontier, then settle every unsettled vertex whose distance is below
// the new threshold `delta`. Loop ends when no reachable vertex remains.
while(delta < INT_MAX)
{
__syncthreads();
if(F[v] == 1)
{
// Relax all edges (v, w) under w's spinlock so the distance and
// path-count updates stay consistent.
for(int r = graph->OA[v]; r < graph->OA[v + 1]; r++)
{
int w = graph->CA[r];
int wt_vw = graph->weight[r] ;
bool needlock = true ;
while(needlock)
{
if(atomicCAS(&lock[w],0,1) == 0)
{
if( U[w]== 1 && d[v] + wt_vw < d[w])
{
d[w] = d[v] + wt_vw ;
sigma[w] = 0 ;
}
if ( d[w] == d[v] + wt_vw )
{
sigma[w] = sigma[w] + sigma[v] ;
}
atomicExch(&lock[w] , 0 );
needlock = false ;
}
}
}
}
if(idx == 0)
{
atomicExch(&delta,INT_MAX);
}
__syncthreads() ;
// New settling threshold: min over unsettled v of d[v] + (v's
// lightest outgoing edge) -- distances below it can no longer improve.
if( U[v] == 1 && d[v] < INT_MAX )
{
atomicMin( &delta , d[v] + delta_node[v] ) ;
}
__shared__ int count ;
if(idx == 0)
{
atomicExch(&count,0);
}
F[v] = 0 ;
__syncthreads() ;
// Settle vertices below the threshold: they form the next frontier
// and are appended to the stack S in settle order.
if(U[v] == 1 && d[v] < delta)
{
U[v] = 0;
F[v] = 1;
int t = atomicAdd(&s_len,1);
S[t] = v ;
atomicAdd(&count,1);
}
__syncthreads();
// Record the wave boundary in `ends` for the dependency phase.
if(idx == 0 )
{
if(count > 0)
{
ends[ends_len] = ends[ends_len - 1] + count ;
ends_len = ends_len + 1 ;
}
}
__syncthreads();
}
// Dependency accumulation: walk the settle waves in reverse order and
// apply Brandes' back-propagation along shortest-path edges.
__shared__ int depth ;
__shared__ int start ;
__shared__ int end ;
if(idx == 0)
{
depth = ends_len - 1 ;
}
__syncthreads();
while (depth > 0)
{
__syncthreads();
if(idx==0)
{
start = ends[depth - 1 ] ;
end = ends[depth] - 1 ;
}
__syncthreads();
// One thread per vertex w in the current wave.
if ( idx >= 0 && idx <= (end-start))
{
int w = S[start+idx] ;
for(int r = graph->OA[w] ; r < graph->OA[w+1] ; r++)
{
int u = graph->CA[r] ;
int wt_wu = graph->weight[r] ;
// (w, u) lies on a shortest path iff d[u] == d[w] + wt(w, u).
if ( d[u] == d[w] + wt_wu )
{
if (sigma[u] != 0)
{
atomicAdd(dependency + w, (sigma[w] * 1.0 / sigma[u]) * (1 + dependency[u]));
}
}
}
// Fold this source's dependency into the global score (halved).
if(w!=s)
{
atomicAdd(&BC[w],dependency[w]/2) ;
}
}
if(idx == 0)
{
depth--;
}
__syncthreads() ;
}
}
// Computes betweenness centrality for every node of h_graph on the GPU.
// Returns a heap-allocated array of node_count floats; the caller owns it
// and releases it with delete[].  Also records the elapsed device time in
// the file-level global device_time_taken.
float *fun(Graph* h_graph)
{
    int node_count = h_graph->nodes;
    int edge_count = h_graph->edges;

    // Copy the Graph struct to the device.  Its member pointers still hold
    // host addresses after this shallow copy, so each array is deep-copied
    // and the corresponding device-side pointer is patched separately.
    Graph *d_graph;
    cudaMalloc((void **)&d_graph, sizeof(Graph));
    cudaMemcpy(d_graph, h_graph, sizeof(Graph), cudaMemcpyHostToDevice);

    // Offsets array (CSR row pointers).
    int *d_OA;
    cudaMalloc((void **)&d_OA, sizeof(int) * (node_count + 1));
    cudaMemcpy(d_OA, h_graph->OA, sizeof(int) * (node_count + 1), cudaMemcpyHostToDevice);
    cudaMemcpy(&(d_graph->OA), &d_OA, sizeof(int *), cudaMemcpyHostToDevice);

    // Coordinates array (CSR column indices; both directions of every edge).
    int *d_CA;
    cudaMalloc((void **)&d_CA, sizeof(int) * (2 * edge_count + 1));
    cudaMemcpy(d_CA, h_graph->CA, sizeof(int) * (2 * edge_count + 1), cudaMemcpyHostToDevice);
    cudaMemcpy(&(d_graph->CA), &d_CA, sizeof(int *), cudaMemcpyHostToDevice);

    // Edge weights (one per directed edge).
    int *d_wt;
    cudaMalloc((void **)&d_wt, sizeof(int) * (2 * edge_count));
    cudaMemcpy(d_wt, h_graph->weight, sizeof(int) * (2 * edge_count), cudaMemcpyHostToDevice);
    cudaMemcpy(&(d_graph->weight), &d_wt, sizeof(int *), cudaMemcpyHostToDevice);

    // Zero-initialised result buffer, mirrored on the device.
    float *bwCentrality = new float[node_count]();
    float *device_bwCentrality;
    cudaMalloc((void **)&device_bwCentrality, sizeof(float) * node_count);
    cudaMemcpy(device_bwCentrality, bwCentrality, sizeof(float) * node_count, cudaMemcpyHostToDevice);

    // TIMER
    cudaEvent_t device_start, device_end;
    cudaEventCreate(&device_start);
    cudaEventCreate(&device_end);
    cudaEventRecord(device_start);

    int *delta_v;
    cudaMalloc((void **)&delta_v, sizeof(int) * node_count);
    cal_delta<<<1,node_count>>>(d_graph,delta_v);
    cudaDeviceSynchronize();

    // One block per source vertex, one thread per vertex.  Dynamic shared
    // memory holds the per-source working arrays: 7 int arrays plus 1 float
    // array of node_count elements each.
    kernel<<< node_count,node_count, 7*node_count*sizeof(int)+node_count*sizeof(float) >>> (d_graph ,device_bwCentrality ,delta_v , h_graph->nodes) ;
    cudaDeviceSynchronize();

    cudaMemcpy(bwCentrality,device_bwCentrality, sizeof(float) * node_count, cudaMemcpyDeviceToHost);
    cudaEventRecord(device_end);
    cudaEventSynchronize(device_end);
    cudaEventElapsedTime(&device_time_taken, device_start, device_end);

    // Release all device resources.  BUGFIX: the original leaked d_OA, d_CA,
    // d_wt and d_graph, and never destroyed the timing events.
    cudaEventDestroy(device_start);
    cudaEventDestroy(device_end);
    cudaFree(device_bwCentrality);
    cudaFree(delta_v);
    cudaFree(d_OA);
    cudaFree(d_CA);
    cudaFree(d_wt);
    cudaFree(d_graph);
    return bwCentrality;
}
// Reads a weighted undirected graph (header "n m", then m lines "u v w")
// from the file named on the command line, builds a CSR representation,
// computes betweenness centrality on the GPU via fun(), and reports the
// maximum value and the vertices attaining it.
int main(int argc, char *argv[])
{
    int m,n;
    int num1,num2;
    FILE *filePointer;
    // BUGFIX: argv[1] was dereferenced unconditionally; guard against a
    // missing command-line argument (reuses the existing failure message).
    if (argc < 2)
    {
        printf( "input.txt file failed to open." ) ;
        return 0;
    }
    char *filename = argv[1];
    filePointer = fopen( filename , "r") ;
    //checking if file ptr is NULL
    if ( filePointer == NULL )
    {
        printf( "input.txt file failed to open." ) ;
        return 0;
    }
    fscanf(filePointer, "%d", &n ); //scaning the number of vertices
    fscanf(filePointer, "%d", &m ); //scaning the number of edges
    Graph *graph = new Graph(n,m); //HOST GRAPH
    // COO holds both directions of every edge (the graph is undirected).
    vector <edgepairs> COO(2*m);
    int it=0;
    int wt ;
    for(int i=0 ; i<m ; i++ ) //scanning the edges
    {
        fscanf(filePointer, "%d", &num1) ;
        fscanf(filePointer, "%d", &num2) ;
        fscanf(filePointer, "%d", &wt) ;
        COO[it].x = num1 ;
        COO[it].y = num2 ;
        COO[it].wt = wt ;
        it++;
        COO[it].x = num2 ;
        COO[it].y = num1 ;
        COO[it].wt = wt ;
        it++;
    }
    fclose(filePointer);  // BUGFIX: the file handle was never closed
    // COO done... sort it so edges of each source vertex are contiguous.
    sort(COO.begin(),COO.end(),compareTwoEdgePairs);
    for(int i=0;i<n+1;i++)
    {
        graph->OA[i] = 0;
    }
    graph->OA[0]=0;
    //initialize the Coordinates Array and the weight array
    for(int i=0;i<2*m;i++)
    {
        graph->CA[i] = COO[i].y ;
        graph->weight[i] = COO[i].wt ;
    }
    //initialize the Offsets Array: per-vertex degree counts...
    for(int i=0;i<2*m;i++)
    {
        graph->OA[COO[i].x + 1]++; //store the frequency..
    }
    //...turned into CSR offsets by a cumulative sum.
    for(int i=0;i<n;i++)
    {
        graph->OA[i+1] += graph->OA[i]; // do cumulative sum..
    }
    float *bwC = fun(graph);
    float maxBetweenness = -1;
    vector<int>indices;
    cout<<"Betweeness Centrality of all the nodes(vertices)\n";
    for (int i = 0; i < n; i++)
    {
        maxBetweenness = max(maxBetweenness, bwC[i]);
        cout<<"Node "<<i<<" : "<< bwC[i]<<endl;
    }
    for (int i = 0; i < n; i++)
    {
        if(maxBetweenness == bwC[i])
            indices.push_back(i);
    }
    cout << endl;
    cout<<"\nMaximum Betweenness Centrality = " << maxBetweenness<<endl;
    cout<<"Vertices with Maximum Betweenness Centrality: [";
    // size_t index avoids the signed/unsigned comparison with indices.size().
    for(size_t i=0;i<indices.size();i++)
    {
        if(i != indices.size()-1)
            cout<<indices[i]<<" , ";
        else
            cout<<indices[i] ;
    }
    cout<<"]"<<endl;
    cout<<"Total device time taken : ";
    cout<<device_time_taken<<endl;
    delete[] bwC;
    delete graph ;
}
|
2,235 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Transposes a matrix stored row-major in `a` into `t`.
// Launch as <<<rows, cols>>>: one block per input row, one thread per column.
__global__ void transpose(int *a, int *t) {
    int col  = threadIdx.x;  // column inside the input row
    int row  = blockIdx.x;   // input row index
    int cols = blockDim.x;   // length of an input row
    int rows = gridDim.x;    // number of input rows
    t[col * rows + row] = a[row * cols + col];
}
// Reads an m x n matrix from stdin, transposes it on the GPU (one block per
// input row, one thread per column), and prints the n x m result.
int main (void) {
    int *a, *t, m, n;
    int *d_a, *d_t;
    printf("Enter the value of m: \n");
    scanf("%d", &m);
    printf("Enter the value of n: \n");
    scanf("%d", &n);
    int size = sizeof(int)*m*n;
    a = (int*)malloc(m*n*sizeof(int));
    t = (int*)malloc(m*n*sizeof(int));
    printf("Enter the matrix: \n");
    for (int i = 0; i < m*n; ++i)
    {
        scanf("%d", &a[i]);
    }
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_t, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    transpose<<<m,n>>>(d_a, d_t);
    // The blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(t, d_t, size, cudaMemcpyDeviceToHost);
    printf("Result vectors is: \n");
    for (int i = 0; i < n; ++i)
    {
        for (int j = 0; j < m; ++j)
        {
            printf("%d\t", t[i*m+j]);
        }
        printf("\n");
    }
    getchar();
    cudaFree(d_a);
    cudaFree(d_t);
    // BUGFIX: the host buffers were leaked.
    free(a);
    free(t);
    return 0;
}
|
2,236 | #include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
/* change dimension size as needed */
const int dimension = 512 ;
const int blocksize = 32;
const int K = 1;
struct timeval tv;
// Matrix multiplication C = A * B for N x N row-major matrices.
// Each thread computes a single element of C.
__global__ void gpuMM(float *A, float *B, float *C, int N)
{
	int row = blockIdx.y*blockDim.y + threadIdx.y;
	int col = blockIdx.x*blockDim.x + threadIdx.x;
	// BUGFIX: guard the grid tail.  Without this check, launch configurations
	// whose grid does not divide N exactly read and write out of bounds.
	if (row >= N || col >= N)
		return;
	float sum = 0.f;
	for (int n = 0; n < N; ++n)
		sum += A[row*N+n]*B[n*N+col];
	C[row*N+col] = sum;
}
/*float timestamp()
{
float t;
gettimeofday(&tv, NULL);
t = tv.tv_sec + (tv.tv_usec/1000000.0);
return t;
}
*/
// Benchmarks a single launch of gpuMM on dimension x dimension random
// matrices, timing only the kernel with CUDA events and printing the
// elapsed milliseconds.
int main(int argc, char *argv[])
{
	cudaEvent_t start1, stop1;
	float time;
	cudaEventCreate(&start1);
	cudaEventCreate(&stop1);
	int i, j;
	float *A, *B, *C;// start, end;
	float *Ad, *Bd, *Cd;
	A = (float*)malloc(dimension*dimension*sizeof(float));
	B = (float*)malloc(dimension*dimension*sizeof(float));
	C = (float*)malloc(dimension*dimension*sizeof(float));
	// Fixed seed: inputs are reproducible across runs.
	srand(292);
	for(i = 0; i < dimension; i++)
		for(j = 0; j < dimension; j++)
		{
			A[dimension*i+j] = (rand()/(RAND_MAX + 1.0));
			B[dimension*i+j] = (rand()/(RAND_MAX + 1.0));
			C[dimension*i+j] = 0.0;
		}
	cudaMalloc( (void**)&Ad, dimension*dimension*sizeof(float) );
	cudaMemcpy( Ad, A, dimension*dimension*sizeof(float), cudaMemcpyHostToDevice );
	cudaMalloc( (void**)&Bd, dimension*dimension*sizeof(float) );
	cudaMemcpy( Bd, B, dimension*dimension*sizeof(float), cudaMemcpyHostToDevice );
	cudaMalloc( (void**)&Cd, dimension*dimension*sizeof(float) );
	// NOTE(review): with K = 1 the grid is a single 32x32 block, so only the
	// top-left 32x32 tile of the 512x512 product is computed -- presumably a
	// deliberate single-block benchmark, but confirm; full coverage would
	// need grid((dimension+blocksize-1)/blocksize, ...).
	dim3 threadBlock(blocksize,blocksize);
	dim3 grid(K,K);
	//start = timestamp();
	cudaEventRecord( start1, 0 );
	gpuMM<<<grid,threadBlock>>>( Ad,Bd,Cd,dimension);
	//end = timestamp();
	cudaEventRecord( stop1, 0 );
	// Blocks until the stop event (and therefore the kernel) has completed.
	cudaEventSynchronize( stop1 );
	cudaEventElapsedTime( &time, start1, stop1 );
	printf("\nsecs:%f\n", time);
	cudaEventDestroy( start1 );
	cudaEventDestroy( stop1 );
	cudaMemcpy(C,Cd,dimension*dimension*sizeof(float),cudaMemcpyDeviceToHost);
	//printf("\nsecs:%f\n", end-start);
	free(A);
	free(B);
	free(C);
	cudaFree(Ad);
	cudaFree(Bd);
	cudaFree(Cd);
	return 0;
}
|
2,237 | #include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define MAX_NUM_BLOCKS_FLOAT 70
//#include "global_sync.cu"
#define WARP_SIZE_FLOAT 32
#define NUM_THREADS_FLOAT 512
#define NUM_WARPS_FLOAT (NUM_THREADS_FLOAT / WARP_SIZE_FLOAT)
#define LOG_NUM_THREADS_FLOAT 9
#define LOG_NUM_WARPS_FLOAT (LOG_NUM_THREADS_FLOAT - 5)
#define SCAN_STRIDE_FLOAT (WARP_SIZE_FLOAT + WARP_SIZE_FLOAT / 2 + 1)
__device__ volatile int inQueueSizeFloat[MAX_NUM_BLOCKS_FLOAT];
__device__ volatile int *inQueuePtr1Float[MAX_NUM_BLOCKS_FLOAT];
__device__ volatile int inQueueHeadFloat[MAX_NUM_BLOCKS_FLOAT];
__device__ volatile int outQueueMaxSizeFloat[MAX_NUM_BLOCKS_FLOAT];
__device__ volatile int outQueueHeadFloat[MAX_NUM_BLOCKS_FLOAT];
__device__ volatile int *outQueuePtr2Float[MAX_NUM_BLOCKS_FLOAT];
__device__ volatile int *curInQueueFloat[MAX_NUM_BLOCKS_FLOAT];
__device__ volatile int *curOutQueueFloat[MAX_NUM_BLOCKS_FLOAT];
__device__ volatile int execution_code_float;
// This variables are used for debugging purposes only
__device__ volatile int totalInserts_float[MAX_NUM_BLOCKS_FLOAT];
// Utils...
// http://www.moderngpu.com/intro/scan.html
// Block-wide exclusive prefix sum over NUM_THREADS_FLOAT ints, following the
// moderngpu multiscan scheme: per-warp inclusive scans in padded shared
// memory, a scan of the warp totals, then a combination pass.
// Assumes the block has exactly NUM_THREADS_FLOAT threads and that all of
// them call this function (it contains __syncthreads()).
__device__ void scan_float(const int* values, int* exclusive) {
	// Reserve a half warp of extra space plus one per warp in the block.
	// This is exactly enough space to avoid comparisons in the multiscan
	// and to avoid bank conflicts.
	__shared__ volatile int scan[NUM_WARPS_FLOAT * SCAN_STRIDE_FLOAT];
	int tid = threadIdx.x;
	int warp = tid / WARP_SIZE_FLOAT;
	int lane = (WARP_SIZE_FLOAT - 1) & tid;
	volatile int* s = scan + SCAN_STRIDE_FLOAT * warp + lane + WARP_SIZE_FLOAT / 2;
	// Zero the half-warp of padding below this lane so the offset reads in
	// the scan loop never need a bounds check.
	s[-16] = 0;
	// Read from global memory.
	int x = values[tid];
	s[0] = x;
	// Run inclusive scan on each warp's data.
	int sum = x;
	#pragma unroll
	for(int i = 0; i < 5; ++i) {
		int offset = 1<< i;
		sum += s[-offset];
		s[0] = sum;
	}
	// Synchronize to make all the totals available to the reduction code.
	__syncthreads();
	__shared__ volatile int totals[NUM_WARPS_FLOAT + NUM_WARPS_FLOAT / 2];
	if(tid < NUM_WARPS_FLOAT) {
		// Grab the block total for the tid'th block. This is the last element
		// in the block's scanned sequence. This operation avoids bank
		// conflicts.
		int total = scan[SCAN_STRIDE_FLOAT * tid + WARP_SIZE_FLOAT / 2 + WARP_SIZE_FLOAT - 1];
		totals[tid] = 0;
		volatile int* s2 = totals + NUM_WARPS_FLOAT / 2 + tid;
		int totalsSum = total;
		s2[0] = total;
		// Scan the per-warp totals (LOG_NUM_WARPS_FLOAT steps).
		#pragma unroll
		for(int i = 0; i < LOG_NUM_WARPS_FLOAT; ++i) {
			int offset = 1<< i;
			totalsSum += s2[-offset];
			s2[0] = totalsSum;
		}
		// Subtract total from totalsSum for an exclusive scan.
		totals[tid] = totalsSum - total;
	}
	// Synchronize to make the block scan available to all warps.
	__syncthreads();
	// Add the block scan to the inclusive sum for the block.
	sum += totals[warp];
	// Write the inclusive and exclusive scans to global memory.
	// inclusive[tid] = sum;
	exclusive[tid] = sum - x;
}
// Reserves one slot in this block's output queue and stores elements[0]
// there.  Returns the slot index, or -1 when the queue is already full.
// (outQueueCurPtr is unused; kept for signature compatibility.)
__device__ int queueElementFloat(int *outQueueCurPtr, int *elements){
	int slot = atomicAdd((int*)&outQueueHeadFloat[blockIdx.x], 1);
	if(slot >= outQueueMaxSizeFloat[blockIdx.x])
		return -1;
	curOutQueueFloat[blockIdx.x][slot] = elements[0];
	return slot;
}
// Assuming that all threads in a block are calling this function
__device__ int queueElementFloat(int *elements){
int queue_index = -1;
#ifdef PREFIX_SUM
__shared__ int writeAddr[NUM_THREADS_FLOAT];
__shared__ int exclusiveScan[NUM_THREADS_FLOAT];
__shared__ int global_queue_index;
if(threadIdx.x == 0){
global_queue_index = outQueueHeadFloat[blockIdx.x];
}
// set to the number of values this threard is writing
writeAddr[threadIdx.x] = elements[0];
// run a prefix-sum on threads inserting data to the queue
scan_float(writeAddr, exclusiveScan);
// calculate index into the queue where given thread is writing
queue_index = global_queue_index+exclusiveScan[threadIdx.x];
// write elemets sequentially to shared memory
// int localIndex = exclusiveScan[threadIdx.x];
// for(int i = 0; i < elements[0]; i++){
// localElements[localIndex+i] = elements[i+1];
// }
// __syncthreads();
// for(int i = threadIdx.x; i < exclusiveScan[NUM_THREADS_FLOAT-1]+writeAddr[NUM_THREADS_FLOAT-1]; i+=blockDim.x){
// curOutQueueFloat[blockIdx.x][global_queue_index+i] = localElements[i];
// }
for(int i = 0; i < elements[0]; i++){
// If the queue storage has been exceed, than set the execution code to 1.
// This will force a second round in the morphological reconstructio.
if(queue_index+i >= outQueueMaxSizeFloat[blockIdx.x]){
// printf("List out of bounds\n");
execution_code_float=1;
}else{
curOutQueueFloat[blockIdx.x][queue_index+i] = elements[i+1];
}
}
// thread 0 updates head of the queue
if(threadIdx.x == 0){
outQueueHeadFloat[blockIdx.x]+=exclusiveScan[NUM_THREADS_FLOAT-1]+writeAddr[NUM_THREADS_FLOAT-1];
if(outQueueHeadFloat[blockIdx.x] >= outQueueMaxSizeFloat[blockIdx.x]){
outQueueHeadFloat[blockIdx.x] = outQueueMaxSizeFloat[blockIdx.x];
}
// printf("Inserting = %d - outQueueHeadFloat = %d\n", exclusiveScan[NUM_THREADS_FLOAT-1]+writeAddr[NUM_THREADS_FLOAT-1], outQueueHeadFloat[blockIdx.x]);
}
#else
if(elements[0] != 0){
queue_index = atomicAdd((int*)&outQueueHeadFloat[blockIdx.x], elements[0]);
if(queue_index < outQueueMaxSizeFloat[blockIdx.x]){
for(int i = 0; i < elements[0];i++){
curOutQueueFloat[blockIdx.x][queue_index+i] = elements[i+1];
}
}else{
queue_index = -1;
}
}
#endif
return queue_index;
}
// Assuming that all threads in a block are calling this function
// Single-element variant: queues `element` unless it is -1 (the "nothing to
// queue" sentinel).  Returns the index the element was stored at, or -1.
// When PREFIX_SUM is defined, every thread of the block must call this
// (block-wide scan with barriers).
__device__ int queueElementFloat(int element){
	int queue_index = -1;
#ifdef	PREFIX_SUM
	__shared__ int writeAddr[NUM_THREADS_FLOAT];
	__shared__ int exclusiveScan[NUM_THREADS_FLOAT];
	__shared__ int global_queue_index;
	if(threadIdx.x == 0){
		global_queue_index = outQueueHeadFloat[blockIdx.x];
	}
	// set to 1 threards that are writing
	writeAddr[threadIdx.x] = ((element) != (-1) ? (1):(0));
	// run a prefix-sum on threads inserting data to the queue
	scan_float(writeAddr, exclusiveScan);
	// calculate index into the queue where give thread is writing
	queue_index = global_queue_index+exclusiveScan[threadIdx.x];
	// If there is data to be queued, do it
	if(element != -1){
		curOutQueueFloat[blockIdx.x][queue_index] = element;
	}
	// thread 0 updates head of the queue
	if(threadIdx.x == 0){
		outQueueHeadFloat[blockIdx.x]+=exclusiveScan[NUM_THREADS_FLOAT-1]+writeAddr[NUM_THREADS_FLOAT-1];
	}
#else
	// Atomic path: one reservation per writing thread; -1 when full.
	if(element != -1){
		queue_index = atomicAdd((int*)&outQueueHeadFloat[blockIdx.x], 1);
		if(queue_index < outQueueMaxSizeFloat[blockIdx.x]){
			curOutQueueFloat[blockIdx.x][queue_index] = element;
		}else{
			queue_index = -1;
		}
	}
#endif
	return queue_index;
}
// Makes queue 1 point to queue 2, and vice-versa
__device__ void swapQueusFloat(int loopIt){
__syncthreads();
if(loopIt %2 == 0){
curInQueueFloat[blockIdx.x] = outQueuePtr2Float[blockIdx.x];
curOutQueueFloat[blockIdx.x] = inQueuePtr1Float[blockIdx.x];
if(threadIdx.x == 0){
inQueueSizeFloat[blockIdx.x] = outQueueHeadFloat[blockIdx.x];
outQueueHeadFloat[blockIdx.x] = 0;
inQueueHeadFloat[blockIdx.x] = 0;
// This is used for profiling only
totalInserts_float[blockIdx.x]+=inQueueSizeFloat[blockIdx.x];
}
}else{
curInQueueFloat[blockIdx.x] = inQueuePtr1Float[blockIdx.x];
curOutQueueFloat[blockIdx.x] = outQueuePtr2Float[blockIdx.x];
if(threadIdx.x == 0){
inQueueSizeFloat[blockIdx.x] = outQueueHeadFloat[blockIdx.x];
outQueueHeadFloat[blockIdx.x] = 0;
inQueueHeadFloat[blockIdx.x] = 0;
// This is used for profiling only
totalInserts_float[blockIdx.x]+=inQueueSizeFloat[blockIdx.x];
}
}
__syncthreads();
}
// -2, nothing else to be done at all
__device__ int dequeueElementFloat(int *loopIt){
// did this block got something to do?
__shared__ volatile int gotWork;
getWork:
gotWork = 0;
// Try to get some work.
// int queue_index = atomicAdd((int*)&inQueueHeadFloat, 1);
int queue_index = inQueueHeadFloat[blockIdx.x] + threadIdx.x;
// I must guarantee that idle threads are set to 0, and no other thread
// will come later and set it to 0 again
__syncthreads();
if(threadIdx.x == 0){
inQueueHeadFloat[blockIdx.x]+=blockDim.x;
// if(loopIt[0] < 1){
// printf("inQueueSizeFloat = %d loopIt[0] = %d queue_index = %d outQueueHeadFloat = %d\n", inQueueSizeFloat[blockIdx.x], loopIt[0], queue_index, outQueueHeadFloat[blockIdx.x]);
// }
}
// Nothing to do by default
int element = -1;
if(queue_index < inQueueSizeFloat[blockIdx.x]){
element = curInQueueFloat[blockIdx.x][queue_index];
gotWork = 1;
}
__syncthreads();
// This block does not have anything to process
if(!gotWork){
// if(loopIt[0] < 20 && threadIdx.x == 0)
// printf("inQueueSizeFloat = %d loopIt[0] = %d\n", inQueueSizeFloat[blockIdx.x], loopIt[0]);
element = -2;
if(outQueueHeadFloat[blockIdx.x] != 0){
swapQueusFloat(loopIt[0]);
loopIt[0]++;
goto getWork;
}
}
return element;
}
// One-thread initialisation kernel: wires queue slot qId to the given
// input/output buffers and resets every head/counter for that slot.
__global__ void initQueueIdFloat(int *inQueueData, int dataElements, int *outQueueData, int outMaxSize, int qId){
	if(threadIdx.x != 0)
		return;
	inQueuePtr1Float[qId]     = inQueueData;   // input data pointer
	inQueueSizeFloat[qId]     = dataElements;  // number of elements already queued
	totalInserts_float[qId]   = 0;             // profiling counter
	outQueuePtr2Float[qId]    = outQueueData;  // second buffer, used for output
	outQueueMaxSizeFloat[qId] = outMaxSize;    // capacity of the out queue
	outQueueHeadFloat[qId]    = 0;             // out-queue head
	inQueueHeadFloat[qId]     = 0;             // in-queue head
	execution_code_float      = 0;             // clear the overflow flag
}
// Returns what should be queued
__device__ int propagateFloat(int *seeds, int *image, int x, int y, int ncols, int pval){
int returnValue = -1;
int index = y*ncols + x;
int seedXYval = seeds[index];
int imageXYval = image[index];
if((seedXYval < pval) && (imageXYval != seedXYval)){
// printf("propagation pval=%d", pval);
int newValue = min(pval, imageXYval);
// this should be a max atomic...
atomicMax(&(seeds[index]), newValue);
returnValue = index;
}
return returnValue;
}
// Queue-driven morphological reconstruction: each block drains its private
// work queue, propagating seed values to 4- or 8-connected neighbours and
// queueing every pixel that changed.  d_Result[blockIdx.x] receives the
// block's total queue inserts; d_Result[gridDim.x] is set to 1 if any
// block overflowed its queue (forcing a second pass in the caller).
__global__ void morphReconKernelSpeedupFloat(int* d_Result, int *d_Seeds, int*d_Image, int ncols, int nrows, int connectivity=4){
	curInQueueFloat[blockIdx.x] = inQueuePtr1Float[blockIdx.x];
	curOutQueueFloat[blockIdx.x] = outQueuePtr2Float[blockIdx.x];
	int *seeds = d_Seeds;
	int *image = d_Image;
	int loopIt = 0;
	int workUnit = -1;
	int tid = threadIdx.x;
	// localQueue[tid][0] = count; slots 1..8 = pixel indices queued this round.
	__shared__ int localQueue[NUM_THREADS_FLOAT][9];
	__syncthreads();
	// Neighbour offsets in the original visiting order: the four 4-connected
	// neighbours first, then the four diagonals (used when connectivity==8).
	const int dx[8] = { 0, 0, -1, 1, -1,  1, -1, 1};
	const int dy[8] = {-1, 1,  0, 0, -1, -1,  1, 1};
	do{
		localQueue[tid][0] = 0;
		// Try to get some work.
		workUnit = dequeueElementFloat(&loopIt);
		int y = workUnit/ncols;
		int x = workUnit%ncols;
		int pval = 0;
		if(workUnit >= 0){
			pval = seeds[workUnit];
		}
		int nNeighbors = (connectivity == 8) ? 8 : 4;
		for(int i = 0; i < nNeighbors; i++){
			int nx = x + dx[i];
			int ny = y + dy[i];
			if(workUnit >= 0 && nx >= 0 && nx < ncols && ny >= 0 && ny < nrows){
				int retWork = propagateFloat((int*)seeds, image, nx, ny, ncols, pval);
				// BUGFIX: the original tested `retWork > 0`, which silently
				// dropped updates to pixel index 0 (the top-left corner).
				// propagateFloat returns -1 for "nothing to do", so every
				// value >= 0 is a valid pixel index.
				if(retWork >= 0){
					localQueue[tid][0]++;
					localQueue[tid][localQueue[tid][0]] = retWork;
				}
			}
		}
		// Publish this thread's batch to the block's output queue.
		queueElementFloat(localQueue[tid]);
	}while(workUnit != -2);
	d_Result[blockIdx.x]=totalInserts_float[blockIdx.x];
	if(execution_code_float!=0){
		d_Result[gridDim.x]=1;
	}
}
// Host driver: partitions the input work list across nBlocks per-block
// queues (each sized subListSize * queue_increase_factor), runs the
// reconstruction kernel, and returns the overflow flag (non-zero means a
// queue overflowed and another pass is required).
extern "C" int morphReconSpeedupFloat( int *g_InputListPtr, int h_ListSize, int *g_Seed, int *g_Image, int h_ncols, int h_nrows, int connectivity, int nBlocks, float queue_increase_factor){
	int *d_Result;
	// alloc space to save output elements in the queue for each block
	int **h_OutQueuePtr = (int **)malloc(sizeof(int*) * nBlocks);
	// at this moment I should partition the INPUT queue
	int tempNblocks = nBlocks;
	int subListsInit[tempNblocks];
	int subListsSize[tempNblocks];
	for(int i = 0; i < tempNblocks; i++){
		int curSubListInit = (h_ListSize/tempNblocks)*i;
		// The last block absorbs the remainder of the division.
		int curSubListEnd = ((i+1<tempNblocks)?((i+1)*(h_ListSize/tempNblocks)-1):(h_ListSize-1));
		subListsInit[i] = curSubListInit;
		subListsSize[i] = curSubListEnd-curSubListInit+1;
	}
	// Per-block input queues, oversized by queue_increase_factor so they can
	// also absorb re-queued work.
	int *blockSubLists[tempNblocks];
	for(int i = 0; i < tempNblocks; i++){
		cudaMalloc((void **)&blockSubLists[i], sizeof(int)*(subListsSize[i]) * queue_increase_factor);
		cudaMemcpy(blockSubLists[i], &g_InputListPtr[subListsInit[i]], subListsSize[i] * sizeof(int), cudaMemcpyDeviceToDevice);
	}
	for(int i = 0; i < tempNblocks;i++){
		cudaMalloc((void **)&h_OutQueuePtr[i], sizeof(int) * (subListsSize[i]) * queue_increase_factor);
	}
	// Init queue for each image. yes, this may not be the most efficient way, but the code is far easier to read.
	// Another version, where all pointer are copied at once to the GPU was also built, buit it was only about 1ms
	// faster. Thus, we decide to go with this version
	for(int i = 0; i < nBlocks;i++)
		initQueueIdFloat<<<1, 1>>>(blockSubLists[i], subListsSize[i], h_OutQueuePtr[i], (subListsSize[i]) *queue_increase_factor, i);
	// This is used by each block to store the number of queue operations performed
	cudaMalloc((void **)&d_Result, sizeof(int)*(nBlocks+1)) ;
	cudaMemset((void *)d_Result, 0, sizeof(int)*(nBlocks+1));
	morphReconKernelSpeedupFloat<<<nBlocks, NUM_THREADS_FLOAT>>>(d_Result, g_Seed, g_Image, h_ncols, h_nrows, connectivity);
	// BUGFIX: cudaGetLastError() clears the error flag, so the original's
	// second call inside the if-branch always reported cudaSuccess.  Read
	// the error exactly once.
	cudaError_t errorCode = cudaGetLastError();
	if(errorCode != cudaSuccess){
		const char *error = cudaGetErrorString(errorCode);
		printf("Error after morphRecon = %s\n", error);
	}
	int *h_Result = (int *) malloc(sizeof(int) * (nBlocks+1));
	cudaMemcpy(h_Result, d_Result, sizeof(int) * (nBlocks+1), cudaMemcpyDeviceToHost);
	int resutRet = h_Result[nBlocks];
	free(h_Result);
	cudaFree(d_Result);
	for(int i = 0; i < nBlocks; i++){
		cudaFree(h_OutQueuePtr[i]);
		cudaFree(blockSubLists[i]); // BUGFIX: was leaked (the original TODO)
	}
	free(h_OutQueuePtr);
	cudaFree(g_InputListPtr);
	return resutRet;
}
|
2,238 | #include <iostream>
#include <cuda.h>
#include <cstdlib>
#include <stdlib.h>
#include <time.h>
int SIZE = 2;
// Element-wise vector addition: C[i] = A[i] + B[i] for every i < len.
__global__
void vecAddK(float *A, float *B, float *C, int len)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= len) return;  // guard the grid tail
    C[idx] = A[idx] + B[idx];
}
// Host wrapper: copies h_A and h_B to the device, launches vecAddK with
// 256-thread blocks, and copies the sums back into h_C.
__host__
void vecAdd(float *h_A, float *h_B, float *h_C, int len){
    int bytes = len * sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **) &d_A, bytes);
    cudaMalloc((void **) &d_B, bytes);
    cudaMalloc((void **) &d_C, bytes);
    cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice);
    const int threadsPerBlock = 256;
    dim3 grid((len - 1) / threadsPerBlock + 1, 1, 1);  // ceil(len / 256)
    dim3 block(threadsPerBlock, 1, 1);
    vecAddK<<<grid, block>>>(d_A, d_B, d_C, len);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_C, d_C, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Fills a[0..SIZE) with pseudo-random floats in [0, 100).
// BUGFIX: srand48() was called inside the loop, reseeding the generator
// with the same time(NULL) second on every iteration, so every element
// received the same value.  Seed once, before the loop.
void populateArray(float a[]){
    srand48(time(NULL));
    for(int i = 0; i < SIZE; i++){
        a[i] = drand48() * 100;
    }
}
// Builds two random vectors, adds them on the GPU, and prints the first
// element of each as a smoke check.
int main(){
    float A[SIZE];float B[SIZE];float C[SIZE];
    populateArray(A);
    populateArray(B);
    // (Removed the unused local `block_size`; vecAdd picks its own
    // launch configuration.)
    vecAdd(A,B,C,SIZE);
    std::cout << A[0] << " + " << B[0] << "=" << C[0] << std::endl;
    return 0;
}
|
// Multi-source shortest paths (Bellman-Ford style relaxation): each block
// solves SSSP for one source from src[0..*sn), striding over sources by
// gridDim; threads stride over the *n vertices by blockDim.  Per-source
// state lives in dist/predist/visit at offset stInd * (*n).
__global__ void kernelForMSSP(int *V, int *E, int *W, int *n, int *src, int *sn, bool *visit, int *dist, int *predist){
	// NOTE(review): `st`/`old` are assigned but never read afterwards --
	// presumably leftovers; confirm before removing.
	int u=0, stInd=0, st=0, align=0, old=0;
	// Block-shared convergence flag: 1 while any distance improved this round.
	__shared__ int QuickExit;
	const int blockId = blockIdx.z *(gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
	const int threadId = threadIdx.z*(blockDim.x * blockDim.y)+ threadIdx.y* blockDim.x+ threadIdx.x;
	const int blockSize= blockDim.x * blockDim.y * blockDim.z;
	const int gridSize = gridDim.x * gridDim.y * gridDim.z;
	if(blockId >= (*sn)) return ;
	// NOTE(review): threads with threadId >= *n return here; if *n is not a
	// multiple of blockSize the remaining threads still hit __syncthreads()
	// below with fewer participants -- confirm the launch guarantees
	// blockSize <= *n alignment.
	if(threadId >= (*n)) return ;
	stInd = blockId;
	st = src[stInd];
	while(stInd < (*sn))
	{
		align = (stInd * (*n));  // base offset of this source's state arrays
		while(1){ /* this while can solve a sssp*/
			// NOTE(review): QuickExit is reset here by all threads without a
			// barrier before the exit check of the *previous* iteration in
			// other threads -- looks racy; confirm convergence behaviour.
			QuickExit = 0;
			u = threadId;
			// Relax all outgoing edges of every currently-visited vertex.
			while(u < (*n)){
				if(visit[u + align]){
					visit[u + align]=0;
					for(int adj = V[u]; adj<V[u+1]; adj++){
						old=atomicMin( &predist[align + E[adj]] , dist[align + u] + W[adj]);
					}
				}
				u+=blockSize;
			}
			__syncthreads();
			u=threadId;
			// Commit improved tentative distances and mark them for revisit.
			while(u < (*n)){
				if(predist[align + u] < dist[align + u]){
					dist[align + u] = predist[align + u];
					visit[align + u] = 1;
					QuickExit = 1;
				}
				u+=blockSize;
			}
			__syncthreads();
			// No distance changed this round: this source has converged.
			if(QuickExit==0){
				break;
			}
		}
		__syncthreads();
		stInd += gridSize;  // next source handled by this block
	}
}
2,240 | /*
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <conio.h>
const int TILE_WIDTH=2;
const int width=4;
__global__ void matrixmul(int *d_M,int *d_N,int *d_P)
{
__shared__ int dS_M[TILE_WIDTH][TILE_WIDTH];
__shared__ int dS_N[TILE_WIDTH][TILE_WIDTH];
int by= blockIdx.y;
int ty=threadIdx.y;
int bx=blockIdx.x;
int tx=threadIdx.x;
int Row= by*TILE_WIDTH+ty;
int Col= bx*TILE_WIDTH+tx;
int pvalue=0;
for(int m=0;m<(width/TILE_WIDTH);m++)
{ dS_M[ty][tx]=d_M[Row*width + (m*TILE_WIDTH+tx)] ;
dS_N[ty][tx]=d_N[Col+(m*TILE_WIDTH+ty)*width] ;
__syncthreads();
for(int k=0;k<TILE_WIDTH;k++)
{
pvalue += dS_M[ty][k]*dS_N[k][tx] ;
__syncthreads();
}
d_P [Row*width +Col]=pvalue;
}
}
int main()
{
int i,j,size;
int h_M[width][width],h_N[width][width],h_P[width][width];
int *d_M,*d_N,*d_P;
printf("\n Enter according to width= %d \n ", width);
for(i=0;i<width;i++)
{
for(j=0;j<width;j++)
{ h_M[i][j]=2;
h_N[i][j]=2;
if(i==j)
{
h_N[i][j]= 1;
h_M[i][j]= 1;
}
}
}
printf("\nh_M array : \n");
for(i=0;i<width;i++)
{ printf("\n");
for(j=0;j<width;j++)
printf("%d ",h_M[i][j]);
}
printf("\n\nh_N array : \n");
for(i=0;i<width;i++)
{ printf("\n");
for(j=0;j<width;j++)
printf("%d ",h_N[i][j]);
}
size=sizeof(int)*width*width;
cudaMalloc((void**)&d_M,size);
cudaMalloc((void**)&d_N,size);
cudaMalloc((void**)&d_P,size);
cudaMemcpy(d_M,h_M,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_N,h_N,size,cudaMemcpyHostToDevice);
dim3 dimGrid((width/TILE_WIDTH),(width/TILE_WIDTH),1);
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
matrixmul<<<dimGrid,dimBlock>>>(d_M,d_N,d_P);
cudaMemcpy(h_P,d_P,size,cudaMemcpyDeviceToHost);
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_P);
printf("\n Resultant Array ( h_M * h_N ): \n");
for(i=0;i<width;i++)
{ printf("\n");
for(j=0;j<width;j++)
printf("%d ",h_P[i][j]);
}
getch();
return 0;
}
*/ |
2,241 | #include<stdio.h>
__global__ void Array_add(int *a, int *b, int *c, int *n)
{
unsigned short tid = threadIdx.x;
if(tid < *n)
c[tid] = a[tid] + b[tid];
}
// Adds two 5-element random vectors on the GPU (one block of n threads)
// and prints each per-element sum.
int main()
{
    int n = 5;
    int a[n], b[n], c[n];
    int *cuda_a, *cuda_b, *cuda_c, *cuda_n;
    // Fill a completely before b to keep the rand() sequence deterministic.
    for(int i = 0; i < n; i++)
        a[i] = rand() % 100;
    for(int i = 0; i < n; i++)
        b[i] = rand() % 100;
    size_t bytes = n * sizeof(int);
    cudaMalloc((void**)&cuda_a, bytes);
    cudaMalloc((void**)&cuda_b, bytes);
    cudaMalloc((void**)&cuda_c, bytes);
    cudaMalloc((void**)&cuda_n, sizeof(int));
    cudaMemcpy(cuda_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_b, b, bytes, cudaMemcpyHostToDevice);
    // The element count is passed through device memory as well.
    cudaMemcpy(cuda_n, &n, sizeof(int), cudaMemcpyHostToDevice);
    Array_add <<<1, n>>>(cuda_a, cuda_b, cuda_c, cuda_n);
    cudaMemcpy(c, cuda_c, bytes, cudaMemcpyDeviceToHost);
    for(int i = 0; i < n; i++)
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    cudaFree(cuda_a);
    cudaFree(cuda_b);
    cudaFree(cuda_c);
    cudaFree(cuda_n);
    return 0;
}
2,242 | /******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define TILE_SIZE 64
// Tiled matrix multiply C = A x B.  Requires blockDim.x == blockDim.y ==
// TILE_SIZE; one thread computes one element of C, accumulating over
// TILE_SIZE-wide strips of A and B staged through shared memory.
// NOTE(review): TILE_SIZE is 64 here, so a TILE_SIZE x TILE_SIZE block is
// 4096 threads -- above the 1024-thread-per-block hardware limit; the
// kernel only runs if TILE_SIZE is reduced.  Confirm intended value.
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
    /********************************************************************
     *
     * Compute C = A x B
     *   where A is a (m x k) matrix
     *   where B is a (k x n) matrix
     *   where C is a (m x n) matrix
     *
     * Use shared memory for tiling
     *
     ********************************************************************/
/* *** Basic version ***************
	int row = blockIdx.y * blockDim.y + threadIdx.y ;
	int col = blockIdx.x * blockDim.x + threadIdx.x ;
	if( (row < m) && (col < n))
	{
		float prod = 0 ;
		for (int i = 0; i < k; ++i)
		{
			prod += (A[row*k + i]) * (B[i*n + col]) ;
		}
		C[row*n + col] = prod ;
	}
*/
	/********* TILED MEMORY Version ***********/
	//Allocate Shared Memory
	__shared__ float shared_M[TILE_SIZE][TILE_SIZE] ;
	__shared__ float shared_N[TILE_SIZE][TILE_SIZE] ;
	//Note TILE_SIZE = BLOCK_SIZE = blockDim.y = blockDim.x
	int row = blockIdx.y * blockDim.y + threadIdx.y ;
	int col = blockIdx.x * blockDim.x + threadIdx.x ;
	float prod = 0 ;
	// Walk the k dimension one TILE_SIZE strip at a time (ceil(k/TILE_SIZE)).
	for (int i = 0; i < (k+TILE_SIZE -1)/TILE_SIZE ; ++i )
	{
		//Check bounds and
		//Load into Shared Memory (zero-pad outside the matrices so the
		//inner product below needs no per-element bounds checks)
		int curx = i*TILE_SIZE + threadIdx.x ;
		int cury = i*TILE_SIZE + threadIdx.y ;
		if ((row < m) && (curx < k) )
			shared_M[threadIdx.y][threadIdx.x] = A[row*k + curx ] ;
		else
			shared_M[threadIdx.y][threadIdx.x] = 0;
		if((col < n) && (cury < k))
			shared_N[threadIdx.y][threadIdx.x] = B[ cury*n + col] ;
		else
			shared_N[threadIdx.y][threadIdx.x] = 0 ;
		//Wait for all threads to finish loading before reading the tiles
		__syncthreads() ;
		for(int j =0; j < TILE_SIZE ; ++j )
		{
			prod += shared_M[threadIdx.y][j] * shared_N[j][threadIdx.x] ;
		}
		//Wait for all the threads before the tiles are overwritten
		__syncthreads() ;
	}
	//Check for bounds and copy result
	if(row < m && col < n)
		C[row*n + col] = prod ;
}
// SGEMM-style wrapper around the tiled mysgemm kernel.  Only the
// no-transpose, alpha==1, beta==0 case is supported; other values are
// reported and ignored.  lda/ldb/ldc are accepted for interface
// compatibility but not used (row-major, tightly packed assumed).
void tiledSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
    if ((transa != 'N') && (transa != 'n')) {
	printf("unsupported value of 'transa'\n");
    	return;
    }
    if ((transb != 'N') && (transb != 'n')) {
	printf("unsupported value of 'transb'\n");
	return;
    }
    if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
	printf("unsupported value of alpha\n");
	return;
    }
    if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
	printf("unsupported value of beta\n");
	return;
    }
    // Initialize thread block and kernel grid dimensions ---------------------
    // NOTE(review): TILE_SIZE is 64, so dimBlock is 64x64 = 4096 threads per
    // block -- beyond the 1024-thread hardware limit, which makes the launch
    // fail with an invalid-configuration error.  The check below now
    // surfaces that failure instead of silently leaving C untouched.
    const unsigned int BLOCK_SIZE = TILE_SIZE;
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE) ;
    dim3 dimGrid((n + dimBlock.x -1)/dimBlock.x, (m + dimBlock.y - 1)/dimBlock.y ) ;
    // Invoke CUDA kernel -----------------------------------------------------
    mysgemm<<<dimGrid, dimBlock>>> (m, n, k, A, B, C) ;
    // BUGFIX: launch-configuration errors were never checked.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
	printf("mysgemm launch failed: %s\n", cudaGetErrorString(err));
	return;
    }
    cudaThreadSynchronize();
}
|
2,243 | //errorcheck_soln.cu: This program is designed to produce output
//'data = 7'. Error checking has been added and all errors have
//been removed.
#include <stdio.h>
#include <stdlib.h>
// Writes the constant 7 into the device location *ptr.
__global__ void setData(int *ptr)
{
    ptr[0] = 7;
}
// Demonstrates the standard CUDA error-checking pattern around an
// allocation, a kernel launch, and a copy-back; prints 'data = 7'.
int main(void)
{
	int *data_d = 0;
	int *data_h = 0;
	cudaError_t error;
	//Allocate one int on the device, checking the return code.
	error = cudaMalloc((void**)&data_d, sizeof(int));
	if( error != cudaSuccess)
	{
		printf("cudaMalloc error: %s\n", cudaGetErrorString(error));
	}
	data_h = (int *)malloc(sizeof(int));
	//Kernel-execution errors only surface at a later synchronizing call,
	//hence the cudaThreadSynchronize() + cudaGetLastError() pair below.
	setData<<<1,1>>>(data_d);
	cudaThreadSynchronize();
	error = cudaGetLastError();
	if(error != cudaSuccess)
	{
		printf("setData error: %s\n", cudaGetErrorString(error));
	}
	//The blocking copy brings the value written by the kernel to the host.
	error = cudaMemcpy(data_h, data_d, sizeof(int), cudaMemcpyDeviceToHost);
	if(error != cudaSuccess)
	{
		printf("cudaMemcpy error: %s\n", cudaGetErrorString(error));
	}
	printf("data = %d\n", *data_h);
	free(data_h);
	//We only need to free data_d once. After this, it is no
	//longer a CUDA device pointer, and cant be cudaFree()'d again.
	cudaFree(data_d);
	return 0;
}
|
2,244 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/* demo to show the usage of share memory*/
#define DEBUG
typedef float dataType;
// Abort the program with a file/line diagnostic if a CUDA call failed.
void checkCudaError(cudaError_t error, const char* filename, const int linenum)
{
    if (error == cudaSuccess)
        return;
    printf("File: %s, line: %d, CUDA error: %s\n", filename, linenum, cudaGetErrorString(error));
    exit(EXIT_FAILURE);
}
#define CHECK_CUDA_ERROR(error) checkCudaError(error, __FILE__, __LINE__)
#define BLOCK_SIZE 16
// Row-major matrix view. `stride` is the leading dimension (elements per
// storage row), which may exceed `width` when the struct views a sub-matrix.
typedef struct Matrix
{
    int width;           // number of columns
    int height;          // number of rows
    int stride;          // leading dimension, in elements
    dataType* element;   // element storage (host or device pointer)
}Matrix;
// Read element (row, col), honoring the matrix's row stride.
__device__ dataType getElement(Matrix x, int row, int col)
{
    const int idx = col + row * x.stride;
    return x.element[idx];
}
// Write `val` into element (row, col), honoring the matrix's row stride.
__device__ void setElement(Matrix x, int row, int col, dataType val)
{
    const int idx = row * x.stride + col;
    x.element[idx] = val;
}
// Return a BLOCK_SIZE x BLOCK_SIZE sub-matrix view at tile (row, col) of x.
// Tiles on the right/bottom edge are clipped to the remaining width/height
// (x.width % BLOCK_SIZE, x.height % BLOCK_SIZE). The view shares x's
// storage and stride; no data is copied.
__device__ Matrix getSubMatrix(Matrix x, int row, int col)
{
    Matrix subX;
    // One-past-the-end row/column of this tile in the parent matrix.
    int row_ = (row + 1) * BLOCK_SIZE, col_ = (col + 1) * BLOCK_SIZE;
    // Clip the tile extent when it would run past the parent's bounds.
    subX.height = ((row_ <= x.height) ? BLOCK_SIZE : x.height%BLOCK_SIZE);
    subX.width = ((col_ <= x.width) ? BLOCK_SIZE : x.width%BLOCK_SIZE);
    //subX.height = subX.width = BLOCK_SIZE;
    subX.stride = x.stride;
    // Point at the tile's top-left element inside the parent's storage.
    subX.element = &x.element[x.stride * row * BLOCK_SIZE + col * BLOCK_SIZE];
    return subX;
}
// Integer ceiling division (intended for non-negative operands).
__device__ int divCeil(int x, int y)
{
    int q = x / y;
    if (x % y != 0)
        ++q;
    return q;
}
// no shared memory
// Naive matrix multiply z = x * y: one thread per output element, reading
// straight from global memory (no shared-memory tiling).
__global__ void MatMulKernel(Matrix x, Matrix y, Matrix z)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    // Guard the ragged edge of the grid.
    if (col >= z.width || row >= z.height)
        return;
    dataType acc = 0;
    for (int t = 0; t < x.width; ++t)
        acc += getElement(x, row, t) * getElement(y, t, col);
    setElement(z, row, col, acc);
}
// shared memory
// Tiled matrix multiply z = x * y using shared-memory staging; one thread
// block computes one BLOCK_SIZE x BLOCK_SIZE sub-matrix of z.
// NOTE(review): threads outside a partial output tile `return` before the
// __syncthreads() calls inside the loop. A barrier in divergent control
// flow is undefined behavior in CUDA; this only holds together when all
// dimensions are multiples of BLOCK_SIZE (as in main's 32x32 case) —
// confirm before reusing with other sizes.
__global__ void MatMulSharedMemory1(Matrix x, Matrix y, Matrix z)
{
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    // Output sub-matrix this block is responsible for.
    Matrix subZ = getSubMatrix(z, blockRow, blockCol);
    int row = threadIdx.y;
    int col = threadIdx.x;
    if(row >= subZ.height || col >= subZ.width)
        return;
    dataType val = 0.0;
    int i = 0;
    // Number of tiles along the shared (inner) dimension.
    int size = divCeil(x.width, BLOCK_SIZE);
    for(; i < size; ++i){
        Matrix subX = getSubMatrix(x, blockRow, i);
        Matrix subY = getSubMatrix(y, i, blockCol);
        __shared__ dataType tmpX[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ dataType tmpY[BLOCK_SIZE][BLOCK_SIZE];
        // Stage one tile of x and one tile of y into shared memory.
        tmpX[row][col] = getElement(subX, row, col);
        tmpY[row][col] = getElement(subY, row, col);
        // Wait until the whole tile is loaded before consuming it.
        __syncthreads();
        int j = 0;
        for(;j < subX.width; ++j)
            val += tmpX[row][j] * tmpY[j][col];
        // Wait before the next iteration overwrites the tiles.
        __syncthreads();
    }
    setElement(subZ, row, col, val);
}
// CPU reference multiply of x * y; returns true iff the result buffer is
// bit-identical to z.element (exact comparison, as the GPU path computes
// the same float operations in the same order per element).
bool checkCorrectness(Matrix x, Matrix y, Matrix z)
{
    Matrix ref;
    ref.width = z.width;
    ref.height = z.height;
    ref.stride = z.stride;
    ref.element = (dataType*)malloc(sizeof(dataType) * ref.width * ref.height);
    for (int r = 0; r < ref.height; ++r) {
        for (int c = 0; c < ref.width; ++c) {
            dataType acc = 0;
            for (int t = 0; t < x.width; ++t)
                acc += x.element[r * x.stride + t] * y.element[t * y.stride + c];
            ref.element[c + r * ref.stride] = acc;
        }
    }
    const bool same =
        (memcmp(ref.element, z.element, ref.width * ref.height * sizeof(dataType)) == 0);
    free(ref.element);
    return same;
}
// Host driver: upload x and y, launch the shared-memory multiply into z,
// download the result, and verify it against the CPU reference.
// z.element must already point at a host buffer of z.width*z.height elements.
void MatMul(Matrix x, Matrix y, Matrix z)
{
    // Device-side mirrors of the three matrices (same shapes/strides,
    // device storage allocated below).
    Matrix dev_x, dev_y, dev_z;
    dev_x.width = x.width;
    dev_x.height = x.height;
    dev_x.stride = x.stride;
    dev_y.width = y.width;
    dev_y.height = y.height;
    dev_y.stride = y.stride;
    dev_z.width = z.width;
    dev_z.height = z.height;
    dev_z.stride = z.stride;
    // Upload x.
    int nByte = sizeof(dataType) * dev_x.width * dev_x.height;
    CHECK_CUDA_ERROR(cudaMalloc((void**)(&dev_x.element), nByte));
    CHECK_CUDA_ERROR(cudaMemcpy(dev_x.element, x.element, nByte, cudaMemcpyHostToDevice));
    // Upload y.
    nByte = sizeof(dataType) * dev_y.width * dev_y.height;
    CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_y.element, nByte));
    CHECK_CUDA_ERROR(cudaMemcpy(dev_y.element, y.element, nByte, cudaMemcpyHostToDevice));
    // Allocate z on the device (nByte now holds z's size for the later copy).
    nByte = sizeof(dataType) * dev_z.width * dev_z.height;
    CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_z.element, nByte));
    // One BLOCK_SIZE x BLOCK_SIZE block per output tile; round up for
    // sizes that are not multiples of BLOCK_SIZE.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(dev_z.width/dimBlock.x + (dev_z.width%dimBlock.x == 0 ? 0 : 1), dev_z.height/dimBlock.y + (dev_z.height%dimBlock.y == 0 ? 0 : 1));
    //MatMulKernel<<<dimGrid, dimBlock>>>(dev_x, dev_y, dev_z);
    MatMulSharedMemory1<<<dimGrid, dimBlock>>>(dev_x, dev_y, dev_z);
    // Blocking copy: implicitly waits for the kernel on the default stream.
    CHECK_CUDA_ERROR(cudaMemcpy(z.element, dev_z.element, nByte, cudaMemcpyDeviceToHost));
    if(checkCorrectness(x, y, z) == false)
        printf("Error occur\n");
    else
        printf("Correct\n");
    CHECK_CUDA_ERROR(cudaFree(dev_x.element));
    CHECK_CUDA_ERROR(cudaFree(dev_y.element));
    CHECK_CUDA_ERROR(cudaFree(dev_z.element));
}
// Fill the matrix with pseudo-random integer values in [0, 100).
void randomFillMatrix(Matrix x)
{
    // Fix: seed once. The original called srand(time(NULL)+i) inside the
    // loop for every element, which is slow and makes consecutive elements
    // strongly correlated (time(NULL) barely changes between iterations).
    srand((unsigned int)time(NULL));
    int size = x.width * x.height;
    for (int i = 0; i < size; ++i)
        x.element[i] = rand() % 100;
}
// Driver: multiply two random 32x32 matrices on the GPU and verify on CPU.
int main(void)
{
    const int dim = 32;
    Matrix x, y, z;
    x.width = x.height = x.stride = dim;
    y.width = y.height = y.stride = dim;
    z.width = z.height = z.stride = dim;
    const int nByte = sizeof(dataType) * dim * dim;
    x.element = (dataType*)malloc(nByte);
    y.element = (dataType*)malloc(nByte);
    z.element = (dataType*)malloc(nByte);
    randomFillMatrix(x);
    randomFillMatrix(y);
    MatMul(x, y, z);
    free(x.element);
    free(y.element);
    free(z.element);
    return 0;
}
|
2,245 | #include<bits/stdc++.h>
// Busy-work kernel: iterate a Fibonacci-style recurrence 2^29 times.
// The result is never stored anywhere observable; the kernel exists only
// to occupy the GPU for a while.
__global__
void gpufib(){
    double prev = 0, curr = 1;
    for (long long int step = 0; step < (1 << 29); ++step) {
        double next = prev + curr;
        prev = curr;
        curr = next;
    }
}
// Launch the busy-work kernel on a single thread, wait, report completion.
int main(){
    gpufib<<<1, 1>>>();
    cudaDeviceSynchronize();
    std::cout << "Done";
    return 0;
}
|
2,246 | #include <cuda_runtime.h>
#include <stdio.h>
#include <cuda.h>
// Single-thread kernel: *val = Euclidean (L2) norm of d_norm[0..n).
__global__ void check(double* d_norm, int n, double* val)
{
    double sumsq = 0;
    for (int i = 0; i < n; i++)
    {
        double v = d_norm[i];
        sumsq += v * v;
    }
    *val = sqrt(sumsq);
}
// One thread per row: d_norm[id] = (A * q)[id] - q[id], with d_A an n x n
// row-major matrix and d_q a vector of length n.
__global__ void calc_norm(double* d_A, double* d_q, int n, double* d_norm)
{
    const int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (row >= n)
        return;
    double dot = 0;
    const int base = row * n;
    for (int j = 0; j < n; j++)
        dot += d_A[base + j] * d_q[j];
    d_norm[row] = dot - d_q[row];
}
// q[id] = dot(row id of Q, v) for id in [0, n), where each row of Q occupies
// k+1 consecutive slots and only the first k are used. v (length k) is first
// staged into dynamic shared memory. Launch with shared-mem size >=
// k * sizeof(double). (Despite the name, this is a mat-vec, not a saxpy.)
__global__ void saxpy(double *Q, double* v ,double* q, int n, int k)
{
    extern __shared__ double sv[];
    // Thread 0 of each block copies v into shared memory serially.
    if(threadIdx.x == 0)
    {
        for(int i=0; i<k; i++) sv[i] = v[i];
    }
    // All threads wait for the staged copy before reading sv.
    __syncthreads();
    int id = (blockIdx.x*blockDim.x) + threadIdx.x;
    if(id < n){
        double temp = 0;
        // Row id starts at id*(k+1); dot its first k entries with sv.
        int i = id*(k+1), j = i+k , count=-1;
        for(; i<j; i++) temp += Q[i]*sv[++count];
        q[id] = temp;
    }
}
// Print an m x n column-major matrix with leading dimension lda.
// (`name` is accepted for signature compatibility but not printed.)
void printMatrix(int m, int n, const double*A, int lda, const char* name)
{
    for (int r = 0; r < m; r++)
    {
        for (int c = 0; c < n; c++)
            printf(" %f ", A[r + c * lda]);
        printf("\n");
    }
}
// One element per thread: d_v[id] = d_VT[id*k + k - 1 + id].
// NOTE(review): the index walks an unusual diagonal-like pattern through
// d_VT — presumably extracting a specific column/diagonal of a k-wide
// matrix; confirm the intended layout against the caller before reuse.
__global__ void get_v(double* d_VT, double* d_v, int k)
{
    int id = threadIdx.x;
    d_v[id] = d_VT[id*k +k-1+id];
}
// Zero-fill a row x col matrix serially from a single thread.
__global__ void initCudaMat(double* A, int row, int col)
{
    const int total = row * col;
    for (int i = 0; i < total; i++)
        A[i] = 0;
}
// Debug kernel: print a row x col row-major matrix, one row per line.
__global__ void printMatt(double* A, int row, int col)
{
    const int total = row * col;
    for (int z = 0; z < total; z++)
    {
        printf("%f ", A[z]);
        if ((z + 1) % col == 0)
            printf("\n");
    }
}
// Debug kernel: print the int the device pointer refers to.
__global__ void dev(int* col)
{
    printf("devinfo is %d\n", col[0]);
}
|
2,247 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <math.h>
#define N 1000000
// For each of the N points, set results[tid] to 1 when (x,y) lies inside
// (or on) the unit circle, 0 otherwise.
__global__ void counts(float *x, float *y, int *results)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
        return;
    const float r2 = x[tid] * x[tid] + y[tid] * y[tid];
    results[tid] = (r2 <= 1) ? 1 : 0;
}
// Monte Carlo estimate of pi: classify N uniform points in the unit square
// on the GPU, count those inside the quarter circle, print 4 * hits / N.
int main(void)
{
    int T = 500;  // threads per block
    int B = 2000; // blocks per grid (B * T == N == 1,000,000)
    int *dev_results, *host_results;
    float *dev_x;
    float *dev_y;
    // Host memory. Fix: the buffer holds ints, so size it with sizeof(int)
    // (the original used sizeof(float) — same bytes today, but wrong type).
    host_results = (int *) calloc(N, sizeof(int));
    // Device memory.
    cudaMalloc((void**)&dev_x, N * sizeof(float));
    cudaMalloc((void**)&dev_y, N * sizeof(float));
    cudaMalloc((void**)&dev_results, N * sizeof(int));
    // Random generator.
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
    // Generate random coordinates in [0, 1) directly on the device.
    curandGenerateUniform(gen, dev_x, N);
    curandGenerateUniform(gen, dev_y, N);
    // Timers.
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // Classify the points on the GPU.
    counts<<<B,T>>>(dev_x, dev_y, dev_results);
    // Stop the timer. Fix: the original recorded `stop` a second time after
    // cudaEventSynchronize, which re-arms the event and corrupts the
    // measured interval.
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Copy per-point flags back (blocking copy, no extra sync required).
    cudaMemcpy(host_results, dev_results, N*sizeof(int), cudaMemcpyDeviceToHost);
    // Tally the hits.
    int counts = 0;
    for(int i=0;i<N;i++)
    {
        if(host_results[i])
            counts++;
        printf("%d ", host_results[i]);
    }
    float pi = 4.0 * counts / N;
    printf("Pi: %1.10f\n", pi);
    printf("Execution Time: %1.10f\n", elapsedTime);
    // Cleanup.
    curandDestroyGenerator(gen);
    cudaFree(dev_x);
    cudaFree(dev_y);
    cudaFree(dev_results);
    free(host_results);
    return 0;
}
|
2,248 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <chrono>
// Read (AAPL, MSFT) price pairs from stdin until EOF, then compute the mean
// element-wise difference between the two series on the GPU with Thrust.
int main() {
    std::vector<double> stocks_a;
    std::vector<double> stocks_m;
    int n = 0;
    double stock_a;
    double stock_m;
    // Fix: test the extraction itself. The original tested std::cin BEFORE
    // reading, so the final failed read appended a stale pair and
    // over-counted n by one.
    while (std::cin >> stock_a >> stock_m) {
        n = n + 1;
        stocks_a.push_back(stock_a);
        stocks_m.push_back(stock_m);
    }
    // Guard against empty input (the original divided by n == 0).
    if (n == 0) {
        return 0;
    }
    thrust::device_vector<double> apple(stocks_a);
    thrust::device_vector<double> microsoft(stocks_m);
    thrust::device_vector<double> diff(n);
    // Element-wise difference: diff = apple - microsoft.
    thrust::transform(apple.begin(), apple.end(), microsoft.begin(), diff.begin(), thrust::minus<double>());
    // Fix: the init value must be 0.0 (double). With the original int 0,
    // thrust::reduce accumulates in int and truncates every partial sum.
    double preco_medio = thrust::reduce(diff.begin(), diff.end(), 0.0, thrust::plus<double>()) / n;
    std::cout << "media entre os precos da AAPL e MSFT: " << preco_medio << "\n";
    return 0;
}
2,249 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
//__global__ void AddVec(const float* A, const float* B, float* C, int N)
//{
// int i = blockDim.x * blockIdx.x + threadIdx.x;
// if (i < N)
// C[i] = A[i] + B[i];
//}
// Placeholder entry point; the vector-add kernel above is commented out.
int main()
{
    return 0;
}
2,250 | #include "includes.h"
// Block-level sum reduction with 8x unrolled pre-accumulation: each thread
// first folds 8 strided elements into g_idata[idx], then the block performs
// an in-place tree reduction over its 8*blockDim.x segment; the block's sum
// lands in g_odata[blockIdx.x]. Destroys g_idata.
// NOTE(review): a block whose 8-element span is not fully inside n skips the
// pre-accumulation but still runs the tree reduction over raw values —
// confirm callers guarantee n is a multiple of 8*blockDim.x.
__global__ void reduceUnrolling8New (int *g_idata, int *g_odata, unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x * 8;
    // unrolling 8: fold this thread's 8 strided elements into g_idata[idx]
    if (idx + 7 * blockDim.x < n)
    {
        int *ptr = g_idata + idx;
        int tmp = 0;
        // Increment tmp 8 times with values strided by blockDim.x
        for (int i = 0; i < 8; i++) {
            tmp += *ptr; ptr += blockDim.x;
        }
        g_idata[idx] = tmp;
    }
    __syncthreads();
    // in-place tree reduction in global memory over this block's segment
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
        {
            idata[tid] += idata[tid + stride];
        }
        // synchronize within threadblock
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
2,251 | #include "includes.h"
// Depthwise-convolution weight-gradient kernel: for each source element i
// (grid-stride over nthreads = batch*channels*src_h*src_w), accumulate
// src_data[i] * dst_grad[...] into channel c's kernel_sz x kernel_sz slice
// of weight_diff for every kernel tap (kh, kw) whose stride/padding maps
// source position (h, w) onto a valid output position.
// NOTE(review): many threads share the same channel and accumulate into
// weight_diff with a plain `+=` (no atomics) — this races unless the
// launch/partitioning guarantees one thread per channel slice; confirm.
__global__ void _bcnn_backward_depthwise_conv_weight_kernel( int nthreads, float *dst_grad, float *src_data, int batch_size, const int channels, int dst_h, int dst_w, const int src_h, const int src_w, int kernel_sz, int stride, int pad, float *weight_diff) {
    int i, n, c, h, w, kw, kh, h_out_s, w_out_s, h_out, w_out, offset;
    float *p_weight_diff = NULL;
    for (i = blockIdx.x * blockDim.x + threadIdx.x; i < nthreads;
         i += blockDim.x * gridDim.x) {
        // Decompose flat index i into (batch n, channel c, row h, col w).
        n = i / channels / src_h / src_w;
        c = (i / src_h / src_w) % channels;
        h = (i / src_w) % src_h;
        w = i % src_w;
        // Gradient slice for this channel's kernel.
        p_weight_diff = weight_diff + c * kernel_sz * kernel_sz;
        for (kh = 0; kh < kernel_sz; ++kh) {
            for (kw = 0; kw < kernel_sz; ++kw) {
                // Output position that would have read (h, w) via tap (kh, kw).
                h_out_s = h + pad - kh;
                w_out_s = w + pad - kw;
                if (((h_out_s % stride) == 0) && ((w_out_s % stride) == 0)) {
                    h_out = h_out_s / stride;
                    w_out = w_out_s / stride;
                    if ((h_out >= 0) && (h_out < dst_h) && (w_out >= 0) &&
                        (w_out < dst_w)) {
                        offset = ((n * channels + c) * dst_h + h_out) * dst_w +
                                 w_out;
                        *p_weight_diff += src_data[i] * dst_grad[offset];
                    }
                }
                // Advance to the next tap's gradient slot.
                ++p_weight_diff;
            }
        }
    }
}
// Minimal "virtual machine" state for the dispatch experiment below:
// `a` is the output index, `b` the output buffer.
struct VMState {
    int a;
    float *b;
};
// Number of table-dispatched ops each thread executes in kernel c().
static const int kOpsPerThread = 1;
// Seven tiny "opcodes": each writes its own id into b[a].
__device__ void a0(const VMState& vm) { vm.b[vm.a] = 0.; }
__device__ void a1(const VMState& vm) { vm.b[vm.a] = 1.; }
__device__ void a2(const VMState& vm) { vm.b[vm.a] = 2.; }
__device__ void a3(const VMState& vm) { vm.b[vm.a] = 3.; }
__device__ void a4(const VMState& vm) { vm.b[vm.a] = 4.; }
__device__ void a5(const VMState& vm) { vm.b[vm.a] = 5.; }
__device__ void a6(const VMState& vm) { vm.b[vm.a] = 6.; }
typedef void (*Fn)(const VMState&);
// Device-resident function-pointer dispatch table over the ops above.
__device__ static Fn table[7] = { &a0, &a1, &a2, &a3, &a4, &a5, &a6 };
// Dispatch kernel: each thread runs kOpsPerThread ops picked from `table`
// by threadIdx.x % 7; each op writes a constant into out[vm.a].
// NOTE(review): registers[8] is indexed by threadIdx.x, so any launch with
// blockDim.x > 8 reads/writes out of bounds — confirm the intended launch
// configuration, or size the array to the block dimension.
__global__ void c(float *out) {
    const int block_size = blockDim.y * blockDim.x;
    // Flat offset of this block's output region.
    const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * block_size * kOpsPerThread;
    int registers[8];
    // for (int i = 0; i < kOpsPerThread; ++i) {
    // }
    registers[threadIdx.x] = 0;
    VMState vm;
    vm.b = out;
    for (int i = 0; i < kOpsPerThread; ++i) {
        // registers[threadIdx.x] is always 0 here; it only forces the
        // compiler to keep the indexed load in the address computation.
        vm.a = offset + (i * block_size) + threadIdx.x + registers[threadIdx.x];
        // vm.a = offset + threadIdx.x * kOpsPerThread + i;
        table[threadIdx.x % 7](vm);
    }
}
|
// Piecewise-affine image warp with bilinear sampling, one thread per output
// pixel id in [0, idMax[0]). Each pixel belongs to triangle pixelTri[id];
// its source location is the barycentric combination (gamma, alpha, beta)
// of that triangle's three vertices (x[], y[], 1-based indices in tri[]),
// and the warped value is the bilinear interpolation of img at that point,
// written to warpedImg[indBase[id]].
// NOTE(review): idImg* = fTY + fTX * resImg1[0] indexes img column-major
// with resImg1[0] as the number of rows (MATLAB-style layout, consistent
// with all parameters arriving as double arrays) — confirm with the caller.
__global__ void cudaWarp(double * warpedImg, const double * indBase, const double * img, const double *resImg1, const double * tri, const double * nTri, const double * pixelTri, const double * x, const double * y, const double * alphas, const double * betas, const double * gammas, const double * idMax)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id < (int) idMax[0])
    {
        // Triangle containing this pixel and its three vertex indices
        // (stored 1-based in column-major tri, hence the -1 and nTri stride).
        int idTri = (int) pixelTri[id];
        int idTriVer1 = (int) tri[idTri] - 1;
        int idTriVer2 = (int) tri[idTri + (int) nTri[0]] - 1;
        int idTriVer3 = (int) tri[idTri + 2 * (int) nTri[0]] - 1;
        // Vertex coordinates, converted from 1-based to 0-based.
        float x1 = x[idTriVer1] - 1;
        float x2 = x[idTriVer2] - 1;
        float x3 = x[idTriVer3] - 1;
        float y1 = y[idTriVer1] - 1;
        float y2 = y[idTriVer2] - 1;
        float y3 = y[idTriVer3] - 1;
        // Barycentric weights for this pixel.
        float alpha = alphas[id];
        float beta = betas[id];
        float gamma = gammas[id];
        // Source sample position.
        float tX = x1 * gamma + x2 * alpha + x3 * beta;
        float tY = y1 * gamma + y2 * alpha + y3 * beta;
        // Integer corners and fractional offsets for bilinear interpolation.
        int fTX = floor(tX);
        int fTY = floor(tY);
        int fTX1 = fTX + 1;
        int fTY1 = fTY + 1;
        float dX = tX - fTX;
        float dY = tY - fTY;
        float dX1 = 1 - dX;
        float dY1 = 1 - dY;
        // Bilinear weights for the four surrounding pixels.
        float w1 = dX1 * dY1;
        float w2 = dX * dY1;
        float w3 = dX1 * dY ;
        float w4 = dX * dY ;
        // Flat indices of the four neighbors (column stride resImg1[0]).
        int aux1 = fTX * resImg1[0];
        int aux2 = fTX1 * resImg1[0];
        int idImg1 = fTY + aux1;
        int idImg2 = fTY + aux2;
        int idImg3 = fTY1 + aux1;
        int idImg4 = fTY1 + aux2;
        // Destination index for this pixel.
        int uv = (int) indBase[id];
        warpedImg[uv] = w1 * img[idImg1] + w2 * img[idImg2] + w3 * img[idImg3] + w4 * img[idImg4];
    }
}
2,254 | /**
* Simple example provided by NVIDIA for profiling and understanding GPU acceleration.
* Source: https://devblogs.nvidia.com/even-easier-introduction-cuda/
* Retrieved: 30 June 2018
*/
#include <iostream>
#include <math.h>
#include <stdio.h>
// Kernel function to add the elements of two arrays
// Serial add on the GPU: a single thread walks the whole array, y += x.
__global__
void add_1_thread(int n, float *x, float *y)
{
    int i = 0;
    while (i < n) {
        y[i] = x[i] + y[i];
        ++i;
    }
}
// One-block add: each thread starts at its lane and strides by blockDim.x.
__global__
void add_1_block(int n, float *x, float *y)
{
    const int first = threadIdx.x;
    const int step = blockDim.x;
    for (int i = first; i < n; i += step)
        y[i] = x[i] + y[i];
}
// Grid-stride add: works for any grid size, covering all n elements.
__global__
void add_grid(int n, float *x, float *y)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step)
        y[i] = x[i] + y[i];
}
// Adds two 1M-element arrays three ways (grid, single block, single thread).
// Usage: <prog> <numBlocks> <blockSize>
int main(int argc, char * argv[])
{
    // Fix: the original read argv[1]/argv[2] unconditionally and crashed
    // when launched without both arguments.
    if (argc < 3) {
        printf("usage: %s <numBlocks> <blockSize>\n", argv[0]);
        return 1;
    }
    int N = 1<<20;
    float *x, *y;
    // Allocate Unified Memory - accessible from CPU or GPU
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));
    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    // Run kernel on 1M elements on the GPU
    int blockSize = atoi(argv[2]);
    int numBlocks = atoi(argv[1]);
    int numThreads = numBlocks * blockSize;
    // Warn when the launch does not cover one thread per element (the
    // grid-stride kernel still processes everything either way).
    if (numThreads != N)
        printf("%d elements can't be processed by %d threads!\n",
               N, numThreads);
    add_grid<<<numBlocks, blockSize>>>(N, x, y);
    add_1_block<<<1, blockSize>>>(N, x, y);
    add_1_thread<<<1, 1>>>(N, x, y);
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    // Free memory
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
2,255 | #include<cuda.h>
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
float *hs_device, *gs_device;
double *hd_device, *gd_device;
const unsigned int SINGLE_PRECISION = 1;
const unsigned int DOUBLE_PRECISION = 0;
//generate matrix
// Allocate and fill a width x height matrix with uniform values in [0, 1].
// Caller owns the returned buffer and must free() it.
template<typename T>
T *GenMatrix(const unsigned int width, const unsigned int height)
{
    const unsigned int M_SIZE = width*height;
    unsigned int i = 0, j = 0;
    // Fix: size the allocation by the actual element type. The original
    // always used sizeof(double) — over-allocating for float and
    // under-allocating for any element type wider than double.
    T *matrix = (T*) malloc(M_SIZE * sizeof(T));
    for(i = 0 ;i < height; i++){
        for(j = 0 ;j < width; j++){
            matrix[i * width + j] = (rand()*1.0)/ RAND_MAX;
        }
    }
    return matrix;
}
//display matrix
// Print a width x height row-major matrix, tab-separated, one row per line.
// Always returns 1.
template<typename T>
int PrintMatrix(T *P, const unsigned int width, const unsigned int height)
{
    printf("\n");
    for (unsigned int r = 0; r < height; r++) {
        for (unsigned int c = 0; c < width; c++) {
            printf("%.3f\t", P[r * width + c]);
        }
        printf("\n");
    }
    return 1;
}
//Init data
// Allocate the global device buffers for the selected precision and upload
// the two width x height host matrices M and N into them.
// sp == SINGLE_PRECISION fills hs_device/gs_device (T expected = float);
// otherwise hd_device/gd_device (T expected = double).
template<typename T>
void Init_Cuda(T *M, T *N, const unsigned int width, const unsigned int height, const int sp)
{
    const unsigned int size = width*height*sizeof(T);
    // Allocate and upload into the precision-matched global pointers.
    if(sp==SINGLE_PRECISION){
        cudaMalloc((void**)&hs_device, size);
        cudaMemcpy(hs_device, M, size, cudaMemcpyHostToDevice);
        cudaMalloc((void**)&gs_device, size);
        cudaMemcpy(gs_device, N, size,cudaMemcpyHostToDevice);
    }
    else
    {
        cudaMalloc((void**)&hd_device, size);
        cudaMemcpy(hd_device, M, size, cudaMemcpyHostToDevice);
        cudaMalloc((void**)&gd_device, size);
        cudaMemcpy(gd_device, N, size,cudaMemcpyHostToDevice);
    }
}
//Free memory
// Release the device buffers allocated by Init_Cuda for the given precision.
void Free_Cuda(const int sp)
{
    if (sp == SINGLE_PRECISION) {
        cudaFree(hs_device);
        cudaFree(gs_device);
    } else {
        cudaFree(hd_device);
        cudaFree(gd_device);
    }
}
// Jacobi relaxation step: each interior cell of g becomes the average of
// its four neighbors in h; border cells are left untouched. Grid-stride
// loop over all width*height cells.
template<typename T>
__global__ void sor_kernel (T *h, T *g, const unsigned int width, const unsigned int height) {
    const int step = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < width * height; idx += step) {
        const int r = idx / width;
        const int c = idx % width;
        // Only interior cells are relaxed.
        if (r > 0 && r < height - 1 && c > 0 && c < width - 1) {
            g[r * width + c] = 0.25
                * (h[(r - 1) * width + c] + h[(r + 1) * width + c]
                 + h[r * width + (c - 1)] + h[r * width + (c + 1)]);
        }
    }
}
// CPU reference for the kernel above: Jacobi neighbor-average of h into g
// over the interior cells.
template<typename T>
void sor_cpu (T *h, T *g, const unsigned int width, const unsigned int height) {
    for (unsigned int i = 1; i < height - 1; i++) {
        for (unsigned int j = 1; j < width - 1; j++) {
            g[i * width + j] = 0.25 * (h[(i - 1) * width + j] + h[(i + 1) * width + j]
                                     + h[i * width + (j - 1)] + h[i * width + (j + 1)]);
        }
    }
}
// Compare the interior cells of KP and CP; returns 1 when every pair agrees
// within tolerance e, otherwise prints the first mismatch and returns 0.
template<typename T>
int Check(const T *KP, const T *CP, const unsigned int width, const unsigned int height)
{
    int i, j;
    float e = 0.001;
    int correct = 1;
    for (i = 1; i < height - 1; i++)
        for (j = 1; j < width - 1; j++)
        {
            // Fix: compare |diff| > e without calling abs(). The unqualified
            // abs() can bind to the int overload in host code, truncating a
            // sub-1.0 difference to 0 and masking real mismatches.
            T diff = KP[i * width + j] - CP[i * width + j];
            if (diff > e || diff < -e)
            {
                printf("%.5f %.5f\n", KP[i * width + j], CP[i * width + j]);
                return 0;
            }
        }
    return correct;
}
// Jacobi/SOR relaxation benchmark on a width x width grid.
// Usage: <prog> [matrix_size] [single|double] [divide_val] [num_threads]
int main(int argc, char *argv[]) {
    unsigned int width = 512;
    float *hs_host, *gs_host, *ks_host;   // single-precision host buffers
    double *hd_host, *gd_host, *kd_host;  // double-precision host buffers
    int single_double = SINGLE_PRECISION; // 1 single, 0 double
    cudaEvent_t start, stop;
    float elapsed_time_ms;
    int T = 128;
    dim3 block(T, 1, 1);
    dim3 grid(((width * width) + T - 1) / T, 1, 1);
    if (argc != 5) /* argc should be 4 for correct execution */
    {
        printf("Wrong parameters. Please use the following format for running.\n");
        printf(" Usage: %s %s %s %s %s", argv[0], "[matrix_size]", "[single|double]", "[divide_val]", "[num_threads]\n");
        exit(EXIT_FAILURE);
    } else {
        // Nonzero second argument selects single precision.
        if (atoi(argv[2]) != 0)
            single_double = SINGLE_PRECISION;
        else
            single_double = DOUBLE_PRECISION;
        width = atoi(argv[1]);
        block.x = atoi(argv[4]);
        // divide_val shrinks the grid so each thread handles several cells.
        grid.x = ((width * width) / atoi(argv[3]) + block.x - 1) / block.x;
    }
    // Allocate + upload input matrices for the selected precision.
    if (single_double == SINGLE_PRECISION) {
        hs_host = GenMatrix<float>(width, width);
        gs_host = GenMatrix<float>(width, width);
        ks_host = GenMatrix<float>(width, width);
        Init_Cuda<float>(hs_host, gs_host, width, width, SINGLE_PRECISION);
    } else {
        hd_host = GenMatrix<double>(width, width);
        gd_host = GenMatrix<double>(width, width);
        kd_host = GenMatrix<double>(width, width);
        Init_Cuda<double>(hd_host, gd_host, width, width, DOUBLE_PRECISION);
    }
    cudaEventCreate(&start); // instrument code to measure start time
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaEventSynchronize(start);
    if (single_double == SINGLE_PRECISION) {
        sor_kernel<float><<<grid, block>>>(hs_device, gs_device, width, width);
        cudaMemcpy(gs_host, gs_device, width * width * sizeof(float), cudaMemcpyDeviceToHost);
    } else {
        sor_kernel<double><<<grid, block>>>(hd_device, gd_device, width, width);
        // Fix: the original copied width*width*sizeof(float) bytes here,
        // which is only half of the double-precision result buffer.
        cudaMemcpy(gd_host, gd_device, width * width * sizeof(double), cudaMemcpyDeviceToHost);
    }
    cudaEventRecord(stop, 0); // instrument code to measure end time
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time_ms, start, stop);
    printf("[ %s ][ %4dx%4d ][ %10d blocks ][ %5d threads ]\t>\t[ %7.3f (ms) ]\n", ((single_double==SINGLE_PRECISION)?"Single Precision":"Double Precision"), width, width, grid.x, block.x, elapsed_time_ms);
    // Optional CPU verification (sor_cpu + Check) left disabled, as before.
    if (single_double == SINGLE_PRECISION) {
        free(hs_host);
        free(gs_host);
        free(ks_host);
        Free_Cuda(SINGLE_PRECISION);
    } else {
        free(hd_host);
        free(gd_host);
        free(kd_host);
        Free_Cuda(DOUBLE_PRECISION);
    }
    return 0;
}
|
2,256 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand.h"
#include <cstdio>
#include <ctime>
// global counter to count points that fall into circle
__device__ int dnum = 0;
// Bump the global hit counter `dnum` for every point strictly inside the
// unit circle. (The original atomically added 0 for misses; skipping the
// atomic entirely leaves the counter in the same state.)
__global__ void countPoints(float* xs, float* ys) {
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    const float px = xs[idx];
    const float py = ys[idx];
    if (px * px + py * py < 1.0f)
        atomicAdd(&dnum, 1);
}
// Monte Carlo pi: generate count random points on the device with cuRAND,
// count hits inside the unit circle, and print 4 * hits / count.
int main() {
    // number of points that we're going to generate
    const int count = 512*512; // 262144 (matches the <<<512,512>>> launch)
    const int size = count * sizeof(float);
    // Last-seen status codes (note: never inspected until the return value).
    cudaError_t cudaStatus;
    curandStatus_t curandStatus;
    // random number generator
    curandGenerator_t gen;
    // initialize random number generator, seeded from the wall clock
    curandStatus = curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32);
    curandSetPseudoRandomGeneratorSeed(gen, time(0));
    // allocate memory on device for (x,y) coordinates of points
    float *x, *y;
    cudaStatus = cudaMalloc(&x, size);
    cudaStatus = cudaMalloc(&y, size);
    // generate a bunch of random numbers for x and y between [0, 1]
    curandStatus = curandGenerateUniform(gen, x, count);
    curandStatus = curandGenerateUniform(gen, y, count);
    // count the points that fall inside the circle
    countPoints<<<512,512>>>(x, y);
    // copy the result back to host
    int hnum;
    // cudaMemcpyFromSymbol takes the __device__ variable itself as its
    // "symbol" argument — the runtime resolves its device address. &dnum
    // would be the host-side address of the symbol object, which is why
    // passing it does not work. The copy is synchronous on the default
    // stream, so it is ordered after the kernel.
    cudaMemcpyFromSymbol(&hnum, dnum, sizeof(int));
    cudaFree(x);
    cudaFree(y);
    // print result
    float pi = 4.0f * ((float)hnum / (float)count);
    printf("pi is approximately %f\n", pi);
    return cudaStatus | curandStatus;
    // compile with `nvcc -lcurand monte_carlo_pi.cu` to include curand lib
}
2,257 | #include "includes.h"
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
// One Game-of-Life generation on a toroidal width x height board of 0/1
// cells: d_src is read, d_dst written. Each block stages its tile plus a
// one-cell halo (with wrap-around) into dynamic shared memory; launch with
// (blockDim.x+2)*(blockDim.y+2) bytes of shared memory.
// NOTE(review): a dead cell with count != 3 never writes d_dst[glob_idx],
// so d_dst keeps whatever it held before — this assumes the caller zeroes
// (or reuses) d_dst appropriately between generations; confirm.
__global__ void gameOfLifeKernel(unsigned char* d_src, unsigned char* d_dst, const size_t width, const size_t height) {
    extern __shared__ unsigned char board_sh[];
    size_t glob_x = blockDim.x * blockIdx.x + threadIdx.x;
    size_t glob_y = blockDim.y * blockIdx.y + threadIdx.y;
    size_t glob_idx = glob_y * width + glob_x;
    // Linear thread index inside the block, used to stripe the halo load.
    size_t index = blockDim.x * threadIdx.y + threadIdx.x;
    int share_width = blockDim.x + 2;
    int share_height = blockDim.y + 2;
    int share_size = share_width * share_height;
    bool isActive = (glob_x < width && glob_y < height);
    // Cooperative load of tile + halo; edges wrap around the board.
    for (int share_idx = index; share_idx < share_size; share_idx += (blockDim.x * blockDim.y)) {
        int x_img = (blockDim.x * blockIdx.x - 1) + (share_idx % share_width);
        int y_img = (blockDim.y * blockIdx.y - 1) + (share_idx / share_width);
        if (x_img < 0) {
            x_img = width - 1;
        } else if (x_img > width - 1) {
            x_img = 0;
        }
        if (y_img < 0) {
            y_img = height - 1;
        } else if (y_img > height - 1) {
            y_img = 0;
        }
        board_sh[share_idx] = d_src[width * y_img + x_img];
    }
    // All threads reach this barrier (the loop above has no early exit).
    __syncthreads();
    if (isActive) {
        // This thread's cell sits at (+1, +1) inside the haloed tile.
        unsigned char me = board_sh[share_width * (threadIdx.y + 1) + threadIdx.x + 1];
        // Count the eight neighbors.
        int count = board_sh[share_width * (threadIdx.y) + threadIdx.x];
        count += board_sh[share_width * (threadIdx.y) + threadIdx.x + 1];
        count += board_sh[share_width * (threadIdx.y) + threadIdx.x + 2];
        count += board_sh[share_width * (threadIdx.y + 1) + threadIdx.x];
        count += board_sh[share_width * (threadIdx.y + 1) + threadIdx.x + 2];
        count += board_sh[share_width * (threadIdx.y + 2) + threadIdx.x];
        count += board_sh[share_width * (threadIdx.y + 2) + threadIdx.x + 1];
        count += board_sh[share_width * (threadIdx.y + 2) + threadIdx.x + 2];
        // Game of life rules: live cell survives with 2 or 3 neighbors,
        // dead cell is born with exactly 3.
        if (me == 1) {
            if (count < 2) {
                d_dst[glob_idx] = 0;
            }
            else if (count < 4) {
                d_dst[glob_idx] = 1;
            }
            else {
                d_dst[glob_idx] = 0;
            }
        } else {
            if (count == 3) {
                d_dst[glob_idx] = 1;
            }
        }
    }
}
2,258 | #include <cuda.h>
#include <stdio.h>
#include <string.h>
#define ITERS 32768
// Return a newly malloc'd string equal to s1 followed by s2; the caller
// owns the result and must free() it. Returns NULL if allocation fails
// (the original's own comment admitted the missing check).
char* concat(const char *s1, const char *s2)
{
    char *result = (char*)malloc(strlen(s1) + strlen(s2) + 1); // +1 for the null-terminator
    if (result == NULL)
        return NULL;
    strcpy(result, s1);
    strcat(result, s2);
    return result;
}
#define CUDA_SAFE_CALL( call) { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
// Create both timing events and record the start event on stream 0.
void initializeEvents(cudaEvent_t *start, cudaEvent_t *stop){
    CUDA_SAFE_CALL(cudaEventCreate(start));
    CUDA_SAFE_CALL(cudaEventCreate(stop));
    CUDA_SAFE_CALL(cudaEventRecord(*start, 0));
}
// Record and wait on the stop event, return the elapsed milliseconds since
// `start`, and destroy both events.
float finalizeEvents(cudaEvent_t start, cudaEvent_t stop){
    CUDA_SAFE_CALL(cudaGetLastError());
    CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
    CUDA_SAFE_CALL(cudaEventSynchronize(stop));
    float elapsed_ms = 0.0f;
    CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsed_ms, start, stop));
    CUDA_SAFE_CALL(cudaEventDestroy(start));
    CUDA_SAFE_CALL(cudaEventDestroy(stop));
    return elapsed_ms;
}
// Micro-benchmark driver: loads "<name>.cubin" via the CUDA driver API,
// launches its "kern" entry with `scal` and a 32-int output buffer, then
// reports the clock count written by the kernel, derived per-instruction
// latency, and throughput. `type_size` = number of 4-byte words per element,
// `threads` = threads assumed to store data (used only in the report).
// NOTE(review): `file_name` is never freed, and cuModuleLoad /
// cuLaunchKernel return codes are not checked — acceptable for a throwaway
// benchmark, worth fixing for reuse.
template<typename T>
void run(char * name, T scal, int type_size, int threads){
    char * file_name = concat(name, ".cubin");
    int *output;
    cudaMalloc((void**)&output, sizeof(int)*32);
    cudaMemset(output, 0, 32*sizeof(int));
    CUmodule module;
    CUfunction kernel;
    // Driver-API load of the prebuilt cubin and its "kern" entry point.
    cuModuleLoad(&module, file_name);
    cuModuleGetFunction(&kernel, module, "kern");
    int blk_size = 32;
    int total_blks = 1;//size/blk_size;
    // Dynamic shared memory: one element (type_size floats) per thread.
    int sh_mem_size = blk_size*sizeof(float)*type_size;
    void * args[2] = {&scal, &output};
    //cudaEvent_t start, stop;
    //initializeEvents(&start, &stop);
    cuLaunchKernel(kernel, total_blks, 1, 1,
                   blk_size, 1, 1,
                   sh_mem_size, 0, args, 0);
    //float krn_time_shmem_32b = finalizeEvents(start, stop);
    int *output_h = (int*)malloc(sizeof(int)*32);
    // Blocking copy doubles as synchronization with the launch above.
    cudaMemcpy(output_h, output, sizeof(int)*32, cudaMemcpyDeviceToHost);
    /*for(int i=0; i<32; i++){
    printf("%d ", output_h[i]);
    }printf("\n");*/
    printf("%s took %d clocks \n", name, output_h[0]);
    // 128*128 = per-thread workload assumed by the cubin kernels.
    double clocks_instr = (float)output_h[0]/(128.0*128.0); // wokload of a thread
    printf("Each instruction takes %.2f clocks.\n", clocks_instr);
    printf("Throughput %.2f bytes/cycle.\n\n", ((double)threads*128*128*type_size*4)/output_h[0]); // Size of information stores divided by the number of threads of the latest thread
    cudaFree(output);
    free(output_h);
}
// Run the shared-memory store benchmarks at 32-, 64-, and 128-bit widths.
int main(){
    // 32-bit stores.
    float scal = 4;
    run("sts32", scal, 1, 32);
    printf("\n");
    // 64-bit stores via float2.
    float2 scal2;
    scal2.x = scal2.y = 4;
    run("sts64", scal2, 2, 32);
    printf("\n");
    // 128-bit stores via float4.
    float4 scal4;
    scal4.x = scal4.y = scal4.z = scal4.w = 4;
    // No thread-divergence
    run("sts128_0", scal4, 4, 32);
    printf("\n");
    // Only half of the threads store data
    run("sts128", scal4, 4, 16);
    return 0;
}
|
2,259 | #include "assignments.cuh"
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
//The two different definitions seem to have no noticable performance differnce.
//abs(a) has a tiiiny performance drop vs the other, but it's not worth being so hw-implementation specific
//for it.
//#define FAST_32_BIT_ABS(a)((((a)>>31)^a)-((a)>>31))
#define FAST_32_BIT_ABS(a) abs(a)
//Exploits the fact LIT_TRUE is 01 and LIT_FALSE is 10.
//
//Right side: ((l & 1) ^ (lit < 0))
//The LSB of LIT_TRUE and LIT_FALSE are true and false respectively.
//lit<0 is 1 if lit is negated, 0 otherwise, l XOR'd with (lit<0) will yield
//0^1 when not negated and true, and 1^0 when negated and false.
//
//Left side: ((l>>1) ^ (l & 1))
//To capture the other cases (LIT_ERROR=11, LIT_UNSET=00) and return 0 in both,
//we get the two bits of error and unset as separate bits, and XOR them together
//resulting in 1 only if LIT_TRUE or LIT_FALSE.
//
//Note: On compute capability 7.X to 8.6 (currently), we get 64 "results per clock cycle per multiprocessor" on
//AND OR and XOR: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#arithmetic-instructions__throughput-native-arithmetic-instructions
// Returns 1 iff literal `lit` is satisfied under assignment `a`: the 2-bit
// value is LIT_TRUE or LIT_FALSE (not UNSET/ERROR) and its polarity matches
// the sign of `lit`. Branchless; the derivation of the bit trickery is in
// the comment block above this function.
__device__ __host__ int is_lit_sated(const assignment_t* const a, const int32_t lit) {
    int l = get_lit(a, lit);
    // Left factor: 1 only for LIT_TRUE(01)/LIT_FALSE(10).
    // Right factor: polarity bit XOR negation flag.
    return ((l >> 1) ^ (l & 1)) & ((l & 1) ^ (lit < 0));
}
// Like is_lit_sated, but when the literal is LIT_UNSET in `a`, falls back to
// the per-variable bit in `altBWAssignment` (bit lit-1 set => variable true):
// a positive unset literal is satisfied when its bit is 1, a negative one
// when its bit is 0. Branchless combination of the two cases.
__device__ __host__ int is_lit_sated_complete(const assignment_t* const a, const int32_t lit, const uint64_t altBWAssignment) {
    int l = get_lit(a, lit);
    //TODO: lit<0 appears to be branching (according to visual profiler on the 1080's) so
    //instead, try (i>>31)&0x1 to get the top bit. If it's 1 it's negative, otherwise positive. Relies on how
    //the numbers are repsesented!
    // First term: unset in `a`, decided by the bitwise fallback assignment.
    // Second term: set in `a`, decided exactly as in is_lit_sated.
    return ((l == LIT_UNSET) & (((lit < 0) & (~((altBWAssignment >> ((-lit) - 1)) & 1))) | ((lit > 0) & (((altBWAssignment >> (lit - 1)) & 1))))) | ((l != LIT_UNSET) & ((l >> 1) ^ (l & 1)) & ((l & 1) ^ (lit < 0)));
}
// Reset every literal in `a` (capacity litCount) to LIT_UNSET.
// Returns 0 when `a` is NULL, 1 on success.
__device__ __host__ int clear_assignment(assignment_t* a, uint32_t litCount) {
    if (!a)
        return 0;
    //Abuse the fact LIT_UNSET is 0 and zero out memory. If LIT_UNSET changes, then:
    //uint32_t mask = LIT_UNSET | (LIT_UNSET<<2) | (LIT_UNSET<<4) | (LIT_UNSET<<6) | (LIT_UNSET<<8) | (LIT_UNSET<<10) | (LIT_UNSET<<10) | ...
    memset(a, 0, sizeof(assignment_t)* ASSIGNMENT_COUNT(litCount));
    return 1;
}
// Extract the 2-bit value stored for variable |lit| (1-based) from the
// packed assignment array.
__device__ __host__ litval_t get_lit(const assignment_t* a, int32_t lit) {
    const uint32_t var = FAST_32_BIT_ABS(lit) - 1;      // 0-based variable index
    const uint32_t word = var / LITS_PER_WORD;
    const uint32_t slot = var % LITS_PER_WORD;
    return (litval_t)((a[word] >> (slot * 2)) & 3);
}
// Overwrite the 2-bit value stored for variable |lit| (1-based) in the
// packed assignment array.
__device__ __host__ void set_lit(assignment_t* a, int32_t lit, litval_t value) {
    const uint32_t var = FAST_32_BIT_ABS(lit) - 1;      // 0-based variable index
    const uint32_t word = var / LITS_PER_WORD;
    const uint32_t shifts = (var % LITS_PER_WORD) * 2;
    a[word] = (a[word] & ~(3 << shifts)) | (value << shifts);
}
|
2,260 | extern "C"
{
__global__ void vsign(const int n, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<n)
{
if (a[i]<0)
{b[i]=-1.0;}
else
{if (a[i]>0)
{b[i]=1.0;}
else
{b[i]=0.0;}
}
}
}
} |
2,261 | #include <iostream>
#include <fstream>
#include <math.h>
#include <cstdio>
#include <ctime>
#include <assert.h> /* assert */
using namespace std;
//0 3 6
//1 4 7
//2 5 8
// row idx: i
// col idx: j
//Masked 3x3 convolution over the interior cells of an N x N grid.
//Each interior input cell (i,j) blends its four diagonal and four side
//neighbours: mask-weighted parts use diag_coef_1/side_coef_1, the unmasked
//remainder uses diag_coef_2/side_coef_2; side neighbours average the two
//adjacent mask cells. The result is written shifted by (-1,-1).
//NOTE(review): mask_tensor is indexed with stride N here; the caller must
//supply an N-stride mask buffer.
//BUG FIX: the side_coef_1 term for the south neighbour was written as
//input_tensor[(i+1), j] -- a comma expression equal to input_tensor[j].
//It now correctly addresses row i+1: input_tensor[(i+1)*N + j].
//(Dead commented-out variant and the unused *_coef_diff locals removed.)
__global__
void _mask_conv(float diag_coef_1, float diag_coef_2, float side_coef_1, float side_coef_2, int N, float *input_tensor, float *mask_tensor, float *output_tensor)
{
    int index_x = blockIdx.x * blockDim.x + threadIdx.x;
    int index_y = blockIdx.y * blockDim.y + threadIdx.y;
    int stride_x = blockDim.x * gridDim.x;
    int stride_y = blockDim.y * gridDim.y;
    // Grid-stride loops over the interior (1..N-2 in each dimension).
    for (int i = index_x+1; i < N-1; i += stride_x)
    {
        for (int j = index_y+1; j < N-1; j += stride_y)
        {
            output_tensor[(i-1) *N+ j-1] = input_tensor[(i-1)*N+ j-1] * mask_tensor[(i-1) *N+ j-1] *diag_coef_1
            + input_tensor[(i-1) *N+ j+1] * mask_tensor[(i-1) *N+ j] *diag_coef_1
            + input_tensor[(i+1) *N+ j-1] * mask_tensor[(i) *N+ j-1] *diag_coef_1
            + input_tensor[(i+1) *N+ j+1] * mask_tensor[(i) *N+ j] *diag_coef_1
            + input_tensor[(i-1) *N+ j] * (mask_tensor[(i-1) *N+ j-1] + mask_tensor[(i-1) *N+ j]) / 2. *side_coef_1
            + input_tensor[(i) *N+ j-1] * (mask_tensor[(i-1) *N+ j-1] + mask_tensor[(i) *N+ j-1]) / 2. *side_coef_1
            + input_tensor[(i) *N+ j + 1] * (mask_tensor[(i-1) *N+ j] + mask_tensor[(i) *N+ j]) / 2. *side_coef_1
            + input_tensor[(i+1) *N+ j] * (mask_tensor[(i) *N+ j-1] + mask_tensor[(i) *N+ j]) / 2. *side_coef_1
            + input_tensor[(i-1) *N+ j-1] * (1-mask_tensor[(i-1) *N+ j-1]) *diag_coef_2
            + input_tensor[(i-1) *N+ j+1] * (1-mask_tensor[(i-1) *N+ j])*diag_coef_2
            + input_tensor[(i+1) *N+ j-1] * (1-mask_tensor[(i) *N+ j-1]) *diag_coef_2
            + input_tensor[(i+1) *N+ j+1] * (1-mask_tensor[(i) *N+ j]) *diag_coef_2
            + input_tensor[(i-1) *N+ j] * (2-mask_tensor[(i-1) *N+ j-1] - mask_tensor[(i-1) *N+ j]) / 2. *side_coef_2
            + input_tensor[(i) *N+ j-1] * (2-mask_tensor[(i-1) *N+ j-1] - mask_tensor[(i) *N+ j-1]) / 2. *side_coef_2
            + input_tensor[(i) *N+ j + 1] * (2-mask_tensor[(i-1) *N+ j] - mask_tensor[(i) *N+ j]) / 2. *side_coef_2
            + input_tensor[(i+1) *N+ j] * (2-mask_tensor[(i) *N+ j-1] - mask_tensor[(i) *N+ j]) / 2. *side_coef_2;
        }
    }
}
// Launches the masked convolution kernel over a mat_row_num^2 grid with
// fixed coefficients, then blocks until it completes.
// FIX: the kernel launch is now checked with cudaGetLastError(); launch
// failures were previously silent.
void mask_conv_gpu(int mat_row_num, float *input_tensor, float *weights_tensor, float *output_tensor) {
    float diag_coef_1 = 16;
    float side_coef_1 = 16;
    float diag_coef_2 = 1;
    float side_coef_2 = 1;
    dim3 blocksize( 32, 32 );
    // Ceil-divide so partial tiles at the edges are still covered.
    int bx = (mat_row_num+blocksize.x-1)/blocksize.x ;
    int by = (mat_row_num+blocksize.y-1)/blocksize.y ;
    dim3 gridsize(bx, by);
    _mask_conv<<<gridsize,blocksize>>>(diag_coef_1, diag_coef_2, side_coef_1, side_coef_2, mat_row_num, input_tensor, weights_tensor, output_tensor);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "mask_conv kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
}
// CPU reference of the masked 3x3 convolution (see _mask_conv for the
// weighting scheme). Writes (N-2)x(N-2) results into output_tensor at
// stride N, shifted by (-1,-1). weights_tensor is read at stride N.
// BUG FIX: the side_coef_1 south-neighbour term read
// input_tensor[(i+1), j] (comma operator == input_tensor[j]); corrected
// to input_tensor[(i+1)*N + j]. Unused *_coef_diff locals removed.
void mask_conv_cpu(int N, float *input_tensor, float *weights_tensor, float * output_tensor)
{
    float diag_coef_1 = 16;
    float side_coef_1 = 16;
    float diag_coef_2 = 1;
    float side_coef_2 = 1;
    for (int i = 1; i < N-1; i++) {
        for (int j = 1; j < N-1; j++) {
            output_tensor[(i-1) *N+ j-1] = input_tensor[(i-1)*N+ j-1] * weights_tensor[(i-1) *N+ j-1] *diag_coef_1
            + input_tensor[(i-1) *N+ j+1] * weights_tensor[(i-1) *N+ j] *diag_coef_1
            + input_tensor[(i+1) *N+ j-1] * weights_tensor[(i) *N+ j-1] *diag_coef_1
            + input_tensor[(i+1) *N+ j+1] * weights_tensor[(i) *N+ j] *diag_coef_1
            + input_tensor[(i-1) *N+ j] * (weights_tensor[(i-1) *N+ j-1] + weights_tensor[(i-1) *N+ j]) / 2. *side_coef_1
            + input_tensor[(i) *N+ j-1] * (weights_tensor[(i-1) *N+ j-1] + weights_tensor[(i) *N+ j-1]) / 2. *side_coef_1
            + input_tensor[(i) *N+ j + 1] * (weights_tensor[(i-1) *N+ j] + weights_tensor[(i) *N+ j]) / 2. *side_coef_1
            + input_tensor[(i+1) *N+ j] * (weights_tensor[(i) *N+ j-1] + weights_tensor[(i) *N+ j]) / 2. *side_coef_1
            + input_tensor[(i-1) *N+ j-1] * (1-weights_tensor[(i-1) *N+ j-1]) *diag_coef_2
            + input_tensor[(i-1) *N+ j+1] * (1-weights_tensor[(i-1) *N+ j])*diag_coef_2
            + input_tensor[(i+1) *N+ j-1] * (1-weights_tensor[(i) *N+ j-1]) *diag_coef_2
            + input_tensor[(i+1) *N+ j+1] * (1-weights_tensor[(i) *N+ j]) *diag_coef_2
            + input_tensor[(i-1) *N+ j] * (2-weights_tensor[(i-1) *N+ j-1] - weights_tensor[(i-1) *N+ j]) / 2. *side_coef_2
            + input_tensor[(i) *N+ j-1] * (2-weights_tensor[(i-1) *N+ j-1] - weights_tensor[(i) *N+ j-1]) / 2. *side_coef_2
            + input_tensor[(i) *N+ j + 1] * (2-weights_tensor[(i-1) *N+ j] - weights_tensor[(i) *N+ j]) / 2. *side_coef_2
            + input_tensor[(i+1) *N+ j] * (2-weights_tensor[(i) *N+ j-1] - weights_tensor[(i) *N+ j]) / 2. *side_coef_2;
        }
    }
}
// Benchmarks the CPU and GPU masked convolutions on a 2048^2 grid of ones
// with the top half masked, and prints the elapsed wall-clock times.
int main(void)
{
    int N = 2048;
    float *resp, *mask, *load_gpu;
    float * load_cpu = (float*) malloc(N*N * sizeof(float));
    // Allocate Unified Memory – accessible from CPU or GPU.
    // BUG FIX: mask is indexed with stride N (mask[i*N+j]) here and inside
    // mask_conv_cpu/_mask_conv, so it must hold N*N elements; the original
    // (N-1)*(N-1) allocation was written and read out of bounds.
    cudaMallocManaged(&resp, N*N*sizeof(float));
    cudaMallocManaged(&mask, N*N*sizeof(float));
    cudaMallocManaged(&load_gpu, N*N*sizeof(float));
    // assume they are row first stored
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            resp[i*N+j] = 1.0;
            // FIX: logical && instead of bitwise & (same truth table here,
            // but the intent is a boolean conjunction).
            if (i<N-1 && j<N-1){
                if (i<N/2)
                    mask[i*N+j] = 1.0;
                else
                    mask[i*N+j] = 0.0;
            }
        }
    }
    // Run kernel on CPU
    std::clock_t start = std::clock();
    mask_conv_cpu(N, resp, mask, load_cpu);
    double duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC;
    std::cout<<"CPU time cost: "<<duration<<std::endl;
    // Run kernel on GPU
    start = std::clock();
    mask_conv_gpu(N, resp, mask, load_gpu);
    duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC;
    std::cout<<"GPU time cost: "<<duration<<std::endl;
    // Free memory
    cudaFree(resp);
    cudaFree(mask);
    // BUG FIX: load_cpu came from malloc(), so it must be released with
    // free(), not cudaFree().
    free(load_cpu);
    cudaFree(load_gpu);
    return 0;
}
//nvcc -arch=sm_61 -O3 mask_conv.cu -o mask_conv
/*
cpu gpu
256 0.001611 2.9e-05
1028 0.037622 4.2e-05
*/
//nvcc mask_conv.cu -o mask_conv.cu.o
|
2,262 | #include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#include <vector>
using namespace std;
const int GPUs[] = {0,1,2,3,4}; // If left blank all available GPUs will be used.
vector<int> g(GPUs, GPUs + sizeof(GPUs)/sizeof(int));
/* Allocates per-GPU source/destination buffers and timing events, enables
 * peer access between every GPU pair, and creates one stream per directed
 * pair in slot stream[i*g.size()+j] (owned by device g[i]).
 * BUG FIX: the original created stream[i*g.size()+j] a second time after
 * switching to device g[j], leaking the first stream and leaving the
 * reverse slot uninitialized; each directed slot is now created exactly
 * once, by the iteration that owns it. */
void configure(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d,
               vector<cudaEvent_t> &start, vector<cudaEvent_t> &stop,
               cudaStream_t stream[])
{
    for (int i=0; i<g.size(); i++)
    {
        cudaSetDevice(g[i]);
        cudaMalloc(&buffer_s[i], size);
        cudaMalloc(&buffer_d[i], size);
        cudaEventCreate(&start[i]);
        cudaEventCreate(&stop[i]);
        for (int j=0; j<g.size(); j++)
        {
            int access;
            if (i!=j)
            {
                cudaDeviceCanAccessPeer(&access, g[i], g[j]);
                if (access)
                {
                    cudaSetDevice(g[i]);
                    cudaDeviceEnablePeerAccess(g[j], 0);
                    cudaStreamCreate(&stream[i*g.size()+j]);
                    cudaDeviceSynchronize();
                    cudaSetDevice(g[j]);
                    cudaDeviceEnablePeerAccess(g[i], 0);
                    cudaDeviceSynchronize();
                }
            }
        }
    }
}
/* Releases everything configure() allocated: buffers, events, peer access
 * and the per-pair streams.
 * BUG FIX: the original destroyed only stream[i] (the first g.size()
 * slots, most of which were never the created handles); the per-pair
 * stream stream[i*g.size()+j] is now destroyed alongside disabling the
 * corresponding peer access. */
void reset(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d,
           vector<cudaEvent_t> &start, vector<cudaEvent_t> &stop,
           cudaStream_t stream[])
{
    for (int i=0; i<g.size(); i++)
    {
        cudaSetDevice(g[i]);
        cudaFree(buffer_s[i]);
        cudaFree(buffer_d[i]);
        cudaEventDestroy(start[i]);
        cudaEventDestroy(stop[i]);
        for (int j=0; j<g.size(); j++)
        {
            int access;
            if (i!=j)
            {
                cudaDeviceCanAccessPeer(&access, g[i], g[j]);
                if (access)
                {
                    cudaSetDevice(g[i]);
                    cudaDeviceDisablePeerAccess(g[j]);
                    cudaStreamDestroy(stream[i*g.size()+j]);
                    cudaDeviceSynchronize();
                    cudaSetDevice(g[j]);
                    cudaDeviceDisablePeerAccess(g[i]);
                    cudaDeviceSynchronize();
                }
            }
        }
    }
}
/* Every GPU asynchronously pulls a buffer from every other GPU at once,
 * then per-GPU elapsed time and bandwidth are reported.
 * Assumes configure() has already been called by the caller
 * (perf_analyze does so). BUG FIX: the original called configure() again
 * here, re-allocating buffers/events/streams into the same slots and
 * leaking the first set. */
void burst_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d,
                vector<cudaEvent_t> &start, vector<cudaEvent_t> &stop,
                cudaStream_t stream[])
{
    float bw[g.size()], time_taken[g.size()];
    printf("\nBurst copy: Every GPU is memcpy-ing to every other GPU\n");
    printf("%4d%8d%12s\n%4s%8s%12s\n%4s%8s%12s\n",
           1, 2,"n","^","^", "^","|","|","|");
    printf("3<-0->2 4<-1->3 ... %s<-%s->%s\n","q", "m", "p");
    printf("%4s%8s%12s\n%4s%8s%12s\n%4d%8d%12s\n\n",
           "|","|","|","v","v", "v",4,0,"r");
    for (int i=0; i<g.size(); i++)
    {
        cudaEventRecord(start[i]);
        for (int j=0; j<g.size(); j++)
            if (i!=j)
                cudaMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size,
                                    stream[i*g.size()+j]);
        cudaEventRecord(stop[i]);
    }
    for (int i=0; i<g.size(); i++)
    {
        cudaEventSynchronize(stop[i]);
        float time_ms;
        cudaEventElapsedTime(&time_ms,start[i],stop[i]);
        time_taken[i] = time_ms*1e3;
        bw[i] = (float)size*1000/time_ms/(1<<30);
    }
    printf("\t\tTime(ms)\tBandwidth(Gbps)\n");
    for (int i=0; i<g.size(); i++)
        printf("GPU%d\t\t%6.2f\t\t%6.2f\n",g[i], time_taken[i], bw[i]);
}
// Allocates per-GPU resources, runs the cyclic burst-copy benchmark, and
// tears everything down afterwards.
void perf_analyze(size_t size)
{
    const size_t devCount = g.size();
    vector<int*> buffer_s(devCount);
    vector<int*> buffer_d(devCount);
    vector<cudaEvent_t> start(devCount);
    vector<cudaEvent_t> stop(devCount);
    cudaStream_t stream[devCount * devCount];  // one stream per directed pair
    configure(size, buffer_s, buffer_d, start, stop, stream);
    burst_copy(size, buffer_s, buffer_d, start, stop, stream);
    reset(size, buffer_s, buffer_d, start, stop, stream);
}
// NVLink device<->device performance test: copies 1 GiB between every
// pair of GPUs listed in g (or every visible device when g is empty).
int main(int argc, char** argv)
{
    size_t size = (1<<30);
    if (!g.size())
    {
        int n;
        cudaGetDeviceCount(&n);
        // FIX: report the actual device count instead of a hard-coded "8".
        printf("Using all %d GPUs\n", n);
        for (int i=0; i<n; i++)
            g.push_back(i);
    }
    perf_analyze(size);
    return 0;
}
|
2,263 | /* 2013
* Maciej Szeptuch
* II UWr
* ----------
* bez shared, pozbywanie sie jak najwiecej pamieci + loop unrolling
* czasy okolo 10x szybciej niz na CPU.
word | gpu | cpu | distance
------------------|--------------------------------|--------------------------------|----------
kot | kot [ 13.373088] | kot [ 120.616997] | 0
czesc | czescy [ 17.563328] | czescy [ 182.584000] | 1
onomatopeja | onomatopeja [ 31.341473] | onomatopeja [ 367.598022] | 0
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <errno.h>
#define WORD_MAXLEN 16
#define STEP_0_THREADS 128
#define STEP_R_THREADS 128
#define __CUDA__
#define __CPU__
__device__ __host__ inline unsigned char MIN(const unsigned char a, const unsigned char b) { return a<b?a:b; }
__host__ unsigned char LevenshteinDistanceH(const unsigned char *const A, const unsigned char *const B);
__device__ unsigned char LevenshteinDistanceD(const unsigned char *const A, const unsigned char *const B);
unsigned char *loadDictionary(const char *const file, unsigned int &words, unsigned int &size);
void printHead(void);
#ifdef __CUDA__
__global__ void LevenshteinCUDA_STEP_0(const unsigned char *const dictionary, const unsigned int words, const unsigned char *const pattern, unsigned int *result);
__global__ void LevenshteinCUDA_STEP_R(const unsigned int *from, unsigned int *to, const unsigned int words);
#endif // __CUDA__
#ifdef __CPU__
unsigned int LevenshteinCPU(const unsigned char *const dictionary, const unsigned int words, const unsigned char *const pattern);
#endif // __CPU__
// For each word on the command line, finds the closest dictionary word by
// Levenshtein distance on both GPU and CPU and prints timings plus the
// final distance.
int main(const int argc, const char *const* argv)
{
    if(argc < 3)
    {
        fprintf(stderr, "usage: %s dictionary words...\nError: not enough arguments\n", argv[0]);
        return 1;
    }
    unsigned int dictionarySize = 0,
                 dictionaryWords = 0;
    unsigned char *dictionary = loadDictionary(argv[1], dictionaryWords, dictionarySize);
    if(!dictionary)
    {
        fprintf(stderr, "usage: %s dictionary words...\nError: loading dictionary: %s\n", argv[0], strerror(errno));
        return 2;
    }
#ifdef __CUDA__
    // GPU INIT: device copies of the dictionary, the query pattern, and
    // the per-word (index, distance) result pairs.
    unsigned char *cudaDictionary = NULL,
                  *cudaPattern = NULL;
    unsigned int *cudaResult = NULL;
    cudaMalloc(&cudaDictionary, dictionarySize * sizeof(unsigned char));
    cudaMemcpy(cudaDictionary, dictionary, dictionarySize * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMalloc(&cudaPattern, WORD_MAXLEN * sizeof(unsigned char));
    cudaMalloc(&cudaResult, dictionarySize * 2 * sizeof(unsigned int));
#endif // __CUDA__
    printHead();
    // FIX: int loop variable avoids the signed/unsigned comparison with argc.
    for(int a = 2; a < argc; ++ a)
    {
        unsigned int result[2] = {1 << 30, 1 << 30};
        unsigned char pattern[WORD_MAXLEN + 2] = {};
        memcpy(pattern, argv[a], strlen(argv[a]) * sizeof(unsigned char));
        printf(" %-16s | ", pattern);
#ifdef __CUDA__
        {
            // GPU TEST: distance kernel, then a log-depth min-reduction.
            cudaEvent_t start, end;
            cudaEventCreate(&start);
            cudaEventCreate(&end);
            cudaEventRecord(start, NULL);
            cudaMemcpy(cudaPattern, pattern, WORD_MAXLEN * sizeof(unsigned char), cudaMemcpyHostToDevice);
            LevenshteinCUDA_STEP_0<<<(dictionaryWords + STEP_0_THREADS - 1) / STEP_0_THREADS, STEP_0_THREADS>>> (cudaDictionary, dictionaryWords, cudaPattern, cudaResult);
            for(unsigned int size = STEP_R_THREADS; size < dictionaryWords; size <<= 1)
                LevenshteinCUDA_STEP_R<<<(dictionaryWords + size - 1) / size, STEP_R_THREADS>>> (cudaResult, cudaResult, dictionaryWords);
            cudaMemcpy(result, cudaResult, 2 * sizeof(unsigned int), cudaMemcpyDeviceToHost);
            cudaEventRecord(end, NULL);
            cudaEventSynchronize(end);
            float gputotal = 0;
            cudaEventElapsedTime(&gputotal, start, end);
            // BUG FIX: dropped the stray third argument (result[1]) that the
            // format string never consumed.
            printf("%-16s [%11.6f] | ", &dictionary[result[0] * WORD_MAXLEN], gputotal);
            // BUG FIX: events were created every iteration but never destroyed.
            cudaEventDestroy(start);
            cudaEventDestroy(end);
        }
#endif // __CUDA__
#ifdef __CPU__
        {
            // CPU TEST
            timeval start, end;
            gettimeofday(&start, NULL);
            result[0] = LevenshteinCPU(dictionary, dictionaryWords, pattern);
            gettimeofday(&end, NULL);
            float cputotal = (end.tv_sec - start.tv_sec) * 1000.0f + (end.tv_usec - start.tv_usec) / 1000.0f;
            printf("%-16s [%11.6f] | ", dictionary + result[0] * WORD_MAXLEN, cputotal);
        }
#endif // __CPU__
        printf("%u\n", LevenshteinDistanceH(pattern, dictionary + result[0] * WORD_MAXLEN));
    }
#ifdef __CUDA__
    // BUG FIX: cudaPattern and cudaResult were previously leaked.
    cudaFree(cudaDictionary);
    cudaFree(cudaPattern);
    cudaFree(cudaResult);
#endif // __CUDA__
    // BUG FIX: loadDictionary allocates with new[], so free() was undefined
    // behaviour; use delete[].
    delete[] dictionary;
    return 0;
}
/* Loads a word list (one word per line) into a flat array of
 * WORD_MAXLEN-byte zero-padded slots. Outputs the word count and total
 * byte size through the reference parameters. Returns NULL (with errno
 * set) when the file cannot be opened; otherwise a buffer allocated with
 * new[] that the caller must release with delete[].
 * BUG FIX: the original fgets()'d up to WORD_MAXLEN+8 bytes straight into
 * a WORD_MAXLEN-wide slot (overflowing into the next word, or past the
 * allocation for the last word) and unconditionally erased the last two
 * characters of each line (corrupting words in files without \r\n line
 * endings, and indexing at -1 on empty lines). Lines are now read into a
 * scratch buffer, trailing CR/LF stripped, and at most WORD_MAXLEN-1
 * characters copied so each slot stays NUL-terminated. */
unsigned char *loadDictionary(const char *const file, unsigned int &words, unsigned int &size)
{
    FILE *handle = fopen(file, "rb");
    if(!handle)
        return NULL;
    char buffer[64] = {};
    // First pass: count lines to size the flat dictionary.
    words = 0;
    while(fgets(buffer, 64, handle))
        ++ words;
    fseek(handle, 0, SEEK_SET);
    size = words * WORD_MAXLEN;
    unsigned char *dictionary = new unsigned char[size];
    unsigned char *current = dictionary;
    memset(dictionary, 0, size * sizeof(unsigned char));
    // Second pass: copy each word into its zero-padded slot.
    while(fgets(buffer, 64, handle) && current < dictionary + size)
    {
        size_t len = strlen(buffer);
        while(len > 0 && (buffer[len - 1] == '\n' || buffer[len - 1] == '\r'))
            buffer[-- len] = 0;
        if(len > WORD_MAXLEN - 1)
            len = WORD_MAXLEN - 1;
        memcpy(current, buffer, len);
        current += WORD_MAXLEN;
    }
    fclose(handle);
    return dictionary;
}
#ifdef __CPU__
// Linear scan of the dictionary: returns the index of the word with the
// smallest edit distance to the pattern (first minimum wins on ties).
unsigned int LevenshteinCPU(const unsigned char *const dictionary, const unsigned int words, const unsigned char *const pattern)
{
    unsigned int bestIdx = 0;
    unsigned int bestDist = 1 << 30;
    const unsigned char *entry = dictionary;
    for(unsigned int w = 0; w < words; ++ w, entry += WORD_MAXLEN)
    {
        const unsigned int dist = LevenshteinDistanceH(pattern, entry);
        if(dist < bestDist)
        {
            bestDist = dist;
            bestIdx = w;
        }
    }
    return bestIdx;
}
#endif // __CPU__
// Host-side edit distance between NUL-terminated strings A and B using the
// classic two-row dynamic programming recurrence (B must fit WORD_MAXLEN).
__host__ unsigned char LevenshteinDistanceH(const unsigned char *const A, const unsigned char *const B)
{
    const unsigned char lenB = strlen((const char *) B);
    unsigned char rows[2][WORD_MAXLEN + 1];
    unsigned char cur = 1;  // index of the row currently being filled
    // Base row: distance from the empty prefix of A is pure insertion cost.
    for(unsigned char col = 0; col <= lenB; ++ col)
        rows[0][col] = col;
    const unsigned char *pa = A;
    for(unsigned char row = 1; *pa > 0; ++ row, cur ^= 1, ++ pa)
    {
        rows[cur][0] = row;
        const unsigned char *pb = B;
        for(unsigned char col = 1; col <= lenB; ++ col, ++ pb)
            rows[cur][col] = MIN(rows[cur ^ 1][ col ] + 1,
                             MIN(rows[ cur ][col - 1] + 1,
                                 rows[cur ^ 1][col - 1] + (*pa != *pb)));
    }
    // cur was toggled once more after the last filled row.
    return rows[cur ^ 1][lenB];
}
// Device-side edit distance between NUL-terminated strings A and B.
// Mirrors LevenshteinDistanceH but computes B's length manually (no strlen
// on device) and runs the inner loop over the full WORD_MAXLEN width so the
// trip count is a compile-time constant and #pragma unroll can apply; the
// answer is still read at column sb.
__device__ unsigned char LevenshteinDistanceD(const unsigned char *const A, const unsigned char *const B)
{
    unsigned char sb = 0;
    unsigned char *AA = (unsigned char *) A,
                  *BB = (unsigned char *) B;
    // Manual strlen of B.
    while(*BB ++ > 0)
        ++ sb;
    unsigned char temp[2][WORD_MAXLEN + 1];
    unsigned char t = 1; // row currently being written
    // Base row: insertion-only distances.
    #pragma unroll
    for(unsigned char a = 0; a <= WORD_MAXLEN; ++ a)
        temp[0][a] = a;
    for(unsigned char a = 1; *AA > 0; ++ a, t ^= 1, ++ AA)
    {
        temp[t][0] = a;
        BB = (unsigned char *) B;
        #pragma unroll
        for(unsigned char b = 1; b <= WORD_MAXLEN; ++ b, ++ BB)
            temp[t][b] = MIN(temp[t ^ 1][ b ] + 1,
                         MIN(temp[ t ][b - 1] + 1,
                             temp[t ^ 1][b - 1] + (*AA != *BB)));
    }
    // t was toggled after the last filled row.
    return temp[t ^ 1][sb];
}
// Prints the result-table header; the columns mirror the per-word rows
// emitted in main() and adapt to which backends are compiled in.
void printHead(void)
{
    printf(" word | ");
#ifdef __CUDA__
    printf(" gpu | ");
#endif // __CUDA__
#ifdef __CPU__
    printf(" cpu | ");
#endif // __CPU__
    printf("distance\n");
    // Divider row.
    printf("------------------|-");
#ifdef __CUDA__
    printf("-------------------------------|-");
#endif // __CUDA__
#ifdef __CPU__
    printf("-------------------------------|-");
#endif // __CPU__
    printf("---------\n");
}
#ifdef __CUDA__
// One thread per dictionary word: records the (index, distance) pair for
// that word against the pattern into result[2*w], result[2*w+1].
__global__ void LevenshteinCUDA_STEP_0(const unsigned char *dictionary, const unsigned int words, const unsigned char *pattern, unsigned int *result)
{
    const int w = blockIdx.x * STEP_0_THREADS + threadIdx.x;
    if(w < words)
    {
        result[2 * w] = w;
        result[2 * w + 1] = LevenshteinDistanceD(pattern, dictionary + w * WORD_MAXLEN);
    }
}
// Block-level min-reduction over (index, distance) pairs: each block loads
// its slice of 'from' into shared memory, keeps the pair with the smallest
// distance, and thread 0 writes it to to[2*blockIdx.x .. +1].
// NOTE(review): threads with i >= words still read from[i*2] -- suspected
// out-of-bounds read in the last block's overshoot; and main() passes the
// SAME array as 'from' and 'to', so one block's writes can alias another
// block's reads within a launch -- verify against the launch loop in main().
__global__ void LevenshteinCUDA_STEP_R(const unsigned int *from, unsigned int *to, const unsigned int words)
{
    __shared__ unsigned int local_data[STEP_R_THREADS * 2];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + tid;
    // Stage this block's (index, distance) pairs into shared memory.
    local_data[tid * 2] = from[i * 2];
    local_data[tid * 2 + 1] = from[i * 2 + 1];
    __syncthreads();
    // Tree reduction keyed on the distance (slot *2+1).
    for(unsigned int s = 1; s < blockDim.x && tid + s < words; s <<= 1)
    {
        if(tid % (2 * s) == 0 && local_data[tid * 2 + 1] > local_data[(tid + s) * 2 + 1])
        {
            local_data[tid * 2] = local_data[(tid + s) * 2];
            local_data[tid * 2 + 1] = local_data[(tid + s) * 2 + 1];
        }
        __syncthreads();
    }
    // Thread 0 publishes the block's winning pair.
    if(tid == 0)
    {
        to[blockIdx.x * 2] = local_data[0];
        to[blockIdx.x * 2 + 1] = local_data[1];
    }
}
#endif // __CUDA__
|
2,264 | /* Write GPU code to perform the step(s) involved in counting sort.
Add additional kernels and device functions as needed. */
/* Counting-sort step 1: one thread per input element atomically tallies
   its value into the histogram. Assumes histogram is zero-initialized and
   every input value is a valid histogram index. */
__global__ void counting_sort_kernel(int *histogram, int *input_array, int length)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < length) {
        atomicAdd(&histogram[input_array[idx]], 1);
    }
}
/* Counting-sort step 2: converts histogram bins into an inclusive prefix
   sum. The scan is inherently sequential, so thread 0 does it alone.
   BUG FIX: the loop now starts at i = 1 -- the original started at 0 and
   read histogram[-1], an out-of-bounds access; bin 0 is already its own
   inclusive sum. */
__global__ void bins_to_inclusive(int *histogram, int length) {
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    if (tx == 0) {
        for (int i = 1; i < length; i++) {
            histogram[i] = histogram[i - 1] + histogram[i];
        }
    }
}
/* Counting-sort step 3: thread tx writes its bin value (tx itself) into
   every output slot of its bin, delimited by the inclusive prefix sums.
   Assumes sorted_array is zero-initialized (atomicAdd of tx onto 0) and
   exactly one thread per histogram bin is launched. */
__global__ void inclusive_to_sorted(int *histogram, int *sorted_array) {
    const int tx = blockIdx.x * blockDim.x + threadIdx.x;
    const int begin = (tx == 0) ? 0 : histogram[tx - 1];
    const int end = histogram[tx];
    for (int pos = begin; pos < end; pos++) {
        atomicAdd(&sorted_array[pos], tx);
    }
}
|
2,265 | /**
* @author Alejandro Brugarolas
* @since 2019-12
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define N 1024
// In-place sum reduction over a 2*blockDim.x element array, leaving the
// total in d_array[0]. Assumes a SINGLE block whose blockDim.x is exactly
// half the array length (as launched from main with N/2 threads): the
// first fold (i == blockDim.x) adds the upper half onto the lower half,
// then each halving of i folds the live prefix again.
__global__ void arrayReduction(float *d_array){
    int idx = threadIdx.x;
    int idx2 = 0;
    for (int i = blockDim.x; i >= 1 ; i /=2) {
        if (idx < i){
            idx2 = idx + i;
            d_array[idx] += d_array[idx2];
        }
        // Barrier is outside the if, so every thread reaches it (required).
        __syncthreads();
    }
}
// Fills an N-element array with ones, reduces it on the GPU, and prints
// the resulting sum (expected: N).
int main() {
    float *h_array;
    float *d_array;
    int memSize = sizeof(float) * N;
    h_array = (float*) malloc(memSize);
    cudaError_t error;
    error = cudaMalloc((void**)&d_array, memSize);
    if (error != cudaSuccess){
        fprintf(stderr, "Error al reservar memoria");
        // BUG FIX: release the host buffer on the error path too.
        free(h_array);
        return -1;
    }
    //Fills the array
    for (int i = 0; i < N; ++i) {
        h_array[i] = 1.0f;
    }
    //Transfers
    error = cudaMemcpy(d_array, h_array, memSize, cudaMemcpyHostToDevice);
    if (error != cudaSuccess){
        fprintf(stderr, "Error al transferir información.");
    }
    // One block of N/2 threads: the kernel folds the upper half onto the
    // lower half each step (see arrayReduction's launch assumptions).
    dim3 block (N / (N/2));
    dim3 thread (N/2);
    arrayReduction<<<block, thread>>>(d_array);
    // Only the first element (the total) is needed back on the host.
    cudaMemcpy(h_array, d_array, sizeof(float), cudaMemcpyDeviceToHost);
    printf("El resultado es: %f\n", h_array[0]);
    cudaFree(d_array);
    // BUG FIX: h_array was previously leaked.
    free(h_array);
    return 0;
}
2,266 | extern "C"
{
__global__ void gscale(const int lengthB, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthB)
{
b[i] = a[0]*b[i]; // REMEMBER ZERO INDEXING IN C LANGUAGE!!
}
}
} |
2,267 | #include <cuda.h>
#include <stdio.h>
#define NUM 4
// One thread per element: uB[i] = number of leading zero bits in uA[i].
__global__
void clzKernel(int *uA, int *uB) {
    const unsigned idx = threadIdx.x;
    uB[idx] = __clz(uA[idx]);
}
// One thread per element: uB[i] = 1-based position of the least-significant
// set bit of uA[i] (0 when no bit is set).
__global__
void ffsKernel(int *uA, int *uB) {
    const unsigned idx = threadIdx.x;
    uB[idx] = __ffs(uA[idx]);
}
// One thread per element: uB[i] = number of set bits in uA[i].
__global__
void popcKernel(unsigned *uA, unsigned *uB) {
    const unsigned idx = threadIdx.x;
    uB[idx] = __popc(uA[idx]);
}
// One thread per element: uB[i] = bit-reversal of the 32-bit value uA[i].
__global__
void brevKernel(unsigned *uA, unsigned *uB) {
    const unsigned idx = threadIdx.x;
    uB[idx] = __brev(uA[idx]);
}
// One thread per element: uD[i] = __byte_perm(uA[i], uB[i], uC[i]), i.e.
// bytes selected from the 64-bit concatenation of uA and uB per selector uC.
__global__
void bytePermKernel(unsigned *uA, unsigned *uB,
                    unsigned *uC, unsigned *uD) {
    const unsigned idx = threadIdx.x;
    uD[idx] = __byte_perm(uA[idx], uB[idx], uC[idx]);
}
/**
 * Demonstrates CUDA integer intrinsics (__clz, __ffs, __popc, __brev,
 * __byte_perm) on small arrays and prints the results.
 * FIX: the parameters were declared as (int argv, char **argc) -- names
 * swapped relative to their conventional meaning; renamed to the standard
 * (argc, argv). They are unused, so behaviour is unchanged.
 */
int main(int argc, char **argv) {
    int hA[NUM] = {1, 2, 3, 4};
    int hB[NUM] = {1, 2, 3, 4};
    int *dA, *dB;
    cudaMalloc((void**)&dA, sizeof(int)*NUM);
    cudaMalloc((void**)&dB, sizeof(int)*NUM);
    cudaMemcpy(dA, hA, sizeof(int)*NUM, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, sizeof(int)*NUM, cudaMemcpyHostToDevice);
    clzKernel<<<1, NUM>>>(dA, dB);
    cudaMemcpy(hB, dB, sizeof(int)*NUM, cudaMemcpyDeviceToHost);
    printf("After clz intrinsic: \n");
    for (unsigned i = 0; i < NUM; i++) {
        printf("hB[%u]: %d\n", i, hB[i]);
    }
    ffsKernel<<<1, NUM>>>(dA, dB);
    cudaMemcpy(hB, dB, sizeof(int)*NUM, cudaMemcpyDeviceToHost);
    printf("After ffs intrinsic: \n");
    for (unsigned i = 0; i < NUM; i++) {
        printf("hB[%u]: %d\n", i, hB[i]);
    }
    // Unsigned variants for the remaining intrinsics.
    unsigned huA[NUM] = {1, 2, 3, 4};
    unsigned huB[NUM] = {1, 2, 3, 4};
    unsigned huC[NUM] = {1, 2, 3, 4};
    unsigned huD[NUM] = {1, 2, 3, 4};
    unsigned *duA, *duB, *duC, *duD;
    cudaMalloc((void**)&duA, sizeof(unsigned)*NUM);
    cudaMalloc((void**)&duB, sizeof(unsigned)*NUM);
    cudaMalloc((void**)&duC, sizeof(unsigned)*NUM);
    cudaMalloc((void**)&duD, sizeof(unsigned)*NUM);
    cudaMemcpy(duA, huA, sizeof(unsigned)*NUM, cudaMemcpyHostToDevice);
    cudaMemcpy(duB, huB, sizeof(unsigned)*NUM, cudaMemcpyHostToDevice);
    cudaMemcpy(duC, huC, sizeof(unsigned)*NUM, cudaMemcpyHostToDevice);
    cudaMemcpy(duD, huD, sizeof(unsigned)*NUM, cudaMemcpyHostToDevice);
    popcKernel<<<1, NUM>>>(duA, duB);
    cudaMemcpy(huB, duB, sizeof(unsigned)*NUM, cudaMemcpyDeviceToHost);
    printf("After popc intrinsic: \n");
    for (unsigned i = 0; i < NUM; i++) {
        printf("huB[%u]: %u\n", i, huB[i]);
    }
    brevKernel<<<1, NUM>>>(duA, duB);
    cudaMemcpy(huB, duB, sizeof(unsigned)*NUM, cudaMemcpyDeviceToHost);
    printf("After brev intrinsic: \n");
    for (unsigned i = 0; i < NUM; i++) {
        printf("huB[%u]: %u\n", i, huB[i]);
    }
    bytePermKernel<<<1, NUM>>>(duA, duB, duC, duD);
    cudaMemcpy(huD, duD, sizeof(unsigned)*NUM, cudaMemcpyDeviceToHost);
    printf("After bytePerm intrinsic: \n");
    for (unsigned i = 0; i < NUM; i++) {
        printf("huD[%u]: %u\n", i, huD[i]);
    }
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(duA);
    cudaFree(duB);
    cudaFree(duC);
    cudaFree(duD);
    return 0;
}
|
2,268 | /*
============================================================================
Filename : implementation.cu
Author : Romain Jufer
SCIPER : 229801
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
const int NB_THREADS = 32;
// CPU Baseline
// CPU baseline: 'iterations' sweeps of a 3x3 box average over the interior
// of a length x length grid, re-imposing the four 1000-valued "heat
// source" cells at the centre after each sweep.
// NOTE(review): input/output are swapped through LOCAL pointers, so after
// an odd number of iterations the final data is in the buffer the caller
// passed as 'output', after an even number it is back in 'input' -- the
// caller must account for this parity.
void array_process(double *input, double *output, int length, int iterations)
{
    double *temp;
    for(int n=0; n<(int) iterations; n++)
    {
        // 3x3 average over interior cells only; the border is untouched.
        for(int i=1; i<length-1; i++)
        {
            for(int j=1; j<length-1; j++)
            {
                output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] +
                                            input[(i-1)*(length)+(j)] +
                                            input[(i-1)*(length)+(j+1)] +
                                            input[(i)*(length)+(j-1)] +
                                            input[(i)*(length)+(j)] +
                                            input[(i)*(length)+(j+1)] +
                                            input[(i+1)*(length)+(j-1)] +
                                            input[(i+1)*(length)+(j)] +
                                            input[(i+1)*(length)+(j+1)] ) / 9;
            }
        }
        // Re-pin the four centre "heat source" cells.
        output[(length/2-1)*length+(length/2-1)] = 1000;
        output[(length/2)*length+(length/2-1)] = 1000;
        output[(length/2-1)*length+(length/2)] = 1000;
        output[(length/2)*length+(length/2)] = 1000;
        // Ping-pong the buffers for the next sweep.
        temp = input;
        input = output;
        output = temp;
    }
}
// Horizontal pass of the separable 3x3 box filter: each interior cell
// becomes the sum of its left, centre, and right neighbours; the first and
// last columns are fixed at zero.
__global__ void GPU_array_rowKernel(double *input, double *output, int length) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= length || y >= length)
        return;
    const int idx = y * length + x;
    if (x == 0 || x == length - 1) {
        output[idx] = 0;
        return;
    }
    // After the guard above, x-1 and x+1 are always in range, so the
    // original's defensive ternaries were dead branches.
    output[idx] = input[idx - 1] + input[idx] + input[idx + 1];
}
// Vertical pass of the separable 3x3 box filter: sums the row-pass results
// from the rows above, at, and below each interior cell and divides by 9,
// completing the 9-point average. The first/last rows are fixed at zero,
// and the four centre "heat source" cells are re-pinned to 1000.
__global__ void GPU_array_colKernel(double *input, double *output, int length) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= length || y >= length)
        return;
    const int idx = y * length + x;
    if (y == 0 || y == length - 1) {
        output[idx] = 0;
        return;
    }
    // Past the guard, idx-length and idx+length are in range; the
    // original's defensive ternaries were dead branches.
    output[idx] = (input[idx - length] + input[idx] + input[idx + length]) / 9;
    const bool centerRow = (y == length / 2 || y == length / 2 - 1);
    const bool centerCol = (x == length / 2 - 1 || x == length / 2);
    if (centerRow && centerCol) {
        output[idx] = 1000;
    }
}
// GPU Optimized function
// GPU version of array_process: runs the separable row/col kernels for
// 'iterations' ping-pong passes and reports H2D, compute, and D2H times.
// FIX: replaced the long-deprecated (and removed in CUDA 10+) API
// cudaThreadSynchronize() with cudaDeviceSynchronize().
void GPU_array_process(double *input, double *output, int length, int iterations)
{
    //Cuda events for calculating elapsed time
    cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
    cudaEventCreate(&cpy_H2D_start);
    cudaEventCreate(&cpy_H2D_end);
    cudaEventCreate(&cpy_D2H_start);
    cudaEventCreate(&cpy_D2H_end);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_end);
    /* Preprocessing goes here */
    cudaSetDevice(0);
    double* input_d = NULL;
    double* output_d = NULL;
    size_t dataSize = length * length * sizeof(double);
    int nbThreadsPerDim = NB_THREADS;
    dim3 threadPerBlocks(nbThreadsPerDim, nbThreadsPerDim);
    // One extra block per dim covers lengths not divisible by the tile size.
    int nbBlockPerDim = length / nbThreadsPerDim + 1;
    dim3 blocks(nbBlockPerDim,nbBlockPerDim);
    double* tmpSwap = input_d;
    if(cudaMalloc((void**) &input_d, dataSize) != cudaSuccess) {
        cout << "Cuda Malloc Error : cannot allocate memory for input\n";
    }
    if(cudaMalloc((void**) &output_d, dataSize) != cudaSuccess) {
        cout << "Cuda Malloc Error : cannot allocate memory for output\n";
    }
    cudaEventRecord(cpy_H2D_start);
    /* Copying array from host to device goes here */
    if(cudaMemcpy(input_d, input, dataSize, cudaMemcpyHostToDevice) != cudaSuccess)
        cout << "Cuda Memcpy HostToDevice Error: cannot copy input\n";
    cudaEventRecord(cpy_H2D_end);
    cudaEventSynchronize(cpy_H2D_end);
    cudaEventRecord(comp_start);
    /* GPU calculation goes here */
    // Each iteration: row pass input_d -> output_d, col pass output_d ->
    // input_d, so the result of an iteration always ends up in input_d.
    for(int i = 0; i < iterations; ++i) {
        GPU_array_rowKernel <<< blocks, threadPerBlocks >>> (input_d, output_d, length);
        GPU_array_colKernel <<< blocks, threadPerBlocks >>> (output_d, input_d, length);
    }
    cudaDeviceSynchronize();
    // Swap so output_d names the buffer holding the final result.
    tmpSwap = input_d;
    input_d = output_d;
    output_d = tmpSwap;
    cudaEventRecord(comp_end);
    cudaEventSynchronize(comp_end);
    cudaEventRecord(cpy_D2H_start);
    /* Copying array from device to host goes here */
    if(cudaMemcpy(output, output_d, dataSize, cudaMemcpyDeviceToHost) != cudaSuccess)
        cout << "Cuda Memcpy DeviceToHost Error: cannot copy output\n";
    cudaEventRecord(cpy_D2H_end);
    cudaEventSynchronize(cpy_D2H_end);
    /* Postprocessing goes here */
    cudaFree(input_d);
    cudaFree(output_d);
    float time;
    cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
    cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    cudaEventElapsedTime(&time, comp_start, comp_end);
    cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
    cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
}
|
2,269 |
/*
xor_pcases_ref.cu
Implementation of a XOR neural network in CUDA,
calculating output of many input cases in parallel.
(Refactored version.)
Andrei de A. Formiga, 2012-03-31
*/
#include <stdio.h>
// weights for the hidden layer
float weights_h[] = { 0.5f, -1.0f, -1.0f,
-1.5f, 1.0f, 1.0f };
float weights_h2[2][3] =
{
{ 0.5f, -1.0f, -1.0f },
{ -1.5f, 1.0f, 1.0f } };
// weights for the output layer
float weights_o[] = { 0.5f, -1.0f, -1.0f };
// weight arrays for the device
float *dev_hw;
float *dev_ow;
// device input
float *dev_in;
// device hidden outputs
float *dev_hidden;
// device output
float *dev_out;
// inputs
float inputs[] = { 0.0f, 0.0f, 0.0f, 1.0f,
1.0f, 0.0f, 1.0f, 1.0f };
const int ncases = 4;
const int input_size = 2;
const int hidden_size = 2;
// weights per neuron
const int ws_per_node[2] = { 3, 2 };
int *dev_wpn;
// index weight from node j to node i in layer l, using array ws of weights
#define W(ws, l, i, j) ( ws[i * dev_wpn[l] + j] )
// desired outputs
float outputs[] = { 0.0f, 1.0f, 1.0f, 0.0f };
// kernel for hidden layer (indexed as layer 0)
// Hidden-layer kernel (layer 0): launched as <<<ncases, hidden_size>>> in
// main, so each block handles one input case and each thread one hidden
// neuron. Computes the neuron's weighted sum (bias weight * 1 plus the two
// case inputs) via the W indexing macro, then applies a step threshold.
__global__ void calculate_hidden(float *dev_hw, int *dev_wpn, float *input, float *hidden)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;   // global hidden-unit slot
    // NOTE(review): input_ix is the case's first input only because
    // blockDim.x (hidden_size) equals input_size (both 2) -- confirm before
    // changing either constant.
    int input_ix = blockIdx.x * blockDim.x;
    int node = threadIdx.x;                            // neuron index within the layer
    float h;
    h = W(dev_hw, 0, node, 0) * 1.0f +
        W(dev_hw, 0, node, 1) * input[input_ix] +
        W(dev_hw, 0, node, 2) * input[input_ix+1];
    // threshold (step activation)
    if (h > 0.0f)
        hidden[tid] = 1.0f;
    else
        hidden[tid] = 0.0;
}
// kernel for output layer
// Output-layer kernel: launched as <<<ncases, 1>>>, so each block computes
// the single output neuron for one input case. Applies a step threshold to
// the bias plus the weighted pair of hidden activations.
__global__ void calculate_output(float *dev_ow, float *hidden, float *output)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int caseBase = blockIdx.x * blockDim.x;  // this case's index
    const int w0 = threadIdx.x;                    // first weight slot for this neuron
    const float o = dev_ow[w0] * 1.0f
                  + dev_ow[w0 + 1] * hidden[2 * caseBase]
                  + dev_ow[w0 + 2] * hidden[2 * caseBase + 1];
    // threshold (step activation)
    output[tid] = (o > 0.0f) ? 1.0f : 0.0f;
}
// Forward-propagates all four XOR input cases through the 2-2-1 network in
// parallel (one block per case) and prints each input pair with its output.
int main(int argc, char **argv)
{
    float out[ncases];
    printf("### XOR test (forward propagation)\n");
    // Device allocations: weights, inputs, hidden activations, outputs,
    // and the per-layer weights-per-node table used by the W macro.
    cudaMalloc((void**) &dev_hw, 6 * sizeof(float));
    cudaMalloc((void**) &dev_ow, 3 * sizeof(float));
    cudaMalloc((void**) &dev_in, ncases * input_size * sizeof(float));
    cudaMalloc((void**) &dev_hidden, ncases * hidden_size * sizeof(float));
    cudaMalloc((void**) &dev_out, ncases * sizeof(float)); // output size = 1
    cudaMalloc((void**) &dev_wpn, 2 * sizeof(int));
    cudaMemcpy(dev_hw, weights_h, 6 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_ow, weights_o, 3 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_wpn, ws_per_node, 2 * sizeof(int), cudaMemcpyHostToDevice);
    // try inputs: one block per case; hidden layer uses 2 threads (one per
    // neuron), output layer 1 thread.
    cudaMemcpy(dev_in, inputs, ncases * input_size * sizeof(float), cudaMemcpyHostToDevice);
    calculate_hidden<<<4, 2>>>(dev_hw, dev_wpn, dev_in, dev_hidden);
    calculate_output<<<4, 1>>>(dev_ow, dev_hidden, dev_out);
    // Blocking copy also synchronizes with the kernels above.
    cudaMemcpy(out, dev_out, ncases * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < ncases; ++i)
        printf("Input: %2.1f %2.1f -- Output: %f\n", inputs[input_size*i],
               inputs[input_size*i+1], out[i]);
    cudaFree(dev_hw);
    cudaFree(dev_ow);
    cudaFree(dev_in);
    cudaFree(dev_hidden);
    cudaFree(dev_out);
    return 0;
}
|
2,270 | #include <iostream>
#include <complex>
#include <math.h>
#include <thrust/complex.h>
#include <sys/time.h>
#include <cassert>
#include <cufft.h>
using namespace std;
// Reads n complex samples from stdin, runs an in-place forward C2C FFT on the
// GPU with cuFFT, and prints the elapsed time (plan + exec) in milliseconds.
int main(){
    int n;cin>>n;
    cufftComplex *data_host = (cufftComplex*) malloc (sizeof (cufftComplex)* n);
    cufftComplex *data_back = (cufftComplex*) malloc (sizeof (cufftComplex)* n);
    for(int i=0; i<n; i++){
        cin>>data_host[i].x;   // real part
        cin>>data_host[i].y;   // imaginary part
    }
    cufftHandle plan;
    cufftComplex *data1;
    cudaMalloc ((void **) &data1, sizeof(cufftComplex)*n);
    cudaMemcpy(data1, data_host, n*sizeof(cufftComplex), cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    int batch=1;
    cufftPlan1d(&plan, n, CUFFT_C2C, batch);
    cufftExecC2C(plan, data1, data1, CUFFT_FORWARD);   // in-place transform
    cudaEventRecord(stop);
    cudaMemcpy(data_back, data1, n*sizeof(cufftComplex), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cout<<milliseconds;
    cufftDestroy(plan);
    // Fix: release the resources the original leaked — the CUDA events, the
    // device buffer, and both host buffers.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(data1);
    free(data_host);
    free(data_back);
    // for(int i=0; i<n; i++){
    // cout<<"("<<data_back[i].x<<","<<data_back[i].y<<")"<<endl;
    // }
}
|
2,271 | #ifdef _GLIBCXX_USE_INT128
#undef _GLIBCXX_USE_INT128
#endif
#ifdef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_ATOMIC_BUILTINS
#endif
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <cstdlib>
// Smoke test: generate 20 random ints on the host, copy to the device, sort
// with Thrust. Reaching the final print means no Thrust call threw.
// NOTE(review): std::cout is used but <iostream> is not included above — it
// currently compiles only because the Thrust headers pull it in transitively.
int main(void)
{
// generate random data on the host
thrust::host_vector<int> h_vec(20);
thrust::generate(h_vec.begin(), h_vec.end(), rand);
// transfer to device
thrust::device_vector<int> d_vec = h_vec;
// sort on device
thrust::sort(d_vec.begin(), d_vec.end());
std::cout << "TEST PASSED\n";
return 0;
}
|
// Arithmetic mean of the first n_observations entries of `observations`.
// Callable from both host and device code.
__device__ __host__ inline double cal_mean(const double *observations, int n_observations){
    double total = 0;
    for(int idx = 0; idx < n_observations; idx++){
        total += observations[idx];
    }
    return total / double(n_observations);
}
// Unbiased sample variance (n-1 denominator) of the first n_observations
// entries. Callable from both host and device code.
__device__ __host__ inline double cal_variance(const double *observations, int n_observations){
    const double mu = cal_mean(observations, n_observations);
    double sum_sq = 0;
    for(int idx = 0; idx < n_observations; idx++){
        const double dev = observations[idx] - mu;
        sum_sq += dev * dev;
    }
    return sum_sq / double(n_observations - 1);
}
// Unbiased sample covariance (n-1 denominator) between two equally sized
// observation series. Callable from both host and device code.
__device__ __host__ inline double cal_covariance(const double *observations1, const double *observations2,
                                                 int n_observations){
    const double mu1 = cal_mean(observations1, n_observations);
    const double mu2 = cal_mean(observations2, n_observations);
    double acc = 0;
    for(int idx = 0; idx < n_observations; idx++){
        acc += (observations1[idx] - mu1) * (observations2[idx] - mu2);
    }
    return acc / double(n_observations - 1);
}
|
2,273 | // Stolen from Seb
/*#include <cstdint>*/
#include <stdint.h>
#define WARP_SIZE 32
// macro function
#define min(a,b) (a > b ? b : a);
// -------------------------------------------------------------------
// helper functions
// -------------------------------------------------------------------
// Get largest memory address that is aligned to a warp worth of floats
// and smaller than x.
// Round the pointer x DOWN to the nearest address aligned to one warp's worth
// of floats (WARP_SIZE * sizeof(float) = 128 bytes), returned as an integer.
__forceinline__ __device__ uintptr_t getBlockBeginning(void const * x)
{
return (uintptr_t)(x) & (~((uintptr_t)(WARP_SIZE*sizeof(float)) - 1)) ;
}
// Block-wide tree reduction of TWO shared-memory arrays in tandem; after the
// call, mdata[0] and sdata[0] hold the block sums. The two buffers are
// treated symmetrically, so passing them in either order gives the same
// result (the caller at accumulate_moments_partial swaps them — harmless).
// The `maxDataSize + WARP_SIZE` guards skip reduction levels that provably
// contain only zeros for small inputs.
// NOTE(review): the tid < 32 tail relies on implicit warp-synchronous
// execution (volatile only, no __syncwarp) — undefined on Volta+ independent
// thread scheduling; verify the target architecture.
__forceinline__ __device__ void blockReduce2(volatile float * mdata,
volatile float * sdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >=512) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; mdata[tid] += mdata[tid + 512]; } __syncthreads(); }
if (blockSize >= 512 && maxDataSize + WARP_SIZE >=256) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; mdata[tid] += mdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256 && maxDataSize + WARP_SIZE >=128) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; mdata[tid] += mdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128 && maxDataSize + WARP_SIZE >=64) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; mdata[tid] += mdata[tid + 64]; } __syncthreads(); }
if (tid < 32) {
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; mdata[tid] += mdata[tid + 32]; }
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; mdata[tid] += mdata[tid + 16]; }
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; mdata[tid] += mdata[tid + 8]; }
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; mdata[tid] += mdata[tid + 4]; }
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; mdata[tid] += mdata[tid + 2]; }
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; mdata[tid] += mdata[tid + 1]; }
}
}
// This kernel accumulates means and variances for the data.
// Each block of thread sums over one or more data planes, resulting
// in an array accumulator[] of dimension numBlocksPerChannel x 2*numChannels.
//
// If each thread block scans all the images, then numBlocksPerChannel = 1.
// However, for efficiency different thread blocks do different
// subset of images, resulting in numBlocksPerChannel partial results to be summed
// later by a second kernel.
//
// The first part accumulator[:,0:numChannels-1] stores the data for the mean
// and the second part accumulator[:,numChannels,2*numChannels-1] the data
// for the sigmas.
//
// This function uses the sliding-window summing technique described
// above. It requires
//
// 2*sizeof(float)*blockSize
//
// bytes of shared scratch memory to hold to hold partial sums for
// means and sigmas.
// Accumulates per-channel sum and sum-of-squares (for mean/variance) over
// data planes. Requires 2*sizeof(float)*blockDim.x bytes of dynamic shared
// memory. See the long comment above for the accumulator layout.
__global__ void accumulate_moments_partial(float * accumulator,
float const * data,
int planeArea,
int numPlanes,
int numChannels,
int numBlocksPerChannel)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
// NOTE(review): assumes planes are laid out so plane index mod numChannels
// equals the channel (and gridDim.x = numChannels * numBlocksPerChannel) — verify caller.
int channel = blockIdx.x % numChannels ;
extern __shared__ float s [] ;
/*SharedMemory<float> smem ;*/
/*float* s = smem.getPointer() ;*/
float* mdata = s ;                       // partial sums (mean numerator)
float* sdata = mdata + blockSize ;       // partial sums of squares
mdata[tid] = 0 ;
sdata[tid] = 0 ;
while (plane < numPlanes) {
float const * planeBegin = data + plane * planeArea ;
float const * planeEnd = planeBegin + planeArea ;
// Sliding window: start from the aligned address below planeBegin so every
// warp load is aligned; the bounds check skips the pre-plane slack.
float const * block = (float const*) getBlockBeginning(planeBegin) + tid ;
while (block < planeEnd) {
if (block >= planeBegin) {
float x = *block ;
mdata[tid] += x ;
sdata[tid] += x * x ;
}
block += blockSize ;
}
plane += planeStride ;
}
// Argument order (sdata, mdata) is swapped vs. the signature, but the
// reduction treats both arrays identically, so the result is unchanged.
blockReduce2(sdata, mdata, tid, blockSize, planeArea) ;
if (tid == 0) {
int chunk = blockIdx.x / numChannels ;
int i = chunk + channel * numBlocksPerChannel ;
accumulator[i] = mdata[0];                 // mean part
accumulator[i + gridDim.x] = sdata[0];     // sigma (sum of squares) part
}
}
|
2,274 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define THREAD 128
__global__ void dot(int N,float *x,float*y,float *ans);
// Dot product of two 1024-element vectors of ones: per-block partial sums are
// computed on the GPU, then summed on the host. Expected output: 1024.0.
int main(void){
    int i;
    int size = 1024;
    // Fix: block count must be ceil(size / THREAD); the original computed
    // (size + THREAD - 1) with the division missing, launching far too many
    // blocks and sizing `ans` accordingly.
    int block = (size + THREAD - 1) / THREAD;
    float *x,*y,*ans;      // pinned host buffers
    float z;               // final dot product
    cudaMallocHost((void **)&x,sizeof(float)*size);
    cudaMallocHost((void **)&y,sizeof(float)*size);
    cudaMallocHost((void **)&ans,sizeof(float)*block);
    /* device buffers */
    float *d_x,*d_y,*d_ans;
    cudaMalloc((void **)&d_x,sizeof(float)*size);
    cudaMalloc((void **)&d_y,sizeof(float)*size);
    cudaMalloc((void **)&d_ans,sizeof(float)*block);
    for(i=0;i<size;i++){
        x[i]=1.0f;
        y[i]=1.0f;
    }
    /* host -> device */
    cudaMemcpy(d_x,x,sizeof(float)*size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_y,y,sizeof(float)*size,cudaMemcpyHostToDevice);
    dot<<<block,THREAD>>>(size,d_x,d_y,d_ans);
    /* device -> host (blocking; also synchronizes the kernel) */
    cudaMemcpy(ans,d_ans,sizeof(float)*block,cudaMemcpyDeviceToHost);
    z = 0.0f;
    for(i=0;i<block;i++)
        z+=ans[i];
    printf("%f\n",z);
    // Fix: pinned memory must be released with cudaFreeHost — the original
    // called cudaFree on x/y, never freed ans, and freed d_ans twice.
    cudaFreeHost(x);
    cudaFreeHost(y);
    cudaFreeHost(ans);
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_ans);
    return 0;
}
// Per-block partial dot product: each thread loads one pairwise product into
// shared memory, the block tree-reduces, and thread 0 writes the block's
// partial sum to ans[blockIdx.x].
__global__ void dot(int N,float *x,float *y,float *ans){
    __shared__ float tmp[THREAD];
    int j = blockDim.x * blockIdx.x + threadIdx.x;
    // Out-of-range threads contribute 0 so the reduction stays correct.
    tmp[threadIdx.x] = (j < N) ? x[j] * y[j] : 0.0f;
    __syncthreads();   // fix: all loads must complete before any thread reduces
    // Fix: the original placed __syncthreads() inside divergent branches
    // (undefined behavior — not all threads reach the barrier) and relied on
    // implicit warp synchrony for the last 32 elements. A uniform tree
    // reduction with the barrier outside the conditional avoids both.
    for(int stride = THREAD/2; stride > 0; stride >>= 1){
        if(threadIdx.x < stride){
            tmp[threadIdx.x] += tmp[threadIdx.x + stride];
        }
        __syncthreads();
    }
    if(threadIdx.x == 0){
        ans[blockIdx.x] = tmp[0];
    }
}
|
2,275 | #include <stdio.h>
// Print device properties
// Print a human-readable summary of one device's properties to stdout.
// Output order is fixed (callers/tests may depend on it).
void printDevProp(cudaDeviceProp p)
{
    printf("Major revision number: %d\n", p.major);
    printf("Minor revision number: %d\n", p.minor);
    printf("Name: %s\n", p.name);
    printf("Total global memory: %lu\n", p.totalGlobalMem);
    printf("Total shared memory per block: %lu\n", p.sharedMemPerBlock);
    printf("Total registers per block: %d\n", p.regsPerBlock);
    printf("Warp size: %d\n", p.warpSize);
    printf("Maximum memory pitch: %lu\n", p.memPitch);
    printf("Maximum threads per block: %d\n", p.maxThreadsPerBlock);
    for (int axis = 0; axis < 3; ++axis) {
        printf("Maximum dimension %d of block: %d\n", axis, p.maxThreadsDim[axis]);
    }
    for (int axis = 0; axis < 3; ++axis) {
        printf("Maximum dimension %d of grid: %d\n", axis, p.maxGridSize[axis]);
    }
    printf("Clock rate: %d\n", p.clockRate);
    printf("Total constant memory: %lu\n", p.totalConstMem);
    printf("Texture alignment: %lu\n", p.textureAlignment);
    printf("Concurrent copy and execution: %s\n", (p.deviceOverlap ? "Yes" : "No"));
    printf("Number of multiprocessors: %d\n", p.multiProcessorCount);
    printf("Kernel execution timeout: %s\n", (p.kernelExecTimeoutEnabled ?"Yes" : "No"));
}
// Enumerate every CUDA device on the system and dump its properties.
int main()
{
    int devCount;
    cudaGetDeviceCount(&devCount);
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);
    for (int dev = 0; dev < devCount; ++dev)
    {
        printf("\nCUDA Device #%d\n", dev);
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printDevProp(props);
    }
    return 0;
}
|
2,276 | // linear algebra calculation on GPU devices
// By Zheshu Wu, Jun 1, 2018
#include<stdio.h>
#define N 33 * 1024
// Element-wise c = a + b over N elements using a grid-stride loop, so any
// launch configuration covers the whole array.
__global__ void add(int *a, int *b, int *c)
{
    const int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N; idx += stride)
    {
        c[idx] = a[idx] + b[idx];
    }
}
// Fills two N-element arrays, adds them on the GPU with a grid-stride kernel,
// and verifies the result on the host.
// NOTE(review): a, b, c are ~132 KB each on the stack (N = 33*1024 ints) —
// close to typical stack limits; heap allocation would be safer.
int main(int argc, char const *argv[])
{
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
cudaMalloc( (void**)&dev_a, N * sizeof(int) );
cudaMalloc( (void**)&dev_b, N * sizeof(int) );
cudaMalloc( (void**)&dev_c, N * sizeof(int) );
// fill the array 'a' and 'b' on the CPU
for (int i=0; i<N; i++)
{
a[i] = -i;
b[i] = i * i;
}
// add ( a, b, c );
//copy the arrays 'a' and 'b' to the GPU
cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
// 128 blocks x 128 threads; the kernel's grid-stride loop covers all N elements.
add<<< 128, 128 >>>(dev_a, dev_b, dev_c);
// copy the array 'c' back from the GPU to the CPU (blocking; syncs the kernel)
cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
// display the results
// for (int i=0; i<N; i++)
// {
// printf( "%d + %d = %d\n", a[i], b[i], c[i]);
// }
// verify that the GPU did the work we requested
bool success = true;
for (int i=0; i<N; i++)
{
if ((a[i] + b[i]) != c[i])
{
printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
success = false;
}
}
if (success) printf( "We did it!\n");
// free the memory allocated on the GPU
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
}
2,277 | // What if we are given a device pointer that is offset from any of the device pointers we provided to the client?
//
// This file is a test-case for this. Then we can look at handling that...
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <cuda.h>
// __global__ void getValue(float *data, int idx, float value) {
// if(threadIdx.x == 0) {
// data[idx] += value;
// }
// }
// Driver-API test: copy 4 floats into the device buffer at an offset derived
// from pointer arithmetic on the CUdeviceptr, copy the whole buffer back, and
// assert the values landed at host indices 64..67.
int main(int argc, char *argv[]) {
int N = 1024;
CUstream stream;
cuStreamCreate(&stream, 0);
float *hostFloats;
cuMemHostAlloc((void **)&hostFloats, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);
CUdeviceptr deviceFloats;
cuMemAlloc(&deviceFloats, N * sizeof(float));
hostFloats[128] = 123.456f;
hostFloats[129] = 444.0f;
hostFloats[130] = 321.0f;
hostFloats[131] = 111.0f;
// now we will copy 16 bytes, starting at location 128...
// Destination is deviceFloats + 64 floats (256 bytes) — an offset device
// pointer, which is exactly what this test exercises.
cuMemcpyHtoDAsync(
(CUdeviceptr)(((float *)deviceFloats) + 64),
hostFloats + 128,
4 * sizeof(float),
stream
);
cuStreamSynchronize(stream);
// now copy back entire buffer
// Clear the target slots first so stale host data can't mask a failed copy.
hostFloats[64] = 0.0f;
hostFloats[65] = 0.0f;
hostFloats[66] = 0.0f;
hostFloats[67] = 0.0f;
cuMemcpyDtoHAsync(hostFloats, deviceFloats, N * sizeof(float), stream);
cuStreamSynchronize(stream);
// and check the values...
// Exact float equality is safe here: the same literals round-trip unchanged.
cout << hostFloats[64] << endl;
cout << hostFloats[65] << endl;
cout << hostFloats[66] << endl;
cout << hostFloats[67] << endl;
assert(hostFloats[64] == 123.456f);
assert(hostFloats[65] == 444.0f);
assert(hostFloats[66] == 321);
assert(hostFloats[67] == 111);
cuMemFreeHost(hostFloats);
cuMemFree(deviceFloats);
cuStreamDestroy(stream);
return 0;
}
|
2,278 | #include <stdio.h>
#include <time.h>
// matrix3 = matrix1 * matrix2, one thread per output row (64 threads/block).
// matrix1 is rows x m, matrix2 is m x p, matrix3 is rows x p.
__global__ void matrixMultiply(int *matrix1, int *matrix2, int *matrix3, int m, int p) {
    int i = blockIdx.x*64 + threadIdx.x;   // output row handled by this thread
    // Fix: the launcher rounds the block count up when n % 64 != 0, so
    // surplus threads used to write past the end of matrix3. The caller uses
    // square matrices (n == m == p), so m also bounds the row index.
    if (i >= m) return;
    for (int j = 0; j < p; j++)
    {
        int sum = 0;
        for (int k = 0; k < m; k++)
        {
            sum = sum + matrix1[m*i+k]*matrix2[p*k+j];
        }
        matrix3[p*i+j] = sum;
    }
}
// Benchmarks n x n integer matrix multiply on CPU vs GPU; n comes from argv[1].
// Prints "<cpu_ms> <gpu_ms>".
// NOTE(review): strtol/rand/malloc need <stdlib.h>, which is not included
// above — presumably pulled in transitively; verify.
int main(int argc, char **argv)
{
clock_t start_cpu,stop_cpu,start_gpu,stop_gpu;
srand(time(NULL));
int n = 0,m,p;
if (argc>1) {n=strtol(argv[1],NULL,10);}
m=n;
p=n;
//if (argc>2) {m=strtol(argv[2],NULL,10);}
//if (argc>3) {p=strtol(argv[3],NULL,10);}
//printf("n: %d, m:%d, p:%d\n",n,m,p);
int *mat1 = (int *)malloc(n*m*sizeof(int));
int *mat2 = (int *)malloc(m*p*sizeof(int));
int *mat3 = (int *)malloc(n*p*sizeof(int));
// Fill inputs with small random values (0..4).
for (int i = 0; i < n; i++)
{
for (int j = 0; j < m; j++)
{
mat1[m*i+j] = (int) rand()%5;
}
}
for (int i = 0; i < m; i++)
{
for (int j = 0; j < p; j++)
{
mat2[p*i+j] = (int) rand()%5;
}
}
int *c_mat1;
cudaMalloc(&c_mat1,n*m*sizeof(int));
int *c_mat2;
cudaMalloc(&c_mat2,m*p*sizeof(int));
int *c_mat3;
cudaMalloc(&c_mat3,n*p*sizeof(int));
cudaMemcpy(c_mat1, mat1, n*m*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(c_mat2, mat2, m*p*sizeof(int), cudaMemcpyHostToDevice);
// NOTE(review): mat3 is uninitialized here, so this upload copies garbage;
// harmless only because the kernel overwrites every element it touches.
cudaMemcpy(c_mat3, mat3, n*p*sizeof(int), cudaMemcpyHostToDevice);
start_cpu = clock();
int sum=0;
// CPU reference multiply (also produces the values later overwritten by the
// device download below).
for (int i = 0; i < n; i++)
{
for (int j = 0; j < p; j++)
{
for (int k = 0; k < m; k++)
{
sum = sum + mat1[m*i+k]*mat2[p*k+j];
}
mat3[p*i+j] = sum;
//printf("%d, ",mat3[p*i+j]);
sum = 0;
}
//printf("\n");
}
stop_cpu = clock();
int time_spent = (int)(1000 * (stop_cpu - start_cpu) / CLOCKS_PER_SEC);
printf("%d ",time_spent);
start_gpu = clock();
// One thread per row, 64 threads per block, rounded up.
int bloki = n / 64;
int watki = 64;
if (n%64 != 0 ) bloki++;
matrixMultiply<<<bloki,watki>>>(c_mat1,c_mat2,c_mat3,m,p);
cudaDeviceSynchronize();
stop_gpu = clock();
int time_spent2 = (int)(1000 * (stop_gpu - start_gpu) / CLOCKS_PER_SEC);
printf("%d\n",time_spent2);
cudaMemcpy(mat3, c_mat3, n*p*sizeof(int), cudaMemcpyDeviceToHost);
/*for (int i = 0; i < n; i++)
{
for (int j = 0; j < p; j++)
{
printf("%d, ",mat3[p*i+j]);
}
printf("\n");
}*/
// NOTE(review): host and device buffers are never freed (process exit
// reclaims them, but explicit free/cudaFree would be cleaner).
return 0;
}
|
2,279 | #include <stdio.h>
#include "cuda_runtime.h"
// CUDA Kernel Function
// Element-wise c = a + b; single-block launch, one thread per element.
__global__ void add(int *a, int *b, int *c){
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// main Function
// Adds two 5-element arrays on the GPU and compares against a CPU reference.
// NOTE(review): the printf reports the comparison result (1/0 per element),
// not the sum, despite the "The Sum is" wording.
int main(){
// define A, B, and C
// These are three array and we will do A + B = C
int A[5] = {1, 2, 3, 4, 5};
int B[5] = {7, 8, 10, 18, 20};
int C[5];
int C_check[5];
// define arrays for A, B, and C
// and copy these memory from host memory to device memory
int *A_gpu;
int *B_gpu;
int *C_gpu;
int size = 5 * sizeof(int);
// allocate memory for A, B, and C
cudaMalloc((void **)&A_gpu, size);
cudaMalloc((void **)&B_gpu, size);
cudaMalloc((void **)&C_gpu, size);
// copy the memory
cudaMemcpy(A_gpu, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(B_gpu, B, size, cudaMemcpyHostToDevice);
// One block of 5 threads: one thread per element.
add<<<1, 5>>>(A_gpu, B_gpu, C_gpu);
// copy memory from device to host
// since the result is stored in C_gpu
// we just need this memory (blocking copy also synchronizes the kernel)
cudaMemcpy(C, C_gpu, size, cudaMemcpyDeviceToHost);
int i;
for (i = 0; i < 5; i++){
C_check[i] = A[i] + B[i];
}
for (i = 0; i < 5; i++){
printf("The Sum is %d\n", C[i] == C_check[i]);
}
cudaFree(A_gpu);
cudaFree(B_gpu);
cudaFree(C_gpu);
return 0;
}
// compile with: nvcc -o add add.cu
/* The three important steps of coding in CUDA C:
First, we need to allocate gpu memory and let it ready to get the value;
Then, we copy the variable from cpu into gpu;
After that, we use the kernel function to do the computing;
Lastly, we copy the value(s) we got from gpu to cpu and use this/these value(s).
Basically, not that difficult... (for now)
*/
|
2,280 | #include<iostream>
#include<cuda.h>
using namespace std;
// In-place element-wise addition a += b; launched with one block per element.
__global__ void add(int *a,const int *b){
    const int idx = blockIdx.x;
    a[idx] += b[idx];
}
// Uploads {0..9} and {0,2,..,18}, adds them in place on the GPU, and prints
// the result (3*i per line).
int main(){
    const int N=10;
    int *temp=new int[N];   // host staging buffer reused for upload and download
    int *a,*b;
    cudaMalloc(&a,N*sizeof(int));
    cudaMalloc(&b,N*sizeof(int));
    for(int i=0;i<N;i++){
        temp[i]=i;
    }
    cudaMemcpy(a,temp,N*sizeof(int),cudaMemcpyHostToDevice);
    for(int i=0;i<N;i++){
        temp[i]=2*i;
    }
    cudaMemcpy(b,temp,N*sizeof(int),cudaMemcpyHostToDevice);
    add<<<N,1>>>(a,b);   // one block per element
    cudaMemcpy(temp,a,N*sizeof(int),cudaMemcpyDeviceToHost);
    for(int i=0;i<N;i++){
        cout<<temp[i]<<endl;
    }
    delete[] temp;
    cudaFree(a);
    cudaFree(b);
}
|
2,281 | #include "includes.h"
// Alpha-blends an overlay image onto `main` at (x_position, y_position).
// One thread per destination pixel; threads outside the overlay's rectangle
// return immediately.
__global__ void Overlay_Cuda( int x_position, int y_position, unsigned char* main, int main_linesize, unsigned char* overlay, int overlay_linesize, int overlay_w, int overlay_h, unsigned char* overlay_alpha, int alpha_linesize, int alpha_adj_x, int alpha_adj_y)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= overlay_w + x_position ||
y >= overlay_h + y_position ||
x < x_position ||
y < y_position ) {
return;
}
int overlay_x = x - x_position;   // coordinates within the overlay image
int overlay_y = y - y_position;
float alpha = 1.0;   // fully opaque when no alpha plane is supplied
if (alpha_linesize) {
// NOTE(review): alpha_adj_x/alpha_adj_y appear to be per-axis subsampling
// factors for the alpha plane — verify against the caller's pixel format.
alpha = overlay_alpha[alpha_adj_x * overlay_x + alpha_adj_y * overlay_y * alpha_linesize] / 255.0f;
}
// Standard "over" blend: out = alpha*overlay + (1-alpha)*background.
main[x + y*main_linesize] = alpha * overlay[overlay_x + overlay_y * overlay_linesize] + (1.0f - alpha) * main[x + y*main_linesize];
}
2,282 | #include "includes.h"
// Bitonic merge sort of d_input into d_output, one subarray per block held in
// dynamic shared memory (launch must pass blockDim.x * sizeof(float) bytes).
// NOTE(review): appears to assume blockDim.x == subarray_size and that
// subarray_size is a power of two — verify at the call site.
__global__ void BitonicMergeSort(float * d_output, float * d_input, int subarray_size)
{
extern __shared__ float shared_data[];
// internal index for sorting of the subarray
int index = threadIdx.x;
int index_global = index + blockDim.x * blockIdx.x;
// Number of merge stages: log2(subarray_size) - 1 (stages are 0-based).
double portions = log2(double(subarray_size)) - 1;
//copying of data portion dedicated to this block into shared memory
shared_data[index] = d_input[index_global];
__syncthreads();
for (short portion = 0; portion <= portions; portion++)
{
short offset = 1 << portion;
short threads_in_box = offset << 1;
// calculated at the beginning of each portion
//int boxI = index % (threads_in_box + (blockDim.x * blockIdx.x));
// Box index decides merge direction: odd boxes sort ascending ("top down"),
// even boxes descending, producing bitonic sequences for the next stage.
int boxI = threadIdx.x / threads_in_box;
for (short subportion = portion; subportion >= 0; subportion--)
{
offset = 1 << subportion;
threads_in_box = offset << 1;
int arrow_bottom = index % threads_in_box;
if (((boxI + 1) % 2) == 1) {
// top down
if (arrow_bottom < offset) {
float temp = shared_data[index];
if (shared_data[index + offset] < temp) {
shared_data[index] = shared_data[index + offset];
shared_data[index + offset] = temp;
}
}
}
else {
// bottom up
if (arrow_bottom >= offset) {
float temp = shared_data[index];
if (shared_data[index - offset] < temp) {
shared_data[index] = shared_data[index - offset];
shared_data[index - offset] = temp;
}
}
}
__syncthreads();
}
}
d_output[index_global] = shared_data[index];
}
2,283 | #include "includes.h"
// Intentionally empty placeholder kernel: keeps the launch signature
// (threshold parameters and t-function cache) without performing any work.
__global__ void cudaAcc_dev_t_funct(float PulseThresh, int PulseMax, int di, float *dev_t_funct_cache, float pulse_display_thresh) {
// do nothing
}
// Classify each node by the length of its CSR adjacency list (offset holds
// row offsets): degree >= 512 -> workspace1, 33..511 -> workspace2,
// 1..32 -> workspace3; all three flags are zeroed first.
__global__ void getSortedDegree(int numNodes, int *offset, int *workspace1, int *workspace2, int *workspace3)
{
    // Fix: grid-stride loop. The original incremented i by 1, so every thread
    // redundantly re-processed all nodes from its start index to numNodes.
    const int stride = blockDim.x * gridDim.x;
    for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<numNodes; i+=stride)
    {
        // initiate all workspace to 0
        workspace1[i] = 0;
        workspace2[i] = 0;
        workspace3[i] = 0;
        // compute each neighlist's length
        int neighlistLen=offset[i+1]-offset[i];
        // group the nodes by their degree
        if(neighlistLen >= 512) workspace1[i] = 1;
        else if(neighlistLen > 32) workspace2[i] = 1;
        else if(neighlistLen >0 && neighlistLen <= 32) workspace3[i] = 1;
    }
}
// Scatter step of stream compaction: where predicateArray[i] == 1, write i to
// its compacted slot (scanArray is the 1-based inclusive scan of the predicate).
__global__ void filter(int *predicateArray, int* scanArray, int *newPlace, int sizeScan)
{
    // Fix: grid-stride loop. The original incremented i by 1, so every thread
    // redundantly re-processed the whole tail of the array.
    const int stride = blockDim.x * gridDim.x;
    for(int i=blockDim.x*blockIdx.x+threadIdx.x; i<sizeScan; i+=stride)
    {
        if(predicateArray[i] ==1)
            newPlace[scanArray[i]-1] = i;
    }
}
2,285 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <ctype.h>
#include <vector>
#include <string>
#include <chrono>
typedef std::vector<double> double_vec;
// Reads one stock price per line from stdin until a non-numeric line (or
// EOF), uploads the series to the GPU, prints it back, and reports the
// host->device transfer time on stderr.
int main()
{
    double_vec stocks;
    std::string value;
    while (true)
    {
        // Fix: stop on EOF, and guard empty lines — the original indexed
        // value[0] unconditionally, which is undefined behavior on "".
        if (!std::getline(std::cin, value) || value.empty())
        {
            break;
        }
        if (!isdigit((unsigned char)value[0]))
        {
            break;
        }
        stocks.push_back(std::stod(value));
    }
    thrust::host_vector<double> host(int(stocks.size()));
    host = stocks;
    auto start = std::chrono::high_resolution_clock::now();
    thrust::device_vector<double> dev(host);   // the timed H2D transfer
    auto stop = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
    printf("Device vector: ");
    for (auto i = dev.begin(); i != dev.end(); i++)
    {
        std::cout << *i << " "; // slow: one device->host read per element
    }
    printf("\n");
    std::cerr << duration.count() << "ms" << std::endl;
}
|
2,286 | // Matrix Multiplication in gpu with 2D grid of blocks with 1D block shape
// Compile with: nvcc -o test matrix_multiplication_2D_2D.cu -std=c++11
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <chrono>
// Multiplies matrices using GPU with 2D grid
// matC += matA * matB for square n x n row-major matrices.
// Expects a 2D grid of 1D blocks: blockIdx.y selects the row, the 1D block
// tiles the columns. matC must be zero-initialised by the caller.
__global__ void multiply_matrix_gpu(long *matA, long *matB, long *matC, const int n) {
    const unsigned int col = threadIdx.x + blockIdx.x * blockDim.x;
    const unsigned int row = blockIdx.y;
    if (col >= n || row >= n) {
        return;
    }
    for (int k = 0; k < n; k++) {
        matC[row*n+col] += matA[row*n+k] * matB[k*n+col];
    }
}
// Multiplies matrices in host
// CPU reference: matC += matA * matB for square n x n row-major matrices
// (matC is expected to be zero-initialised by the caller).
void multiply_matrix_host(long *matA, long *matB, long *matC, int n) {
    for(int row = 0; row < n; row++) {
        for(int col = 0; col < n; col++) {
            long acc = matC[row*n+col];
            for(int k = 0; k < n; k++) {
                acc += matA[row*n+k] * matB[k*n+col];
            }
            matC[row*n+col] = acc;
        }
    }
}
// Compares two matrices
// Compare two n x n long matrices element-wise and report the first mismatch
// (epsilon kept for parity with float variants of this helper).
void checkResult(long *hostRef, long *gpuRef, const int n) {
    const double epsilon = 1.0E-8;
    const int total = n * n;
    for (int idx = 0; idx < total; idx++) {
        if (abs(hostRef[idx] - gpuRef[idx]) > epsilon) {
            printf("host %ld gpu %ld\n", hostRef[idx], gpuRef[idx]);
            printf("Matrix does not not match.\n\n");
            return;
        }
    }
    printf("Matrix match.\n\n");
}
// Benchmarks 1000x1000 long matrix multiplication on CPU vs GPU (2D grid of
// 1D blocks) and verifies the two results agree.
int main(int argc, char* argv[]) {
    // Set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    cudaSetDevice(dev);
    // Size of matrix
    int n = 1000;
    // Fix: the element type is long, so buffers must be sized with
    // sizeof(long), not sizeof(long*) (same width on LP64 by accident,
    // wrong on platforms where long is 4 bytes).
    size_t bytes = (size_t)n * n * sizeof(long);
    // Host matrix memory
    long *h_a = (long *)malloc(bytes);
    long *h_b = (long *)malloc(bytes);
    // Result buffers: CPU reference and GPU output
    long *hostRef = (long *)malloc(bytes);
    long *gpuRef = (long *)malloc(bytes);
    // Initialize matrix on host
    for(int i = 0; i < n*n; i++ ) {
        h_a[i] = i+1;
        h_b[i] = i+1;
    }
    // Initialize result matrices with 0s (both multiplies accumulate)
    memset(hostRef, 0, bytes);
    memset(gpuRef, 0, bytes);
    // Multiply matrix on host
    auto start_cpu = std::chrono::high_resolution_clock::now();
    multiply_matrix_host(h_a, h_b, hostRef, n);
    auto end_cpu = std::chrono::high_resolution_clock::now();
    // Measure total time in host
    std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
    printf("multiply_matrix_host elapsed %f ms\n", duration_ms.count());
    // Device matrix global memory
    long *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, bytes);
    cudaMalloc((void **)&d_b, bytes);
    cudaMalloc((void **)&d_c, bytes);
    // Transfer data from host to device
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    cudaMemset(d_c, 0, bytes); // byte-wise zero fill is valid for integer 0
    // Kernel execution configuration: grid.x tiles columns, grid.y = rows
    dim3 block(128);
    dim3 grid((n + block.x - 1) / block.x, n);
    printf("grid.x %d grid.y %d block.x %d \n", grid.x, grid.y, block.x);
    // Execute kernel
    start_cpu = std::chrono::high_resolution_clock::now();
    multiply_matrix_gpu<<<grid, block>>>(d_a, d_b, d_c, n);
    cudaDeviceSynchronize();
    end_cpu = std::chrono::high_resolution_clock::now();
    // Measure total time
    duration_ms = end_cpu - start_cpu;
    printf("multiply_matrix_gpu elapsed %f ms\n", duration_ms.count());
    // Copy result from device to host
    cudaMemcpy(gpuRef, d_c, bytes, cudaMemcpyDeviceToHost);
    // Check results
    checkResult(hostRef, gpuRef, n);
    // Free memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(hostRef);
    free(gpuRef);
    cudaDeviceReset();
    return 0;
}
|
2,287 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
// utility function provided by https://gist.github.com/jefflarkin/5390993
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s' (err: %d)\n",__FILE__,__LINE__,cudaGetErrorString(e), e); \
exit(0); \
} \
}
// GPU stopwatch backed by a pair of CUDA events. start()/stop() bracket work
// on the default stream; stop() returns the elapsed time in milliseconds.
class cuStopwatch{
public:
cuStopwatch();
~cuStopwatch();
void start();
float stop();
private:
float elapsedTime;       // last measured interval, in milliseconds
bool started;            // true between start() and the matching stop()
cudaEvent_t startTime;
cudaEvent_t endTime;
};
// Construct an idle stopwatch, creating the two timing events up front.
cuStopwatch::cuStopwatch(){
    elapsedTime = 0;
    started = false;
    cudaError_t res;
    if ((res = cudaEventCreate(&startTime)) != 0)
        printf("Return code when recording startTime : %d\n", res);
    if ((res = cudaEventCreate(&endTime)) != 0)
        printf("Return code when recording endTime : %d\n", res);
}
// Release both timing events (return codes intentionally ignored here).
cuStopwatch::~cuStopwatch(){
cudaEventDestroy(startTime);
cudaEventDestroy(endTime);
}
// Record the start event. Calling start() again before stop() is a no-op.
void cuStopwatch::start(){
    if (started) {
        return;
    }
    started = true;
    cudaError_t res = cudaEventRecord(startTime);
    if (res != 0)
        printf("Return code when recording startTime : %d\n", res);
}
// Record the end event, wait for it, and return the elapsed milliseconds
// since start(). Returns 0 if the watch was never started.
float cuStopwatch::stop(){
    if (!started) {
        return 0;
    }
    cudaError_t res = cudaEventRecord(endTime);
    if (res != 0)
        printf("Return code when recording endTime : %d\n", res);
    cudaEventSynchronize(endTime);   // block until the end event has fired
    res = cudaEventElapsedTime(&elapsedTime, startTime, endTime);
    if (res != 0)
        printf("Return code when computing elapsed time : %d\n", res);
    started = false;
    return elapsedTime;
}
|
2,288 |
////////////// Parallelization of PageRank Algorithm using CUDA and OpenMp /////////////////////////
#include <iostream>
#include <fstream>
#include <string>
#include<stdlib.h>
#include<bits/stdc++.h>
#include <stdio.h>
#include<time.h>
#include <sys/time.h>
using namespace std;
#define TILE_WIDTH 32
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } ////////// Function to check error in device functions///////
// Print a diagnostic for any non-success CUDA status and, unless abort is
// false, terminate the process with the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess) {
        return;
    }
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
// c = a * b where a is N x N and b is N x d (row-major); one thread per
// output element, bounds-checked so any grid rounding is safe.
__global__ void matrixmult(float *a, float *b, float *c,int N,int d)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= N || col >= d) {
        return;
    }
    float acc = 0;
    for (int k = 0; k < N; k++)
    {
        acc += a[r * N + k] * b[k * d + col];
    }
    c[r * d + col] = acc;
}
// For each column j of the n x n row-major matrix a, store in len[j] the
// count of strictly positive entries (the node's out-degree). Returns len.
float *length(float *a,float *len,int n)
{
    for(int col=0;col<n;col++)
    {
        int positives=0;
        for(int row=0;row<n;row++)
        {
            if(a[row*n+col]>0){positives++;}
        }
        len[col]=positives;
    }
    return len;
}
// Turn a (damped) adjacency matrix into a column-stochastic matrix in place:
// columns with out-degree g are scaled by t/g; dangling columns (g == 0) are
// filled with t/n. Returns a.
float *stochastic(float *a,float *len,float t,int n)
{
    for(int col=0;col<n;col++)
    {
        const float degree=len[col];
        if(degree>0){
            for(int row=0;row<n;row++)
            {
                a[row*n+col]=(a[row*n+col]*t)/(degree);
            }
        }
        else {
            for(int row=0;row<n;row++)
            { a[row*n+col]=(1.0*t)/n; }
        }
    }
    return a;
}
// Add the damping ("teleport") term (1-t)/n to every entry of a, in place,
// completing the PageRank transition matrix. Returns a.
float *transmatrix(float *a,float t , int n)
{
    // Fix: the original filled a non-standard n x n VLA (stack array with a
    // runtime bound) with this constant and then added it element-wise; the
    // constant can be added directly, with identical results and no O(n^2)
    // stack usage.
    float damp = (1.0f - t) / n;
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            a[i*n+j] += damp;
        }
    }
    return a;
}
// Convergence check: accumulate (into the initial value `sum`) the squared
// difference between column f of the two n x d row-major vectors b and c;
// return true while the total is still >= tol (i.e. keep iterating).
bool error(float *b, float *c,float tol,float sum,int n,int f,int d)
{
    for(int row=0;row<n;row++)
    {
        const float diff = c[d*row+f]-b[d*row+f];
        sum += diff*diff;
    }
    return sum >= tol;
}
// PageRank driver: reads an edge list from Barbasi.txt, builds the damped
// stochastic matrix, and iterates matrixmult on the GPU until convergence.
int main(){ ////// Main function starts //////
struct timeval t1, t2;
gettimeofday(&t1, 0);
int n=100; /////// NUMBER OF NODES ///////////
int d=15;
float tol=0.00000000005; //////////// TOLERANCE VALUE ////////
float *len;
float b=0.85; ////////// Damping Factor //////////
float *v1,*v2,*matri;
size_t bytes = n*n*sizeof(float);
// NOTE(review): these malloc'd blocks are leaked — cudaMallocManaged below
// overwrites matri/v1/v2 with managed allocations; the mallocs should be removed.
matri = (float*)malloc( bytes ); ////// DYNAMIC MEMORY ALLOCATION /////////
v1 = (float*)malloc( n*d*sizeof(float) );
v2 = (float*)malloc( n*d*sizeof(float) );
len = (float*)malloc( n*sizeof(float));
gpuErrchk( cudaMallocManaged(&matri,bytes)); //////// Device Memory Allocation///////
gpuErrchk(cudaMallocManaged(&v1,d*n*sizeof(float)));
gpuErrchk(cudaMallocManaged(&v2,d*n*sizeof(float)));
// Initial rank vector: uniform 1/n in every column slot of v1.
for(int i=0;i<d*n;i++){
v1[i]=1.0/n;
v2[i]=1.0;}
///////
int i=0;
float *first,*second;
int t=291;
first = (float*)malloc( t*sizeof(float));
second = (float*)malloc( t*sizeof(float));
string line;
ifstream myfile("Barbasi.txt");
// NOTE(review): if the file is missing or has fewer than t=291 lines,
// first/second stay (partially) uninitialized and are read below — verify input.
while(std::getline(myfile,line))
{
std::stringstream linestream(line);
float val1;
float val2;
while(linestream>>val1>>val2){
first[i]=val1;
second[i]=val2;
}
i++;
}
myfile.close();
int f1,f2;
for(int i=0;i<n;i++)
{
for(int j=0;j<n;j++)
{
matri[i*n+j]=0;
}
}
// Build the adjacency matrix: edge f1 -> f2 sets matri[f2][f1] = 1.
for(int i=0;i<t;i++){
f1=first[i];
f2=second[i];
matri[f2*n+f1]=1;
}
length(matri,len,n);
stochastic(matri,len,b,n);
transmatrix(matri,b,n);
int blocks = (n+TILE_WIDTH-1)/TILE_WIDTH; ////////// Number of blocks to be used///////
dim3 dim_block(TILE_WIDTH, TILE_WIDTH); ////////// Number of threads per block ///////
dim3 dim_grid( blocks, blocks);
matrixmult<<<dim_grid, dim_block>>>(matri, v1, v2, n,d); ///////// Cuda function call /////
cudaDeviceSynchronize();
int f=0;
// Power iteration: column f of v2 becomes column f+1 of v1 for the next pass.
// NOTE(review): no guard on f — if convergence takes more than d-1 iterations,
// v1[i*d+f+1] indexes out of bounds; verify d is large enough.
while(error(v1,v2,tol,0,n,f,d)) /////// Iterative Multiplication /////
{
for(int i=0;i<n;i++){v1[i*d+f+1]=v2[i*d+f];}
matrixmult<<<dim_grid, dim_block>>>(matri, v1, v2, n,d);
cudaDeviceSynchronize();
f++;
}
// Sum of the final rank vector (should be ~1 for a stochastic matrix).
double coutnnn=0;
for(int i=0;i<n;i++){
//cout<<v2[i*d+f]<<endl;
coutnnn=coutnnn+v2[i*d+f];
}
cout<<coutnnn<<endl;
cout<<"Number of iterations to converge = "<<f<<endl;
cout<<endl<<endl;
gettimeofday(&t2, 0);
double timee = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; ////////// Time taken to to calculate final PageRank//////
cout<<timee/1000<<endl;
}
|
2,289 | #ifndef MarshalStructs_H
#define MarshalStructs_H
#include <cuda_runtime.h>
// A single sound packet: an amplitude plus the [minRange, maxRange] interval
// it covers. Defaults span [0, 1]; all constructors are host/device callable.
class SoundPacketStruct{
public:
__host__ __device__ SoundPacketStruct() : amplitude(0.0f), minRange(0.0f), maxRange(1.0f) { }
__host__ __device__ SoundPacketStruct(float _amplitude) : amplitude(_amplitude), minRange(0.0f), maxRange(1.0f) { }
__host__ __device__ SoundPacketStruct(float _amplitude, float _minRange, float _maxRange) : amplitude(_amplitude), minRange(_minRange), maxRange(_maxRange) { }
float amplitude;
float minRange;
float maxRange;
};
// One cell of a 2-D sound-propagation grid at coordinates (x, z), holding
// four directional inbound/outbound packet queues of fixed capacity 100.
struct SoundGridStruct {
// Initializes the cell at (_x, _z): default physical coefficients,
// empty queues, all packet slots zeroed.
SoundGridStruct(int _x, int _z)
{ x = _x;
z = _z;
epsilon = 0.001f;
absorptionRate = 0.98f;
reflectionRate = 0.01f;
flagWall = false;
updated = false;
for (int i = 0; i < 4; i++)
{
sizeOfIn[i] = 0;
sizeOfOut[i] = 0;
for (int j = 0; j < 100; j++)
{
IN[i][j] = SoundPacketStruct(0.0f);
OUT[i][j] = SoundPacketStruct(0.0f);
}
}
}
// Appends a packet to the inbound queue for `direction` (0..3).
// NOTE(review): no bounds check against the capacity of 100 — a 101st
// append writes past the array; confirm callers cap queue growth.
__host__ __device__ void addPacketToIn(int direction, SoundPacketStruct packet)
{
int index = sizeOfIn[direction];
IN[direction][index] = packet;
sizeOfIn[direction] = index + 1;
}
// Appends a packet to the outbound queue for `direction` (0..3).
// NOTE(review): same missing capacity check as addPacketToIn.
__host__ __device__ void addPacketToOut(int direction, SoundPacketStruct packet)
{
int index = sizeOfOut[direction];
OUT[direction][index] = packet;
sizeOfOut[direction] = index + 1;
}
SoundPacketStruct IN[4][100];   // inbound packets, per direction
SoundPacketStruct OUT[4][100];  // outbound packets, per direction
int sizeOfIn[4];                // current length of each IN queue
int sizeOfOut[4];               // current length of each OUT queue
float epsilon;                  // amplitude threshold
float absorptionRate;           // fraction absorbed per step
float reflectionRate;           // fraction reflected per step
bool flagWall;                  // true if this cell is a wall
bool updated;                   // per-tick update marker
int x;                          // grid column
int z;                          // grid row
};
// Flattened marshaling mirror of SoundGridStruct: the 4x100 IN/OUT queues
// are stored as flat 400-element arrays for transfer across the API boundary.
struct SoundGridToReturn {
// Copies the scalar cell state; zeroes every packet slot.
// NOTE(review): sizeOfIn/sizeOfOut are NOT initialized by this
// constructor — callers must fill them before reading.
SoundGridToReturn(float e, float a, float r, bool w, bool u, int _x, int _z) : epsilon(e), absorptionRate(a), reflectionRate(r), flagWall(w), updated(u), x(_x), z(_z)
{
for (int i = 0; i < 400; i++)
{
IN[i] = SoundPacketStruct(0.0f);
OUT[i] = SoundPacketStruct(0.0f);
}
}
SoundPacketStruct IN[400];   // flattened 4x100 inbound queues
SoundPacketStruct OUT[400];  // flattened 4x100 outbound queues
int sizeOfIn[4];             // queue lengths (uninitialized here)
int sizeOfOut[4];            // queue lengths (uninitialized here)
int x;                       // grid column
int z;                       // grid row
float epsilon;               // amplitude threshold
float absorptionRate;        // fraction absorbed per step
float reflectionRate;        // fraction reflected per step
bool flagWall;               // wall flag
bool updated;                // per-tick update marker
};
// A sound emitter at grid position (x, z) with a 150-tick emission schedule
// of up to 10 packets per tick.
struct SoundSourceStruct {
// Seeds each tick with one packet: amplitude 10.0 for the first `len`
// (10) ticks, a faint 0.1 afterwards.
SoundSourceStruct(int _x, int _z)
{
x =_x;
z = _z;
limitTickCount = 100000;
int len = 10;
for (int i = 0; i < 150; ++i) {
packetList[i][0] = SoundPacketStruct(i < len ? 10.0f : 0.1f);
sizesOfPacketList[i] = 1;
}
}
SoundPacketStruct packetList[150][10];  // per-tick emission packets
int sizesOfPacketList[150];             // packets used per tick (init 1)
int limitTickCount;                     // emission cutoff tick
int x;                                  // grid column
int z;                                  // grid row
};
// Flattened marshaling mirror of SoundSourceStruct: the 150x10 packet
// schedule is stored as one 1500-element array (row-major, index i*10+j).
struct SoundStructToReturn
{
// Zeroes every packet slot and every per-tick length.
SoundStructToReturn(int _limitTick, int _x, int _z) : x(_x), z(_z), limitTick(_limitTick)
{
for (int i = 0; i < 150; i ++)
{
for (int j = 0; j < 10; j++)
{
SoundPacketStruct packet = SoundPacketStruct(0.0f);
packetList[(i*10)+j] = packet;
}
sizeOfPacketList[i] = 0;
}
}
SoundPacketStruct packetList[1500];  // flattened 150x10 schedule
int sizeOfPacketList[150];           // packets used per tick
int limitTick;                       // emission cutoff tick
int x;                               // grid column
int z;                               // grid row
};
#endif |
2,290 | //#include "bits/stdc++.h"
//#include<iostream>
//#include<string>
#include<stdio.h>
#include<stdlib.h>
extern "C"
//typedef struct testStruct{
// int x;
//}testStruct;
// Brute-force closest-pair search, one block per chunk.
// `input` holds interleaved (x, y) coordinates; block b scans point indices
// [startPoints[b], endPoints[b]) and writes {i, j, distance} of the closest
// pair into output[b*3 .. b*3+2]. distancePoints[b] seeds the search with
// an initial upper bound on the distance.
__global__ void gpu(float* input, float* output, int* startPoints, int* endPoints, float* distancePoints){
    int block = blockIdx.x;
    // Seed the result slot with the first candidate pair and the bound.
    output[block*3]   = startPoints[block];
    output[block*3+1] = startPoints[block]+1;
    output[block*3+2] = distancePoints[block];
    for (int i1 = startPoints[block]; i1 < endPoints[block]; ++i1) {
        for (int i2 = i1 + 1; i2 < endPoints[block]; ++i2) {
            // Use float arithmetic throughout: the original called sqrt()
            // (double) and powf(x, 2) — plain multiplication and sqrtf()
            // avoid the silent double-precision promotion.
            float dx = input[i1*2]     - input[i2*2];
            float dy = input[i1*2 + 1] - input[i2*2 + 1];
            float distance = sqrtf(dx*dx + dy*dy);
            if (distance < output[block*3+2]) {
                output[block*3]   = (float)i1;
                output[block*3+1] = (float)i2;
                output[block*3+2] = distance;
            }
        }
    }
}
|
2,291 | #include<fstream>
#include<iostream>
#include<vector>
#include<ctime>
#include<cmath>
using namespace std;
int N;
// Reads one vector from `fin`: first the element count n, then n values.
// Side effect: stores the count in the file-global N (consumed by doIt()).
vector<float> readVector(ifstream &fin)
{
    int n;
    float value;
    fin >> n;
    vector<float> result;
    // Avoid repeated reallocation for large samples.
    if (n > 0)
        result.reserve(n);
    // Read directly as float: the original read each value into an int,
    // silently truncating any fractional input.
    for (int i = 0; i < n; i++) {
        fin >> value;
        result.push_back(value);
    }
    N = n;
    return result;
}
// One block per element (launched as <<<N, 1>>> by doIt): computes
// c[i] = sin(cos(sin(a[i]))) + sin(cos(sin(b[i]))) using blockIdx.x as the
// element index. No bounds guard — grid size must equal the array length.
__global__ void add(float*a, float*b, float*c) {
c[blockIdx.x] = sinf(cosf(sinf(a[blockIdx.x]))) + sinf(cosf(sinf(b[blockIdx.x])));
}
// Runs the add kernel on `sample` (used as both inputs a and b), timing the
// whole round-trip (alloc + copies + kernel + copy back) with clock(), and
// appends "{N, seconds}," to `fout`. Reads the file-global N for the length.
void doIt(float* sample,ofstream &fout){
clock_t begin=clock();
float *a,*b,*c; //host variables
float *d_a, *d_b, *d_c; //device variables
int size=N*sizeof(float);
// Device buffers for the two inputs and the result.
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
//a = (int *)malloc(size);
// Both inputs alias the caller's buffer; only c is owned by this function.
a=sample;
//b = (int *)malloc(size);
b=sample;
c = (float *)malloc(size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
//launch kernel for N blocks
add<<<N,1>>>(d_a,d_b,d_c);
// Blocking copy back; the extra synchronize below also flushes any
// pending work before the error check.
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaError_t error=cudaGetLastError();
if(error!=cudaSuccess){
printf("Error: %s\n",cudaGetErrorString(error));
}
/*for (int i=0;i<N;i++)
{
std::cout<<a[i]<<"+"<<b[i]<<"="<<c[i]<<std::endl;
}
*/
//free(a);
//free(b);
// Only c was malloc'd here; a and b belong to the caller.
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
clock_t end=clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
cout<<". Elapsed time: "<<elapsed_secs<<endl;
fout<<"{"<<N<<", "<<elapsed_secs<<"},";
}
// Usage: prog <input-file> <sample-count>.
// Reads `sample-count` vectors from the input file and benchmarks the add
// kernel on each, writing "{N, seconds}," records to result.txt.
// NOTE(review): argv[1]/argv[2] are used without an argc check — missing
// arguments crash before the usage output could help.
int main(int argc, char ** argv)
{
cout<<"file name: "<<argv[1]<<endl;
cout<<"Sample count: "<<argv[2]<<endl;
string fileName=argv[1];
int sample_count=stoi(argv[2]);
//cout<<"Sample count: "<<sample_count<<endl;
vector<float> sample;
ifstream fin(fileName);
ofstream fout("result.txt");
for (int i=0;i<sample_count;i++){
cout<<"Sample №"<<i;
// readVector sets the global N; doIt reads it.
sample=readVector(fin);
doIt(&sample[0],fout);
}
fout.close();
return 0;
} |
2,292 | #include "reader.cuh"
|
2,293 | #include "includes.h"
// Builds a luminance histogram: one thread per input sample maps
// d_in[i] into one of numBins equal-width bins over [lumMin, lumMin+lumRange]
// and increments d_out[bin] atomically.
__global__ void histogram(const float* d_in, unsigned int* d_out, const float lumMin, const float lumRange, const size_t numBins, const size_t size)
{
    int abs_x = threadIdx.x + blockDim.x * blockIdx.x;
    // Bounds guard. FIX: the original used `>`, letting the thread with
    // abs_x == size read d_in[size] (one past the end).
    if (abs_x >= size)
    {
        return;
    }
    int bin = (d_in[abs_x] - lumMin) / lumRange * numBins;
    // Clamp: the maximum luminance maps exactly to numBins, one past the
    // last bin; out-of-range values below lumMin would go negative.
    if (bin >= (int)numBins) bin = (int)numBins - 1;
    if (bin < 0) bin = 0;
    // Increment must be atomic — many threads may hit the same bin.
    atomicAdd(&(d_out[bin]), 1);
}
2,294 | #include "asset.cuh"
#include <math.h>
#include <numeric>
#include <algorithm>
#include <stdio.h>
namespace fin
{
CUDA_CALLABLE_MEMBER
Asset::Asset(int id)
{
this->id = id;
this->size = 0;
this->closes = 0;
}
CUDA_CALLABLE_MEMBER
Asset::Asset()
{
this->id = -1;
this->size = 0;
this->closes = 0;
}
CUDA_CALLABLE_MEMBER
Asset::Asset(const Asset& asset)
{
this->id = asset.id;
this->size = asset.size;
this->closes = 0;
if (asset.closes != 0)
{
// deep copy asset closes
this->closes = new Close [asset.size];
for (int i = 0; i < asset.size; ++i)
this->closes[i] = asset.closes[i];
}
}
CUDA_CALLABLE_MEMBER
// Copy assignment with a deep copy of the closes array.
// FIX: the original leaked the previously-owned `closes` buffer, had no
// self-assignment guard, and printed a stray debug message.
Asset Asset::operator=(const Asset& asset)
{
    // Self-assignment would otherwise free the buffer we are copying from.
    if (this == &asset)
        return *this;
    this->id = asset.id;
    this->size = asset.size;
    // Release any closes we already own before taking the new ones.
    if (this->closes != 0)
        delete [] this->closes;
    this->closes = 0;
    if (asset.closes != 0)
    {
        // deep copy asset closes
        this->closes = new Close [asset.size];
        for (int i = 0; i < asset.size; ++i)
            this->closes[i] = asset.closes[i];
    }
    return *this;
}
CUDA_CALLABLE_MEMBER
Asset::~Asset()
{
if (this->closes != 0)
{
delete [] this->closes;
this->closes = 0;
}
}
void Asset::set_closes(std::vector<Close> closes)
{
if (closes.size() == 0)
{
printf("closes is empty\n");
return;
}
// sort closes by date
this->sort_closes(closes);
// set closes size
this->size = closes.size();
// delete old closes if existing
if (this->closes != 0)
{
delete [] this->closes;
this->closes = 0;
}
// allocate closes
this->closes = new Close[this->size];
// set closes
for (int i = 0; i < this->size; ++i)
this->closes[i] = closes[i];
}
CUDA_CALLABLE_MEMBER
void Asset::set_id(int id)
{
if (id > 0)
this->id = id;
}
CUDA_CALLABLE_MEMBER
int Asset::get_id()
{
return this->id;
}
CUDA_CALLABLE_MEMBER
Close* Asset::get_closes(int *n) const
{
*n = this->size;
// allocate memory for closes
Close* closes = new Close[*n];
// set closes
for (int i = 0; i < *n; ++i)
closes[i] = this->closes[i];
return closes;
}
CUDA_CALLABLE_MEMBER
// Returns a newly-allocated copy of the closes between start_date and
// end_date (caller owns and must delete[] the result); *n receives the
// count. Returns 0 and prints a warning when the window is empty.
Close* Asset::get_closes(hlp::Date start_date, hlp::Date end_date,
int *n) const
{
// start defaults to index 0 if start_date is never matched; end stays -1
// if end_date is never matched, which makes *n <= 0 below.
int start = 0;
int end = -1;
// get period (start -> end)
for(int i = 0; i < this->size; ++i)
{
if (this->closes[i].date == start_date)
start = i;
if (this->closes[i].date == end_date)
{
end = i;
// NOTE(review): `continue` here is a no-op (last statement of the
// loop body) — presumably `break` was intended; verify against
// callers before changing, since later duplicate dates would move
// `end`.
continue;
}
}
// set size of return array
// NOTE(review): end - start EXCLUDES the close on end_date itself;
// confirm whether the window is meant to be end-inclusive (end-start+1).
*n = end - start;
if (*n <= 0)
{
printf("Woow, couldn't find closes with theses dates\n");
return 0;
}
// allocate memory for closes
Close* closes = new Close[*n];
// set closes
for (int i = 0; i < *n; ++i)
closes[i] = this->closes[i + start];
return closes;
}
CUDA_CALLABLE_MEMBER
float Asset::get_return() const
{
float v1 = this->closes[0].value;
float v2 = this->closes[this->size - 1].value;
return (v2 - v1) / v1;
}
CUDA_CALLABLE_MEMBER
float Asset::get_return(hlp::Date start_date, hlp::Date end_date) const
{
float v1, v2;
for (int i = 0; i < this->size; ++i) {
Close close = this->closes[i];
if (close.date == start_date)
v1 = close.value;
if (close.date == end_date)
v2 = close.value;
}
return (v2 - v1) / v1;
}
CUDA_CALLABLE_MEMBER
float* Asset::get_returns(int *n) const
{
*n = this->size - 1;
// allocate memory for returns
float* returns = new float[*n];
// compute all daily returns on that period
for (int i = 0; i < *n; ++i) {
float v1 = this->closes[i].value;
float v2 = this->closes[i + 1].value;
returns[i] = (v2 - v1) / v1;
}
return returns;
}
CUDA_CALLABLE_MEMBER
float* Asset::get_returns(hlp::Date start_date, hlp::Date end_date,
int* n) const
{
// get asset closes on this period (start->end)
Close* closes = this->get_closes(start_date, end_date, n);
// set n to returns size (closes - 1: it's dayly return)
*n -= 1;
// allocate memory for returns
float* returns = new float[*n];
// compute all daily returns on that period
for (int i = 0; i < *n; ++i) {
float v1 = closes[i].value;
float v2 = closes[i + 1].value;
returns[i] = (v2 - v1) / v1;
}
// free memory reserved for closes
delete[] closes;
return returns;
}
CUDA_CALLABLE_MEMBER
float Asset::get_volatility() const
{
int n;
// get daily returns
float* returns = this->get_returns(&n);
// compute average return
float avg = 0;
for (int i = 0; i < n; ++i)
avg += returns[i];
avg /= n;
// compute variance
float var = 0;
for (int i = 0; i < n; ++i)
var += pow(returns[i] - avg, 2);
var /= n;
// free memory of daily returns
delete[] returns;
// return volatility: sqrt(variance)
return sqrtf(var);
}
CUDA_CALLABLE_MEMBER
float Asset::get_volatility(hlp::Date start_date, hlp::Date end_date) const
{
int n;
// get dayly returns
float* returns = this->get_returns(start_date, end_date, &n);
// compute average return
float avg = 0;
for (int i = 0; i < n; ++i)
avg += returns[i];
avg /= n;
// compute variance
float var = 0;
for (int i = 0; i < n; ++i)
var += pow(returns[i] - avg, 2);
var /= n;
// free memory of daily returns
delete[] returns;
// return volatility: sqrt(variance)
return sqrtf(var);
}
CUDA_CALLABLE_MEMBER
float Asset::get_sharp() const
{
float ret = this->get_return();
float vol = this->get_volatility();
return ret / vol;
}
CUDA_CALLABLE_MEMBER
float Asset::get_sharp(hlp::Date start_date, hlp::Date end_date) const
{
float ret = this->get_return(start_date, end_date);
float vol = this->get_volatility(start_date, end_date);
return ret / vol;
}
// sort using a custom function object
// Strict-weak ordering on close dates for std::sort.
bool date_less(Close a, Close b)
{
return a.date < b.date;
}
// sort closes by date
// NOTE(review): the vector is passed BY VALUE, so only the local copy is
// sorted and the caller's vector is unchanged — set_closes' pre-sort is
// therefore a no-op. Fixing this requires changing the parameter to
// std::vector<Close>& in both this definition and the header declaration.
void Asset::sort_closes(std::vector<Close> closes)
{
std::sort(closes.begin(), closes.end(), date_less);
}
}
|
2,295 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
//nvcc -o mutual_outlinks mutual_outlinks.cu -arch sm_20
//find mean number of mutual outlinks
//among all pairs of websites
//checking all (i,j) pairs
//thread k will handle all i such that
//i%totth = k, where totth is the number of threads
// Counts mutual outlinks over all row pairs (i, j), i < j, of the n x n
// adjacency matrix m. Rows are distributed cyclically: the thread with
// global id `me` handles every row i with i % totth == me, and each
// thread folds its partial sum into *tot with a single atomicAdd.
__global__ void procpairs(int *m, int *tot, int n){
    int totth = gridDim.x * blockDim.x;
    int me = blockIdx.x*blockDim.x + threadIdx.x;
    int i, j, k, sum = 0;
    for (i = me; i < n; i += totth) {
        // FIX: the original started at j = i+i, which pairs row 0 with
        // itself and skips most partners of every later row; the pair
        // loop must start at j = i+1.
        for (j = i + 1; j < n; j++) {
            for (k = 0; k < n; k++)
                sum += m[n*i + k] * m[n*j + k];
        }
    }
    // One contended atomic per thread, not per pair.
    atomicAdd(tot, sum);
}
// Builds a random 10x10 0/1 adjacency matrix (zero diagonal), runs the
// procpairs kernel over 4 blocks of 192 threads, and copies the mutual-link
// total back to htot.
// NOTE(review): the result htot is never printed and hm/dm/dtot are never
// freed; cudaThreadSynchronize() is deprecated in favor of
// cudaDeviceSynchronize(). No CUDA call is error-checked.
int main(){
int n = 10, nblk = 4;
int *hm, *dm, htot, *dtot;
int msize = n*n*sizeof(int);
hm = (int *) malloc(msize);
int i,j;
// Random links off the diagonal; self-links zeroed.
for( i = 0; i < n; i++){
hm[n*i + i] = 0;
for(j=0;j<n; j++)
if(j != i) hm[i*n +j] = rand()%2;
}
cudaMalloc((void **) &dm, msize);
cudaMemcpy(dm, hm, msize, cudaMemcpyHostToDevice);
// Device accumulator starts at zero.
htot = 0;
cudaMalloc((void **) &dtot, sizeof(int));
cudaMemcpy(dtot, &htot, sizeof(int), cudaMemcpyHostToDevice);
dim3 dimGrid(nblk, 1);
dim3 dimBlock(192, 1, 1);
procpairs<<<dimGrid, dimBlock>>>(dm, dtot, n);
cudaThreadSynchronize();
cudaMemcpy(&htot, dtot, sizeof(int), cudaMemcpyDeviceToHost);
return 0;
}
|
2,296 | // mmm_cuda.cu, Crispin Bernier, chb2ab
#include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
#include <iostream>
using namespace std;
//----------------------------------- Structures and Globals---------------------------------------------
typedef struct {
int dimension1;
int dimension2;
} ArrayMetadata2D;
// metadata variables describing dimensionalities of all data structures involved in the computation
ArrayMetadata2D A_MD, B_MD, C_MD;
// pointers for input and output arrays in the host memory
float *A, *B, *C, *C_CPU;
// pointers for input and output arrays in the device memory (NVIDIA DRAM)
float *A_GPU, *B_GPU, *C_GPU;
//----------------------------------- host function definitions -----------------------------------------
void allocateAndInitializeAB();
void computeCpuMMM();
void copyMatricesToGPU();
void copyResultFromGPU();
void compareHostAndGpuOutput();
void die(const char *error);
void check_error(cudaError e);
//----------------------------------- CUDA function definitions -----------------------------------------
void computeGpuMMM();
// For 10,000x10,000 matrices use a 313x313 block grid to completely cover the matrix
const int blocks2d_sz = 313;
const int threads2d_sz = 32;
//-------------------------------------------------------------------------------------------------------
int main(int argc, char **argv) {
// Read in the matrix sizes
A_MD.dimension1 = (argc > 1) ? atoi(argv[1]) : 100;
A_MD.dimension2 = (argc > 2) ? atoi(argv[2]) : A_MD.dimension1;
B_MD.dimension1 = (argc > 3) ? atoi(argv[3]) : A_MD.dimension2;
B_MD.dimension2 = (argc > 4) ? atoi(argv[4]) : B_MD.dimension1;
C_MD.dimension1 = A_MD.dimension1;
C_MD.dimension2 = B_MD.dimension2;
printf("Matrix A is %d-by-%d\n", A_MD.dimension1, A_MD.dimension2);
printf("Matrix B is %d-by-%d\n", B_MD.dimension1, B_MD.dimension2);
printf("Matrix C is %d-by-%d\n", C_MD.dimension1, C_MD.dimension2);
// Initialize A and B to random floats.
// Initialization is not part of total runtime.
allocateAndInitializeAB();
clock_t start;
clock_t end;
double elapsed;
// // matrix matrix multiplication in the CPU for comparison purposes
// start = clock();
// computeCpuMMM();
// end = clock();
// elapsed = (end - start) / (double) CLOCKS_PER_SEC;
// printf("Computation time in the CPU: %f seconds\n", elapsed);
// GPU matrix multiplication
start = clock();
computeGpuMMM();
end = clock();
elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("Computation time in the GPU: %f seconds\n", elapsed);
// // compare if GPU implementation is correct
// compareHostAndGpuOutput();
return 0;
}
// Matrix multiply kernel
// Tiled matrix-matrix multiply C = A * B for square N x N matrices.
// Expects blockDim = (threads2d_sz, threads2d_sz); each block computes one
// 32x32 tile of C, staging matching tiles of A and B through shared memory.
__global__ void mm_multiply_kernel(float *A, float *B, float *C, int N) {
    // Row/column of C owned by this thread.
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    // Shared-memory tiles of A and B for the current step.
    __shared__ float a_shared[threads2d_sz][threads2d_sz];
    __shared__ float b_shared[threads2d_sz][threads2d_sz];
    // Accumulated dot product for C[r][c].
    float result = 0;
    int tile_iter, k, A_row_glob, A_col_glob, B_row_glob, B_col_glob;
    // Number of complete tiles along the shared dimension.
    int tile_length = N / blockDim.x;
    for (tile_iter = 0; tile_iter < tile_length; tile_iter++) {
        // Global coordinates of the elements this thread loads.
        A_row_glob = blockIdx.y * blockDim.y + threadIdx.y;
        A_col_glob = tile_iter * blockDim.x + threadIdx.x;
        B_row_glob = tile_iter * blockDim.y + threadIdx.y;
        B_col_glob = blockIdx.x * blockDim.x + threadIdx.x;
        // Guarded loads: bottom/right edge blocks would otherwise read
        // out of bounds (the original loaded unconditionally here).
        if (A_row_glob < N && A_col_glob < N)
            a_shared[threadIdx.y][threadIdx.x] = A[A_row_glob * N + A_col_glob];
        if (B_row_glob < N && B_col_glob < N)
            b_shared[threadIdx.y][threadIdx.x] = B[B_row_glob * N + B_col_glob];
        // Every thread must finish loading before any thread reads the tile.
        __syncthreads();
        // FIX: the original "manually unrolled" 32 products starting from an
        // UNINITIALIZED k (no loop ever assigned it), which is undefined
        // behavior. Iterate over the full tile and let the compiler unroll.
        #pragma unroll
        for (k = 0; k < threads2d_sz; k++)
            result += a_shared[threadIdx.y][k] * b_shared[k][threadIdx.x];
        // Every thread must finish reading before the next load overwrites.
        __syncthreads();
    }
    // Handle leftover rows/columns when N is not a multiple of the tile size.
    int leftover = N - tile_length * blockDim.x;
    if (leftover > 0) {
        A_row_glob = blockIdx.y * blockDim.y + threadIdx.y;
        A_col_glob = tile_iter * blockDim.x + threadIdx.x;
        B_row_glob = tile_iter * blockDim.y + threadIdx.y;
        B_col_glob = blockIdx.x * blockDim.x + threadIdx.x;
        // Load only the in-bounds entries of the partial tile.
        if (A_row_glob < N && A_col_glob < N) {
            a_shared[threadIdx.y][threadIdx.x] = A[A_row_glob * N + A_col_glob];
        }
        if (B_row_glob < N && B_col_glob < N) {
            b_shared[threadIdx.y][threadIdx.x] = B[B_row_glob * N + B_col_glob];
        }
        __syncthreads();
        // Edge tile: only `leftover` products are valid.
        for (k = 0; k < leftover; k++) {
            result += a_shared[threadIdx.y][k] * b_shared[k][threadIdx.x];
        }
        __syncthreads();
    }
    // Store the result only for threads that map to a valid element of C.
    if (r < N && c < N) {
        C[r * N + c] = result;
    }
}
void computeGpuMMM() {
// Allocate GPU memory for the inputs and the result
clock_t start = clock();
copyMatricesToGPU();
clock_t end = clock();
double elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("\nGPU: \tTransfer to GPU: %f seconds\n", elapsed);
// block size and grid size
dim3 blocks2d(blocks2d_sz,blocks2d_sz);
dim3 threads2d(threads2d_sz,threads2d_sz);
// Execute the kernel to compute the vector sum on the GPU
start = clock();
mm_multiply_kernel <<<blocks2d, threads2d>>> (A_GPU, B_GPU, C_GPU, C_MD.dimension1);
// make the CPU main thread waite for the GPU kernel call to complete
cudaThreadSynchronize(); // This is only needed for timing and error-checking purposes
end = clock();
elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("\tKernel execution: %f seconds\n", elapsed);
// Check for kernel errors
check_error(cudaGetLastError());
// Transfer the result from the GPU to the CPU
start = clock();
copyResultFromGPU();
end = clock();
elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("\tTransfer from GPU: %f seconds\n", elapsed);
// Free the GPU memory
check_error(cudaFree(A_GPU));
check_error(cudaFree(B_GPU));
check_error(cudaFree(C_GPU));
}
// allocate and initialize A and B using a random number generator
void allocateAndInitializeAB() {
size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
A = (float*) malloc(sizeofA);
srand(time(NULL));
for (int i = 0; i < A_MD.dimension1; i++) {
for (int j = 0; j < A_MD.dimension2; j++) {
int index = i * A_MD.dimension2 + j;
A[index] = (rand() % 1000) * 0.001;
}
}
size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
B = (float*) malloc(sizeofB);
for (int i = 0; i < B_MD.dimension1; i++) {
for (int j = 0; j < B_MD.dimension2; j++) {
int index = i * B_MD.dimension2 + j;
B[index] = (rand() % 1000) * 0.001;
}
}
}
// allocate memory in the GPU for all matrices, and copy A and B content from the host CPU memory to the GPU memory
void copyMatricesToGPU() {
size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
check_error(cudaMalloc((void **) &A_GPU, sizeofA));
check_error(cudaMemcpy(A_GPU, A, sizeofA, cudaMemcpyHostToDevice));
size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
check_error(cudaMalloc((void **) &B_GPU, sizeofB));
check_error(cudaMemcpy(B_GPU, B, sizeofB, cudaMemcpyHostToDevice));
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
check_error(cudaMalloc((void **) &C_GPU, sizeofC));
}
// copy results from C_GPU which is in GPU card memory to C_CPU which is in the host CPU for result comparison
void copyResultFromGPU() {
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
C_CPU = (float*) malloc(sizeofC);
check_error(cudaMemcpy(C_CPU, C_GPU, sizeofC, cudaMemcpyDeviceToHost));
}
// do a straightforward matrix-matrix multiplication in the CPU
// notice that this implementation can be massively improved in the CPU by doing proper cache blocking but we are
// not providing you the efficient CPU implementation as that reveals too much about the ideal GPU implementation
void computeCpuMMM() {
// allocate the result matrix for the CPU computation
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
C = (float*) malloc(sizeofC);
// compute C[i][j] as the sum of A[i][k] * B[k][j] for all columns k of A
for (int i = 0; i < A_MD.dimension1; i++) {
int a_i = i * A_MD.dimension2;
int c_i = i * C_MD.dimension2;
for (int j = 0; j < B_MD.dimension2; j++) {
int c_index = c_i + j;
C[c_index] = 0;
for (int k = 0; k < B_MD.dimension1; k++) {
int a_index = a_i + k;
int b_index = k * B_MD.dimension2 + j;
C[c_index] += A[a_index] * B[b_index];
}
}
}
}
// function to determine if the GPU computation is done correctly by comparing the output from the GPU with that
// from the CPU
void compareHostAndGpuOutput() {
int totalElements = C_MD.dimension1 * C_MD.dimension2;
int missmatchCount = 0;
for (int i = 0; i < totalElements; i++) {
if (fabs(C[i] - C_CPU[i]) > 0.01) {
missmatchCount++;
printf("mismatch at index %i: %f\t%f\n", i, C[i], C_CPU[i]);
}
}
if (missmatchCount > 0) {
printf("Computation is incorrect: outputs do not match in %d indexes\n", missmatchCount);
} else {
printf("Computation is correct: CPU and GPU outputs match\n");
}
}
// Prints the specified error message and then exits
void die(const char *error) {
printf("%s", error);
exit(1);
}
// If the specified error code refers to a real error, report it and quit the program
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
|
2,297 | #include <stdio.h>
#include <fstream>
#include <time.h>
// Generic utils
typedef float3 pixel;
// Reports (but deliberately does not abort on — see the commented exit)
// a failing CUDA status code.
// NOTE(review): because this is a function, __LINE__/__FILE__ expand to
// THIS helper's location, not the caller's; a macro wrapper would be
// needed to report the actual call site.
void check_result(cudaError_t value) {
cudaError_t status = value;
if (status != cudaSuccess) {
printf("Error %s at line %d in file %s\n",
cudaGetErrorString(status), __LINE__, __FILE__);
// exit(1);
}
}
__device__ float3 operator+(const float3 &a, const float3 &b) {
return make_float3(a.x + b.x,a.y + b.y,a.z + b.z);
}
__device__ float3 operator*(const float3 &a, const float &b) {
return make_float3(a.x * b, a.y * b, a.z * b);
}
__device__ float length(const float3 &vec) {
return sqrt(vec.x * vec.x + vec.y * vec.y + vec.z * vec.z);
}
__device__ float3 normalize(const float3 vec) {
float inverted_len = 1.0f / length(vec);
return vec * inverted_len;
}
// Raymarcher
typedef struct {
float3 o;
float3 d;
} ray;
__device__ ray get_ray(const float& u, const float& v) {
ray r;
r.o = make_float3(-5.0, 0.0, 0.0);
r.d = normalize(make_float3(1.0, u, v));
return r;
}
// Distance estimator for the Mandelbulb fractal (power-16, 4 iterations):
// returns a lower bound on the distance from `pos` to the surface, used by
// the raymarcher to take safe steps.
// FIX: the original mixed double-precision functions (acos, atan2, pow,
// sin, cos, log) and double literals into this float device function,
// forcing slow double math on the GPU; all math is now single-precision.
__device__ float mandelbulb_de(float3 pos) {
    // pos = fmod(fabs(pos), 4.0) - 2.0;
    float3 z = pos;
    float dr = 1.0f;        // running derivative magnitude
    float r = 0.0f;         // running radius
    const int Iterations = 4;
    const float Bailout = 4.0f;
    const float Power = 16.0f;
    for (int i = 0; i < Iterations; i++) {
        r = length(z);
        if (r > Bailout) break;
        // convert to polar coordinates
        float theta = acosf(z.z / r);
        float phi = atan2f(z.y, z.x);
        dr = powf(r, Power - 1.0f) * Power * dr + 1.0f;
        // scale and rotate the point
        float zr = powf(r, Power);
        theta = theta * Power;
        phi = phi * Power;
        // convert back to cartesian coordinates
        z = make_float3(sinf(theta) * cosf(phi),
                        sinf(phi) * sinf(theta), cosf(theta)) * zr;
        z = z + pos;
        //z += pos * cos(time * 2.0);
    }
    return 0.5f * logf(r) * r / dr;
}
__device__ float march(ray r) {
float total_dist = 0.0;
int max_ray_steps = 64;
float min_distance = 0.002;
int steps;
for (steps = 0; steps < max_ray_steps; ++steps) {
float3 p = r.o + r.d * total_dist;
float distance = mandelbulb_de(p);
total_dist += distance;
if (distance < min_distance) break;
}
return 1.0 - (float) steps / (float) max_ray_steps;
}
// Main kernel
__global__ void d_main(
pixel* screen_buffer,
const size_t width,
const size_t height
) {
size_t x = (blockIdx.x * blockDim.x) + threadIdx.x;
size_t y = (blockIdx.y * blockDim.y) + threadIdx.y;
if(x < width && y < height) {
float min_w_h = (float) min(width, height);
float ar = (float) width / (float) height;
float u = (float) x / min_w_h - ar * 0.5f;
float v = (float) y / min_w_h - 0.5f;
ray r = get_ray(u, v);
float c = march(r) * 255.0f;
float3 color = make_float3(c, c, c);
screen_buffer[y * width + x] = color;
}
}
// Writes the screen buffer to `file_name` as a plain-text PPM (P3) image;
// each pixel's float channels are truncated to integer values (expected
// range 0..255 per the header's max value).
void write_image(
char* file_name,
pixel* screen_buff,
size_t width,
size_t height
) {
    FILE* image = fopen(file_name, "w");
    // Guard: the original dereferenced a NULL FILE* on open failure.
    if (image == NULL) {
        printf("Could not open output file\n");
        return;
    }
    fprintf(image, "P3\n");
    // FIX: passing size_t to %i is undefined behavior on 64-bit targets;
    // cast explicitly (dimensions are small enough to fit an int).
    fprintf(image, "%i %i\n", (int) width, (int) height);
    fprintf(image, "%i\n", 255);
    for (size_t y = 0; y < height; y++) {
        for (size_t x = 0; x < width; x++) {
            float3 pixel = screen_buff[y * width + x];
            fprintf(image, "%i %i %i\n", (int) pixel.x, (int) pixel.y, (int) pixel.z);
        }
    }
    fclose(image);
}
int main(int argc, char** argv) {
// printf("Mandelbulb\n");
if(argc < 7) {
printf("Not enought params.\n");
return 1;
}
char* file_name = argv[1];
size_t width = atoi(argv[2]);
size_t height = atoi(argv[3]);
size_t num_pixels = width * height;
size_t group_width = atoi(argv[4]);
size_t group_height = atoi(argv[5]);
bool test = false;
if (*argv[6] == 't') {
test = true;
}
// Setup buffers
pixel* h_screen_buff;
pixel* d_screen_buff;
check_result(cudaMallocHost(&h_screen_buff, num_pixels * sizeof(pixel)));
check_result(cudaMalloc(&d_screen_buff, num_pixels * sizeof(pixel)));
dim3 block_dim(width / group_width, height / group_height);
dim3 group_dim(group_width, group_height);
// Execute on devicie
clock_t t_start = clock();
if(!test)
printf("Starting kernel execution...\n");
d_main<<<block_dim, group_dim>>>(d_screen_buff, width, height);
if(!test)
printf("Kernel execution ended.\n");
if(!test)
printf("Reading screan buffer from device...\n");
check_result(cudaMemcpy(h_screen_buff, d_screen_buff, num_pixels * sizeof(pixel), cudaMemcpyDeviceToHost));
if(!test)
printf("Done.\n");
printf("Time taken (ms): %i\n", (int) ((double) (clock() - t_start) / CLOCKS_PER_SEC * 1000.0f));
if(!test){
printf("Writing to file...\n");
write_image(file_name, h_screen_buff, width, height);
printf("Done\n");
}
//for(size_t y = 0;y < height;y++) {
// for(size_t x = 0;x < width;x++) {
// printf("%i ", (int) h_screen_buff[y * width + x].x);
// }
// printf("\n");
//}
cudaFreeHost(h_screen_buff);
cudaFree(d_screen_buff);
return 0;
}
|
2,298 | #include <iostream>
#include <cstdlib>
#include <ctime>
#include "cuda_runtime.h"
#define VEC_SIZE 20000
#define START 1
#define STOP 100
using namespace std;
// Element-wise product arr_c[i] = arr_a[i] * arr_b[i].
// FIX: the original indexed with threadIdx.x alone, but main launches this
// as <<<VEC_SIZE, 1>>> (one thread per block), so threadIdx.x was always 0
// and every block wrote arr_c[0]. The global index works for any 1-D
// launch whose total thread count equals the vector length — there is no
// length parameter, so the launch must cover exactly VEC_SIZE elements.
__global__ void vect_mul(int *arr_a, int *arr_b, int *arr_c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    arr_c[i] = arr_a[i] * arr_b[i];
}
// Multiplies two random int vectors on the GPU, sums the products on the
// host, and reports the device count, the total, and the kernel time
// measured with CUDA events. Uses goto-based cleanup on any CUDA failure.
int main()
{
    int *arr_a, *arr_b, *arr_c, total_sum = 0, dev_count;
    int *d_arr_a, *d_arr_b, *d_arr_c;
    // Byte count of each vector — cudaMalloc/cudaMemcpy take bytes.
    int size = sizeof(int) * VEC_SIZE;
    float working_time = 0;
    cudaEvent_t e_start, e_stop;
    cudaError_t cuda_status;
    cuda_status = cudaEventCreate(&e_start);
    if(cuda_status != cudaSuccess)
    {
        cout << "Can not create cuda event!" << endl;
    }
    cuda_status = cudaEventCreate(&e_stop);
    if(cuda_status != cudaSuccess)
    {
        cout << "Can not create cuda event!" << endl;
    }
    arr_a = new int[VEC_SIZE];
    arr_b = new int[VEC_SIZE];
    arr_c = new int[VEC_SIZE];
    cuda_status = cudaMalloc((void**)&d_arr_a, size);
    if(cuda_status != cudaSuccess)
    {
        cout << "Cuda malloc error!" << endl;
        goto cuda_error;
    }
    cuda_status = cudaMalloc((void**)&d_arr_b, size);
    if(cuda_status != cudaSuccess)
    {
        cout << "Cuda malloc error!" << endl;
        goto cuda_error;
    }
    cuda_status = cudaMalloc((void**)&d_arr_c, size);
    if(cuda_status != cudaSuccess)
    {
        cout << "Cuda malloc error!" << endl;
        goto cuda_error;
    }
    // Random inputs in [START, START+STOP).
    srand(time(NULL));
    for (int i = 0; i < VEC_SIZE; i++)
    {
        arr_a[i] = START + rand() % STOP;
        arr_b[i] = START + rand() % STOP;
    }
    // FIX: the original passed VEC_SIZE (element count) as the byte count
    // to every cudaMemcpy below, transferring only a quarter of each
    // vector; the correct byte count is `size`.
    cuda_status = cudaMemcpy(d_arr_a, arr_a, size, cudaMemcpyHostToDevice);
    if(cuda_status != cudaSuccess)
    {
        cout << "Cuda memcpy error!" << endl;
        goto cuda_error;
    }
    cuda_status = cudaMemcpy(d_arr_b, arr_b, size, cudaMemcpyHostToDevice);
    if(cuda_status != cudaSuccess)
    {
        cout << "Cuda memcpy error!" << endl;
        goto cuda_error;
    }
    cuda_status = cudaGetDeviceCount(&dev_count);
    if(cuda_status != cudaSuccess)
    {
        cout << "Cuda get device count error!" << endl;
        goto cuda_error;
    }
    cuda_status = cudaEventRecord(e_start);
    if(cuda_status != cudaSuccess)
    {
        cout << "Cuda event error while recording!" << endl;
        goto cuda_error;
    }
    // One thread per element (one-thread blocks; see vect_mul's indexing).
    vect_mul<<<VEC_SIZE, 1>>>(d_arr_a, d_arr_b, d_arr_c);
    cudaDeviceSynchronize();
    cuda_status = cudaGetLastError();
    if(cuda_status != cudaSuccess)
    {
        cout << "Kernel error!" << endl;
        goto cuda_error;
    }
    cuda_status = cudaEventRecord(e_stop);
    if(cuda_status != cudaSuccess)
    {
        cout << "Cuda event error while recording!" << endl;
        goto cuda_error;
    }
    cuda_status = cudaMemcpy(arr_c, d_arr_c, size, cudaMemcpyDeviceToHost);
    if(cuda_status != cudaSuccess)
    {
        cout << "Cuda memcpy error!" << endl;
        goto cuda_error;
    }
    // Host-side reduction of the element-wise products.
    for(int i = 0; i < VEC_SIZE; i++)
        total_sum += arr_c[i];
    cuda_status = cudaEventSynchronize(e_stop);
    if(cuda_status != cudaSuccess)
    {
        cout << "Cuda event error while synchronizing!" << endl;
        goto cuda_error;
    }
    cuda_status = cudaEventElapsedTime(&working_time, e_start, e_stop);
    if(cuda_status != cudaSuccess)
    {
        cout << "Cuda event error while elapsing!" << endl;
        goto cuda_error;
    }
    cout << "CUDA devices: " << dev_count << endl;
    cout << "Result of vectors multiplication is " << total_sum << endl;
    cout << "Working time: " << working_time << " ms"<< endl;
cuda_error:
    // Shared cleanup path for both success and failure.
    delete[] arr_a;
    delete[] arr_b;
    delete[] arr_c;
    cudaFree(d_arr_a);
    cudaFree(d_arr_b);
    cudaFree(d_arr_c);
    cudaDeviceReset();
    return 0;
}
|
2,299 | #include <stdlib.h>
#include <iostream>
using namespace std;
// Element-wise vector add C[i] = A[i] + B[i] over n elements, one thread
// per element.
// FIX: added the standard `i < n` bounds guard — the original wrote
// unconditionally, which reads/writes out of bounds whenever the launch
// rounds the thread count up past n.
__global__ void Plus(float A[], float B[], float C[], int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        C[i] = A[i] + B[i];
}
// Adds two constant vectors (90 + 10) on the GPU and reports the summed
// absolute deviation from the expected per-element result of 100.
int main()
{
    float*A, *Ad, *B, *Bd, *C, *Cd;
    int n = 1024 * 1024;
    int size = n * sizeof(float);
    // Allocate host buffers.
    A = (float*)malloc(size);
    B = (float*)malloc(size);
    C = (float*)malloc(size);
    // Initialize the inputs.
    for(int i=0;i<n;i++)
    {
        A[i] = 90.0;
        B[i] = 10.0;
    }
    // Allocate device buffers.
    cudaMalloc((void**)&Ad, size);
    cudaMalloc((void**)&Bd, size);
    cudaMalloc((void**)&Cd, size);
    // Copy inputs host -> device.
    // FIX: the original issued the B transfer twice back to back; the
    // redundant copy has been removed.
    cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
    // Launch config: (1024*1024/512) blocks of 512 threads each.
    dim3 dimBlock(512);
    dim3 dimGrid(n/512);
    // Run the kernel.
    Plus<<<dimGrid, dimBlock>>>(Ad, Bd, Cd, n);
    // Copy the result back (blocking, so no explicit sync is needed).
    cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);
    // Accumulate the absolute error against the expected value 100.0.
    float max_error = 0.0;
    for(int i=0;i<n;i++)
    {
        max_error += fabs(100.0 - C[i]);
    }
    cout << "max error is " << max_error << endl;
    // Release host and device memory.
    free(A);
    free(B);
    free(C);
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
    return 0;
}
2,300 | #include <iostream>
#include <cuda_runtime_api.h>
using namespace std;
// Adds each element's own index to it (a[tid] += tid) with a bounds guard,
// and prints the updated value from the device (debug aid — device printf
// serializes output and is slow).
__global__ void kernel(int* a, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < n) {
a[tid] = a[tid] + tid;
printf("a[i] = %d\n", a[tid]);
}
}
// Fills a[i] = i, runs the kernel (which doubles each entry via a[i] += i)
// over a 10x10 launch that exactly covers n = 100 elements, and prints the
// result from the host.
// NOTE(review): no CUDA call is error-checked, and system("Pause") is
// Windows-specific (no-op/error elsewhere).
int main()
{
const int n = 100;
int a[n];
int *dev_a;
// started values
for (int i = 0; i < n; i++)
{
a[i] = i;
}
// memory on GPU
cudaMalloc((void**)&dev_a, n * sizeof(int));
// copy to GPU
cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
kernel<<<10, 10>>>(dev_a, n);
// Blocking copy back also orders the host reads after the kernel.
cudaMemcpy(a, dev_a, n * sizeof(int), cudaMemcpyDeviceToHost);
// output
for( int i = 0; i < n; i++) {
cout << "a[i] = " << a[i] << endl;
}
// release memory
cudaFree(dev_a);
system("Pause");
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.