serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
12,501 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
// INSERT KERNEL(S) HERE
// Per-block privatized histogram kernel.
// Each block accumulates counts into a shared-memory copy (binL), then
// merges its private bins into the global `bins` array with one atomic
// per bin. Preconditions (not checked here): num_bins <= 4096, and every
// input value is < num_bins.
__global__ void myHisto(unsigned int* input, unsigned int* bins, unsigned int num_elements,
unsigned int num_bins) {
__shared__ unsigned int binL[4096]; // support num bins no more than 4096
// Cooperatively zero the shared bins, blockDim.x entries per pass.
int step = 0;
while (step < num_bins) {
if (step + threadIdx.x < num_bins) binL[step + threadIdx.x] = 0;
step += blockDim.x;
}
__syncthreads(); // all shared bins zeroed before counting starts
// Grid-stride loop over the input; shared-memory atomics are cheap.
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (i < num_elements) {
atomicAdd(&binL[input[i]], 1); // NOTE(review): no range check on input[i] -- out-of-range values corrupt binL
i += stride;
}
__syncthreads(); // counting finished before the merge
// Merge this block's private histogram into the global one.
step = 0;
while (step < num_bins) {
if (step + threadIdx.x < num_bins) atomicAdd(&bins[step + threadIdx.x], binL[step + threadIdx.x]);
step += blockDim.x;
}
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
// Host wrapper: launches myHisto over device buffers `input` and `bins`.
// `bins` must already be zeroed on the device; results are accumulated
// into it atomically.
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
unsigned int num_bins) {
// Criteria for choosing grid size and block size
// 1. shared memory size. 4k unsigned int use 16kB memory
// 2. efficiency. If each block creates its own local bin, there will be #blocks * size_of_bins writes to global bin
// Thus, the numbers a block deals must be levels larger than the size of bins.
// NOTE(review): num_bins > 4096 would overflow the kernel's shared array;
// not validated here -- caller must guarantee it.
// Grid grows as sqrt(num_elements)/64 so per-block work stays much larger
// than the per-block merge cost.
int grids = sqrt(num_elements) / 64 + 1;
dim3 dimGrid(grids, 1, 1);
dim3 dimBlock(512, 1, 1);
myHisto <<<dimGrid, dimBlock>>> (input, bins, num_elements, num_bins);
}
|
12,502 | #include <stdio.h>
#include <iostream>
#include <fstream>
using namespace std;
__global__ void decode(char *msg);
// Usage: ./decode <file>
// Reads the first whitespace-delimited token from the file, shifts every
// byte down by one on the GPU (undoing a +1 Caesar cipher), and prints it.
int main(int argc, char *argv[]){
if (argc > 2){
cout << "You have entered too many arguments, the program will now exit.\n";
exit(0);
}else if(argc == 1){
cout << "You have entered too few arguments, the program will now exit.\n";
exit(0);
}
char* filename = argv[1];
cout << "File name: " << filename << '\n';
char *r;
char *dev_r;
// fix: use calloc so the buffer is zeroed -- it was previously malloc'd
// and then printed/copied while uninitialized (indeterminate bytes).
r = (char*)calloc(256, sizeof(char));
cudaMalloc((void**)&dev_r, sizeof(char) * (256));
FILE *file;
file = fopen(filename, "r");
if (file){
cout << "File opened" << '\n';
// fix: bound the read to the 256-byte buffer (was an unbounded %s).
fscanf(file, "%255s,", r);
fclose(file); // fix: the file handle was never closed
cout << "File scanned\n";
}else{
cout << "That file does not exist, the program will now exit.\n";
exit(0);
}
cout << "Original Text:\n" << r << "\n\n";
cudaMemcpy(dev_r, r, sizeof(char) * (256), cudaMemcpyHostToDevice);
// One block, one thread per byte of the 256-byte buffer.
decode<<<1, (sizeof(char) * (256))>>>(dev_r);
cudaDeviceSynchronize();
cudaMemcpy(r, dev_r, sizeof(char) * (256), cudaMemcpyDeviceToHost);
cout << "Decoded Text:\n" << r << '\n';
cudaFree(dev_r);
free(r);
exit(0);
}
__global__ void decode(char *m){
// One thread per byte: shift every non-NUL byte down by one,
// undoing a +1 Caesar cipher. NUL terminators are left in place.
const int idx = threadIdx.x;
const char c = m[idx];
if (c != 0) {
m[idx] = c - 1;
}
}
|
12,503 | #include<stdlib.h>
#include<stdio.h>
#include<time.h>
void init_in(int *h_in,const int size){
// Seed from the wall clock, then fill h_in with values in [0, size).
srand((unsigned int)time(NULL));
for (int k = 0; k < size; ++k) {
h_in[k] = rand() % size;
}
}
void init_out(int *h_out,const int size){
// Zero the histogram bins before counting.
for (int k = 0; k < size; ++k)
h_out[k] = 0;
}
void hist_normal(int *h_in,int *h_out,const int size,const int bin_size){
// CPU reference histogram: bin index = value / bin_size.
for (int k = 0; k < size; ++k) {
const int bin = h_in[k] / bin_size;
++h_out[bin];
}
}
// One thread per input element: bin index = value / bin_size, counted
// with a global-memory atomic.
// NOTE(review): there is no bounds guard on idx -- the launch geometry
// (here 256 blocks x 1024 threads) must match the input size exactly,
// otherwise threads read past d_in.
__global__ void hist_atomic(int *d_in,int *d_out,const int bin_size){
int idx = threadIdx.x+blockIdx.x*blockDim.x;
int no = d_in[idx]/bin_size;
atomicAdd(&d_out[no],1);
}
// Each thread histograms its own contiguous chunk of `thread_size`
// elements into a private 32-bin array, then merges the partial counts
// into the global bins with one atomic per bin.
// NOTE(review): uses threadIdx.x only, so it assumes a single block;
// also assumes every value / bin_size falls in [0, 32).
__global__ void hist_local(int *d_in,int *d_out,const int bin_size,const int thread_size){
int idx = threadIdx.x;
int l_out[32] = {0};
for(int i=idx*thread_size;i<idx*thread_size+thread_size;i++){
int no = d_in[i]/bin_size;
l_out[no]++;
}
// Merge: 32 atomics per thread instead of one per element.
for(int i=0;i<32;i++)
atomicAdd(&d_out[i],l_out[i]);
}
void show_hist(int *h_out,const int bin_num){
// Print "bin : count" for every bin (1-based bin labels), then a blank line.
for (int bin = 0; bin < bin_num; ++bin) {
printf("%d : %d\n",bin+1,h_out[bin]);
}
printf("\n");
}
// Compares a CPU histogram with two GPU strategies (global atomics vs
// per-thread private bins) on 256K random ints split into 32 bins.
int main(){
int size = 262144;
int bin_num = 32;
int bin_size = size/bin_num;
int *h_in,*h_out;
int *d_in,*d_out;
h_in = (int *)malloc(size*sizeof(int));
h_out = (int *)malloc(bin_num*sizeof(int));
init_in(h_in,size);
init_out(h_out,bin_num);
time_t t_start = clock();
hist_normal(h_in,h_out,size,bin_size);
time_t t_end = clock();
// NOTE(review): difftime() expects time_t seconds; these are clock_t
// ticks, so the reported "ms" values are actually tick counts.
printf("hist_normal_time: %fms\n",difftime(t_end,t_start));
printf("hist_normal_result: \n");
show_hist(h_out,bin_num);
cudaMalloc((int **)&d_in,size*sizeof(int));
// NOTE(review): d_out is never zeroed before hist_atomic (cudaMalloc
// does not zero memory), so the first GPU result may start from garbage.
cudaMalloc((int **)&d_out,bin_num*sizeof(int));
cudaMemcpy(d_in,h_in,size*sizeof(int),cudaMemcpyHostToDevice);
dim3 block1(256);
dim3 thread1(1024);
t_start = clock();
// NOTE(review): kernel launches are asynchronous; without a
// cudaDeviceSynchronize() before t_end this times only the launch.
hist_atomic<<<block1,thread1>>>(d_in,d_out,bin_size);
t_end = clock();
cudaMemcpy(h_out,d_out,bin_num*sizeof(int),cudaMemcpyDeviceToHost);
printf("hist_atomic_time: %fms\n",difftime(t_end,t_start));
printf("hist_atomic_result: \n");
show_hist(h_out,bin_num);
int n = 64;
dim3 thread2(n);
// Reset the device bins via a zeroed host buffer before the second run.
init_out(h_out,bin_num);
cudaMemcpy(d_out,h_out,bin_num*sizeof(int),cudaMemcpyHostToDevice);
t_start = clock();
hist_local<<<1,thread2>>>(d_in,d_out,bin_size,size/n);
t_end =clock();
cudaMemcpy(h_out,d_out,bin_num*sizeof(int),cudaMemcpyDeviceToHost);
printf("hist_local_time: %fms\n",difftime(t_end,t_start));
printf("hist_local_result: \n");
show_hist(h_out,bin_num);
free(h_in);
free(h_out);
cudaFree(d_in);
cudaFree(d_out);
cudaDeviceReset();
return 0;
}
|
12,504 | // 2D convolution: each thread computes one output pixel for all
// n_kernels filters, summing over the kernel window and all channels.
// Zero padding is implied by skipping out-of-image taps.
// Expects a 2D launch covering (t_columns, t_rows); tensors are laid out
// interleaved: t[(row*t_columns + col)*n_channels + c].
// NOTE(review): tk is indexed without a channel term, so the same kernel
// weight is applied to every channel -- confirm that is intended.
__global__ void
conv(float *t, float *tk, float *out,
int t_rows, int t_columns, int n_channels,
int k_rows, int k_columns, int n_kernels)
{
const int i_out = blockDim.y * blockIdx.y + threadIdx.y,
j_out = blockDim.x * blockIdx.x + threadIdx.x;
// Top-left corner of the kernel window, centered on the output pixel.
int i0 = i_out - k_rows/2,
j0 = j_out - k_columns/2;
if (i_out < t_rows && j_out < t_columns)
for (int k = 0; k < n_kernels; k++)
{
float convolution = 0;
for (int m = 0; m < k_rows; m++)
for (int n = 0; n < k_columns; n++)
for (int c = 0; c < n_channels; c++)
if (-1 < i0 + m && i0 + m < t_rows &&
-1 < j0 + n && j0 + n < t_columns)
convolution += t[((i0 + m)*t_columns + (j0 + n))*n_channels + c]
* tk[(m*k_columns + n)*n_kernels + k];
out[(i_out*t_columns + j_out)*n_kernels + k] = convolution;
}
}
|
12,505 | #include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <ctime>
#define CheckErrorUtil(err) CheckError(err, __FUNCTION__, __LINE__)
#define CheckErrorMsgUtil(err, msg) CheckErrorMsg(err, msg, __FUNCTION__, __LINE__)
inline void CheckError(cudaError_t const err, char const* const fun, const int line)
{
// Report any non-success CUDA status with its call site, then abort.
if (err == cudaSuccess)
return;
printf("CUDA Error Code[%d]: %s\n%s() Line:%d\n", err, cudaGetErrorString(err), fun, line);
exit(1);
}
inline void CheckErrorMsg(cudaError_t const err, char const* const msg, char const* const fun, int const line)
{
// Like CheckError, but appends a caller-supplied context message.
if (err == cudaSuccess)
return;
printf("CUDA Error Code[%d]: %s\n%s() Line:%d\n%s\n", err, cudaGetErrorString(err), fun, line, msg);
exit(1);
}
void GenerateTestArrays(int const N, float* const a, float* const b, float* const c, float* const ref);
void CompareArrays(int const N, float const* const a, float const* const b);
__global__ void ArraysSum(float* const a, float* const b, float* const c, int const N)
{
// Element-wise c = a + b; one thread per element with a tail guard.
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < N)
{
c[idx] = a[idx] + b[idx];
}
}
void GenerateTestArrays(int const N, float* const a, float* const b, float* const c, float* const ref)
{
// Fill a and b with random values, zero the output array c, and
// precompute ref = a + b as the CPU ground truth for later comparison.
srand((unsigned)time(NULL));
for (int idx = 0; idx < N; idx++)
{
a[idx] = (float)rand();
b[idx] = (float)rand();
c[idx] = 0.0f;
ref[idx] = a[idx] + b[idx];
}
}
void CompareArrays(int const N, float const* const a, float const* const b)
{
// Scan for the first element-wise mismatch and report pass/fail on stdout.
int mismatch = 0;
for (int idx = 0; idx < N && !mismatch; idx++)
{
mismatch = (a[idx] != b[idx]);
}
if (mismatch)
{
printf("\n\n*** Oh No! The GPU failed to sum the arrays. ***\n\n\n");
}
else
{
printf("\n\n*** Awesome! The GPU summed the arrays!! ***\n\n\n");
}
}
// End-to-end vector-add demo: generate host data, sum on the GPU,
// verify against a CPU reference, and report total wall time.
int main()
{
std::clock_t start;
start = std::clock();
dim3 gridSize;
dim3 blockSize;
int const N = 8192000;
size_t const N_BYTES = N * sizeof(float);
int const BLOCK_SIZE = 512;
float *aH, *bH, *cH, *refH;
float *aD, *bD, *cD;
aH = (float*)malloc(N_BYTES);
bH = (float*)malloc(N_BYTES);
cH = (float*)malloc(N_BYTES);
refH = (float*)malloc(N_BYTES);
printf("\n\nGenerating 2 random float arrays on Host - each of size %lu bytes...\n", N_BYTES);
GenerateTestArrays(N, aH, bH, cH, refH);
printf("Allocating %lu bytes on Device GPU to store the 2 generated arrays...\n", 2 * N_BYTES);
CheckErrorUtil(cudaMalloc((void**)&aD, N_BYTES));
CheckErrorUtil(cudaMalloc((void**)&bD, N_BYTES));
printf("Allocating %lu bytes on Device GPU to store the result array after summing the 2 arrays...\n", N_BYTES);
CheckErrorUtil(cudaMalloc((void**)&cD, N_BYTES));
printf("Copying 2 arrays from Host to Device GPU...\n");
CheckErrorUtil(cudaMemcpy(aD, aH, N_BYTES, cudaMemcpyHostToDevice));
CheckErrorUtil(cudaMemcpy(bD, bH, N_BYTES, cudaMemcpyHostToDevice));
blockSize.x = BLOCK_SIZE; blockSize.y = 1; blockSize.z = 1;
// Ceil-divide so the last partial block covers the tail of the array.
gridSize.x = ((N + BLOCK_SIZE - 1) / BLOCK_SIZE); gridSize.y = 1; gridSize.z = 1;
printf("Summing the 2 arrays and storing the result array on Device GPU...\n");
ArraysSum<<<gridSize, blockSize>>>(aD, bD, cD, N);
printf("Synchronizing the Device GPU memory before copying the result array back to Host...\n");
// Sync first to surface async kernel faults, then check the launch itself.
CheckErrorUtil(cudaDeviceSynchronize());
CheckErrorUtil(cudaGetLastError());
printf("Copying result array from Device GPU to Host...\n");
CheckErrorUtil(cudaMemcpy(cH, cD, N_BYTES, cudaMemcpyDeviceToHost));
printf("Comparing expected result array stored on Host with actual result calculated on Device GPU...\n");
CompareArrays(N, cH, refH);
printf("Freeing %lu bytes on Device GPU...\n", 3 * N_BYTES);
CheckErrorUtil(cudaFree(aD));
CheckErrorUtil(cudaFree(bD));
CheckErrorUtil(cudaFree(cD));
printf("Freeing memory on Host...\n");
free(aH);
free(bH);
free(cH);
free(refH);
printf("Resetting Device GPU as though nothing ever happened!\n\n");
CheckErrorUtil(cudaDeviceReset());
printf("Executed in %.f milliseconds.\n\n", (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000));
return 0;
}
|
12,506 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include<cuda_profiler_api.h>
#define W 1200 /*****canvas Width*****/
#define H 1200 /*****canvas Height*****/
int C[100000], address[100000][2]; //buffers to store data for CUDA application
int count = 0;
struct vector {
int x,y;
};
struct ray {
struct vector origin;
struct vector dest;
};
///////////////////////////////////// FUNCTIONS CODE ////////////////////////////////////////
void canvas_init (int canvas[H][W]){
// Paint every pixel black (intensity 0) before any drawing happens.
for (int row = 0; row < H; ++row) {
for (int col = 0; col < W; ++col) {
canvas[row][col] = 0;
}
}
}
// Rasterizes a fixed radius-9 circle outline (intensity 150) centered at
// (x, y), using a hand-unrolled pixel table for each quadrant. If the
// circle would not fit inside the canvas it is silently skipped (the
// `garbage` writes exist only to keep the branch bodies non-empty after
// the diagnostic printfs were commented out).
void circle(int x, int y ,int canvas[H][W]){
int garbage = 0;
if ( ((x - 9)<=0) || ((x + 9)>= W) )
garbage =+ 1;//printf("Circle will not fit the width for given (%d,%d) coordinates",x,y);
else
{
if( ((y - 9)<=0) || ((y + 9)>= H) )
garbage = 0;//printf("Circle will not fit the Height for given (%d,%d) coordinates",x,y);
else
{
//half circle to the right 0 to 90
canvas[y][x+9] = 150;
canvas[y-1][x+9] = 150;
canvas[y-2][x+9] = 150;
canvas[y-3][x+9-1] = 150;
canvas[y-4][x+9-1] = 150;
canvas[y-5][x+9-2] = 150;
canvas[y-6][x+9-2] = 150;
canvas[y-6][x+9-3] = 150;
canvas[y-7][x+9-3] = 150;
canvas[y-7][x+9-4] = 150;
canvas[y-8][x+9-5] = 150;
canvas[y-8][x+9-6] = 150;
canvas[y-9][x+9-7] = 150;
canvas[y-9][x+9-8] = 150;
canvas[y-9][x] = 150;
//////////////////////////////
// quadrant below-right (NOTE(review): original label said "left 90 to 180")
canvas[y][x+9] = 150;
canvas[y+1][x+9] = 150;
canvas[y+2][x+9] = 150;
canvas[y+3][x+9-1] = 150;
canvas[y+4][x+9-1] = 150;
canvas[y+5][x+9-2] = 150;
canvas[y+6][x+9-2] = 150;
canvas[y+6][x+9-3] = 150;
canvas[y+7][x+9-3] = 150;
canvas[y+7][x+9-4] = 150;
canvas[y+8][x+9-5] = 150;
canvas[y+8][x+9-6] = 150;
canvas[y+9][x+9-7] = 150;
canvas[y+9][x+9-8] = 150;
canvas[y+9][x] = 150;
//////////////////////////////
//half circle to the left 270 to 360
canvas[y][x-9] = 150;
canvas[y-1][x-9] = 150;
canvas[y-2][x-9] = 150;
canvas[y-3][x-9+1] = 150;
canvas[y-4][x-9+1] = 150;
canvas[y-5][x-9+2] = 150;
canvas[y-6][x-9+2] = 150;
canvas[y-6][x-9+3] = 150;
canvas[y-7][x-9+3] = 150;
canvas[y-7][x-9+4] = 150;
canvas[y-8][x-9+5] = 150;
canvas[y-8][x-9+6] = 150;
canvas[y-9][x-9+7] = 150;
canvas[y-9][x-9+8] = 150;
canvas[y-9][x] = 150;
//////////////////////////////
//half circle to the left 180 to 270
canvas[y][x-9] = 150;
canvas[y+1][x-9] = 150;
canvas[y+2][x-9] = 150;
canvas[y+3][x-9+1] = 150;
canvas[y+4][x-9+1] = 150;
canvas[y+5][x-9+2] = 150;
canvas[y+6][x-9+2] = 150;
canvas[y+6][x-9+3] = 150;
canvas[y+7][x-9+3] = 150;
canvas[y+7][x-9+4] = 150;
canvas[y+8][x-9+5] = 150;
canvas[y+8][x-9+6] = 150;
canvas[y+9][x-9+7] = 150;
canvas[y+9][x-9+8] = 150;
canvas[y+9][x] = 150;
//////////////////////////////
}
}
}
// Records pixel (a, b) as a brighten candidate in the global C/address
// buffers (consumed later by the CUDA kernel), unless it is already
// near-saturated (> 250). `count` is the shared append cursor.
void brightness(int a, int b, int canvas[H][W]){
if(canvas[a][b] <= 250){
C[count] = canvas[a][b]; address[count][0] = a; address [count][1] = b;count++;
}
}
// Scans every canvas pixel for intersection with the given ray (via the
// cross-product form of the line equation); around each hit, collects
// bright pixels in a 9x9 window for later GPU brightening.
void raytracing (struct ray temp_ray, int canvas[H][W]){
//creating a equation
int a,b;
for(a = 0;a < H; a++){
for(b = 0; b < W; b++){
//detection of intersection
// (y - y1) = m(x - x1)
// m =(y2 - y1)/(x2 -x1)
/////////////////////////
int line_equation;
//check for other points beside them
// Zero cross product <=> (b, a) lies exactly on the ray's line.
line_equation = (temp_ray.dest.y - temp_ray.origin.y)*(b - temp_ray.origin.x)-(temp_ray.dest.x - temp_ray.origin.x)* (a - temp_ray.origin.y);
if(line_equation == 0)
{
/*if(canvas[a][b] < 50) //if u want the ray to be visible
canvas[a][b] =+ 50;*/
// NOTE(review): the window spans [a-4, a+4] x [b-4, b+4]; near the
// canvas border these indices go out of bounds -- needs a guard.
for(int i = 0; i<9; i++){
for(int j = 0; j<9; j++){
if(canvas[a -4 +j][b -4 + i] > 100)
brightness(a -4 + j, b -4 + i, canvas);
}
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////
}
// Dumps the canvas as comma-separated intensities, one row per line.
// NOTE(review): indexes canvas[x][y] while iterating y rows / x columns,
// so the output is the transpose of how the rest of the file addresses
// the canvas ([row][col]) -- harmless only because H == W.
void printcanvas (int canvas[H][W]){
int x,y;
//for each pixel
for( y =0; y < H; y++){
for(x = 0; x < W; x++){
//priting the image
printf("%d, ",canvas[x][y]);
}
printf("\n");
}
}
///////////// Forward declaration of the device multiplication function/////////////////////
__global__ void VectorAddCuda(int*, int);
__global__ void VectorAddCuda(int* C, int width)
{
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
// Index of the vector processed by the block
int index = bx*blockDim.x + tx;
// each thread adds corresponding elements matrix C
C[index] = C[index] + 100;
}
///////////////////////////////////// MAIN CODE ////////////////////////////////////////
// Draws random circles, CPU-raytraces them to collect bright pixels, then
// brightens the collected pixels on the GPU and prints the final canvas.
int main (int argc, char *argv[]){
//parameters initialization
int canvas[H][W];
srand(time(0));
struct ray ray[10]; //for creating a ray out of two points origin and dest
ray[0].origin.x = H;
ray[0].origin.y = H;
ray[0].dest.x = 0;
ray[0].dest.y = 0;
// Nine more rays fanning across the canvas.
for(int z = 1; z <10; z++)
{
ray[z].origin.x = H - z*100;
ray[z].origin.y = H;
ray[z].dest.x = z*100;
ray[z].dest.y = 0;
}
//initializing the canvas to be black
canvas_init(canvas);
///creating circles on the canvas
for(int i = 0; i <200; i++){
int number = rand()% H;
int number2 = rand()% H;
circle (number + 1,number2,canvas);
}
//raytracing
for(int z = 0;z < 10; z++){ ///loop for multiple rays
raytracing (ray[z],canvas);
}
/////////////////////////////////////////////////////////CUDA PART OF THE PROGRAM//////////////////////////////////
//incase you want to see manual timestamps then remove //*** after here and comment out the printcanvas at the end.//
int N = count;
int Cserial[N];
// initialize the input matrices
srand(time(NULL)); // Initialization, should only be called once.
clock_t before_init = clock();
for (int i = 0; i < N; i++){
Cserial[i] = C[i];
}
clock_t after_init = clock();
int width = N;
cudaProfilerStart();
int size=width*sizeof(int);
// Allocate device memory for C matrix
int* d_C;
cudaError_t err = cudaMalloc((void**)&d_C, size);
//***printf("CUDA malloc d_C: %s\n",cudaGetErrorString(err));
// Copy Matrix data from host memory to device memory
cudaEvent_t start_memcpyh2d,stop_memcpyh2d;
cudaEventCreate(&start_memcpyh2d);
cudaEventCreate(&stop_memcpyh2d);
cudaEventRecord(start_memcpyh2d);
err = cudaMemcpy(d_C, C, size, cudaMemcpyHostToDevice);
//***printf("CUDA Memcpy C->Cd: %s\n",cudaGetErrorString(err));
cudaEventRecord(stop_memcpyh2d);
float ms1 = 0;
// NOTE(review): cudaEventElapsedTime is called without a prior
// cudaEventSynchronize(stop_memcpyh2d); if the event hasn't completed
// the call fails and ms1 stays 0.
cudaEventElapsedTime(&ms1,start_memcpyh2d,stop_memcpyh2d);
//***printf("Time of the MEMCPY of %d bytes: %2.3f ms\n",size,ms1);
cudaEvent_t start_kernel,stop_kernel;
cudaEventCreate(&start_kernel);
cudaEventCreate(&stop_kernel);
// Specify the execution configuration
//dim3 dimBlock(,);
//dim3 dimGrid(,);
// Launch the CUDA kernel on the device
cudaEventRecord(start_kernel);
// NOTE(review): integer division N/1000 drops the remainder (up to 999
// elements unprocessed), and for N < 1000 launches zero blocks.
VectorAddCuda<<<N/1000, 1000>>>(d_C,width);
cudaEventRecord(stop_kernel);
cudaError_t errk = cudaDeviceSynchronize();
cudaEventSynchronize(stop_kernel);
//***printf("CUDA kernel launch: %s\n",cudaGetErrorString(errk));
// Read C from the device
cudaEvent_t start_memcpyd2h,stop_memcpyd2h;
cudaEventCreate(&start_memcpyd2h);
cudaEventCreate(&stop_memcpyd2h);
cudaEventRecord(start_memcpyd2h);
// &C and C are the same address here (C is a global array), so this works.
err = cudaMemcpy(&C, d_C, size, cudaMemcpyDeviceToHost);
cudaEventRecord(stop_memcpyd2h);
//***printf("CUDA Memcpy d_C->C: %s\n",cudaGetErrorString(err));
float ms2 = 0;
cudaEventElapsedTime(&ms2,start_memcpyd2h,stop_memcpyd2h);
//***printf("Time of the MEMCPY of %d bytes : %2.3f ms\n",size,ms2);
// Free device memory
cudaFree(d_C);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds,start_kernel,stop_kernel);
//***printf("Time to complete CUDA Vector addition kernel of %d size: %2.3f ms\n",width,milliseconds);
cudaProfilerStop();
clock_t before_serial = clock();
for (int k=0; k < N; k++) {
Cserial[k] += 50;
}
clock_t after_serial = clock();
float serial_totaltime = ((after_serial-before_serial)*1000)/CLOCKS_PER_SEC;
float speedup = serial_totaltime/milliseconds;
//***printf("Execution time for initialization(msec) = %d\n",(((after_init-before_init)*1000)/CLOCKS_PER_SEC));
//***printf("Execution time for CUDA Vector addition(msec)= %2.3f\n",milliseconds);
//***printf("Execution time for serial execution(msec) %d\n",(((after_serial-after_init)*1000)/CLOCKS_PER_SEC));
//***printf("Speedup obtained by CUDA for %d size Vector addition: %f\n",N,speedup);
//***printf("Ccount = %d, address[count][1] = %d \n", count, address[count][1]);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//printing the canvas
// NOTE(review): `=+ 100` assigns 100 rather than adding; `+=` was
// presumably intended (same pattern appears in circle()).
for(count;count>=0;count--){
canvas[address[count][0]][address[count][1]] =+ 100;
}
printcanvas (canvas);
return 0;
}
|
12,507 |
// Per-pixel colouring hook with C linkage (so it can be located/replaced
// by name, e.g. from JIT-compiled user code). This default implementation
// intentionally leaves pixel[4] untouched.
extern "C" {
__device__
void colorize_pixel(double pixel[4], double2 pnt, double2 ipnt, unsigned long i, unsigned long maxiter, double mag, double escape) {
}
};
|
12,508 | #include "utils.cuh"
#include <iostream>
#include <chrono>
void TIME(std::function<void(void)> func) {
// Run `func` once and print its wall-clock duration in milliseconds.
namespace chrono = std::chrono;
const auto begin = chrono::high_resolution_clock::now();
func();
const auto end = chrono::high_resolution_clock::now();
const chrono::duration<double, std::milli> elapsed = end - begin;
std::cout << elapsed.count() << "ms" << std::endl;
}
12,509 | #include<iostream>
#include <cuda.h>
#define N 16
__global__ void sumThredBlk(int* dA) {
// Each thread records the sum of its thread index and block index.
const int slot = threadIdx.x + blockIdx.x * blockDim.x;
dA[slot] = threadIdx.x + blockIdx.x;
}
// Launches sumThredBlk over N elements and prints the result array,
// demonstrating thread/block index arithmetic.
int main(void) {
int* hA; // host copy of the output array
int* dA; // device copy of the output array
int size = N * sizeof(int);
// Alloc space for device copies for dA
cudaMalloc((void**)& dA, size);
// Alloc space for host copies of a
hA = (int*)malloc(size);
// fix: the original copied hA to the device while hA was still
// uninitialized (indeterminate bytes). The kernel overwrites every
// element of dA anyway, so that upload was unnecessary -- dropped.
// Launch sumThredBlk() kernel on GPU with 2 blocks and 8 threads
sumThredBlk<<<2,8>>>(dA);
cudaDeviceSynchronize();
// Copy result back to host
cudaMemcpy(hA, dA, size, cudaMemcpyDeviceToHost);
// prints the dA array
for (int i = 0; i < N; i++)
{
std::printf("%d ", hA[i]);
}
// Cleanup
free(hA);
cudaFree(dA);
return 0;
}
12,510 | #include <stdio.h>
#include <sys/time.h>
// Wraps a CUDA runtime call: on any non-success status, print the file,
// line, numeric code and message, then exit.
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error : %s : %d, ", __FILE__, __LINE__); \
printf("code : %d, reason : %s\n",error, cudaGetErrorString(error)); \
exit(1); \
} \
}
void allocate_2d(int N, int M, double ***q)
{
/* Allocates an N x M matrix as one contiguous data slab plus a vector
 * of N row pointers; *q receives the row-pointer vector so callers can
 * index (*q)[i][j]. Pair with delete_2d(). */
double *slab = (double*) malloc(N*M*sizeof(double));
double **rows = (double**) malloc(N*sizeof(double*));
for (int r = 0; r < N; r++)
{
rows[r] = slab + r*M;
}
*q = rows;
}
void delete_2d(double ***q)
{
/* Frees a matrix built by allocate_2d: the contiguous data slab first,
 * then the row-pointer vector; nulls the caller's pointer. */
double **rows = *q;
free(rows[0]);
free(rows);
*q = NULL;
}
// Builds the device-side row-pointer table for a 2D array: qrows[i]
// points into the contiguous slab qmem, and *q points at qrows -- the
// device mirror of allocate_2d(). Intended to run as <<<1,1>>>; a single
// thread writes the whole table.
__global__ void setup_arrays2d_cuda(int Nx, int Ny,
double *qmem, double** qrows, double*** q)
{
int rows = Nx;
int cols = Ny;
for(int i = 0; i < rows; i++)
{
qrows[i] = &qmem[cols*i];
}
*q = &qrows[0];
}
// Column-wise matrix copy with a deliberate 4-way branch on
// threadIdx.x % 4. All four arms perform the identical copy; the branch
// exists only so adjacent lanes take different paths, letting the caller
// measure the cost of (apparent) warp divergence against copymat_x.
__global__ void copymat_x_div(int Nx, int Ny, double*** dev_A, double ***dev_B)
{
double **A = *dev_A;
double **B = *dev_B;
/* This thread runs over i values (rows) */
int j = threadIdx.x + blockIdx.x*blockDim.x;
if (j < Ny)
if (threadIdx.x % 4 == 0)
for(int i = 0; i < Nx; i++)
B[i][j] = A[i][j];
else if (threadIdx.x % 4 == 1)
for(int i = 0; i < Nx; i++)
B[i][j] = A[i][j];
else if (threadIdx.x % 4 == 2)
for(int i = 0; i < Nx; i++)
B[i][j] = A[i][j];
else
for(int i = 0; i < Nx; i++)
B[i][j] = A[i][j];
}
__global__ void copymat_x(int Nx, int Ny, double*** dev_A, double ***dev_B)
{
// Branch-free baseline copy: each thread owns one column j and walks
// down all Nx rows copying A into B.
double **A = *dev_A;
double **B = *dev_B;
const int col = threadIdx.x + blockIdx.x*blockDim.x;
if (col < Ny)
{
for (int row = 0; row < Nx; row++)
{
B[row][col] = A[row][col];
}
}
}
double cpuSecond()
{
/* Wall-clock time in seconds (microsecond resolution) for host-side
 * elapsed-time measurements. */
struct timeval now;
gettimeofday(&now, NULL);
return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
// Benchmarks a divergent vs non-divergent matrix copy kernel.
// Usage: ./a.out P block_dim  (matrix is 2^P x 2^P).
// NOTE(review): argv[1]/argv[2] are read without checking argc.
int main(int argc, char* argv[])
{
int P = atoi(argv[1]);
int block_dim = atoi(argv[2]);
if (P > 13)
printf("P is probably too large and will likely seg. fault\n");
int Nx = 1 << P;
int Ny = 1 << P;
size_t nbytes = Nx*Ny*sizeof(double);
printf("N = %d\n",Nx);
// NOTE(review): %d with a size_t argument is a format mismatch; %zu
// (and nbytes/(1024*1024)) would be correct.
printf("Memory = %dMb\n",nbytes/(1024*1024));
// Device-side 2D array A: data slab + row pointers + handle.
double *dev_Amem, **dev_Arows, ***dev_A;
cudaMalloc( (void**) &dev_Amem, Nx*Ny*sizeof(double));
cudaMalloc( (void***) &dev_Arows, Ny*sizeof(double*));
cudaMalloc( (void****) &dev_A, sizeof(double**));
setup_arrays2d_cuda<<<1,1>>>(Nx,Ny,dev_Amem, dev_Arows,dev_A);
// Device-side 2D array B, same structure.
double *dev_Bmem, **dev_Brows, ***dev_B;
cudaMalloc( (void**) &dev_Bmem, Nx*Ny*sizeof(double));
cudaMalloc( (void***) &dev_Brows, Ny*sizeof(double*));
cudaMalloc( (void****) &dev_B, sizeof(double**));
setup_arrays2d_cuda<<<1,1>>>(Nx,Ny,dev_Bmem, dev_Brows,dev_B);
double **A;
allocate_2d(Nx, Ny, &A);
memset(&A[0][0],0,nbytes);
cudaMemcpy(dev_Amem, &A[0][0], nbytes, cudaMemcpyHostToDevice);
// One thread per column; ceil-divide so the tail is covered.
dim3 block(block_dim);
dim3 grid((Ny+block.x-1)/block.x);
printf("Number of blocks : %d\n",grid.x);
double etime[2];
/* Run over rows */
double start = cpuSecond();
copymat_x<<<grid,block>>>(Nx,Ny,dev_A, dev_B);
CHECK(cudaDeviceSynchronize());
etime[0] = cpuSecond() - start;
printf("GPU Kernel (non-div) %12.6f (s)\n",etime[0]);
start = cpuSecond();
copymat_x_div<<<grid,block>>>(Nx,Ny,dev_A, dev_B);
CHECK(cudaDeviceSynchronize());
etime[1] = cpuSecond() - start;
printf("GPU Kernel (warp-div) %12.6f (s)\n",etime[1]);
printf("Ratio (warp_div/non-div) %12.2f\n",etime[1]/etime[0]);
cudaFree(dev_Amem);
cudaFree(dev_Arows);
cudaFree(dev_A);
cudaFree(dev_Bmem);
cudaFree(dev_Brows);
cudaFree(dev_B);
delete_2d(&A);
cudaDeviceReset();
}
|
12,511 |
#include<stdio.h>
__global__ void end()
{
// Only the final thread of the final block announces completion.
const bool isLast = (threadIdx.x == 31) && (blockIdx.x == 1023);
if (isLast)
{
printf("end\n");
}
}
int main()
{
// Launch 1024 blocks of 32 threads; exactly one thread prints, and we
// wait for the device printf to flush before exiting.
end<<<1024, 32>>>();
cudaDeviceSynchronize();
return 0;
}
|
12,512 | #include "includes.h"
// Adds a per-feature offset to every index in a (batch_size x 26) flat
// index tensor: output[i] = indices[i] + offsets[i % 26]. The 26 offsets
// are staged in shared memory once per block, then a grid-stride loop
// covers the full tensor.
// NOTE(review): assumes blockDim.x >= 26, otherwise some offsets are
// never loaded into shared memory.
__global__ void indices_offset_addition(int64_t *indices, int64_t *offsets, int64_t *output_indices, int batch_size) {
const int fea_count = 26;
__shared__ int64_t smem_offsets[fea_count];
if (threadIdx.x < fea_count) {
smem_offsets[threadIdx.x] = offsets[threadIdx.x];
}
__syncthreads(); // offsets staged before any thread reads them
int start_idx = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = start_idx; i < (batch_size * fea_count); i+=(gridDim.x * blockDim.x)) {
output_indices[i] = indices[i] + smem_offsets[i % fea_count];
}
}
12,513 | #include "includes.h"
// Brightens a w x h image: out = min(255, in + 50) per pixel.
// Layout: 32x32-pixel tiles -- expects blockDim = (32, 32) so that
// (tx + 32*bx, ty + 32*by) is the pixel coordinate.
__global__ void cuImageBrighten(const float *dev_image, float *dev_out, int w, int h)
{
int tx = threadIdx.x; int ty = threadIdx.y;
int bx = blockIdx.x; int by = blockIdx.y;
int x = tx + 32*bx; // column
int y = ty + 32*by; // row
// fix: w and h were accepted but never used for bounds checking, so
// partial edge tiles read and wrote past the image buffer.
if (x < w && y < h) {
int pos = y*w + x; // same flat index as the original tx + 32*bx + w*ty + 32*w*by
dev_out[pos] = min(255.0f, dev_image[pos] + 50);
}
// (removed a trailing __syncthreads(): no shared memory is used, so the
// barrier had no effect.)
}
12,514 | //pass
//--blockDim=64 --gridDim=64 --no-inline
#include "cuda.h"
__global__ void foo() {
// Verification stub: flip a register-local flag; no observable effect.
int flag = 1;
flag = !flag;
}
|
12,515 | #include <stdio.h>
#include <stdlib.h>
void handleError(cudaError_t error) {
// Abort with a readable message on any non-success CUDA status.
if (error == cudaSuccess)
return;
printf("Error: %s\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Queries the current CUDA device, then asks the runtime for the device
// closest to compute capability 3.0 and selects it.
int main(int argc, char *argv[]) {
cudaDeviceProp prop;
int dev;
handleError(cudaGetDevice(&dev));
printf("Current Device ID is %d\n", dev);
// Zero the whole property struct so only the fields we set constrain
// the search.
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.major = 3;
prop.minor = 0;
handleError(cudaChooseDevice(&dev, &prop));
printf("The closest device to revision 3.0 is %d\n", dev);
handleError(cudaSetDevice(dev));
return 0;
}
|
12,516 | #include <cstdio>
using namespace std;
// Each thread prints its own threadIdx.x via device printf; output order
// across threads is not guaranteed.
__global__ void printThread(void) {
printf("Hello World! I am thread %d\n", threadIdx.x);
}
int main() {
// One block of four greeting threads; synchronize so the device printf
// buffer is flushed before the process exits.
printThread<<<1, 4>>>();
cudaDeviceSynchronize();
return 0;
}
|
12,517 | #include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#define ARR_SIZE 10
#define NUM_DEVICE 2
#define NUM_THR 8
typedef struct {
int *arr;
int *dev_arr;
int *dev_result;
int *result;
int dev_num;
int thr_num;
} cuda_st;
// One thread per array element: print the element and atomically fold it
// into the single-int accumulator *dev_result.
__global__ void kernel_fc(int *dev_arr, int *dev_result)
{
int idx = threadIdx.x;
printf("dev_arr[%d] = %d\n", idx, dev_arr[idx]);
atomicAdd(dev_result, dev_arr[idx]);
}
// pthread body: binds to this work item's device, uploads the host
// array, runs the summing kernel, and copies the accumulator back.
// NOTE(review): all NUM_THR host threads targeting the same device share
// one dev_arr/dev_result, so their kernels and copies race with each
// other and the accumulator keeps growing across threads.
void *thread_func(void* struc)
{
cuda_st * data = (cuda_st*)struc;
printf("thread %d func start\n", data->thr_num);
printf("arr %d = ", data->dev_num);
for(int i=0; i<10; i++) {
printf("%d ", data->arr[i]);
}
printf("\n");
cudaSetDevice(data->dev_num);
cudaMemcpy(data->dev_arr, data->arr, sizeof(int)*ARR_SIZE, cudaMemcpyHostToDevice);
kernel_fc<<<1,ARR_SIZE>>>(data->dev_arr, data->dev_result);
// cudaMemcpy is blocking, so it also waits for the kernel to finish.
cudaMemcpy(data->result, data->dev_result, sizeof(int), cudaMemcpyDeviceToHost);
printf("thread %d func exit\n", data->thr_num);
return NULL;
}
// Multi-GPU / multi-pthread demo: NUM_THR host threads per device each
// launch a summing kernel over that device's array.
// NOTE(review): because the threads on one device all share the same
// dev_result (zeroed only once), the accumulator sums contributions from
// every thread -- the printed results depend on scheduling.
int main(void)
{
// Make object
cuda_st cuda[NUM_DEVICE][NUM_THR];
// Make thread
pthread_t pthread[NUM_DEVICE*NUM_THR];
// Host array memory allocation
int *arr[NUM_DEVICE];
for(int i=0; i<NUM_DEVICE; i++) {
arr[i] = (int*)malloc(sizeof(int)*ARR_SIZE);
}
// Fill this host array up with specified data
for(int i=0; i<NUM_DEVICE; i++) {
for(int j=0; j<ARR_SIZE; j++) {
arr[i][j] = i*ARR_SIZE+j;
}
}
// To confirm host array data
for(int i=0; i<NUM_DEVICE; i++) {
printf("arr[%d] = ", i);
for(int j=0; j<ARR_SIZE; j++) {
printf("%d ", arr[i][j]);
}
printf("\n");
}
// Result memory allocation
int *result[NUM_DEVICE];
for(int i=0; i<NUM_DEVICE; i++) {
result[i] = (int*)malloc(sizeof(int));
memset(result[i], 0, sizeof(int));
}
// Device array memory allocation (one buffer per device)
int *dev_arr[NUM_DEVICE];
for(int i=0; i<NUM_DEVICE; i++) {
cudaSetDevice(i);
cudaMalloc(&dev_arr[i], sizeof(int)*ARR_SIZE);
}
// Device result memory allocation (zeroed once per device)
int *dev_result[NUM_DEVICE];
for(int i=0; i<NUM_DEVICE; i++) {
cudaSetDevice(i);
cudaMalloc(&dev_result[i], sizeof(int));
cudaMemset(dev_result[i], 0, sizeof(int));
}
// Connect these pointers with object -- note all threads of device i
// share the same arr/dev_arr/result/dev_result pointers.
for (int i=0; i<NUM_DEVICE; i++)
for (int j=0; j<NUM_THR; j++) {
cuda[i][j].arr = arr[i];
cuda[i][j].dev_arr = dev_arr[i];
cuda[i][j].result = result[i];
cuda[i][j].dev_result = dev_result[i];
cuda[i][j].dev_num = i;
cuda[i][j].thr_num = j;
}
// Create and excute pthread
for(int i=0; i<NUM_DEVICE; i++)
for (int j=0; j<NUM_THR; j++) {
pthread_create(&pthread[(i*NUM_THR)+j], NULL, thread_func, (void*)&cuda[i][j]);
}
// Join pthread
for(int i=0; i<NUM_DEVICE*NUM_THR; i++) {
pthread_join(pthread[i], NULL);
}
for(int i=0; i<NUM_DEVICE; i++)
for (int j=0; j < NUM_THR; j++) {
printf("result[%d][%d] = %d\n", i,j, (*cuda[i][j].result));
}
cudaDeviceReset();
return 0;
}
|
12,518 |
#include <stdio.h>
#include<iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#define N 4
#define INF 9999
// One Floyd-Warshall relaxation step: for cell (i, j), try routing
// through intermediate vertex k.
// NOTE(review): `graph` is typed int** but the caller allocates and
// copies a flat N*N int buffer into it -- graph[i] therefore dereferences
// matrix DATA as if it were a row pointer, which is an illegal device
// access. Kernel and caller should agree on a flat `int*` with i*N+j
// indexing instead.
__global__ void Floyd(int** graph, int k)
{
int i = /*blockIdx.x +*/ threadIdx.x;
int j = /*blockIdx.y +*/ threadIdx.y;
if (graph[i][k] + graph[k][j] < graph[i][j])
graph[i][j] = graph[i][k] + graph[k][j];
}
// All-pairs shortest paths on a 4-vertex graph via repeated Floyd steps.
// NOTE(review): two defects -- (1) the k loop runs N*N times but
// Floyd-Warshall needs k in [0, N); (2) d_graph is declared int** yet
// filled with a flat N*N int payload, which the kernel then dereferences
// as row pointers (see Floyd kernel note).
int main()
{
int h_graph[N][N] = {
0,5,9999, 10,
9999, 0,3, 9999,
9999, 9999, 0,1,
9999, 9999, 9999,0
};
size_t size = N * N * sizeof(int);
int** d_graph;
cudaMalloc(&d_graph, size);
cudaMemcpy(d_graph, h_graph, size, cudaMemcpyHostToDevice);
int numBlocks = 1;
// One thread per (i, j) cell.
dim3 threadsPerBlock(N, N);
for (int k = 0; k < N*N; k++)
{
Floyd<<<numBlocks, threadsPerBlock>>>(d_graph, k);
}
cudaMemcpy(h_graph, d_graph, size, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
if (h_graph[i][j] == INF){
cout << "INF ";
}
else{
cout << h_graph[i][j] << " ";
}
}
cout << endl;
}
cudaFree(d_graph);
}
|
12,519 | #include <assert.h>
#include <stdio.h>
int* add(int *a, int *b, int *result, int N){
/* Element-wise sum of two N-length arrays into `result`; returns the
 * result buffer for chaining. */
for (int idx = 0; idx < N; ++idx)
result[idx] = a[idx] + b[idx];
return result;
}
void onHost(){
// Host-only self-check: build two arrays, sum them with add(), and
// assert every element of the result.
const int ARRAY_SIZE = 10;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
int *a = (int*)malloc(ARRAY_BYTES);
int *b = (int*)malloc(ARRAY_BYTES);
int *result = (int*)malloc(ARRAY_BYTES);
for (int idx = 0; idx < ARRAY_SIZE; ++idx) {
a[idx] = -idx;
b[idx] = idx * idx;
result[idx] = 0;
}
add(a, b, result, ARRAY_SIZE);
for (int idx = 0; idx < ARRAY_SIZE; ++idx) {
assert( a[idx] + b[idx] == result[idx] );
}
printf("-: successful execution :-\n");
free(a);
free(b);
free(result);
}
// Entry point: run the host-side self-check.
int main(){
onHost();
return 0;
}
12,520 | #include "includes.h"
using namespace std;
#ifndef __CUDACC__
#define __CUDACC__
#endif
/*
CUDA C has a __shared__ memory section where a copy of var is made for each block & threads within a block can all access var but cannot see or modify copy in other blocks
Must synchronize, if thread A writes to var and thread B wants to modify, must wait and ensure write is done else race condition occurs where correctness of var unknown
Addl shared mem buffers are physically on GPU, as opposed to off-chip DRAM which makes for much faster calls and reduced latency
*/
#define imin(a, b) (a < b ? a : b)
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N + threadsPerBlock - 1) / threadsPerBlock); // use either all blocks if N large, or calc req blocks by taking smallest multiple of N
// Block-level dot product: each thread accumulates a grid-stride partial
// sum into shared memory, the block tree-reduces those partials, and
// thread 0 writes one partial result per block to c[]. The host must sum
// the blocksPerGrid entries of c[] to finish the dot product.
// Requires blockDim.x to be a power of two for the halving reduction.
__global__ void dot(float *a, float *b, float *c)
{
__shared__ float cache[threadsPerBlock]; //arr of caches equal to size of 256, each thread has spot to store temp vals & must wait for all writes to finish before another iteration where val retrieved for more modification
int threadID = threadIdx.x + blockIdx.x * blockDim.x;
int cacheID = threadIdx.x;
float temp = 0;
//iteratively take sum of products (dot prod) by creating threads for each arr elem
//all threads will technically run at same hardware location, however abstract into larger set incase vector exceeds length
//for small enough vals each thread theoretically computes single sum, yet again for large vecs, can continue greater iterations
while (threadID < N)
{
temp += a[threadID] * b[threadID];
threadID += blockDim.x * gridDim.x;
}
cache[cacheID] = temp; //shared mem buffer to store running sum per thread
__syncthreads(); //Sync threads for blocks to ensure cache done being written to by all parallel processes
//Apply reduction to sum vals, whereby input arr made into smaller output arr
//Apply multiple threads for sum, each one adds two vals of cache[], resulting in log2(threadsPerBlock) steps
//Each thread does two computations, therefore 2x per thread. There are threadsPerBlock running in parallel, thus 2^threadsPerBlock computations being done per step
int i = blockDim.x / 2; //each thread does two tasks, thus need half as many
while (i != 0) //run in parallel so each step halves size of cache until reach 1 elem in arr
{
if (cacheID < i) //check cacheID being summed less than num operators
{
cache[cacheID] += cache[cacheID + i]; //add curr cache val to ith
}
__syncthreads(); //sync threads again per each iteration to ensure cache data correct before mod
i /= 2; //every other cache index
}
//Final reduction, each block has single sum left & store to global mem, use single thread rather than multiple for writing to reduce mem req
//Typically in better programs, GPU stops summing once it's reached a small enough number as threads used << threads available (e.g. using 32 out of 256 threads)
//In that case, work passed on to CPU to quickly run remaining sum sequentially
if (cacheID == 0)
{
c[blockIdx.x] = cache[0]; //send to curr block
}
}
12,521 | #include "includes.h"
#define min(X,Y) ((X) < (Y) ? (X) : (Y))
__global__ void pos_update(int nx, int ny, double dt, double* d_z, double* d_v, double* d_a) {
    // Semi-implicit Euler step on a flat nx*ny grid: update velocity from
    // acceleration, then position from the UPDATED velocity. Boundary rows
    // and columns are deliberately left untouched.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = idx / nx;
    const int col = idx % nx;
    const bool interior = (row > 0) && (row < ny - 1) && (col > 0) && (col < nx - 1);
    if (interior) {
        d_v[idx] += dt * d_a[idx];
        d_z[idx] += dt * d_v[idx];
    }
}
12,522 | #include "gpuerrchk.cuh"
#include "real.h"
// Histogram "strategy 1": each thread processes one CONTIGUOUS section of the
// input, so a warp's loads are strided across memory (poorly coalesced) —
// this is the deliberately naive baseline version.
__global__ void ch9_aastrat1_kernel(char* buffer,unsigned int* histo, size_t inputsize){
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    // Ceil-divide the input among all launched threads.
    int section_size = (inputsize-1) / (blockDim.x*gridDim.x) + 1;
    int start=i*section_size;
    for (int k=0; k< section_size; ++k){
        if (start+k < inputsize){
            // Bucket the 26 lowercase letters into bins of 4 letters each;
            // non-letters are skipped.
            int alphabet_position=buffer[start+k]-'a';
            if (alphabet_position >=0 && alphabet_position < 26)
                atomicAdd(&histo[alphabet_position/4], 1);
        }
    }
}
// Host wrapper for strategy 1: a single block of 512 threads, each thread
// handling one contiguous section of the input. Checks for launch errors
// without clearing them.
void ch9_aastrat1(char* buffer, unsigned int* histo, size_t inputsize){
    const dim3 grid(1);
    const dim3 block(512);
    ch9_aastrat1_kernel<<<grid, block>>>(buffer, histo, inputsize);
    gpuErrchk(cudaPeekAtLastError());
}
|
12,523 | #include "includes.h"
// Counts N-queens completions. Queens 0-2 come from the precomputed
// frontQueensPos table (numFQP[0]/3 triples of columns); queens 3-6 come
// from the block/thread coordinates; queens 7..QUEENS-1 are placed by
// iterative backtracking. The per-thread solution count is written to
// data[thisThread].
// NOTE(review): threads whose queens 3-6 conflict return early WITHOUT
// writing data[thisThread], so the host must zero-initialize `data`.
// NOTE(review): the thisThread flattening mixes gridDim.x with blockIdx.x
// in an unusual order — confirm it matches the host-side indexing of `data`.
__global__ void countQueens(int* frontQueensPos, int* data, int* numFQP)
{
    int localResult = 0;
    int thisThread = ((blockIdx.x * gridDim.x + blockIdx.y) * gridDim.y + threadIdx.x) * blockDim.x + threadIdx.y;
    if (blockIdx.x >= QUEENS || blockIdx.y >= QUEENS || threadIdx.x >= QUEENS || threadIdx.y >= QUEENS)
        return;
    // BUG FIX: the original `new int[QUEENS]` leaked device-heap memory on
    // every early return and was never delete[]d. A fixed-size local array
    // needs no cleanup and avoids the device heap entirely.
    int queenPos[QUEENS];
    queenPos[3] = blockIdx.x;
    queenPos[4] = blockIdx.y;
    queenPos[5] = threadIdx.x;
    queenPos[6] = threadIdx.y;
    // Reject placements where queens 3-6 already attack each other
    // (same column, or same +/- diagonal).
    for (int i = 4; i <= 6; i++) {
        for (int j = 3; j < i; j++) {
            if ((queenPos[i] - i) == (queenPos[j] - j) || (queenPos[i] + i) == (queenPos[j] + j) || queenPos[i] == queenPos[j]) {
                return;
            }
        }
    }
    int totalFQP = numFQP[0] / 3;
    for (int FQP_number = 0; FQP_number < totalFQP; FQP_number++) {
        // Load this front triple as queens 0-2.
        for (int i = 0; i < 3; i++)
            queenPos[i] = frontQueensPos[(FQP_number * 3) + i];
        // Check queens 3-6 against queens 0-2.
        bool legal = true;
        for (int i = 3; i <= 6; i++) {
            for (int j = 0; j < 3; j++) {
                if ((queenPos[i] - i) == (queenPos[j] - j) || (queenPos[i] + i) == (queenPos[j] + j) || queenPos[i] == queenPos[j]) {
                    legal = false;
                    break;
                }
            }
            if (!legal)
                break;
        }
        if (!legal)
            continue;
        // Iterative backtracking over rows 7..QUEENS-1.
        int posNow = 7;
        queenPos[posNow] = -1;
        while (posNow > 6) {
            queenPos[posNow]++;
            // Advance queenPos[posNow] to the next column that conflicts
            // with none of the earlier queens.
            while (queenPos[posNow] < QUEENS) {
                legal = true;
                for (int j = posNow - 1; j >= 0; j--) {
                    if ((queenPos[posNow] - posNow) == (queenPos[j] - j) || (queenPos[posNow] + posNow) == (queenPos[j] + j) || queenPos[posNow] == queenPos[j]) {
                        legal = false;
                        break;
                    }
                }
                if (!legal)
                    queenPos[posNow]++;
                else
                    break;
            }
            if (queenPos[posNow] < QUEENS) {
                if (posNow == (QUEENS - 1)) {
                    // Full board placed: count it, then back up one row.
                    localResult++;
                    posNow--;
                }
                else {
                    // Descend to the next row and restart its column scan.
                    posNow++;
                    queenPos[posNow] = -1;
                }
            }
            else
                posNow--;   // row exhausted: backtrack
        }
    }
    data[thisThread] = localResult;
}
12,524 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cstdio>
#include <iostream>
#include <cstdlib>
#define BLOCK_SIZE 64
#define N 64
using namespace std;
// Print (and clear) the most recent CUDA runtime error, tagged with msg so
// the failing call site can be identified in the output.
void displayLastError(const string &msg)
{
    const cudaError_t err = cudaGetLastError();
    cout << "Last Error (" << msg << "):\t" << cudaGetErrorString(err) << endl;
}
// Parallel linear search: every thread inspects one element and, on an exact
// float match, writes its own index to *index.
// NOTE(review): there is no `x < n` bounds guard — the launch must cover
// exactly the array length (here N == blocks * BLOCK_SIZE), otherwise
// threads read out of bounds. Confirm callers never over-provision the grid.
// NOTE(review): when several elements match, the matching threads race on
// *index and any one of their indices may win.
__global__ void bSearchCuda(float *array, float search, int *index)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    if(array[x]==search)
        *index = x;
}
// Demo driver: builds an array whose first 10 entries are 1.0f, searches for
// 1.0f on the GPU, and prints the index found (or -1).
int main(int argc, char *argv[])
{
    // Host data: entries 0-9 are the search target 1.0f, the rest 0.0f.
    float *data = new float[N];
    for(int i=0;i<N;i++)
        data[i] = (i<10)?1.0f:0.0f;
    float *deviceData;
    int *deviceIndex;
    size_t size = N*sizeof(float);
    cudaMalloc((void**)&deviceData, size);
    displayLastError("memory allocation");
    cudaMalloc((void**)&deviceIndex, sizeof(int));
    displayLastError("memory allocation");
    cudaMemcpy(deviceData, data, size, cudaMemcpyHostToDevice);
    displayLastError("memory copying");
    // Sentinel: -1 means "not found".
    int index = -1;
    cudaMemcpy(deviceIndex, &index, sizeof(int), cudaMemcpyHostToDevice);
    // Ceil-divide so the grid covers all N elements.
    int blocks = N / BLOCK_SIZE;
    if(N % BLOCK_SIZE)
        blocks++;
    // NOTE(review): threads 0-9 all match 1.0f and race on *deviceIndex, so
    // any index in [0, 9] may be printed.
    bSearchCuda<<<blocks, BLOCK_SIZE>>>(deviceData, 1.0f, deviceIndex);
    displayLastError("kernel");
    // cudaMemcpy is blocking, so it also synchronizes with the kernel
    // before the result is read.
    cudaMemcpy(&index, deviceIndex, sizeof(int), cudaMemcpyDeviceToHost);
    displayLastError("memory copying");
    cout << index << endl;
    cudaFree(deviceData);
    displayLastError("free");
    cudaFree(deviceIndex);
    displayLastError("free");
    delete [] data;
    return 0;
}
|
12,525 | //#include "texture.h"
//#include "../cuda_err.h"
//#include <inttypes.h>
//#include <stdio.h>
//template <typename T>
//__device__
//T Texture<T>::get(float u, float v)
//{
// uint32_t x = round(u * float(width-1));
// uint32_t y = round(v * float(height-1));
// return gpu_texels[y * width + x];
//}
//template <typename T>
//void Texture<T>::sendCopyToGPU(Texture<T> *gpu_copy)
//{
// gpuErrchk(cudaMalloc(&gpu_texels, size * sizeof(T)));
// gpuErrchk(cudaMemcpy(gpu_texels, cpu_texels, size * sizeof(T), cudaMemcpyHostToDevice));
// Texture<T> temp; // A copy of this object is sent to the GPU. The copy itself never needs explicit cleanup: it only holds the GPU and CPU pointers, which are owned and freed explicitly by the base object that stays on the CPU
// temp.cpu_texels = nullptr;
// temp.gpu_texels = gpu_texels;
// temp.size = size;
// temp.width = width;
// temp.height = height;
// if (gpu_copy == NULL) {
// gpuErrchk(cudaMalloc(&gpu_copy, sizeof(Texture)));
// }
// gpuErrchk(cudaMemcpy(gpu_copy, &temp, sizeof(Texture), cudaMemcpyHostToDevice)); //Send the struct to the GPU
//}
//template <typename T>
//void Texture<T>::destroy()
//{
// if (!cpu_texels) {
// free(cpu_texels);
// cpu_texels = nullptr;
// printf("[Texture::destroy]: free cpu_texels\n");
// }
// if (!gpu_texels) {
// gpuErrchk(cudaFree(gpu_texels));
// gpu_texels = nullptr;
// printf("[Texture::destroy]: cudaFree gpu_texels\n");
// }
// initTexture();
//}
|
12,526 | #include "includes.h"
#define NUMAR_NODURI 500
#define NUMAR_MUCHII 500
#define COST_MAXIM 1000000

// Directed edge between two node ids.
typedef struct
{
    int nod1;
    int nod2;
} Muchie;

// Graph node: id plus a visited flag used by the relaxation kernel.
typedef struct
{
    int nodId;
    bool vizitat;
} Nod;

// Returns the cost of the edge start -> stop, or COST_MAXIM when no such
// edge exists. Linear scan over the full edge table; the first matching
// entry wins.
__device__ __host__ int CautareMuchie(Nod start, Nod stop, Muchie *muchii, int *costuri)
{
    int cost = COST_MAXIM;
    for (int idx = 0; idx < NUMAR_MUCHII; idx++) {
        if (muchii[idx].nod1 == start.nodId && muchii[idx].nod2 == stop.nodId) {
            cost = costuri[idx];
            break;
        }
    }
    return cost;
}
// One thread per node: if the node is unvisited, mark it visited and relax
// every outgoing edge (a parallel shortest-path sweep).
// NOTE(review): costFinal[n] is read-modify-written by many threads with no
// atomics, so concurrent relaxations can race; presumably the host re-runs
// this kernel until the costs converge — verify the call site.
__global__ void Cauta_Nod(Nod *noduri, Muchie *muchii, int *costuri, int *costTemporal, int *costFinal)
{
    int nod = threadIdx.x;
    if (noduri[nod].vizitat == false)
    {
        noduri[nod].vizitat = true;
        for (int n = 0; n < NUMAR_NODURI; n++)
        {
            // Cost of the edge nod -> n (COST_MAXIM when no edge exists).
            int cost = CautareMuchie(noduri[nod], noduri[n], muchii, costuri);
            // Keep the cheaper path.
            if (costFinal[n] > costTemporal[nod] + cost && cost < COST_MAXIM)
                costFinal[n] = costTemporal[nod] + cost;
        }
    }
}
12,527 | #include <cuda_runtime.h>
#include <stdio.h>
#include <math.h>
// Query and print the capabilities of CUDA device 0.
int main(int argc, char **argv)
{
    printf("%s Starting...\n", argv[0]);
    // Check how many cuda devices in the system.
    int deviceCount = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
    if (error_id != cudaSuccess)
    {
        printf("cudaGetDeviceCount returned %d\n -> %s\n",
            (int)error_id, cudaGetErrorString(error_id));
        // BUG FIX: the message previously read "Result = FALL".
        printf("Result = FAIL\n");
        exit(EXIT_FAILURE);
    }
    if (deviceCount == 0)
    {
        printf("There are no available device(s) that support CUDA. \n");
        // BUG FIX: previously execution fell through and queried device 0
        // even with no devices, printing an uninitialized cudaDeviceProp.
        exit(EXIT_FAILURE);
    }
    else
    {
        printf("Detected %d CUDA Capable device(s). \n", deviceCount);
    }
    // Report the properties of device 0 only.
    int dev;
    int driverVersion = 0;
    int runtimeVersion = 0;
    dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Device %d: \"%s\"\n", dev, deviceProp.name);
    // Versions are encoded as 1000*major + 10*minor.
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    printf(" CUDA Driver Version / Runtime Version: %d.%d / %d.%d \n",
        driverVersion/1000, (driverVersion%100) / 10,
        runtimeVersion/1000, (runtimeVersion % 100)/10);
    printf(" CUDA Capability Major / Minor version: %d.%d \n",
        deviceProp.major, deviceProp.minor);
    printf(" Total amount of global memory: %.2f GBytes (%llu bytes) \n",
        (float) deviceProp.totalGlobalMem / (pow(1024.0, 3)),
        (unsigned long long) deviceProp.totalGlobalMem);
    // clockRate / memoryClockRate are reported in kHz.
    printf(" GPU Clock rate: %.0f MHz (%0.2f GHz) \n",
        deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
    printf(" Memory Clock rate: %.0f MHz (%0.2f GHz)\n",
        deviceProp.memoryClockRate * 1e-3f, deviceProp.memoryClockRate * 1e-6f);
    printf(" Memory Bus Width: %d-bit \n",
        deviceProp.memoryBusWidth);
    if (deviceProp.l2CacheSize)
    {
        printf(" L2 Cache Size: %d bytes \n",
            deviceProp.l2CacheSize);
    }
    printf(" Max Texture Dimension Size (x, y, z)\n");
    printf(" 1D = (%d), 2D = (%d, %d), 3D = (%d, %d, %d)\n\n",
        deviceProp.maxTexture1D,
        deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
        deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
    printf(" Total # of constant memory: %lu bytes \n",
        deviceProp.totalConstMem);
    printf(" Total # of shared memory per block: %lu bytes \n",
        deviceProp.sharedMemPerBlock);
    printf(" Total # of registers avalilable per block: %d \n",
        deviceProp.regsPerBlock);
    printf(" Max # of threads per multiprocessor: %d \n",
        deviceProp.maxThreadsPerMultiProcessor);
    printf(" Max # of threads per block: %d \n",
        deviceProp.maxThreadsPerBlock);
    printf(" Max sizes of each dimension of a block: %d * %d * %d \n",
        deviceProp.maxThreadsDim[0],
        deviceProp.maxThreadsDim[1],
        deviceProp.maxThreadsDim[2]);
    printf(" Max sizes of each dimension of a grid: %d * %d * %d \n",
        deviceProp.maxGridSize[0],
        deviceProp.maxGridSize[1],
        deviceProp.maxGridSize[2]);
    printf(" Max memory pitch: %lu bytes \n",
        deviceProp.memPitch);
    return(EXIT_SUCCESS);
}
|
12,528 | #include "includes.h"
// Computes the pairwise sum table dots[i + j*n] = data_dots[i] +
// centroid_dots[j] for i in [0,n) and j in [0,k), staging both input slices
// in shared memory first.
// NOTE(review): the staging code indexes the shared arrays with threadIdx.x
// but offsets by blockIdx.y * blockDim.y, while the final read uses
// threadIdx.y — this is only consistent when blockDim.x == blockDim.y == 32
// (matching the fixed 32-entry shared arrays). Confirm the launch config.
__global__ void all_dots(int n, int k, double* data_dots, double* centroid_dots, double* dots) {
    __shared__ double local_data_dots[32];
    __shared__ double local_centroid_dots[32];
    // Row 0 of the block stages this block-column's data_dots slice...
    int data_index = threadIdx.x + blockIdx.x * blockDim.x;
    if ((data_index < n) && (threadIdx.y == 0)) {
        local_data_dots[threadIdx.x] = data_dots[data_index];
    }
    // ...and row 1 stages the centroid_dots slice.
    int centroid_index = threadIdx.x + blockIdx.y * blockDim.y;
    if ((centroid_index < k) && (threadIdx.y == 1)) {
        local_centroid_dots[threadIdx.x] = centroid_dots[centroid_index];
    }
    __syncthreads();
    // Re-derive this thread's centroid index along y and emit one entry of
    // the column-major (n-stride) output table.
    centroid_index = threadIdx.y + blockIdx.y * blockDim.y;
    if ((data_index < n) && (centroid_index < k)) {
        dots[data_index + centroid_index * n] = local_data_dots[threadIdx.x] +
            local_centroid_dots[threadIdx.y];
    }
}
12,529 | #include <bits/stdc++.h>
#include <cassert>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
#define gpu_copy(x, y) thrust::copy((x).begin(), (x).end(), (y).begin())
#define gpu_copy_to(x, y, pos) thrust::copy((x).begin(), (x).end(), (y).begin() + (pos))
#define def_dvec(t) thrust::device_vector<t>
using namespace std;
const int ARRAY_SIZE = 1E9;
// No-op kernel: launched a few times before the timed runs so CUDA context
// creation / module load cost is not attributed to the first measured kernel.
__global__ void initKernel(){
}
// Single-thread mean: one GPU thread sums all N inputs sequentially and
// writes the average to *output. A deliberately slow baseline — launch
// with <<<1, 1>>>.
__global__ void naiveKernel(int N, float *input, float *output){
    float total = 0.f;
    for (int idx = 0; idx < N; idx++)
        total += input[idx];
    *output = total / N;
}
// Computes the mean with thrust::reduce called FROM device code.
// NOTE(review): with the thrust::device policy inside a __global__ function,
// recent Thrust versions execute the algorithm sequentially within the
// calling thread (no child-kernel launch) — confirm against the installed
// Thrust release before drawing performance conclusions.
__global__ void thrustKernel(int N, float *input, float *output){
    float res = thrust::reduce(thrust::device, input, input + N);
    *output = res/N;
}
// Benchmark driver: for N doubling from 2 up to ARRAY_SIZE, compare the time
// to average N random floats on the CPU, with the single-thread GPU kernel,
// and with in-kernel thrust::reduce. Prints one Python-style list per N.
// NOTE(review): near the top end (N ~ 5.4e8) the host vector and device
// vector each need ~2 GB — confirm the target machine has the memory.
int main(){
    cudaEvent_t start, stop;
    float cpu_time, gpu_time1, gpu_time2;
    cudaEventCreate(&start); // creating the event 1
    cudaEventCreate(&stop); // creating the event 2
    // Warm-up launches so context/JIT setup is excluded from the timings.
    initKernel<<<1,1>>>();
    initKernel<<<1,1>>>();
    initKernel<<<1,1>>>();
    for(int N = 2; N<=ARRAY_SIZE ; N*=2){
        float ans = 0.;
        vector<float> input(N);
        def_dvec(float) dev_in(N), dev_ans1(1, 0.), dev_ans2(1,0.);
        // Uniform random floats in [0, 1].
        generate(input.begin(), input.end(), [](){return float(rand())/RAND_MAX;});
        gpu_copy(input, dev_in);
        // Using CPU to compute the average
        clock_t t_start = clock();
        ans = accumulate(input.begin(), input.end(), 0.)/N;
        cpu_time = float(clock() - t_start)/CLOCKS_PER_SEC;
        // Using the naive kernel
        cudaEventRecord(start, 0);
        naiveKernel<<<1, 1>>>(N, to_ptr(dev_in), to_ptr(dev_ans1));
        cudaEventRecord(stop, 0); // Stop time measuring
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&gpu_time1, start, stop);
        gpu_time1/=1000.; // ms -> s, to match the CPU clock() measurement
        // Using the thrust kernel
        cudaEventRecord(start, 0);
        thrustKernel<<<1, 1>>>(N, to_ptr(dev_in), to_ptr(dev_ans2));
        cudaEventRecord(stop, 0); // Stop time measuring
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&gpu_time2, start, stop);
        gpu_time2 /= 1000.;
        // output results: [N, cpu_time, gpu_time1, gpu_time2, ans, gpu answers]
        cout<< '[' << N<<','<<cpu_time<<','<<gpu_time1<<','<<gpu_time2<<',';
        cout<< ans <<','<<dev_ans1[0]<<','<<dev_ans2[0]<<"],"<<endl;
    }
    return 0;
}
|
12,530 | #include "includes.h"
/**
* calculate pi
*/
// For the CUDA runtime routines (prefixed with "cuda_")
//Tiempo
#define NUMTHREADS 10240
#define ITERATIONS 1e12
/**
* CUDA Kernel Device code
*
*/
/*****************************************************************************/
/******************************************************************************
* Host main routine
*/
// Each thread accumulates its contiguous slice of the Leibniz series
// 4/1 - 4/3 + 4/5 - ... (two terms per loop iteration) into
// piTotal[index]; thread 0 then sums all the partials into piTotal[0].
// NOTE(review): __syncthreads() only synchronizes ONE block. If the launch
// uses more than one block, thread 0 may read piTotal[i] entries that other
// blocks have not yet written — the final sum is only safe for a
// single-block launch (or needs a separate reduction kernel / atomics).
__global__ void calculatePi(double *piTotal, long int iterations, int totalThreads)
{ long int initialIteration, endIteration;
    long int i = 0;
    double piPartial;
    // blockDim * blockIdx + threadIdx
    int index = (blockDim.x * blockIdx.x) + threadIdx.x;
    // Contiguous slice of iterations/totalThreads terms per thread.
    initialIteration = (iterations/totalThreads) * index;
    endIteration = initialIteration + (iterations/totalThreads) - 1;
    i = initialIteration;
    piPartial = 0;
    do{
        // Positive term (denominator 2i+1), then the following negative term.
        piPartial = piPartial + (double)(4.0 / ((i*2)+1));
        i++;
        piPartial = piPartial - (double)(4.0 / ((i*2)+1));
        i++;
    }while(i < endIteration);
    piTotal[index] = piPartial;
    __syncthreads();
    if(index == 0){
        for(i = 1; i < totalThreads; i++)
            piTotal[0] = piTotal[0] + piTotal[i];
    }
}
12,531 | // Rishabh Agarwal - 18JE0676
#include <bits/stdc++.h>
#include <cuda.h>
using namespace std;
const int block_size = 512;
// kernel function
// Sum-reduction kernel: each block stages 2*block_size elements of din in
// shared memory (zero-padding past the end of the input), tree-reduces them,
// and writes one partial sum per block to dout[blockIdx.x].
// Must be launched with blockDim.x == block_size (512).
__global__ void parallelReductionKernel(float * din, float * dout, int inputElements) {
    // Load a segment of the din vector into shared memory
    const int block_size = 512;
    __shared__ float partialSum[2 * block_size];
    int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int t = threadIdx.x;
    unsigned int start = 2*blockIdx.x*blockDim.x;
    // Each thread stages two elements; the block covers [start, start + 2*blockDim.x).
    if ((start + t) < inputElements) {
        partialSum[t] = din[start + t];
    }
    else {
        partialSum[t] = 0.0;
    }
    if ((start + blockDim.x + t) < inputElements) {
        partialSum[blockDim.x + t] = din[start + blockDim.x + t];
    }
    else {
        partialSum[blockDim.x + t] = 0.0;
    }
    // Traverse reduction tree.
    // BUG FIX: the original loop (stride = 1 .. < blockDim.x with a
    // `t % (2*stride)` test) only ever combined partialSum[0..blockDim.x-1],
    // silently dropping the second half of every block's staged data.
    // Starting the stride at blockDim.x and halving folds all 2*blockDim.x
    // values into partialSum[0], with the active (t < stride) threads
    // converged instead of divergent.
    for (unsigned int stride = blockDim.x; stride > 0; stride >>= 1) {
        __syncthreads();
        if (t < stride) {
            partialSum[t] += partialSum[t + stride];
        }
    }
    __syncthreads();
    // Write the computed sum of the block to the output vector at correct index
    // (the guard keeps fully-empty tail blocks from writing).
    if (t == 0 && (globalThreadId*2) < inputElements) {
        dout[blockIdx.x] = partialSum[t];
    }
}
// parallelReduction Function
// Host-side driver: copies `in` to the GPU, launches one block per output
// element (each block of block_size threads reduces 2*block_size inputs),
// and copies the per-block partial sums back into `out`.
void parallelReduction(float *in, float *out, int inputElements, int outputElements) {
    const size_t inBytes = inputElements * sizeof(float);
    const size_t outBytes = outputElements * sizeof(float);

    // device (GPU) buffers
    float *din = nullptr;
    float *dout = nullptr;
    cudaMalloc((void **)&din, inBytes);
    cudaMalloc((void **)&dout, outBytes);

    // host -> device input transfer
    cudaMemcpy(din, in, inBytes, cudaMemcpyHostToDevice);

    // One block per partial sum, block_size threads per block.
    const dim3 grid(outputElements, 1, 1);
    const dim3 block(block_size, 1, 1);
    parallelReductionKernel<<<grid, block>>>(din, dout, inputElements);

    // device -> host result transfer (blocking, so it also synchronizes)
    cudaMemcpy(out, dout, outBytes, cudaMemcpyDeviceToHost);
    cudaFree(din);
    cudaFree(dout);
}
// main
// Reads the element count, fills in[i] = i, reduces on the GPU, and prints
// the total.
int main() {
    int inputElements, outputElements;
    float *in, *out;

    cout << "Enter input elements: ";
    cin >> inputElements;

    // Each block reduces 2*block_size inputs, so ceil-divide for the number
    // of per-block partial sums.
    outputElements = inputElements / (block_size<<1);
    if (inputElements % (block_size<<1)) {
        outputElements++;
    }

    // allocating memory
    in = (float *) malloc(sizeof(float) * inputElements);
    // BUG FIX: the output buffer was sized with sizeof(float*) per element
    // (pointer size) instead of sizeof(float).
    out = (float *) malloc(sizeof(float) * outputElements);

    // assigning values
    for (int i=0; i < inputElements; i++) {
        in[i] = i;
    }

    parallelReduction(in, out, inputElements, outputElements);

    // BUG FIX: the kernel produces one partial sum PER BLOCK; printing only
    // out[0] was wrong whenever inputElements > 2*block_size. Finish the
    // reduction on the host.
    float total = 0.0f;
    for (int i = 0; i < outputElements; i++) {
        total += out[i];
    }
    cout << "Reduced Sum from GPU = " << total;

    free(in);
    free(out);
    return 0;
}
|
12,532 | /*
* nvmatrix_kernel.cu
*
* Created on: 21-Jan-2009
* Author: Alex Krizhevsky (akrizhevsky@gmail.com)
*/
#include <stdio.h>
#include <cuda_runtime.h>
#include "nvmatrix_kernel.cuh"
// ---------------------------------------------------------------------------
// Element-wise map kernels. Each walks the flat array with a grid-stride
// loop, so any launch configuration covers all numElements entries. The
// kernels using __expf/__logf trade a few ulps of accuracy for speed.
// ---------------------------------------------------------------------------

// target[i] = exp(gData[i]) (fast intrinsic).
__global__ void kExp(float* gData, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = __expf(gData[i]);
}
// Logistic sigmoid via tanh: 1/(1+e^-x) == (1 + tanh(x/2)) / 2.
__global__ void kLogistic1(float* gData, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = (1 + tanhf(gData[i] / 2)) / 2;
}
// Logistic sigmoid computed directly from expf.
__global__ void kLogistic2(float* gData, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = 1 / (1 + expf(-gData[i]));
}
// target[i] = ln(gData[i]) (fast intrinsic).
__global__ void kLog(float* gData, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = __logf(gData[i]);
}
// target[i] = gData[i]^2.
__global__ void kSquare(float* gData, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = gData[i] * gData[i];
}
// target[i] = sqrt(gData[i]).
__global__ void kSqrt(float* gData, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = sqrtf(gData[i]);
}
// target[i] = 0 (gData is ignored).
__global__ void kZero(float* gData, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = 0;
}
// target[i] = 1 / gData[i].
__global__ void kReciprocal(float* gData, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = 1 / gData[i];
}
// Signum: -1, 0 or +1 via the branch-free (x>0)-(x<0) idiom.
__global__ void kSign(float* gData, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = (gData[i] > 0) - (gData[i] < 0);
}
// target[i] = scalar - gData[i].
__global__ void kSubtractFromScalar(float* gData, float scalar, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = scalar - gData[i];
}
// target[i] = scalar + gData[i].
__global__ void kAddScalar(float* gData, float scalar, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = scalar + gData[i];
}
// target[i] = 1.0f when gData[i] > scalar, else 0.0f.
__global__ void kBiggerThanScalar(float* gData, float scalar, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = gData[i] > scalar;
}
// target[i] = 1.0f when gData[i] < scalar, else 0.0f.
__global__ void kSmallerThanScalar(float* gData, float scalar, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = gData[i] < scalar;
}
// Inclusive range test: 1.0f when lower <= gData[i] <= upper.
__global__ void kInRangeInc(float* gData, float lower, float upper, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = gData[i] >= lower && gData[i] <= upper;
}
// Exclusive range test: 1.0f when lower < gData[i] < upper.
__global__ void kInRangeExc(float* gData, float lower, float upper, float* target, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
        target[i] = gData[i] > lower && gData[i] < upper;
}
// Per-stream multiply-with-carry PRNG step shared by the random kernels:
// rndWord = mult * LOW_BITS(word) + HIGH_BITS(word). Each of the
// NUM_RND_STREAMS streams keeps its own multiplier and 64-bit state, and
// the updated state is written back so sequences continue across launches.

// Fills gData with uniform random floats in (0, 1].
__global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long long rndWord = rndWords[idx];
    const unsigned int rndMult = rndMults[idx];
    // Stride is NUM_RND_STREAMS (one stream per thread), not the launch size.
    for (unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
        gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
    }
    rndWords[idx] = rndWord;
}
// Samples each probability gData[i] into {0, 1}: 1 when the uniform draw
// falls below gData[i].
__global__ void kBinarizeProbs(unsigned int* rndMults, unsigned long long* rndWords, float *gData, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long long rndWord = rndWords[idx];
    const unsigned int rndMult = rndMults[idx];
    for (unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
        gData[i] = gData[i] > (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
    }
    rndWords[idx] = rndWord;
}
#define PI 3.1415926535897932f
/*
* TODO: modify to take mean/stdev
*/
// Box-Muller transform: each pair of uniforms (rnd1, rnd2) yields two
// independent Gaussian samples R*cos(T) and R*sin(T), so one loop iteration
// fills gData[i] and gData[i + NUM_RND_STREAMS] (hence the 2*NUM_RND_STREAMS
// stride).

// Adds zero-mean Gaussian noise with scalar standard deviation to gData.
__global__ void kAddGaussianNoise(unsigned int* rndMults, unsigned long long* rndWords, float* gData, const float stdev,
                                  unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long long rndWord = rndWords[idx];
    const unsigned int rndMult = rndMults[idx];
    float rnd1, rnd2, R, T;
    for (unsigned int i = idx; i < numElements; i += 2 * NUM_RND_STREAMS) {
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
        rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
        rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
        T = 2 * PI * rnd2;
        R = sqrtf(-2 * __logf(rnd1));
        gData[i] += stdev * R * __cosf(T);
        if (i + NUM_RND_STREAMS < numElements)
            gData[i + NUM_RND_STREAMS] += stdev * R * __sinf(T);
    }
    rndWords[idx] = rndWord;
}
// Adds Gaussian noise with a per-element standard deviation stdevs[i].
__global__ void kAddGaussianNoise(unsigned int* rndMults, unsigned long long* rndWords, float* gData, const float* stdevs,
                                  unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long long rndWord = rndWords[idx];
    const unsigned int rndMult = rndMults[idx];
    float rnd1, rnd2, R, T;
    for (unsigned int i = idx; i < numElements; i += 2 * NUM_RND_STREAMS) {
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
        rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
        rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
        T = 2 * PI * rnd2;
        R = sqrtf(-2 * __logf(rnd1));
        gData[i] += stdevs[i] * R * __cosf(T);
        if (i + NUM_RND_STREAMS < numElements)
            gData[i + NUM_RND_STREAMS] += stdevs[i + NUM_RND_STREAMS] * R * __sinf(T);
    }
    rndWords[idx] = rndWord;
}
/*
 * TODO: modify to take mean/stdev
 */
// Overwrites gData with zero-mean Gaussian samples (scalar stdev).
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, const float stdev,
                                unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long long rndWord = rndWords[idx];
    const unsigned int rndMult = rndMults[idx];
    float rnd1, rnd2, R, T;
    for (unsigned int i = idx; i < numElements; i += 2 * NUM_RND_STREAMS) {
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
        rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
        rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
        T = 2 * PI * rnd2;
        R = sqrtf(-2 * __logf(rnd1));
        gData[i] = stdev * R * __cosf(T);
        if (i + NUM_RND_STREAMS < numElements)
            gData[i + NUM_RND_STREAMS] = stdev * R * __sinf(T);
    }
    rndWords[idx] = rndWord;
}
// Overwrites gData with Gaussian samples, per-element stdevs[i].
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, const float* stdevs,
                                unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long long rndWord = rndWords[idx];
    const unsigned int rndMult = rndMults[idx];
    float rnd1, rnd2, R, T;
    for (unsigned int i = idx; i < numElements; i += 2 * NUM_RND_STREAMS) {
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
        rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
        rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
        T = 2 * PI * rnd2;
        R = sqrtf(-2 * __logf(rnd1));
        gData[i] = stdevs[i] * R * __cosf(T);
        if (i + NUM_RND_STREAMS < numElements)
            gData[i + NUM_RND_STREAMS] = stdevs[i + NUM_RND_STREAMS] * R * __sinf(T);
    }
    rndWords[idx] = rndWord;
}
// Initializes each stream's 64-bit state from the seed and burns in
// NUM_RND_BURNIN steps so the streams decorrelate.
__global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // The initial x is the seed and the initial carry is 1
    unsigned long long rndWord = ((unsigned long long) seed << 32) + 1;
    const unsigned int rndMult = rndMults[idx];
    /*
     * Run the chain for a few steps so that all the streams have a chance
     * to differentiate. They start out generating similar random numbers
     * because all the multipliers are similar.
     */
    for (unsigned int i = 0; i < NUM_RND_BURNIN; i++) {
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
    }
    rndWords[idx] = rndWord;
}
// Element-wise comparison: gMatTarget[i] = 1.0f when gMat1[i] > gMat2[i],
// else 0.0f. Unlike the grid-stride kernels above, this one handles exactly
// one element per thread, so the launch must supply >= numElements threads.
__global__ void kBiggerThan(float* gMat1, float* gMat2, float* gMatTarget, unsigned int numElements) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= numElements)
        return;
    gMatTarget[tid] = gMat1[tid] > gMat2[tid];
}
// Strided 2D copy: the data is treated as rows of copyWidth elements, with
// consecutive source rows srcJumpWidth apart and destination rows
// destJumpWidth apart (presumably the two matrices' row pitches — verify at
// the call sites). Grid-stride loop over the numElements logical elements.
__global__ void kCopy(float* srcStart, float* destStart, const int copyWidth,
        const int srcJumpWidth, const int destJumpWidth, const int numElements) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (int i = idx; i < numElements; i += blockDim.x * gridDim.x) {
        destStart[(i / copyWidth) * destJumpWidth + i % copyWidth] = srcStart[(i / copyWidth) * srcJumpWidth + i % copyWidth];
    }
}
// Maps flat index i of a row-major (.. x width) source onto the flat index
// of the corresponding element in a transposed destination whose rows are
// `height` elements long.
__device__ inline int getTransArrayIndex(unsigned int width, unsigned int height, unsigned int i) {
    const unsigned int srcRow = i / width;
    const unsigned int srcCol = i % width;
    return height * srcCol + srcRow;
}
/*
* like above but assumes destination is transposed.
* note that this is not efficient because there will be
* memory transactions that are not coalesced.
*/
// Copies a strided source region into a TRANSPOSED destination, one element
// per thread (guarded, no grid-stride). The transposed writes are not
// coalesced, hence "Slow" — kCopyToTransDestFast is the tiled alternative.
__global__ void kCopyToTransDestSlow(float* srcStart, float* destStart, unsigned int srcCopyWidth, unsigned int srcJumpWidth,
        unsigned int destJumpHeight, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < numElements)
        destStart[getTransArrayIndex(srcCopyWidth, destJumpHeight, idx)] = srcStart[(idx / srcCopyWidth) * srcJumpWidth + idx
                % srcCopyWidth];
}
/*
* a not transposed, b transposed.
* coalesced reads and writes, no bank conflicts cause of the +1.
*/
// Tiled transpose-copy: a COPY_BLOCK_SIZE-square tile is staged in shared
// memory (the +1 column padding avoids shared-memory bank conflicts) so
// that both the global read and the transposed global write are coalesced.
// NOTE(review): the bounds guard is commented out, so the copied region's
// dimensions must be exact multiples of COPY_BLOCK_SIZE and the launch must
// use COPY_BLOCK_SIZE x COPY_BLOCK_SIZE blocks — confirm at the call sites.
__global__ void kCopyToTransDestFast(float* srcStart, float* destStart, unsigned int srcCopyWidth, unsigned int srcCopyHeight,
        unsigned int srcJumpSize, unsigned int destJumpSize) {
    // const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
    // const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
    // if(idxX < srcCopyWidth && idxY < srcCopyHeight) {
    const unsigned int srcReadIdx = (blockIdx.y * blockDim.y + threadIdx.y) * srcJumpSize + blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int destWriteIdx = (blockIdx.x * blockDim.x + threadIdx.y) * destJumpSize + blockIdx.y * blockDim.y + threadIdx.x;
    __shared__
    float smem[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE + 1];
    smem[threadIdx.x][threadIdx.y] = srcStart[srcReadIdx];
    __syncthreads();   // whole tile must be staged before the transposed read
    destStart[destWriteIdx] = smem[threadIdx.y][threadIdx.x];
    // }
}
// Scaled element-wise sum: dest[i] = scaleA*a[i] + scaleB*b[i], covered by a
// grid-stride loop so any launch size works.
__global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls, float scaleA, float scaleB) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numEls; i += stride) {
        dest[i] = scaleA * a[i] + scaleB * b[i];
    }
}
// Element-wise product: dest[i] = a[i] * b[i], grid-stride loop over numEls.
__global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int stride = blockDim.x * gridDim.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < numEls) {
        dest[i] = a[i] * b[i];
        i += stride;
    }
}
// Element-wise quotient dest[i] = a[i] / b[i] via the __fdividef intrinsic
// (fast division that trades accuracy / special-case handling for speed
// compared with the '/' operator). Grid-stride loop over numEls.
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;
    // const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
    for (unsigned int i = idx; i < numEls; i += numThreads) {
        dest[i] = __fdividef(a[i], b[i]);
    }
}
// Tiled matrix transpose: each block stages an ADD_BLOCK_SIZE-square tile of
// `a` (width x height, row-major) in shared memory and writes it back
// transposed into `dest`, so both the read and the write are coalesced. The
// +1 column padding avoids shared-memory bank conflicts, and both accesses
// are guarded so partial edge tiles are handled correctly.
__global__ void kTranspose(float* a, float* dest, int width, int height) {
    const int bx = blockIdx.x * blockDim.x;
    const int by = blockIdx.y * blockDim.y;
    const int tx = bx + threadIdx.x;
    const int ty = by + threadIdx.y;
    // unsigned int idx = ty * width + tx;
    __shared__
    float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
    if (tx < width && ty < height) {
        smem[threadIdx.y][threadIdx.x] = a[ty * width + tx];
    }
    __syncthreads();
    // Write the tile's transpose; note the swapped thread roles so the
    // global store stays coalesced.
    if (by + threadIdx.x < height && threadIdx.y + bx < width) {
        // idx = height * (blockIdx.x * blockDim.x + threadIdx.y) + blockIdx.y * blockDim.y + threadIdx.x;
        dest[(bx + threadIdx.y) * height + by + threadIdx.x] = smem[threadIdx.x][threadIdx.y];
    }
}
/* Elementwise squared difference: dest[i] = (a[i] - b[i])^2 (grid-stride). */
__global__ void kSquaredDiff(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numEls; i += step) {
        const float diff = a[i] - b[i];
        dest[i] = diff * diff;
    }
}
/*
 * Three-operand in-place weighted sum:
 * a[i] = scaleA*a[i] + scaleB*b[i] + scaleC*c[i]. Grid-stride loop.
 */
__global__ void kAdd3(float* a, const float* b, const float* c, const unsigned int numEls, const float scaleA, const float scaleB,
                      const float scaleC) {
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = first; i < numEls; i += step)
        a[i] = scaleA * a[i] + scaleB * b[i] + scaleC * c[i];
}
/*
 * Periodically tile src (srcWidth x srcHeight, row-major) into the larger
 * tgt (tgtWidth x tgtHeight): tgt[y][x] = src[y % srcHeight][x % srcWidth].
 */
__global__ void kTile(const float* src, float* tgt, const int srcWidth, const int srcHeight, const int tgtWidth, const int tgtHeight) {
    const int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < tgtWidth * tgtHeight; i += step) {
        const int row = i / tgtWidth;   // target row
        const int col = i % tgtWidth;   // target column
        tgt[i] = src[(row % srcHeight) * srcWidth + (col % srcWidth)];
    }
}
/*
* Matrix in ROW-MAJOR order!
*/
/*
 * Row-major matrix plus scaled row vector, broadcast down the rows:
 * tgtMat[r][c] = mat[r][c] + scaleVec * vec[c].
 */
__global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height, float scaleVec) {
    const unsigned int total = width * height;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += step)
        tgtMat[i] = mat[i] + scaleVec * vec[i % width];  // i % width == column
}
/*
* Matrix in ROW-MAJOR order!
*/
/*
 * Row-major matrix plus scaled column vector, broadcast along each row:
 * tgtMat[r][c] = mat[r][c] + scaleVec * vec[r].
 */
__global__ void kAddColVector(float* mat, float* vec, float* tgtMat, const unsigned int width, const unsigned int height,
                              const float scaleVec) {
    const unsigned int total = width * height;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += step)
        tgtMat[i] = mat[i] + scaleVec * vec[i / width];  // i / width == row
}
/*
* Matrix in ROW-MAJOR order!
*/
/*
 * Elementwise equality against a broadcast row vector:
 * tgtMat[r][c] = (mat[r][c] == vec[c]) as 0.0f / 1.0f.
 */
__global__ void kEqualsRowVector(float* mat, float* vec, float* tgtMat, const int width, const int height) {
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < width * height; i += step) {
        tgtMat[i] = mat[i] == vec[i % width];
    }
}
/*
* Matrix in ROW-MAJOR order!
*/
/*
 * Elementwise equality against a broadcast column vector:
 * tgtMat[r][c] = (mat[r][c] == vec[r]) as 0.0f / 1.0f.
 */
__global__ void kEqualsColVector(float* mat, float* vec, float* tgtMat, const int width, const int height) {
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < width * height; i += step) {
        tgtMat[i] = mat[i] == vec[i / width];
    }
}
/*
* Matrix in ROW-MAJOR order!
*/
/*
 * Elementwise greater-than against a broadcast row vector:
 * tgtMat[r][c] = (mat[r][c] > vec[c]) as 0.0f / 1.0f.
 */
__global__ void kBiggerThanRowVector(float* mat, float* vec, float* tgtMat, const int width, const int height) {
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < width * height; i += step) {
        tgtMat[i] = mat[i] > vec[i % width];
    }
}
/*
* Matrix in ROW-MAJOR order!
*/
/*
 * Elementwise greater-than against a broadcast column vector:
 * tgtMat[r][c] = (mat[r][c] > vec[r]) as 0.0f / 1.0f.
 */
__global__ void kBiggerThanColVector(float* mat, float* vec, float* tgtMat, const int width, const int height) {
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < width * height; i += step) {
        tgtMat[i] = mat[i] > vec[i / width];
    }
}
/*
* Matrix in ROW-MAJOR order!
*/
/* Scale each column by a row vector: tgtMat[r][c] = mat[r][c] * vec[c]. */
__global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
    const unsigned int total = width * height;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += step)
        tgtMat[i] = mat[i] * vec[i % width];
}
/*
* Matrix in ROW-MAJOR order!
*/
/* Scale each row by a column vector: tgtMat[r][c] = mat[r][c] * vec[r]. */
__global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
    const unsigned int total = width * height;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += step)
        tgtMat[i] = mat[i] * vec[i / width];
}
/*
* Matrix in ROW-MAJOR order!
*/
/*
 * Divide each column by a row vector using the fast-math intrinsic:
 * tgtMat[r][c] = mat[r][c] / vec[c].
 */
__global__ void kDivideByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
    const unsigned int total = width * height;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += step)
        tgtMat[i] = __fdividef(mat[i], vec[i % width]);
}
/*
* Matrix in ROW-MAJOR order!
*/
/*
 * Divide each row by a column vector using the fast-math intrinsic:
 * tgtMat[r][c] = mat[r][c] / vec[r].
 */
__global__ void kDivideByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
    const unsigned int total = width * height;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += step)
        tgtMat[i] = __fdividef(mat[i], vec[i / width]);
}
/*
* Bad when there are few columns. But if there are a few thousand columns, you can't really
* go any faster than this because all the reads are coalesced and processor utilization is maximal.
*/
/*
 * Column sums of a row-major (height x width) matrix: vec[c] = sum over rows
 * of mat[r][c]. One thread per column, so every warp's reads are coalesced;
 * only efficient when width is large (see original author's note above).
 */
__global__ void kDumbSumCols(float* mat, float* vec, unsigned int width, unsigned int height) {
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < width) {
        const float* p = mat + col;   // walk down this column
        float acc = 0;
        for (unsigned int r = 0; r < height; ++r, p += width)
            acc += *p;
        vec[col] = acc;
    }
}
/*
 * Column maxima of a row-major (height x width) matrix: vec[c] = max over rows
 * of mat[r][c]. One thread per column; starts from the first row's value, so
 * no sentinel is needed. Uses the project's myMax helper.
 */
__global__ void kDumbMaxCols(float* mat, float* vec, unsigned int width, unsigned int height) {
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < width) {
        float* p = mat + col;       // row 0 of this column
        float best = *p;
        for (unsigned int r = 1; r < height; ++r) {
            p += width;             // next row, same column
            best = myMax(*p, best);
        }
        vec[col] = best;
    }
}
|
12,533 | #include "includes.h"
/*
 * Batched back-substitution step: for every batch member (x dimension) and
 * every interior row globalIdy < nx-2,
 *   data[row][batch] -= inv1[row][batch] * data[nx-2][batch]
 *                     + inv2[row][batch] * data[nx-1][batch]
 * where arrays are laid out row-major as [nx][nBatch] (batch is contiguous,
 * so accesses are coalesced). Expects a 2D launch covering nBatch x (nx-2).
 */
__global__ static void solveFull ( double* data, double* inv1, double* inv2, const int nx, const int nBatch )
{
    // Matrix index
    int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
    int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
    // Bug fix: the two reads below were unconditional, so any thread with
    // globalIdx >= nBatch (grid rounded up past the batch count) read out of
    // bounds. Guarding here never changes the result for in-range threads.
    if (globalIdx >= nBatch)
        return;
    // Last two entries of this batch member's column (read before any write)
    double oldNx2 = data[(nx - 2) * nBatch + globalIdx]; // two points from end
    double oldNx1 = data[(nx - 1) * nBatch + globalIdx]; // one point from end
    // Element being updated
    int index = globalIdy * nBatch + globalIdx;
    if (globalIdy < nx - 2)
    {
        data[index] = data[index] - (inv1[index] * oldNx2 + inv2[index] * oldNx1);
    }
}
12,534 | #include "includes.h"
/*
 * Gauss-Jordan pivot normalization: the single thread mapped to the diagonal
 * element (i, i) divides row entries I[i][i] and A[i][i] by the pivot A[i][i]
 * (so A[i][i] becomes 1). All other threads do nothing.
 */
__global__ void diag_normalize(float *A, float *I, int n, int i){
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= n || y >= n)
        return;
    if (x == y && x == i) {
        const float pivot = A[i * n + i];   // read once, before A is modified
        I[x * n + y] /= pivot;
        A[x * n + y] /= pivot;
    }
}
12,535 | // Works, but the results of the test statistic seem incorrect
extern "C" void gpu_normBlocks(double *Genes, double *Select, double *C, int *nA, int *N, int *M, int *K, int *R);
/*
 * One thread per replicate i < R. Using the K gene indices in Select[:, i]
 * (1-based in the input, converted to 0-based here) as a normalizing set, the
 * kernel computes, for every gene x NOT in that set, a two-group test
 * statistic over samples 0..nA-1 (group A) vs nA..N-1 (group B) and stores it
 * in C[x-1 + i*M]. Genes is laid out as Genes[sample + gene*N] (N samples,
 * M genes). Entries of C for genes inside the normalizing set are left
 * untouched. NOTE(review): assumes nA >= 2 and N - nA >= 2, otherwise the
 * variance denominators are zero — confirm with the caller.
 */
__global__ void
normalization(const double *Genes, const double *Select, double *C, int nA, int N, int M, int K, int R)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < R)
{
// Grand total of the normalizing genes' expression over all N samples.
int indk0=0;
double tmp = 0;
for(int k=0; k<K; k++){
indk0 = int(Select[k+i*K]-1);
for(int n=0; n<N; n++){
tmp+=Genes[n+indk0*N];
}
}
int indt = 0; //index of tested gene
int indk = 0; //index of normalizing gene
double shat = 0;
double u = 0; double sum1=0; double sum2=0;
double meanA=0; double varA = 0;
double meanB=0; double varB = 0;
int val = 0;
// Genes are numbered 1..M in Select, hence the 1-based loop.
for(int x=1; x<(M+1); x++){
val = 0; // val =0 if normalizing and val = 1 if to be tested
for(int k = 0; k<K; k++){
if(Select[k+i*K] == x){val=val+1;break;}
}
if(val==0){//if gene 'x' to be tested
indt = int(x-1); //index of t-th genes tested
// Group A: accumulate the variance-stabilized values u = 2*sqrt(g/shat).
sum1=0; sum2=0;
for(int n=0; n<nA; n++){ // samples or individuals
shat = 0;
for(int k=0; k<K; k++){ //normalization for the n-th subject
indk = int(Select[k+i*K]-1);
shat+=Genes[n+indk*N];
}
// Per-subject size factor relative to the grand mean of the set.
shat = shat/tmp*N;
u = 2*pow(Genes[n+indt*N]/shat,0.5);
sum1 += u;
sum2 += u*u;
}
meanA = sum1/nA;
varA = (sum2 -sum1*sum1/nA)/(nA-1);
// Group B: same accumulation over the remaining N-nA samples.
sum1=0; sum2=0;
for(int n=nA; n<N; n++){ // samples or individuals
shat = 0;
for(int k=0; k<K; k++){ //normalization for the n-th subject
indk = int(Select[k+i*K]-1);
shat+=Genes[n+indk*N];
}
shat = shat/tmp*N;
u = 2*pow(Genes[n+indt*N]/shat,0.5);
sum1 += u;
sum2 += u*u;
}
meanB = sum1/(N-nA);
varB = (sum2 -sum1*sum1/(N-nA))/(N-nA-1);
// Squared Welch-style statistic for this gene and replicate.
C[indt+i*M] = (meanA-meanB)*(meanA-meanB)/(varA/nA+varB/(N-nA));
}//end if.
}//end for(int x...
}
}
/*
 * Host wrapper (R-style .C() interface, all sizes passed by pointer):
 * copies Genes (N samples x M genes) and Select (K x R) to the device, runs
 * one `normalization` thread per replicate (R total), and copies the M x R
 * result matrix back into C.
 */
void gpu_normBlocks(double *Genes, double *Select, double *C, int *nA, int *N, int *M, int *K, int *R)
{
    // Device memory. Only the three arrays the kernel touches are needed: the
    // kernel keeps tmp/shat/u/sum1/sum2/mean*/var* in per-thread locals, so
    // the original per-scalar cudaMallocs were dead allocations (removed).
    double *d_Genes, *d_Select, *d_C;
    // Execution configuration: 256 threads per block. The original launched
    // R blocks of ONE thread each (THREADS was a double equal to 1), wasting
    // 255/256 of every warp. The kernel guards with `if (i < R)`, so rounding
    // the grid up is safe.
    const int THREADS = 256;
    const int n_blocksx = (*R + THREADS - 1) / THREADS;  // ceil(R / THREADS)
    dim3 threadPerBlock(THREADS);
    dim3 numBlocks(n_blocksx);
    // Allocate device arrays
    cudaMalloc((void**)&d_Genes, *N * *M * sizeof(double));
    cudaMalloc((void**)&d_Select, *K * *R * sizeof(double));
    cudaMalloc((void**)&d_C, *M * *R * sizeof(double));
    // Copy inputs to the device
    cudaMemcpy(d_Genes, Genes, *N * *M * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Select, Select, *K * *R * sizeof(double), cudaMemcpyHostToDevice);
    // GPU vector normalization
    normalization<<<numBlocks,threadPerBlock>>>(d_Genes, d_Select, d_C, *nA, *N, *M, *K, *R);
    // Copy output (the blocking memcpy also synchronizes with the kernel)
    cudaMemcpy(C, d_C, *M * *R * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_Genes);
    cudaFree(d_Select);
    cudaFree(d_C);
}
//Build shared object
//https://forums.developer.nvidia.com/t/shared-library-creation/4776/8
//
// nvcc --ptxas-options=-v --compiler-options '-fPIC' -o gpu_normBlocks.so --shared gpu_normBlocks.cu
|
12,536 | #include "includes.h"
/*
 * Sobel edge detection on a width x height image of ints (row-major).
 * output[j][i] = 255 when the squared gradient magnitude exceeds thresh,
 * 0 otherwise; border pixels are forced to 0. Every thread writes output,
 * so the launch grid must not exceed the image dimensions.
 */
__global__ void sobelEdgeDetectionWithRegisters (int *input, int *output, int width, int height, int thresh) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int index = j * width + i;
    if ( ((i > 0) && (j > 0)) && ((i < (width - 1)) && (j < (height - 1))))
    {
        // Bug fix: the four diagonal neighbours were loaded BEFORE this
        // bounds check, so border threads (e.g. j == 0) computed negative
        // indices and read out of bounds. Load them inside the guard.
        int ne = input[width * (j - 1) + (i + 1)];  // north-east
        int nw = input[width * (j - 1) + (i - 1)];  // north-west
        int se = input[width * (j + 1) + (i + 1)];  // south-east
        int sw = input[width * (j + 1) + (i - 1)];  // south-west
        int sum1 = 0, sum2 = 0, magnitude;
        // Horizontal gradient (Gx), 3x3 Sobel weights.
        sum1 = ne - nw
             + 2 * input[width * (j) + (i + 1)] - 2 * input[width * (j) + (i - 1)]
             + se - sw;
        // Vertical gradient (Gy).
        sum2 = nw + 2 * input[width * (j - 1) + (i)] + ne
             - sw - 2 * input[width * (j + 1) + (i)] - se;
        // Squared magnitude — thresh is compared against Gx^2 + Gy^2 (no sqrt).
        magnitude = sum1 * sum1 + sum2 * sum2;
        if(magnitude > thresh)
            output[index] = 255;
        else
            output[index] = 0;
    }
    else {
        output[index] = 0;
    }
}
12,537 | #include "includes.h"
/* Elementwise negation: vec[g] = -vec1[g] for every g < numElements. */
__global__ void kernel_vec_equals_minus_vec1(double *vec, double *vec1, int numElements)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < numElements)
        vec[gid] = -vec1[gid];
}
12,538 | #define allnorm_kernel (1.0F/26.0F)*(1.0F/10.0F)
#define norm_kernel 1.0/51076
#define ONE_THIRD_GPU 0.33333333333333333333F /* 1/3 */
#define ONE_FOURTH_GPU 0.25F /* 1/4 */
#define ONE_SIXTH_GPU 0.16666666666666666666F /* 1/6 */
// Empty kernel: launched once up front so CUDA context creation / module load
// costs are not charged to the timed kernels that follow.
__global__ void kernel_warmingup(){}
/*
 * Clear the corner mask (gradxy) wherever the cornerness response is negative,
 * restricted to pixels at least 6 away from every image border.
 */
__global__ void kernel_strong_corners(int *gradxy,float *cornerness,int width,int height)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int N = width;                     // row stride
    const bool interior = (col >= 6) && (col < width - 6) &&
                          (row >= 6) && (row < height - 6);
    if (interior && cornerness[row * N + col] < 0)
        gradxy[row * N + col] = 0;
}
/*
 * Zero the whole width x height int buffer, one thread per element.
 * Note the axis mapping in this kernel: threadIdx.x covers rows (gg) and
 * threadIdx.y covers columns (hh), matching the launcher's convention.
 */
__global__ void kernel_memset(int *gradxy,int width,int height)
{
    const int row = threadIdx.x + blockIdx.x * blockDim.x;   // gg
    const int col = threadIdx.y + blockIdx.y * blockDim.y;   // hh
    if (col < width && row < height)
        gradxy[row * width + col] = 0;
}
/*
 * First pass of a separable 7-tap Gaussian-like blur (weights 1,12,55,90,55,
 * 12,1 — see norm_kernel = 1/51076 = 1/226^2 applied in pass 2) over the three
 * gradient-product images at once. The filter runs along the contiguous `gg`
 * index; note the transposed addressing here: imageW = height and the buffers
 * are indexed as [hh * height + gg].
 * NOTE(review): unlike pass 2, there is no interior guard, so gg-3 / gg+3 read
 * up to 3 elements past either end of a line (and past the buffer at the very
 * first/last rows). Confirm callers pad the buffers or tolerate this.
 */
__global__ void kernel_imgblurg_separable_1(int *gradx2,int *result,int *grady2,int *result1,int *gradxy,int *result2,int width,int height)
{
int imageW=height;
int gg=threadIdx.x+blockIdx.x*blockDim.x;
int hh=threadIdx.y+blockIdx.y*blockDim.y;
if((hh<width)&&(gg<height))
{
// Same 7-tap convolution applied to Ix^2, Iy^2 and Ix*Iy respectively.
result[hh*imageW+gg]=1*gradx2[hh*imageW+gg-3]+12*gradx2[hh*imageW+gg-2]+55*gradx2[hh*imageW+gg-1]+90*gradx2[hh*imageW+gg]+55*gradx2[hh*imageW+gg+1]+
12*gradx2[hh*imageW+gg+2]+1*gradx2[hh*imageW+gg+3];
result1[hh*imageW+gg]=1*grady2[hh*imageW+gg-3]+12*grady2[hh*imageW+gg-2]+55*grady2[hh*imageW+gg-1]+90*grady2[hh*imageW+gg]+55*grady2[hh*imageW+gg+1]+
12*grady2[hh*imageW+gg+2]+1*grady2[hh*imageW+gg+3];
result2[hh*imageW+gg]=1*gradxy[hh*imageW+gg-3]+12*gradxy[hh*imageW+gg-2]+55*gradxy[hh*imageW+gg-1]+90*gradxy[hh*imageW+gg]+55*gradxy[hh*imageW+gg+1]+
12*gradxy[hh*imageW+gg+2]+1*gradxy[hh*imageW+gg+3];
}
}
/*
 * Second pass of the separable blur: applies the same 7-tap filter along the
 * other axis and multiplies by norm_kernel (1/51076, the full 2D kernel sum)
 * to normalize. Interior pixels (>= 3 from every border) get the filtered,
 * normalized value; border pixels are copied through from input/input1/input2.
 * Note: results are stored into int buffers, so the normalized double value
 * is truncated toward zero on assignment — this truncation is part of the
 * pipeline's existing behavior.
 */
__global__ void kernel_imgblurg_separable_2(int *gradx2,int *result,int *input,int *grady2,int *result1,int *input1,int *gradxy,int *result2,int *input2,int width,int height)
{
int imageW=width;
int gg=threadIdx.x+blockIdx.x*blockDim.x;
int hh=threadIdx.y+blockIdx.y*blockDim.y;
if((gg<width)&&(hh<height))
{
if((gg>=3)&&(hh<(height-3))&&(hh>=3)&&(gg<(width-3)))
{
// Vertical 7-tap pass + normalization for Ix^2, Iy^2, Ix*Iy.
result[hh*imageW+gg]=norm_kernel*(1*gradx2[(hh-3)*imageW+gg]+12*gradx2[(hh-2)*imageW+gg]+55*gradx2[(hh-1)*imageW+gg]+90*gradx2[hh*imageW+gg]+55*gradx2[(hh+1)*imageW+gg]+
12*gradx2[(hh+2)*imageW+gg]+1*gradx2[(hh+3)*imageW+gg]);
result1[hh*imageW+gg]=norm_kernel*(1*grady2[(hh-3)*imageW+gg]+12*grady2[(hh-2)*imageW+gg]+55*grady2[(hh-1)*imageW+gg]+90*grady2[hh*imageW+gg]+55*grady2[(hh+1)*imageW+gg]+
12*grady2[(hh+2)*imageW+gg]+1*grady2[(hh+3)*imageW+gg]);
result2[hh*imageW+gg]=norm_kernel*(1*gradxy[(hh-3)*imageW+gg]+12*gradxy[(hh-2)*imageW+gg]+55*gradxy[(hh-1)*imageW+gg]+90*gradxy[hh*imageW+gg]+55*gradxy[(hh+1)*imageW+gg]+
12*gradxy[(hh+2)*imageW+gg]+1*gradxy[(hh+3)*imageW+gg]);
}
else{
// Border: pass the unfiltered input through unchanged.
result[hh*imageW+gg]= input[hh*imageW+gg];
result1[hh*imageW+gg]= input1[hh*imageW+gg];
result2[hh*imageW+gg]= input2[hh*imageW+gg];
}
}
}
/*
 * Grid-wide maximum of array[0..n) folded into *max. Each thread strides over
 * the array, blocks reduce in shared memory, and thread 0 of every block
 * merges its block maximum into *max under a spinlock.
 * Preconditions: blockDim.x <= 256 and a power of two (required by the
 * halving reduction); caller initialised *mutex to 0 and *max to a value
 * that is a valid identity for the data (e.g. -FLT_MAX).
 */
__global__ void kernel_find_max(float *array, float *max, int *mutex, unsigned int n)
{
    unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
    unsigned int stride = gridDim.x*blockDim.x;
    unsigned int offset = 0;
    __shared__ float cache[256];
    // Bug fix: the accumulator started at -1.0f, which returned the wrong
    // answer whenever every element is below -1. Start from -FLT_MAX so any
    // finite input wins the first fmaxf.
    float temp = -3.402823466e38f;   // -FLT_MAX
    while(index + offset < n){
        temp = fmaxf(temp, array[index + offset]);
        offset += stride;
    }
    cache[threadIdx.x] = temp;
    __syncthreads();
    // Tree reduction in shared memory (power-of-two blockDim assumed).
    unsigned int i = blockDim.x/2;
    while(i != 0){
        if(threadIdx.x < i){
            cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]);
        }
        __syncthreads();
        i /= 2;
    }
    // One atomic-protected merge per block.
    if(threadIdx.x == 0){
        while(atomicCAS(mutex,0,1) != 0); //lock
        *max = fmaxf(*max, cache[0]);
        atomicExch(mutex, 0); //unlock
    }
}
/*
 * Non-maximum suppression of the cornerness map. A pixel (inside a 6-pixel
 * border margin) is kept when its response d is at least RELMINTHR times the
 * global maximum AND is a local maximum of its 3x3 neighbourhood. Survivors
 * increment *seirial and set gradxy to 1 at their position.
 */
__global__ void kernel_selcornerness(float *cornerness,float *selcornerness,int width, int height,int *gradxy,float *maximum,int *seirial,float RELMINTHR)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int N = width;
    if (col < 6 || col >= width - 6 || row < 6 || row >= height - 6)
        return;
    const float d = cornerness[row * N + col];
    if (!(d >= (*maximum) * RELMINTHR))   // negated form keeps NaN behavior
        return;
    // Mixed strict/non-strict comparisons are preserved from the original so
    // exactly one pixel of an equal-valued plateau survives.
    const bool localMax =
        (d >  cornerness[row * N + (col + 1)]) &&
        (d >= cornerness[row * N + (col - 1)]) &&
        (d >= cornerness[(row + 1) * N + (col + 1)]) &&
        (d >= cornerness[(row + 1) * N + (col - 1)]) &&
        (d >  cornerness[(row - 1) * N + (col + 1)]) &&
        (d >  cornerness[(row - 1) * N + (col - 1)]) &&
        (d >= cornerness[(row + 1) * N + col]) &&
        (d >  cornerness[(row - 1) * N + col]);
    if (localMax) {
        atomicAdd(seirial, 1);
        gradxy[row * N + col] = 1;
    }
}
/*
 * Harris corner response from the blurred structure-tensor entries:
 * r = det(M) - 0.04 * trace(M)^2, clamped at zero, written into cornerness.
 * Only pixels at least 5 away from every border are processed.
 */
__global__ void kernel_cornerness(int *gradx2_b,int *grady2_b,int *gradxy_b,float *cornerness,int width,int height)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col < 5 || col >= width - 5 || row < 5 || row >= height - 5)
        return;
    const int N = width;
    const int gxx = gradx2_b[row * N + col];   // <Ix^2>
    const int gyy = grady2_b[row * N + col];   // <Iy^2>
    const int gxy = gradxy_b[row * N + col];   // <Ix*Iy>
    const int det = gxx * gyy - gxy * gxy;
    const int trace = gxx + gyy;
    float r = det - 0.04 * trace * trace;      // k = 0.04
    if (r < 0.0F)
        r = 0.0F;
    cornerness[row * N + col] = r;
}
/*
 * Per-pixel gradient products for the structure tensor:
 * gradx2 = Ix^2, grady2 = Iy^2, gradxy = Ix*Iy.
 */
__global__ void kernel_Ix2y2xy(int *gradx,int *grady,int *gradx2,int *grady2,int *gradxy,int width,int height)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= width || row >= height)
        return;
    const int idx = row * width + col;
    const int gx = gradx[idx];
    const int gy = grady[idx];
    gradx2[idx] = gx * gx;
    grady2[idx] = gy * gy;
    gradxy[idx] = gx * gy;
}
/*
 * Smoothed 5x5 image gradient: gradx/grady receive the horizontal/vertical
 * derivative of img computed with a 5x5 antisymmetric kernel (weights
 * 36/18/12/6/3/1) and scaled by allnorm_kernel = (1/26)*(1/10). Only pixels
 * at least 2 away from every border are written; border entries of gradx/
 * grady are left untouched. The float result is truncated on store into the
 * int output buffers (existing pipeline behavior).
 * gradx2/grady2 are accepted but unused here (kept for the caller's launch
 * signature).
 */
__global__ void kernel_imgradient5_smo(unsigned char *img,int width,int height, int *gradx2,int *grady2, int *gradx, int *grady)
{
int hh=threadIdx.x+blockIdx.x*blockDim.x;
int gg=threadIdx.y+blockIdx.y*blockDim.y;
if((hh>=2)&&(hh<(width-2))&&(gg>=2)&&(gg<(height-2)))
{
int N=width;
// Horizontal derivative: antisymmetric in the column offset.
gradx[gg*N+hh]=allnorm_kernel*(36*(img[gg*N+hh+1]-img[gg*N+hh-1]) +
18*(img[(gg+1)*N+hh+1]+img[(gg-1)*N+hh+1]-img[(gg-1)*N+hh-1]-img[(gg+1)*N+hh-1]) +
12*(img[(gg*N+hh+2)]-img[gg*N+hh-2]) +
6*(img[(gg+1)*N+hh+2]+img[(gg-1)*N+hh+2]-img[(gg+1)*N+hh-2]-img[(gg-1)*N+hh-2]) +
3*(img[(gg+2)*N+hh+1]+img[(gg-2)*N+hh+1]-img[(gg+2)*N+hh-1]-img[(gg-2)*N+hh-1]) +
1*(img[(gg+2)*N+hh+2]+img[(gg-2)*N+hh+2]-img[(gg-2)*N+hh-2]-img[(gg+2)*N+hh-2]));
// Vertical derivative: the same stencil transposed.
grady[gg*N+hh] = allnorm_kernel*(36*(img[(gg+1)*N+hh]-img[(gg-1)*N+hh]) +
18*(img[(gg+1)*N+hh+1]+img[(gg+1)*N+hh-1]-img[(gg-1)*N+hh+1]-img[(gg-1)*N+hh-1]) +
12*(img[(gg+2)*N+hh]-img[(gg-2)*N+hh]) +
6*(img[(gg+2)*N+hh+1]+img[(gg+2)*N+hh-1]-img[(gg-2)*N+hh+1]-img[(gg-2)*N+hh-1]) +
3*(img[(gg+1)*N+hh+2]+img[(gg+1)*N+hh-2]-img[(gg-1)*N+hh+2]-img[(gg-1)*N+hh-2]) +
1*(img[(gg+2)*N+hh+2]+img[(gg+2)*N+hh-2]-img[(gg-2)*N+hh+2]-img[(gg-2)*N+hh-2]));
}
}
/*
 * Fast approximate fourth root: returns ~x^(1/4), computed as the inverse
 * square root of the inverse square root, each via the bit-twiddling
 * "fast inverse square root" with Lomont's constant 0x5f375a86 and two
 * Newton-Raphson refinement steps. Device-only helper; assumes x > 0.
 */
__device__ float kati(float x)
{
float xhalf;
int i;
/* compute inverse square root */
xhalf=0.5f*x;
i=*(int*)&x;                   // reinterpret the float bits as an int
i=0x5f375a86 - (i>>1); // hidden initial guess, fast - LOMONT
x=*(float*)&i;                 // back to float: initial estimate of x^(-1/2)
x=x*(1.5f-xhalf*x*x);
x=x*(1.5f-xhalf*x*x); // add this in for added precision, or many more...
/* compute fourth root as the inverse square root of the inverse square root */
xhalf=0.5f*x;
i=*(int*)&x;
i=0x5f375a86 - (i>>1); // hidden initial guess, fast - LOMONT
x=*(float*)&i;
x=x*(1.5f-xhalf*x*x);
x=x*(1.5f-xhalf*x*x); // add this in for added precision, or many more...
return x;
}
/*
 * Sub-pixel corner localization. For every pixel flagged in gradxy (inside a
 * 6-pixel border margin), a quadratic surface is fitted to kati(cornerness)
 * (~fourth root, to flatten the response) over the 3x3 neighbourhood; the
 * surface's extremum gives sub-pixel corrections (ucorr, vcorr), clamped to
 * +/-0.5 px. Refined coordinates are appended to corners[][2] (x then y) at a
 * slot claimed atomically via *atom2, so the output order is nondeterministic.
 */
__global__ void kernel_coordinates(float* cornerness, float (*corners)[2],int *gradxy,int width,int height,int *atom2)
{
int hh=threadIdx.x+blockIdx.x*blockDim.x;
int gg=threadIdx.y+blockIdx.y*blockDim.y;
int N=width;
// s{p,c,n}{p,c,n}: 3x3 neighbourhood samples (prev/centre/next row, column).
float spp, spc, spn, scp, scc, scn, snp, snc, snn;
float Pxx, Pxy, Pyy, Px, Py, ucorr, vcorr, detf;
if((hh>=6)&&(hh<(width-6))&&(gg>=6)&&(gg<(height-6)))
{
if(gradxy[gg*N+hh])
{
spp=kati(cornerness[(gg-1)*N+(hh-1)]);
spc=kati(cornerness[(gg-1)*N+(hh)]);
spn=kati(cornerness[(gg-1)*N+(hh+1)]);
scp=kati(cornerness[(gg)*N+(hh-1)]);
scc=kati(cornerness[(gg)*N+(hh)]);
scn=kati(cornerness[(gg)*N+(hh+1)]);
snp=kati(cornerness[(gg+1)*N+(hh-1)]);
snc=kati(cornerness[(gg+1)*N+(hh)]);
snn=kati(cornerness[(gg+1)*N+(hh+1)]);
// Second derivatives (Pxx, Pxy, Pyy) and first derivatives (Px, Py) of the
// least-squares quadratic fit over the 3x3 patch.
Pxx=(spp + scp + snp -2.0F*(spc + scc + snc) + spn + scn + snn)*ONE_THIRD_GPU;
Pxy=(spp - spn - snp + snn)*ONE_FOURTH_GPU;
Pyy= (spp + spc + spn
-2.0F*(scp + scc + scn)
+ snp + snc + snn)*ONE_THIRD_GPU;
Px=(- spp - scp - snp + spn + scn + snn)*ONE_SIXTH_GPU;
Py=(- spp - spc - spn + snp + snc + snn)*ONE_SIXTH_GPU;
detf=Pxy*Pxy - Pxx*Pyy;
if(detf>=1E-12F || detf<=-1E-12F){ // nonzero determinant
// calculate sub-pixel corrections to the corner position
ucorr=(Pyy*Px - Pxy*Py)/detf;
vcorr=(Pxx*Py - Pxy*Px)/detf;
// pull the corrections inside the pixel
// printf("a=%lf ",ucorr );
if(ucorr>0.5F) ucorr=0.5F; else if(ucorr<-0.5F) ucorr=-0.5F;
if(vcorr>0.5F) vcorr=0.5F; else if(vcorr<-0.5F) vcorr=-0.5F;
// printf("xx");
}
else
{
ucorr=vcorr=0.0F;
}
// Claim an output slot; atom2 counts the corners emitted so far.
int x=atomicAdd(atom2,1);
//cornrs_big[gg*N+hh][0]=/*u0*/hh+ucorr;
//cornrs_big[gg*N+hh][1]=/*v0*/gg+vcorr;
//printf("%lf ",ucorr);
corners[x][0]=hh+ucorr;
corners[x][1]=gg+vcorr;
}
}
}
|
/*
 * XTEA block decipher. Each thread decrypts one 64-bit block (a pair of
 * 32-bit words) of input_data into output, running num_rounds rounds with
 * the 128-bit key (four 32-bit words). `length` is the word count of
 * input_data; a trailing odd word (if any) is left unprocessed.
 */
__global__ void decipher(unsigned int num_rounds, unsigned int *input_data, unsigned int *key, unsigned int *output, unsigned int length)
{
	// current thread with 1D
	unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
	// Bug fix: the original guard `thread_id*2 < length - 1` underflows for
	// unsigned length == 0 (wraps to UINT_MAX) and lets every thread read out
	// of bounds. `thread_id*2 + 1 < length` is identical for length >= 1 and
	// safe for length == 0.
	if (thread_id * 2 + 1 < length) {
		unsigned int v0 = input_data[thread_id*2];
		unsigned int v1 = input_data[(thread_id*2)+1];
		unsigned long delta = 0x9e3779b9L;           // XTEA key schedule constant
		unsigned long mask = 0xffffffffL;            // keep arithmetic in 32 bits
		unsigned long sum = (delta*num_rounds) & mask;
		unsigned int i;
		for (i = 0; i < num_rounds; i++){
			v1 = (v1 - (((v0<<4 ^ v0>>5) + v0) ^ (sum + key[sum>>11 & 3]))) & mask;
			sum = (sum - delta) & mask;
			v0 = (v0 - (((v1<<4 ^ v1>>5) + v1) ^ (sum + key[sum & 3]))) & mask;
		}
		output[thread_id*2] = v0;
		output[(thread_id*2)+1] = v1;
	}
}
12,540 | #include <cuda.h>
int nBlk,nTid;
__device__ int position[100]={0,}; // should be changed
__global__ void cuda_sort(int* arr_d,int* histogram_d, int size, int max_val);
/*
 * Host wrapper: counting-sort arr[0..size) with values in [0, max_val) on the
 * GPU via cuda_sort. NOTE: the kernel is launched as a single block of `size`
 * threads, so this only supports size up to the device's max block size.
 */
__host__ void counting_sort(int arr[], int size, int max_val)
{
    nTid = 512;
    nBlk = size/nTid;
    if(nBlk * nTid < size){
        nBlk +=1;
    }
    int * arr_d;
    int* histogram_d;
    cudaMalloc((void**)&arr_d,size*sizeof(int));
    cudaMemcpy(arr_d,arr,size*sizeof(int),cudaMemcpyHostToDevice);
    cudaMalloc((void**)&histogram_d,max_val*sizeof(int));
    // Bug fix: cudaMemset takes the DEVICE pointer itself. The original passed
    // (void**)&histogram_d — the address of the host pointer variable — so the
    // call failed and the histogram was never zeroed before the atomicAdds.
    cudaMemset(histogram_d,0,max_val*sizeof(int));
    //cuda_sort<<<nBlk,nTid>>>(arr_d,histogram_d, size, max_val);
    //cuda_sort<<<1,size,size*sizeof(int)>>>(arr_d,histogram_d, size, max_val);
    cuda_sort<<<1,size>>>(arr_d,histogram_d, size, max_val);
    // copy to host & finish (blocking memcpy synchronizes with the kernel)
    cudaMemcpy(arr,arr_d,size*sizeof(int),cudaMemcpyDeviceToHost);
    cudaFree(arr_d); cudaFree(histogram_d);
}
/*
 * Single-block kernel: builds a histogram of arr_d with atomics, then runs an
 * in-place upsweep (reduce) phase of a Blelloch-style prefix sum over the
 * fixed __device__ array `position`, finishes the scan serially, and writes
 * position[i] back into arr_d[i].
 * NOTE(review): several things here look suspect and should be confirmed:
 *  - `position` has a hard capacity of 100 ("should be changed" per its
 *    declaration) but is indexed by i < size;
 *  - position[i] is loaded from histogram_d[i] for i < size, mixing the
 *    value range [0, max_val) with the element range [0, size);
 *  - the final write stores prefix sums, not the sorted values (the scatter
 *    that would place sorted keys is commented out below).
 */
__global__ void cuda_sort(int* arr_d,int* histogram_d, int size, int max_val){
//extern __shared__ int position[]; // https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda
int i = threadIdx.x + blockDim.x * blockIdx.x;
//__device__ int histogram_d[max_val];
int off = 1;
// Phase 1: histogram of the input values.
if(i<size){
atomicAdd(&histogram_d[arr_d[i]],1);
}
__syncthreads();
// https://www.eecs.umich.edu/courses/eecs570/hw/parprefix.pdf
// Phase 2: copy counts into the scan buffer.
if(i<size){
position[i] = histogram_d[i];
}
// Phase 3: upsweep — partial sums at power-of-two strides.
for(int stride = size>>1; stride>0; stride >>=1){
__syncthreads();
if(i<stride){
position[off*(2*i+2)-1] += position[off*(2*i+1)-1];
}
off *=2;
}
__syncthreads();
// Phase 4: serial completion of the prefix sum (one thread per step).
for(int j=off-1; j<size-1; j++){
if(i == j){
position[j+1] += position[j];
}
__syncthreads();
}
/*
if(i==0) position[off-1] = 0;
for(int stride= 1; stride<=(size/2); stride *=2){
off = off>>1;
__syncthreads();
if(i<stride){
int a = off*(2*i+1)-1;
int b = off*(2*i+2)-1;
int tmp = position[a];
position[a] = position[b];
position[b] += tmp;
}
}
__syncthreads();
*/
// Phase 5: write the scan result back out.
if(i<size){
// arr_d[i] = histogram_d[i];
arr_d[i] = position[i];
/*
for(int j=0; j<max_val; j++){
for(int k=0; k<histogram_d[j]; k++){
arr_d[position[j]+k] = j;
}
}
*/
}
// device code
}
|
12,541 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <assert.h>
/*
 * One Jacobi sweep for the N x N row-major system A x = b:
 * x_next[i] = (b[i] - sum_{j != i} A[i][j] * x_now[j]) / A[i][i].
 * One thread per row; out-of-range threads return immediately.
 */
__global__ void jacobiKernel(float* A, float* b, int N, float* x_now, float* x_next)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= N)
        return;
    const float* Arow = A + N * row;   // start of row `row`
    float acc = 0.0;
    for (int j = 0; j < N; ++j) {
        if (j != row)
            acc += Arow[j] * x_now[j];
    }
    x_next[row] = (b[row] - acc) / Arow[row];
}
/*
 * Parse command-line options into the caller's variables:
 *   -f/--file        input file name (written to *fname)
 *   -i/--iterations  iteration count (written to *iter)
 *   -b/--blockSize   CUDA block size (written to *blockSize)
 * Unknown options abort the program. Options not supplied leave the
 * caller's defaults untouched.
 */
void parse_argv(int argc, char *argv[], char **fname, int *iter, int *blockSize)
{
    static struct option long_options[] =
    {
        {"file", required_argument, NULL, 'f'},
        {"iterations", optional_argument, NULL, 'i'},
        {"blockSize", optional_argument, NULL, 'b'},
        {NULL, 0, NULL, 0}
    };
    for (int opt; (opt = getopt_long(argc, argv, "f:i:b:", long_options, NULL)) != -1; )
    {
        switch (opt)
        {
        case 'f':
            *fname = optarg;
            break;
        case 'i':
            *iter = atoi(optarg);
            break;
        case 'b':
            *blockSize = atoi(optarg);
            break;
        default:
            abort();
        }
    }
}
/*
 * Reads an N x N system (each row: N coefficients of A then the entry of b)
 * from the file given with -f, runs `iter` Jacobi sweeps on the GPU by
 * ping-ponging between two solution buffers, and prints the result.
 */
int main(int argc, char *argv[])
{
    int N, i, iter = 10000, blockSize = 512;
    char *fname = NULL;
    parse_argv(argc, argv, &fname, &iter, &blockSize);
    if (fname == NULL)   // bug fix: -f is required; fopen(NULL) is undefined
    {
        fprintf(stderr, "usage: jacobi -f <file> [-i iterations] [-b blockSize]\n");
        exit(EXIT_FAILURE);
    }
    FILE *file = fopen(fname, "r");
    if (file == NULL)
        exit(EXIT_FAILURE);
    if (fscanf(file, "%d", &N) != 1 || N <= 0)   // validate the size header
    {
        fclose(file);
        exit(EXIT_FAILURE);
    }
    printf("N = %d, iter = %d, blocksize = %d\n", N, iter, blockSize);
    float *A = (float *)calloc(N * N, sizeof(float));
    float *b = (float *)calloc(N, sizeof(float));
    float *x = (float *)calloc(N, sizeof(float));   // starts at 0 = initial guess
    assert(A != NULL);
    assert(b != NULL);
    assert(x != NULL);
    // Each input row: N matrix coefficients, then the right-hand-side entry.
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; ++j) {
            fscanf(file, "%f", &A[N * i + j]);
        }
        fscanf(file, "%f", &b[i]);
    }
    fclose(file);   // bug fix: the file handle was never closed
    float *x0_d, *x1_d, *A_d, *b_d;
    assert(cudaSuccess == cudaMalloc((void **) &A_d, N * N * sizeof(float)));
    assert(cudaSuccess == cudaMalloc((void **) &b_d, N * sizeof(float)));
    assert(cudaSuccess == cudaMalloc((void **) &x0_d, N * sizeof(float)));
    assert(cudaSuccess == cudaMalloc((void **) &x1_d, N * sizeof(float)));
    cudaMemcpy(A_d, A, sizeof(float) * N * N, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(x0_d, x, sizeof(float) * N, cudaMemcpyHostToDevice);
    int nBlocks = (N + blockSize - 1) / blockSize;   // ceil-div grid size
    printf("Running Jacobi method...\n");
    for (i = 0; i < iter; ++i)
    {
        // Ping-pong: even iterations read x0_d / write x1_d, odd the reverse.
        float *xnext = (i % 2 ? x0_d : x1_d);
        float *xnow = (i % 2 ? x1_d : x0_d);
        jacobiKernel <<< nBlocks, blockSize >>> (A_d, b_d, N, xnow, xnext);
    }
    // Blocking copy also synchronizes with the last kernel launch.
    cudaMemcpy(x, (iter % 2 ? x1_d : x0_d), sizeof(float) * N, cudaMemcpyDeviceToHost);
    cudaFree(A_d); cudaFree(b_d); cudaFree(x0_d); cudaFree(x1_d);
    free(A); free(b);
    printf("\nResult after %d iterations:\n", iter);
    for (i = 0; i < N; i++)
        printf("x[%d] = %f\n", i, x[i]);
    free(x);   // bug fix: x was leaked
    return 0;
}
|
12,542 | #include <dirent.h>
#include <iostream>
#include <stdlib.h>
#include <string>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
using namespace std;
void sendGPU(char *A, unsigned int indexOfLine[], unsigned int &iLine); //send to GPU routine
char *d_word; //GPU MEMORY word pattern pointer.
char word[100];
// EDIT THIS ACCORDING TO YOUR GPU CAPABILITIES
const int MAX_THREADS = 1024;
const int MAX_BLOCKS = 1024;
// GREP GPU KERNEL
__global__ void GrepKernel(char *A, bool *R, char *wordD, unsigned int *indexOfLine)
{
int row = threadIdx.x + blockIdx.x * gridDim.x;
if(indexOfLine[row+2]!=0) //To be sure that there is something to check.
{
int indexWord = 0;
bool matching = false;
bool matched = false;
bool starting = false; //^ REGEX
if(wordD[0]=='^')
{
indexWord=1;
matching = true;
starting = true;
}
for(int j=0;j<(indexOfLine[row+1]-indexOfLine[row]);j++) //Check each characters
{
if(A[indexOfLine[row]+j]=='\0')//End of the line reached
break;
if(((A[indexOfLine[row]+j]==wordD[indexWord])&&(matching==true || indexWord==0))||(wordD[indexWord]=='.'))//Letter match
{
matching=true;
indexWord++;
if(wordD[indexWord]=='\0')
{
matched = true;
break;
}
}
else if(matching==true)//Was matching and letter does not seem to match
{
if(wordD[indexWord]=='$') //$ REGEX
{
if(j+1==(indexOfLine[row+1]-indexOfLine[row]))
{
matched=true;
break;
}
else //It does not match anymore RESTART matching
{
indexWord=0;
matching=false;
}
}
else if(wordD[indexWord]=='*' && A[indexOfLine[row]+j]!=' ')//* REGEX
{
if(wordD[indexWord+1]==A[indexOfLine[row]+j+1])
{
indexWord++;
}
}
else if(starting) //^REGEX
break;
else //It does not match anymore RESTART matching
{
indexWord=0;
matching=false;
}
}
}
if(matched)
R[row]=true;
else
R[row]=false;
}
}
/*
 * GPU grep driver. Usage: prog <file> <pattern>.
 * Reads the file line by line into a packed buffer A, recording each line's
 * start offset in indexOfLine, and ships batches of up to
 * MAX_THREADS*MAX_BLOCKS lines to the GPU via sendGPU, which prints the
 * matching lines.
 */
int main(int argc, const char * argv[])
{
    if (argc < 3)   // bug fix: argv[1]/argv[2] were used unchecked
    {
        fprintf(stderr, "usage: %s <file> <pattern>\n", argv[0]);
        return 1;
    }
    // Bug fix: bounded copy — strcpy overflowed word[100] for long patterns.
    strncpy(word, argv[2], sizeof(word) - 1);
    word[sizeof(word) - 1] = '\0';
    //LOAD word INTO DEVICE MEMORY
    cudaMalloc((void**)&d_word, 100);
    cudaMemcpy(d_word, word, 100, cudaMemcpyHostToDevice);
    //ALLOC ARRAY (2 GB text buffer)
    char *A=(char*)malloc(2000000000);
    if (A == NULL)   // bug fix: a failed 2 GB malloc was dereferenced blindly
    {
        fprintf(stderr, "out of memory\n");
        return 1;
    }
    A[0]='\0';
    FILE * fp;
    char * line = NULL;
    size_t len = 0;
    ssize_t read;
    //OPEN FILE
    fp = fopen(argv[1], "r");
    if (fp == NULL)
        exit(EXIT_FAILURE);
    unsigned int indexOfLine[(MAX_THREADS*MAX_BLOCKS)+2];
    indexOfLine[0]=0;
    unsigned int iLine = 0;
    while ((read = getline(&line, &len, fp)) != -1)//Line per line read
    {
        int index = indexOfLine[iLine];
        int i=0;
        for(; line[i] != '\0'; i++)//Fill the packed buffer
        {
            A[index+i]=line[i];
        }
        iLine++;
        indexOfLine[iLine]=i+index; //Store the index of the started line.
        if(iLine>=MAX_THREADS*MAX_BLOCKS)//MAX amount of lines reached, flush to GPU
        {
            indexOfLine[iLine+1]=2;
            sendGPU(A,indexOfLine,iLine);
        }
    }
    fclose(fp);   // bug fix: the input file was never closed
    if (line)
        free(line);
    //File fully read, last (partial) batch to the GPU; zero the unused slots
    for(int i=iLine+1;i<(MAX_BLOCKS*MAX_THREADS)+2;i++)
        indexOfLine[i]=0;
    sendGPU(A, indexOfLine,iLine);
    //FREE the memory
    cudaFree(d_word);
    free(A);
    return 0;
}
/*
 * Ships one batch of lines to the GPU, runs GrepKernel over it, and prints
 * every line whose result flag is set. indexOfLine[iLine] gives the total
 * byte size of the batch in A. Resets iLine to 0 so the caller can start
 * filling the next batch.
 */
void sendGPU(char *A, unsigned int indexOfLine[], unsigned int &iLine)
{
    //Allocations
    unsigned int size = indexOfLine[iLine];   // bytes of text in this batch
    bool *R=(bool*)malloc(iLine*sizeof(bool));
    //SEND TO GPU ROUTINE
    //LOAD A INTO DEVICE MEMORY
    char *d_A;
    cudaError_t err = cudaMalloc((void**)&d_A, size);
    //printf("CUDA malloc A: %s\n",cudaGetErrorString(err));
    err = cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    //printf("Copy A to device: %s\n",cudaGetErrorString(err));
    //LOAD indexOfLine INTO DEVICE MEMORY
    unsigned int *d_indexOfLine;
    err = cudaMalloc((void**)&d_indexOfLine, ((MAX_THREADS*MAX_BLOCKS)+2)*sizeof(unsigned int));
    err = cudaMemcpy(d_indexOfLine, indexOfLine, ((MAX_THREADS*MAX_BLOCKS)+2)*sizeof(unsigned int), cudaMemcpyHostToDevice);
    //CREATE R FOR RESULTS
    bool *d_R;
    err = cudaMalloc((void**)&d_R, iLine*sizeof(bool));
    //printf("CUDA malloc C: %s\n",cudaGetErrorString(err));
    // Invoke kernel: one thread per line of the batch.
    dim3 dimBlock(MAX_THREADS,1);
    dim3 dimGrid(MAX_BLOCKS,1);
    GrepKernel<<<dimGrid, dimBlock>>>(d_A, d_R, d_word, d_indexOfLine);
    // Wait for the GPU work to finish.
    // (cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // drop-in replacement.)
    err = cudaDeviceSynchronize();
    //printf("Run kernel: %s\n", cudaGetErrorString(err));
    // Read R from device memory
    err = cudaMemcpy(R, d_R, iLine*sizeof(bool), cudaMemcpyDeviceToHost);
    //printf("Copy R off of device: %s\n",cudaGetErrorString(err));
    // Free device memory
    cudaFree(d_A);
    cudaFree(d_R);
    cudaFree(d_indexOfLine);
    // Display matched lines
    for(unsigned int i=0;i<iLine;i++)
    {
        if(R[i])
        {
            for(unsigned int j=0;j<(indexOfLine[i+1]-indexOfLine[i]);j++)
            {
                char letter = A[indexOfLine[i]+j];
                if(letter=='\0')
                    break;
                else
                    printf("%c",letter);
            }
        }
    }
    //Reset memory and counter.
    free(R);
    iLine=0;
}
|
12,543 | /*
* Please write your name and net ID below
*
* Last name: Li
* First name: Yuqiong
* Net ID: yl5090
*
*/
/*
* This file contains the code for doing the heat distribution problem.
* You do not need to modify anything except starting gpu_heat_dist() at the bottom
* of this file.
* In gpu_heat_dist() you can organize your data structure and the call to your
* kernel(s) that you need to write too.
*
* You compile with:
* nvcc -o heatdist -arch=sm_60 heatdist.cu
*/
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
/* To index element (i,j) of a 2D array stored as 1D */
#define index(i, j, N) ((i)*(N)) + (j)
/*****************************************************************/
// Function declarations: Feel free to add any functions you want.
void seq_heat_dist(float *, unsigned int, unsigned int);
void gpu_heat_dist(float *, unsigned int, unsigned int);
__global__ void heatKernel(float *, float *, unsigned int);
/*****************************************************************/
/**** Do NOT CHANGE ANYTHING in main() function ******/
int main(int argc, char * argv[])
{
unsigned int N; /* Dimension of the NxN matrix */
int type_of_device = 0; // CPU or GPU
int iterations = 0;
int i;
/* The 2D array of points will be treated as 1D array of NxN elements */
float * playground;
// to measure time taken by a specific part of the code
double time_taken;
clock_t start, end;
if(argc != 4)
{
fprintf(stderr, "usage: heatdist num iterations who\n");
fprintf(stderr, "num = dimension of the square matrix (50 and up)\n");
fprintf(stderr, "iterations = number of iterations till stopping (1 and up)\n");
fprintf(stderr, "who = 0: sequential code on CPU, 1: GPU execution\n");
exit(1);
}
type_of_device = atoi(argv[3]);
N = (unsigned int) atoi(argv[1]);
iterations = (unsigned int) atoi(argv[2]);
/* Dynamically allocate NxN array of floats */
playground = (float *)calloc(N*N, sizeof(float));
if( !playground )
{
fprintf(stderr, " Cannot allocate the %u x %u array\n", N, N);
exit(1);
}
/* Initialize it: calloc already initalized everything to 0 */
// Edge elements to 70F
for(i = 0; i < N; i++)
playground[index(0,i,N)] = 70;
for(i = 0; i < N; i++)
playground[index(i,0,N)] = 70;
for(i = 0; i < N; i++)
playground[index(i,N-1, N)] = 70;
for(i = 0; i < N; i++)
playground[index(N-1,i,N)] = 70;
// from (0,10) to (0,30) inclusive are 100F
for(i = 10; i <= 30; i++)
playground[index(0,i,N)] = 100;
// from (n-1,10) to (n-1,30) inclusive are 150F
for(i = 10; i <= 30; i++)
playground[index(N-1,i,N)] = 150;
// Dispatch to the CPU or GPU implementation and time just that call.
if( !type_of_device ) // The CPU sequential version
{
start = clock();
seq_heat_dist(playground, N, iterations);
end = clock();
}
else // The GPU version
{
start = clock();
gpu_heat_dist(playground, N, iterations);
end = clock();
}
time_taken = ((double)(end - start))/ CLOCKS_PER_SEC;
printf("Time taken for %s is %lf\n", type_of_device == 0? "CPU" : "GPU", time_taken);
free(playground);
return 0;
}
/***************** The CPU sequential version (DO NOT CHANGE THAT) **************/
/* Jacobi iteration on the CPU: each sweep replaces every interior point with
   the average of its four neighbors; boundary rows/columns are never written
   and thus keep their fixed temperatures. Marked DO NOT CHANGE above. */
void seq_heat_dist(float * playground, unsigned int N, unsigned int iterations)
{
  // Loop indices
  int i, j, k;
  int upper = N-1;  /* exclusive bound: only interior points 1..N-2 update */
  // number of bytes to be copied between array temp and array playground
  unsigned int num_bytes = 0;
  float * temp;
  /* Dynamically allocate another array for temp values */
  /* Dynamically allocate NxN array of floats */
  temp = (float *)calloc(N*N, sizeof(float));
  if( !temp )
  {
    fprintf(stderr, " Cannot allocate temp %u x %u array\n", N, N);
    exit(1);
  }
  num_bytes = N*N*sizeof(float);
  /* Copy initial array in temp so boundary values are present there too */
  /* NOTE(review): temp is never freed — leaked on every call. Left as-is
     because this reference implementation is marked DO NOT CHANGE. */
  memcpy((void *)temp, (void *) playground, num_bytes);
  for( k = 0; k < iterations; k++)
  {
    /* Calculate new values and store them in temp */
    for(i = 1; i < upper; i++)
      for(j = 1; j < upper; j++)
        temp[index(i,j,N)] = (playground[index(i-1,j,N)] +
                              playground[index(i+1,j,N)] +
                              playground[index(i,j-1,N)] +
                              playground[index(i,j+1,N)])/4.0;
    /* Move new values into old values */
    memcpy((void *)playground, (void *) temp, num_bytes);
  }
}
/***************** The GPU version: Write your code here *********************/
/* This function can call one or more kernels if you want ********************/
/* GPU version of the heat distribution: ping-pong between two device buffers,
 * one kernel launch per Jacobi iteration.
 *   playground : NxN host array, updated in place with the final state
 *   N          : matrix dimension
 *   iterations : number of Jacobi sweeps
 */
void gpu_heat_dist(float * playground, unsigned int N, unsigned int iterations)
{
  int k;
  // number of bytes to be copied between playground and the device buffers
  unsigned int num_bytes = N * N * sizeof(float);
  float * d_temp1, * d_temp2; // two device buffers, swapped every iteration
  float * swap_ptr;
  // 1. allocate device memory and upload the initial state to BOTH buffers:
  //    the kernel writes only interior points, so the destination buffer's
  //    boundary cells must already hold the fixed boundary temperatures
  //    (the original left d_temp2 uninitialized).
  cudaMalloc((void **) &d_temp1, num_bytes);
  cudaMalloc((void **) &d_temp2, num_bytes);
  cudaMemcpy(d_temp1, playground, num_bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_temp2, playground, num_bytes, cudaMemcpyHostToDevice);
  // 2. launch one kernel per iteration; 16x16 threads, grid rounded up
  dim3 blocksPerGrid(ceil(N/16.0), ceil(N/16.0), 1);
  dim3 threadsPerBlock(16, 16, 1);
  for (k = 0; k < iterations; k++){
    heatKernel<<<blocksPerGrid, threadsPerBlock>>> (d_temp1, d_temp2, N);
    // swap so the freshly computed state becomes the next input
    swap_ptr = d_temp1;
    d_temp1 = d_temp2;
    d_temp2 = swap_ptr;
  }
  // 3. after the final swap the newest state lives in d_temp1 (the original
  //    copied d_temp2, i.e. the state one iteration behind)
  cudaMemcpy(playground, d_temp1, num_bytes, cudaMemcpyDeviceToHost);
  cudaFree(d_temp1); // free memory
  cudaFree(d_temp2); // free memory
}
/* One Jacobi sweep: average the four neighbors of each INTERIOR point of
 * d_temp1 into d_temp2, matching seq_heat_dist (rows/cols 1..N-2 only).
 * The upper bound is N-1 exclusive: the original allowed row == N-1 /
 * col == N-1, which read index(N, col) — one row past the array — and
 * overwrote the fixed boundary temperatures. */
__global__ void heatKernel(float * d_temp1, float * d_temp2, unsigned int N){
  unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
  if ((row >= 1) && (row < N - 1) && (col >= 1) && (col < N - 1)) {
    // 4.0f keeps the arithmetic in single precision (the original's 4.0
    // promoted every element to double)
    d_temp2[index(row, col, N)] = (d_temp1[index(row-1, col, N)] +
                                   d_temp1[index(row+1, col, N)] +
                                   d_temp1[index(row, col-1, N)] +
                                   d_temp1[index(row, col+1, N)]) / 4.0f;
  }
}
|
12,544 |
/****************************************
* CUDA kernel for transposing matrices *
* Version with Shared Memory *
****************************************/
#include <stdio.h>
#define CUDA_SAFE_CALL( call ) { \
cudaError_t err = call; \
if( cudaSuccess != err ) { \
fprintf(stderr,"CUDA: error occurred in cuda routine. Exiting...\n"); \
exit(err); \
} }
#define A(i,j) A[ (j) + ((i)*(n)) ]
#define B(i,j) B[ (j) + ((i)*(m)) ]
#define B_cpu(i,j) B_cpu[ (j) + ((i)*(m)) ]
#define B_gpu(i,j) B_gpu[ (j) + ((i)*(m)) ]
#define d_A(i,j) d_A[ (j) + ((i)*(n)) ]
#define d_B(i,j) d_B[ (j) + ((i)*(m)) ]
#define sh_A(i,j) sh_A[ (i) ][ (j) ]
#define BLOCKSIZE 16
/* Inefficient kernel: bad access to memory */
/* Tiled matrix transpose: B (n x m) = transpose of A (m x n).
 * Launched with dimGrid(ceil(n/BLOCKSIZE), ceil(m/BLOCKSIZE)) and
 * dimBlock(BLOCKSIZE, BLOCKSIZE) — see cu_transpose.
 * The original mixed up which block index spans rows vs. columns, which
 * left tiles unread and wrote out of bounds whenever m != n; it also used
 * an unpadded shared tile (bank conflicts on the transposed access). */
__global__ void compute_kernel( unsigned int m, unsigned int n, float *d_A, float *d_B ) {
  /* +1 padding on the inner dimension avoids shared-memory bank conflicts
     when the tile is read back transposed */
  __shared__ float tile[BLOCKSIZE][BLOCKSIZE + 1];
  /* Element of A this thread loads: blockIdx.y spans A's m rows,
     blockIdx.x spans A's n columns, so consecutive threadIdx.x values
     read consecutive addresses (coalesced). */
  unsigned int c_A = blockIdx.x * BLOCKSIZE + threadIdx.x;
  unsigned int r_A = blockIdx.y * BLOCKSIZE + threadIdx.y;
  if( r_A < m && c_A < n ) {
    tile[threadIdx.y][threadIdx.x] = d_A(r_A, c_A);
  }
  __syncthreads();
  /* Element of B (n x m) this thread stores: swapping the block indices
     (not the thread indices) keeps the store coalesced as well. */
  unsigned int c_B = blockIdx.y * BLOCKSIZE + threadIdx.x;
  unsigned int r_B = blockIdx.x * BLOCKSIZE + threadIdx.y;
  if( r_B < n && c_B < m ) {
    d_B(r_B, c_B) = tile[threadIdx.x][threadIdx.y];
  }
}
/* Transpose the m x n host matrix h_A into the n x m host matrix h_B on the
 * GPU. Returns EXIT_SUCCESS; any CUDA failure aborts via CUDA_SAFE_CALL. */
int cu_transpose( unsigned int m, unsigned int n, float *h_A, float *h_B ) {
  // Allocate device memory
  unsigned int mem_size = m * n * sizeof(float);
  float *d_A, *d_B;
  CUDA_SAFE_CALL( cudaMalloc((void **) &d_A, mem_size ) );
  CUDA_SAFE_CALL( cudaMalloc((void **) &d_B, mem_size ) );
  // Copy host memory to device
  CUDA_SAFE_CALL( cudaMemcpy( d_A, h_A, mem_size, cudaMemcpyHostToDevice ) );
  /* One BLOCKSIZE x BLOCKSIZE tile per block; round the grid up so partial
     tiles at the edges are covered. */
  int blocks_col = (int) ceil( (float) n / (float) BLOCKSIZE );
  int blocks_row = (int) ceil( (float) m / (float) BLOCKSIZE );
  // Execute the kernel
  dim3 dimGrid( blocks_col, blocks_row );
  dim3 dimBlock( BLOCKSIZE, BLOCKSIZE );
  compute_kernel<<< dimGrid, dimBlock >>>( m, n, d_A, d_B );
  /* Kernel launches do not return an error code directly: surface bad launch
     configurations here rather than silently copying back garbage. */
  CUDA_SAFE_CALL( cudaGetLastError() );
  // Copy device memory to host
  CUDA_SAFE_CALL( cudaMemcpy( h_B, d_B, mem_size, cudaMemcpyDeviceToHost ) );
  // Deallocate device memory
  CUDA_SAFE_CALL( cudaFree(d_A) );
  CUDA_SAFE_CALL( cudaFree(d_B) );
  return EXIT_SUCCESS;
}
/* CPU reference: transpose the m x n row-major matrix A into the n x m
 * row-major matrix B. Always returns EXIT_SUCCESS. */
int transpose( unsigned int m, unsigned int n, float *A, float *B ) {
    unsigned int row, col;
    for( row = 0; row < m; row++ )
        for( col = 0; col < n; col++ )
            B[row + col * m] = A[col + row * n];
    return EXIT_SUCCESS;
}
/* Print the m x n row-major matrix A, one row per line, %8.1f per entry. */
void printMatrix( unsigned int m, unsigned int n, float *A ) {
    int r, c;
    for( r = 0; r < m; r++ ) {
        for( c = 0; c < n; c++ )
            printf("%8.1f", A[c + r * n]);
        printf("\n");
    }
}
/* Driver: generate a random m x n matrix, transpose it on CPU and GPU, and
 * report the summed absolute difference between the two results. */
int main( int argc, char *argv[] ) {
  unsigned int m, n;
  unsigned int i, j;
  /* Need both dimension arguments */
  if( argc<3 ) {
    printf("Usage: %s n_rows n_cols \n",argv[0]);
    exit(-1);
  }
  /* %u: m and n are unsigned — scanning them with %d (as the original did)
     is undefined behavior */
  sscanf(argv[1],"%u",&m);
  sscanf(argv[2],"%u",&n);
  /* Random input in [-1, 1) */
  float *A = (float *) malloc( m*n*sizeof(float) );
  printf("%s: Generating a random matrix of size %ux%u...\n",argv[0],m,n);
  for( i=0; i<m; i++ ) {
    for( j=0; j<n; j++ ) {
      A( i, j ) = 2.0f * ( (float) rand() / RAND_MAX ) - 1.0f;
    }
  }
  float *B_cpu = (float *) malloc( m*n*sizeof(float) );
  float *B_gpu = (float *) malloc( m*n*sizeof(float) );
  printf("%s: Transposing matrix A into B in CPU...\n",argv[0]);
  transpose( m, n, A, B_cpu );
  printf("%s: Transposing matrix A into B in GPU...\n",argv[0]);
  cu_transpose( m, n, A, B_gpu );
  /* Check for correctness: B is n x m, so i spans n and j spans m */
  float error = 0.0f;
  for( i=0; i<n; i++ ) {
    for( j=0; j<m; j++ ) {
      error += fabs( B_gpu( i, j ) - B_cpu( i, j ) );
    }
  }
  printf("Error CPU/GPU = %.3e\n",error);
  free(A);
  free(B_cpu);
  free(B_gpu);
  return 0;
}
|
12,545 | #include <stdio.h>
// One explicit time step of a 2-D second-order update:
//   H2 = 2*H1 - 2*H0 + (c*dt/dd)^2 * (5-point Laplacian of H1)
// NOTE(review): the standard leapfrog wave update is u_{t+1} = 2*u_t - u_{t-1} + ...;
// the "- 2*H0" coefficient here looks suspect — confirm the intended scheme.
__global__ void schroedinger(float * H2, float * H1, float * H0, float c, float dt, float dd){
    int blocksize = blockDim.y*blockDim.x;
    int blockId = gridDim.x*blockIdx.y + blockIdx.x;
    // Flat site index of this thread.
    int tid = blockId*blocksize + blockDim.x*threadIdx.y + threadIdx.x;
    // Neighbor indices. NOTE(review): there is no bounds guard — tidUp/tidLeft
    // go negative for the first threads and tidDown/tidRight run past the
    // array at the far edge, so boundary threads read out of range.
    // Also note "left/right" use blockDim.y as the stride while "up/down" use
    // blockDim.x — presumably both are meant to step within/between rows of
    // the field; confirm against the intended memory layout.
    int tidDown = tid + blockDim.x;
    int tidUp = tid - blockDim.x;
    int tidRight = tid + blockDim.y;
    int tidLeft = tid - blockDim.y;
    H2[tid] = 2*H1[tid] - 2*H0[tid] + c*c*(dt/dd)*(dt/dd)*(H1[tidDown] + H1[tidUp] + H1[tidLeft] + H1[tidRight] - 4*H1[tid]);
}
/* Driver: allocate a 256x256 field, run one kernel step, print one value.
 * Fixes over the original: per-element size was sizeof(float*) instead of
 * sizeof(float); host buffers were uninitialized (UB when copied/used);
 * the grid launched x*y blocks (one PER ELEMENT of 32x32 threads), indexing
 * far past the arrays; and the device pointer h2 was dereferenced on the
 * host in the final printf. */
__host__ int main(){
    dim3 blocksize;
    dim3 gridsize;
    float c = 1.0;
    float dt = 0.1;
    float dd = 2.0;
    int x = 256;
    int y = 256;
    // Host buffers, zero-initialized so the device copies are well defined.
    float * H0 = (float*)calloc(y*x, sizeof(float));
    float * H1 = (float*)calloc(y*x, sizeof(float));
    float * H2 = (float*)calloc(y*x, sizeof(float));
    float * h0,* h1,* h2;
    cudaMalloc(&h0, x*y*sizeof(float));
    cudaMalloc(&h1, x*y*sizeof(float));
    cudaMalloc(&h2, x*y*sizeof(float));
    cudaMemcpy(h0,H0, x*y*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(h1,H1, x*y*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(h2,H2, x*y*sizeof(float), cudaMemcpyHostToDevice);
    // One thread per grid point: (x/32) x (y/32) blocks of 32x32 threads.
    blocksize.x = 32;
    blocksize.y = 32;
    gridsize.x = x / 32;
    gridsize.y = y / 32;
    // NOTE(review): the kernel itself still reads out of bounds at the field
    // edges (no neighbor guard) — fixing that requires a kernel change.
    schroedinger<<<gridsize,blocksize>>>(h2,h1,h0,c,dt,dd);
    cudaMemcpy(H2, h2, x*y*sizeof(float), cudaMemcpyDeviceToHost);
    // Print from the HOST copy; the original dereferenced device memory.
    printf("%f ", H2[2]);
    cudaFree(h0);
    cudaFree(h1);
    cudaFree(h2);
    free(H0);
    free(H1);
    free(H2);
    return 0;
}
12,546 | #include <algorithm>
#include <cfloat>
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include <sstream>
#include <vector>
// RAII owner of two device arrays (x/y coordinates) of `size` floats.
struct Data {
  // Allocate uninitialized device storage.
  Data(int size) : size(size), bytes(size * sizeof(float)) {
    cudaMalloc(&x, bytes);
    cudaMalloc(&y, bytes);
  }
  // Allocate and upload the first `size` elements of the host vectors.
  Data(int size, std::vector<float>& h_x, std::vector<float>& h_y)
      : size(size), bytes(size * sizeof(float)) {
    cudaMalloc(&x, bytes);
    cudaMalloc(&y, bytes);
    cudaMemcpy(x, h_x.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(y, h_y.data(), bytes, cudaMemcpyHostToDevice);
  }
  ~Data() {
    cudaFree(x);
    cudaFree(y);
  }
  // Non-copyable: a shallow copy would free the same device pointers twice.
  Data(const Data&) = delete;
  Data& operator=(const Data&) = delete;
  // Zero both device arrays.
  void clear() {
    cudaMemset(x, 0, bytes);
    cudaMemset(y, 0, bytes);
  }
  float* x{nullptr};
  float* y{nullptr};
  int size{0};
  int bytes{0};
};
// Squared Euclidean distance between (x_1, y_1) and (x_2, y_2).
__device__ float
squared_l2_distance(float x_1, float y_1, float x_2, float y_2) {
  const float dx = x_1 - x_2;
  const float dy = y_1 - y_2;
  return dx * dx + dy * dy;
}
// One thread per data point: find the nearest mean and atomically fold this
// point into that cluster's running sums and count.
__global__ void assign_clusters(const float* __restrict__ data_x,
                                const float* __restrict__ data_y,
                                int data_size,
                                const float* __restrict__ means_x,
                                const float* __restrict__ means_y,
                                float* __restrict__ new_sums_x,
                                float* __restrict__ new_sums_y,
                                int k,
                                int* __restrict__ counts) {
  const int point = blockIdx.x * blockDim.x + threadIdx.x;
  if (point >= data_size) return;
  // Load the point's coordinates from global memory once.
  const float px = data_x[point];
  const float py = data_y[point];
  // Linear scan over the k means for the closest one.
  float nearest_distance = FLT_MAX;
  int nearest = 0;
  for (int c = 0; c < k; ++c) {
    const float d = squared_l2_distance(px, py, means_x[c], means_y[c]);
    if (d < nearest_distance) {
      nearest_distance = d;
      nearest = c;
    }
  }
  atomicAdd(&new_sums_x[nearest], px);
  atomicAdd(&new_sums_y[nearest], py);
  atomicAdd(&counts[nearest], 1);
}
// Launched with one thread per cluster: divide each cluster's coordinate sums
// by its count. The count is clamped to 1 so an empty cluster keeps a finite
// mean instead of dividing by zero.
__global__ void compute_new_means(float* __restrict__ means_x,
                                  float* __restrict__ means_y,
                                  const float* __restrict__ new_sum_x,
                                  const float* __restrict__ new_sum_y,
                                  const int* __restrict__ counts) {
  const int c = threadIdx.x;
  const int denom = max(1, counts[c]);
  means_x[c] = new_sum_x[c] / denom;
  means_y[c] = new_sum_y[c] / denom;
}
/* k-means driver.
 * usage: assign_clusters <data-file> <k> [iterations]
 * The original read argv[2]/argv[3]/argv[4] — shifted one slot right, and
 * indexing one PAST the argument array when exactly three arguments were
 * given — and required argc >= 4 even though [iterations] is optional. */
int main(int argc, const char* argv[]) {
  if (argc < 3) {
    std::cerr << "usage: assign_clusters <data-file> <k> [iterations]"
              << std::endl;
    std::exit(EXIT_FAILURE);
  }
  // argv[1] = data file, argv[2] = k, argv[3] = optional iteration count.
  const auto k = std::atoi(argv[2]);
  const auto number_of_iterations = (argc > 3) ? std::atoi(argv[3]) : 300;
  // Load "x y label" triples; the label is parsed but unused here.
  std::vector<float> h_x;
  std::vector<float> h_y;
  std::ifstream stream(argv[1]);
  std::string line;
  while (std::getline(stream, line)) {
    std::istringstream line_stream(line);
    float x, y;
    uint16_t label;
    line_stream >> x >> y >> label;
    h_x.push_back(x);
    h_y.push_back(y);
  }
  const size_t number_of_elements = h_x.size();
  Data d_data(number_of_elements, h_x, h_y);
  // Initialize the means with k random data points (shuffle, take prefix).
  std::mt19937 rng(std::random_device{}());
  std::shuffle(h_x.begin(), h_x.end(), rng);
  std::shuffle(h_y.begin(), h_y.end(), rng);
  Data d_means(k, h_x, h_y);
  Data d_sums(k);
  int* d_counts;
  cudaMalloc(&d_counts, k * sizeof(int));
  cudaMemset(d_counts, 0, k * sizeof(int));
  const int threads = 1024;
  const int blocks = (number_of_elements + threads - 1) / threads;
  const auto start = std::chrono::high_resolution_clock::now();
  for (int iteration = 0; iteration < number_of_iterations; ++iteration) {
    // Reset accumulators, assign points to their nearest mean, then
    // recompute the means from the per-cluster sums/counts.
    cudaMemset(d_counts, 0, k * sizeof(int));
    d_sums.clear();
    assign_clusters<<<blocks, threads>>>(d_data.x,
                                         d_data.y,
                                         d_data.size,
                                         d_means.x,
                                         d_means.y,
                                         d_sums.x,
                                         d_sums.y,
                                         k,
                                         d_counts);
    cudaDeviceSynchronize();
    compute_new_means<<<1, k>>>(d_means.x,
                                d_means.y,
                                d_sums.x,
                                d_sums.y,
                                d_counts);
    cudaDeviceSynchronize();
  }
  const auto end = std::chrono::high_resolution_clock::now();
  const auto duration =
      std::chrono::duration_cast<std::chrono::duration<float>>(end - start);
  std::cerr << "Took: " << duration.count() << "s" << " " << std::endl;
  cudaFree(d_counts);
  // Download and print the final means.
  std::vector<float> mean_x(k, 0);
  std::vector<float> mean_y(k, 0);
  cudaMemcpy(mean_x.data(), d_means.x, d_means.bytes, cudaMemcpyDeviceToHost);
  cudaMemcpy(mean_y.data(), d_means.y, d_means.bytes, cudaMemcpyDeviceToHost);
  for (int cluster = 0; cluster < k; ++cluster) {
    std::cout << mean_x[cluster] << " " << mean_y[cluster] << std::endl;
  }
}
|
12,547 | #include<iostream>
#include<fstream>
#include<stdio.h>
#include<stdlib.h>
#include<string>
//#include"bfs_kernel.cu"
using namespace std;
#include"bfs_kernel.cu"
const int MAX_THREADS_PER_BLOCK = 256;
char* infile = NULL;
// Print an error note plus the usage string, then exit.
void
usage(char* prog_name, const char* more) {
    cerr << more << "usage: " << prog_name << " input_file " << endl;
    exit(0);
}
// Scan the command line for "-i <file>" and record the input path in the
// `infile` global; any other '-' option is silently ignored.
void
parse_args(int argc, char** argv) {
    for (int i = 0; i < argc; i++) {
        if (argv[i][0] != '-') {
            continue;
        }
        if (argv[i][1] == 'i') {
            if (i == argc - 1) {
                usage(argv[0], "Infile missing");
            }
            infile = argv[++i];
        }
    }
}
/* Level-synchronous BFS over a graph stored in a compact binary file:
 *   [nb_nodes : int32][cumulative degrees : int32 * nb_nodes]
 *   [links : int32 * nb_links]
 * Runs bfs_kernel once per BFS level until no node joins the frontier, then
 * writes per-node hop counts (-1 = unreached) to "result.txt".
 * Fixes over the original: `links` was new[]-allocated but released with
 * free() (undefined behavior), and degrees/starting/d_over were leaked. */
void bfsGraph(char* filename, int start_position) {
    int nb_nodes;
    ifstream finput;
    finput.open(filename, ios::in | ios::binary);
    // Header: number of nodes on 4 bytes.
    finput.read((char*)&nb_nodes, 4);
    if (start_position < 0 || start_position > nb_nodes) {
        return;
    }
    // Cumulative degrees -> per-node starting offset into the edge list.
    int* degrees = new int[nb_nodes];
    finput.read((char*)degrees, nb_nodes * 4);
    int* starting = new int[nb_nodes];
    memset(starting, 0, sizeof(int) * nb_nodes);
    for (int i = 1; i < nb_nodes; i++) {
        starting[i] = degrees[i - 1];
    }
    // Edge list; total size is the last cumulative degree.
    int nb_links = degrees[nb_nodes - 1];
    int* links = new int[nb_links];
    finput.read((char*)links, nb_links * 4);
    finput.close();
    // Launch configuration: one thread per node, capped per block.
    int num_of_blocks = 1;
    int num_of_threads_per_block = nb_nodes;
    if (nb_nodes > MAX_THREADS_PER_BLOCK) {
        num_of_blocks =
            (int)ceil((double)nb_nodes/(double)MAX_THREADS_PER_BLOCK);
        num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
    }
    // Host-side graph state: node table, frontier flags, visited flags.
    Node* h_graph_nodes = (Node*) malloc(sizeof(Node) * nb_nodes);
    bool* h_graph_level = (bool*) malloc(sizeof(bool) * nb_nodes);
    bool* h_graph_visited = (bool*) malloc(sizeof(bool) * nb_nodes);
    h_graph_nodes[0].starting = 0;
    h_graph_nodes[0].no_of_edges = degrees[0];
    h_graph_level[0] = false;
    h_graph_visited[0] = false;
    for (int i = 1; i < nb_nodes; i++) {
        h_graph_nodes[i].starting = starting[i];
        h_graph_nodes[i].no_of_edges = degrees[i] - degrees[i-1];
        h_graph_level[i] = false;
        h_graph_visited[i] = false;
    }
    // Seed the search at the requested node.
    h_graph_level[start_position] = true;
    h_graph_visited[start_position] = true;
    // Device copies: node table, edge list, visited flags, frontier flags.
    Node* d_graph_nodes;
    cudaMalloc((void**)&d_graph_nodes, sizeof(Node) * nb_nodes);
    cudaMemcpy(d_graph_nodes, h_graph_nodes, sizeof(Node) *
               nb_nodes, cudaMemcpyHostToDevice);
    int* d_edge_list;
    cudaMalloc((void**)&d_edge_list, sizeof(int) * nb_links);
    cudaMemcpy(d_edge_list, links, sizeof(int) * nb_links,
               cudaMemcpyHostToDevice);
    bool* d_graph_visited;
    cudaMalloc((void**)&d_graph_visited, sizeof(bool) * nb_nodes);
    cudaMemcpy(d_graph_visited, h_graph_visited, sizeof(bool) *
               nb_nodes, cudaMemcpyHostToDevice);
    bool* d_graph_level;
    cudaMalloc((void**)&d_graph_level, sizeof(bool) * nb_nodes);
    cudaMemcpy(d_graph_level, h_graph_level, sizeof(bool) * nb_nodes,
               cudaMemcpyHostToDevice);
    // BFS cost (hop count); -1 = not reached yet.
    int* h_cost = (int*)malloc(sizeof(int) * nb_nodes);
    for (int i = 0; i < nb_nodes; i++) {
        h_cost[i] = -1;
    }
    h_cost[start_position] = 0;
    int* d_cost;
    cudaMalloc((void**)&d_cost, sizeof(int) * nb_nodes);
    cudaMemcpy(d_cost, h_cost, sizeof(int) * nb_nodes,
               cudaMemcpyHostToDevice);
    // d_over is set by the kernel whenever any node joined the next
    // frontier, i.e. another level must be processed.
    bool* d_over;
    cudaMalloc((void**)&d_over, sizeof(bool));
    bool stop;
    do {
        stop = false;
        cudaMemcpy(d_over, &stop, sizeof(bool),
                   cudaMemcpyHostToDevice);
        bfs_kernel<<<num_of_blocks,
            num_of_threads_per_block>>>(d_graph_nodes, d_edge_list,
            d_graph_level, d_graph_visited, d_cost, d_over,
            nb_nodes);
        cudaThreadSynchronize();
        cudaMemcpy(&stop, d_over, sizeof(bool),
                   cudaMemcpyDeviceToHost);
        cout<<"stop : "<<stop<<endl;
    } while(stop);
    cout << "success!" <<endl;
    cudaMemcpy(h_cost, d_cost, sizeof(int) * nb_nodes,
               cudaMemcpyDeviceToHost);
    // Store the result into a file
    FILE* fpo = fopen("result.txt", "w");
    for (int i = 0; i < nb_nodes; i++) {
        fprintf(fpo, "(%d) cost:%d\n", i, h_cost[i]);
    }
    test<<<num_of_blocks, num_of_threads_per_block>>>(d_graph_nodes,
        nb_nodes);
    cudaMemcpy(h_graph_nodes, d_graph_nodes, sizeof(Node)*nb_nodes,
        cudaMemcpyDeviceToHost);
    fclose(fpo);
    // Cleanup: malloc'd buffers with free(), new[]-allocated arrays with
    // delete[], device allocations with cudaFree.
    free(h_graph_nodes);
    free(h_graph_level);
    free(h_graph_visited);
    free(h_cost);
    delete[] degrees;
    delete[] starting;
    delete[] links;
    cudaFree(d_graph_nodes);
    cudaFree(d_edge_list);
    cudaFree(d_graph_level);
    cudaFree(d_graph_visited);
    cudaFree(d_cost);
    cudaFree(d_over);
}
/* Entry point: parse "-i <file>" and run BFS from node 0. */
int main(int argc, char** argv) {
    parse_args(argc, argv);
    // Guard against a missing "-i <file>" argument: bfsGraph would otherwise
    // construct an ifstream from a NULL pointer (undefined behavior).
    if (infile == NULL) {
        usage(argv[0], "Infile missing");
    }
    bfsGraph(infile, 0);
    return 0;
}
|
12,548 |
#ifndef __TILED_KERNEL__
#define __TILED_KERNEL__
#include<cuda_runtime.h>
// Naive (untiled) square matrix multiply: output = input1 * input2, all
// matrices row x row, row-major, one thread per output element.
// Fixes over the original: the accumulator was `int`, truncating every
// float partial product, and the input row/column indices ignored the block
// indices (only threadIdx was used), producing wrong results for any grid
// larger than one block.
__global__ void normal(int row,float* input1,float* input2,float *output){
    int col = blockIdx.x * blockDim.x + threadIdx.x; // global output column
    int r   = blockIdx.y * blockDim.y + threadIdx.y; // global output row
    if (col < row && r < row) {
        float sum = 0.0f;
        for (int i = 0; i < row; i++) {
            sum += input1[r * row + i] * input2[i * row + col];
        }
        output[r * row + col] = sum;
    }
}
// Tiled square matrix multiply: output = input1 * input2 (row x row,
// row-major). Each block computes one blockSize x blockSize output tile,
// streaming matching tiles of both inputs through shared memory and
// zero-padding tiles that fall past the matrix edge.
template <int blockSize>
__global__ void tile(int row,float* input1,float* input2,float *output){
    const int lx = threadIdx.x;
    const int ly = threadIdx.y;
    const int outCol = blockIdx.x * blockSize + lx;
    const int outRow = blockIdx.y * blockSize + ly;
    __shared__ float tileA[blockSize][blockSize];
    __shared__ float tileB[blockSize][blockSize];
    const int phases = (blockSize + row - 1) / blockSize;
    // Walking offsets: A advances along its row, B advances down its column.
    int offsA = blockIdx.y * blockSize * row;
    int offsB = blockIdx.x * blockSize;
    float acc = 0;
    for (int p = 0; p < phases; p++) {
        // Stage one tile of each operand; out-of-range elements become 0 so
        // the inner product below needs no per-element guards.
        if ((p * blockSize + lx < row) && (blockIdx.y * blockSize + ly < row))
            tileA[ly][lx] = input1[offsA + lx + row * ly];
        else
            tileA[ly][lx] = 0;
        if ((blockIdx.x * blockSize + lx < row) && (p * blockSize + ly < row))
            tileB[ly][lx] = input2[offsB + lx + row * ly];
        else
            tileB[ly][lx] = 0;
        offsA += blockSize;
        offsB += blockSize * row;
        __syncthreads();
        for (int j = 0; j < blockSize; j++) {
            acc += tileA[ly][j] * tileB[j][lx];
        }
        __syncthreads();
    }
    if (outCol < row && outRow < row) {
        output[outCol + outRow * row] = acc;
    }
}
#endif
|
12,549 | #include <math.h>
#include <stdio.h>
__device__ char const* const str = "HELLO WORLD!";
constexpr size_t str_length = 12;
// Each thread prints one character of the device-side string, wrapping with
// modulo if more threads than characters are launched.
__global__ void hello()
{
    const char c = str[threadIdx.x % str_length];
    printf("%c\n", c);
}
int main()
{
    // One block with one thread per character of the device string.
    hello<<<1, str_length>>>();
    // Block until the kernel's printf output has been produced.
    cudaDeviceSynchronize();
    return 0;
}
|
12,550 | #include "includes.h"
// gradInput of a horizontal 1-D convolution (naive): grid-stride loop over
// the n elements of dx; each element correlates the weight row with the
// valid range of output gradients.
//   dx : gradInput, width iW = oW + kL - 1 per row
//   dy : gradOutput, oH rows of width oW per plane
//   w  : kL weights per plane — assumes one weight row per plane of
//        iW*oH elements (TODO confirm layout against the caller)
__global__ void conv_horizontal_naive_gradInput(const int n, float *dx, const float *dy, const float *w, const int oH, const int oW, const int kL)
{
  for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x) {
    int iW = oW + kL - 1;               // padded input-row width
    int col = i%iW;                     // column within the padded row
    int dy_offset = (i/iW)*oW + col;    // matching position in gradOutput
    int w_offset = (i/(iW*oH))*kL;      // first weight of this plane
    // Clamp the tap range so dy_offset - k stays inside [row*oW, row*oW+oW).
    int k_begin = max(0, col-oW+1);
    int k_end = min(kL, col+1);
    // Accumulate in a register and store once: the original initialized
    // dx[i] to 0 and then read-modify-wrote global memory on every tap.
    float acc = 0.0f;
    for (int k = k_begin; k < k_end; k++) {
      acc += w[w_offset + k]*dy[dy_offset - k];
    }
    dx[i] = acc;
  }
}
12,551 | #include "cuda.cuh"
#include "device_launch_parameters.h"
// Choose how many threads cooperate on each result (ThreadPerResult) and how
// many blocks the launch needs (CountBlock), capped by the fixed block size.
void CudaParameter::Set(int countResult, int countCalculationPer)
{
	int totalThreads;
	if (countResult >= CUDA_CALCULATION_MINIMAL_THREADS)
	{
		// Enough results for full occupancy: one thread per result.
		ThreadPerResult = 1;
		totalThreads = countResult;
	}
	else
	{
		// Few results: give each one the largest power-of-two thread count
		// that fits both the per-result work and the block size.
		int tpr = 1;
		while (tpr * 2 <= countCalculationPer && tpr * 2 <= CUDA_CALCULATION_BLOCK_THREAD_SIZE)
		{
			tpr *= 2;
		}
		ThreadPerResult = tpr;
		totalThreads = ThreadPerResult * countResult;
	}
	// Ceil-divide the total thread count into fixed-size blocks.
	CountBlock = (totalThreads + CUDA_CALCULATION_BLOCK_THREAD_SIZE - 1) / CUDA_CALCULATION_BLOCK_THREAD_SIZE;
}
namespace Cuda
{
	// Synchronize so asynchronous kernel errors become observable. Errors
	// are not reported; the empty if-body is a breakpoint anchor only.
	void CUDA_Calculation_Error()
	{
		cudaError cudaStatus = cudaDeviceSynchronize();
		if (cudaStatus != cudaSuccess)
		{
			int i = 0;
			i++;
		}
	}
	// Reset the device; route a failed reset through the error hook.
	void CUDA_Reset()
	{
		if (cudaDeviceReset() != cudaSuccess) { CUDA_Calculation_Error(); }
	}
	// Copy `size` doubles from device memory into a freshly allocated host
	// array, replacing whatever *target previously pointed to.
	void CUDA_Read_CUDA_Data(double* cudaValues, double** target, int size)
	{
		// delete[]: *target is always allocated with new[] below — the
		// original used scalar delete on an array (undefined behavior).
		if (*target != nullptr) { delete[] *target; }
		*target = new double[size];
		CUDA_Array_CopyDeviceToHost(cudaValues, size * sizeof(double), *target);
	}
	// Same as above, but starting at an element offset into the device array.
	void CUDA_Read_CUDA_Data(double* cudaValues, double** target, int size, int offset)
	{
		if (*target != nullptr) { delete[] *target; }
		*target = new double[size];
		CUDA_Array_CopyDeviceToHost(&cudaValues[offset], size * sizeof(double), *target);
	}
	// Ceil-divide: number of blocks needed to cover `threads` threads.
	int CUDA_Get_Block_Size(int threads)
	{
		return (threads + CUDA_CALCULATION_BLOCK_THREAD_SIZE - 1) / CUDA_CALCULATION_BLOCK_THREAD_SIZE;
	}
	// Free a device allocation (if any) and null the caller's pointer.
	void CUDA_Array_Free(void ** address)
	{
		if (*address)
		{
			cudaFree(*address);
		}
		*address = nullptr;
	}
	// Fill target[0..count) with `value`; one thread per element.
	__global__ void cuCalulation_kernel_Initialize(double * target, double value, int count) {
		int index = blockIdx.x * blockDim.x + threadIdx.x;
		if (index < count)
		{
			target[index] = value;
		}
	}
	// Host wrapper for the fill kernel; synchronizes to surface errors.
	void CUDA_Array_Initialize(double * target, double value, int count)
	{
		cuCalulation_kernel_Initialize << <CUDA_Get_Block_Size(count), CUDA_CALCULATION_BLOCK_THREAD_SIZE >> > (target, value, count);
		CUDA_Calculation_Error();
	}
	// cudaMemcpy wrappers. All return true unconditionally; a failed copy
	// only hits the breakpoint anchor (NOTE(review): consider surfacing the
	// status to callers).
	bool CUDA_Array_CopyDeviceToHost(void* src, int size, void* dest)
	{
		cudaError cudaStatus = cudaMemcpy(dest, src, size, cudaMemcpyDeviceToHost);
		if (cudaStatus != cudaSuccess)
		{
			int i = 0;
			i++;
		}
		return true;
	}
	bool CUDA_Array_CopyHostToDevice(void* src, int size, void* dest)
	{
		cudaError cudaStatus = cudaMemcpy(dest, src, size, cudaMemcpyHostToDevice);
		if (cudaStatus != cudaSuccess)
		{
			int i = 0;
			i++;
		}
		return true;
	}
	bool CUDA_Array_CopyDeviceToDevice(void* src, int size, void* dest)
	{
		cudaError cudaStatus = cudaMemcpy(dest, src, size, cudaMemcpyDeviceToDevice);
		if (cudaStatus != cudaSuccess)
		{
			int i = 0;
			i++;
		}
		return true;
	}
}
12,552 | #include <string>
#include <fstream>
#include <sstream>
#include <list>
#include <iterator>
#include "file_handler.cuh"
// Open the file (truncating or appending, per the flag) and emit the opening
// bracket of a JSON-style array. The stream closes via its destructor.
void prepareFile(std::string fileName, bool truncate) {
    std::ios_base::openmode mode =
        truncate ? (std::ofstream::out | std::ofstream::trunc)
                 : std::ios_base::app;
    std::ofstream resFile(fileName, mode);
    resFile << "[";
}
// Close out a JSON-style array file: optionally drop the trailing separator
// character (e.g. a comma), append "]\n", and rewrite the file.
void finishFile(std::string fileName, bool removeLast) {
    // Slurp the whole file into memory.
    std::ifstream resFile1 (fileName);
    std::stringstream buffer;
    buffer << resFile1.rdbuf();
    std::string contents = buffer.str();
    resFile1.close();
    // Guard the empty-file case: pop_back() on an empty std::string is
    // undefined behavior (the original called it unconditionally).
    if(removeLast && !contents.empty())
        contents.pop_back();
    contents += "]\n";
    // Rewrite the file with the closing bracket appended.
    std::ofstream resFile2 (fileName, std::ofstream::out | std::ofstream::trunc);
    resFile2 << contents;
    resFile2.close();
}
// Append one log-normalization-constant value (comma-separated) to the
// Z_FILE_NAME results file.
void writeLogNormConstToFile(double logNormConstant) {
    std::ofstream resFile(Z_FILE_NAME, std::ios_base::app);
    if (!resFile.is_open()) {
        printf("Could not open file %s\n", Z_FILE_NAME.c_str());
        return;
    }
    resFile << logNormConstant << ",";
    resFile.close();
}
// Append the ESS trace to ESS_FILE_NAME as a bracketed, space-separated row
// terminated by "],\n".
void writeESSToFile(std::list<double> essList) {
    std::ofstream resFile(ESS_FILE_NAME, std::ios_base::app);
    if (!resFile.is_open()) {
        printf("Could not open file %s\n", ESS_FILE_NAME.c_str());
        return;
    }
    resFile << "[";
    for (const double ess : essList)
        resFile << ess << " ";
    resFile << "],\n";
    resFile.close();
}
|
12,553 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <cstdlib>
#include <iostream>
#include <iterator>
int main(void)
{
    // Fill a small host vector with pseudo-random ints.
    thrust::host_vector<int> host_data(20);
    thrust::generate(host_data.begin(), host_data.end(), rand);
    std::cerr << "input..." << std::endl;
    std::copy(host_data.begin(), host_data.end(),
              std::ostream_iterator<int>(std::cerr, "\n"));
    std::cerr << "" << std::endl;
    // Move the data to the GPU and sort it there.
    thrust::device_vector<int> device_data = host_data;
    std::cerr << "gpu[0] = " << device_data[0]
              << ", gpu[1] = " << device_data[1] << '\n';
    thrust::sort(device_data.begin(), device_data.end());
    std::cerr << "sort...\n";
    std::cerr << "gpu[0] = " << device_data[0]
              << ", gpu[1] = " << device_data[1] << '\n';
    // Copy back to the host and print the sorted sequence.
    thrust::host_vector<int> sorted = device_data;
    std::cerr << "output..." << std::endl;
    std::copy(sorted.begin(), sorted.end(),
              std::ostream_iterator<int>(std::cerr, "\n"));
    std::cerr << "" << std::endl;
    std::cerr << "third item in sorted data:" << device_data[2] << std::endl;
    return 0;
}
|
// Copy field u into u_prev inside a packed parameter blob.
// Blob layout (floats): [0] = N, [5 .. 5+N*N) = u, [5+N*N .. 5+2*N*N) = u_prev.
// Strided by a fixed 32 — assumes 32 cooperating threads (TODO confirm the
// launch configuration matches).
__device__ void StencilCopy(void* param)
{
    float* fields = (float*)param;
    int N = (int)fields[0];
    float* u = fields + 5;
    float* u_prev = fields + 5 + N * N;
    for (int I = threadIdx.x; I < N * N; I += 32)
    {
        u_prev[I] = u[I];
    }
}
|
// Element-wise product dest[i] = a[i] * b[i]. Single-block launch with one
// thread per element; there is no bounds guard, so the caller must launch
// exactly as many threads as there are elements.
__global__ void multiply(float *dest, float *a, float *b)
{
    const int idx = threadIdx.x;
    dest[idx] = a[idx] * b[idx];
}
12,556 | /*
* Ising model: Halmitonian H = /sum_ij J(sigma_i)(sigma_j)
*/
/*
* 1. Calculate the energy in the program
* 2. Calculate the heat capacity in the program
* 3. Add more inputs to adjust the length of lattice
* 4. A matlab code to plot data.
* data format example:
* position.x position.y spin(-1, 1)
* Iteattion 1: 1 4 -1
* * * *
* * * *
* Iteattion 2: 4 3 1
* * * *
* * * *
* Iteattion N: 35 76 1
* * * *
* * * *
* 5. Compare the numerical value with the analytic value
* 6. Move to 3D
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h> /* time */
#include <curand.h>
#include <curand_kernel.h>
/*
* LATTICE_LENGTH - length of the lattice
* LATTICE_LENGTH - number of element is one lattice
* BOLTZMANN_CONST - bolzmann constant. Set to 1.
*/
#define LATTICE_LENGTH 1024
#define LATTICE_2 (LATTICE_LENGTH * LATTICE_LENGTH)
#define BOLTZMANN_CONST 1
#define N LATTICE_LENGTH
#define WARM_STEP 1e3
#define MEAS_STEP 1e3
#define WARP 1e1
#define NUM_THREAD_X 32
#define NUM_THREAD_Y 32
#define TEMPERATURE 4.0
__device__ int energy(int up, int down, int left, int right, int center);
__global__ void update(int *lattice, double beta, double *E_d, double *M_d, double *E2_d, double *M2_d, int tag, curandState * global_state);
__global__ void printstate(int *lattice);
__global__ void init_rand(curandState * global_state, unsigned long seed);
/* Setup random seed to each kernel */
/* Seed one curand state per lattice site (linear index idx + idy * N). */
__global__ void init_rand(curandState * global_state, unsigned long seed){
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned int site = col + row * N;
    curand_init(seed, site, 0, &global_state[site]);
    __syncthreads();
}
/*
* update is the function to update a point
* 1. flip a point (1 -> -1 or -1 -> 1)
* 2. compare the energy before flip a point and after flip a point
* 3. if the energy with flipped point is small, accept
* 4. if the energy is larger, generate a random number pro_rand (0,1),
* if pro_rand < e^(-beta * delatE), aceept. else reject.
*/
/* Checkerboard Metropolis sweep: each thread owns one lattice site, the block
 * stages a 32x32 tile plus a one-site halo in shared memory, then even-parity
 * sites update first and odd-parity sites second (separated by barriers).
 * When tag == 1, per-site energy/magnetization accumulators are updated. */
__global__ void update(int* lattice, double beta, double *E_d, double *M_d, double *E2_d, double *M2_d, int tag, curandState * global_state){
    // Calculate the global index
    // Calculate the global index for the up, down, left, right index.
    // declare parameters
    int itx, ity, idx, idy, index;
    int flip, up, down, left, right, center;
    double pro_rand, deltaE, E;
    // local index
    itx = threadIdx.x;
    ity = threadIdx.y;
    // global index
    idx = blockIdx.x * blockDim.x + itx;
    idy = blockIdx.y * blockDim.y + ity;
    index = idx * N + idy;
    // load data into shared memory: block tile plus one-site halo per side
    __shared__ int lat[32 + 2][32 + 2];
    __syncthreads();
    lat[itx+1][ity+1] = lattice[index];
    // Halo loads: global lattice edges wrap around (periodic boundary),
    // interior block edges read the neighboring block's row/column.
    if(idx == 0){
        lat[itx][ity + 1] = lattice[index + (N - 1) * N];
    }else if(itx == 0){
        lat[itx][ity + 1] = lattice[index - N];
    }
    if(idx == N - 1){
        lat[itx + 2][ity + 1] = lattice[index - (N - 1) * N];
    }else if(itx == NUM_THREAD_X - 1){
        // NOTE(review): the site one row down is index + N; "index + N - 1"
        // looks off by one relative to the other halo loads — confirm.
        lat[itx + 2][ity + 1] = lattice[index + N -1];
    }
    if(idy == 0){
        lat[itx + 1][ity] = lattice[index + N - 1];
    }else if(ity == 0){
        lat[itx + 1][ity] = lattice[index - 1];
    }
    if(idy == N - 1){
        lat[itx + 1][ity + 2] = lattice[index - (N - 1)];
    }else if(ity == NUM_THREAD_X - 1){
        lat[itx + 1][ity + 2] = lattice[index + 1];
    }
    // Draw one uniform deviate per site per sweep.
    // NOTE(review): the same deviate is shared by the even and odd branches
    // below (each thread only takes one of them) — confirm intended.
    curandState local_state = global_state[idx * N + idy];
    pro_rand = curand_uniform(&local_state);
    global_state[idx * N + idy] = local_state;
    __syncthreads();
    // for even sites
    if((idx + idy) % 2 == 0){
        up = lat[itx][ity + 1];
        down = lat[itx + 2][ity + 1];
        left = lat[itx + 1][ity];
        right = lat[itx + 1][ity + 2];
        center = lat[itx + 1][ity + 1];
        // Flip the center element
        // NOTE(review): `flip` is assigned but never used afterwards.
        flip = -center;
        // Calculate the difference between these two state
        E = energy(up, down, left, right, center);
        deltaE = -2.0 * E;
        // If deltaE < 0 or pro_rand <= e^(-beta * deltaE), accept new value
        if (deltaE < 0 || pro_rand <= exp(- 1.0 * beta * (deltaE * 1.0))){
            lat[itx + 1][ity + 1] *= -1;
        }
    }
    // wait for even site completion
    __syncthreads();
    // for odd sites: neighbors are read from shared memory, so they see the
    // even half-sweep's accepted flips within this block.
    if((idx + idy) % 2 == 1){
        up = lat[itx][ity + 1];
        down = lat[itx + 2][ity + 1];
        left = lat[itx + 1][ity];
        right = lat[itx + 1][ity + 2];
        center = lat[itx + 1][ity + 1];
        // Flip the center element
        flip = -center;
        // Calculate the difference between these two state
        E = energy(up, down, left, right, center);
        deltaE = -2.0 * E;
        // If deltaE < 0 or pro_rand <= e^(-beta * deltaE), accept new value
        if (deltaE < 0 || pro_rand <= exp(- 1.0 * beta * (deltaE * 1.0))){
            lat[itx + 1][ity + 1] *= -1;
        }
    }
    // wait for odd site completion
    __syncthreads();
    // store data back
    lattice[index] = lat[itx + 1][ity + 1];
    // Measurement phase: accumulate per-site observables.
    // NOTE(review): E is the pre-flip local energy of this site regardless of
    // whether the flip was accepted — confirm that is the intended estimator.
    if(tag == 1){
        E_d[index] += E;
        M_d[index] += lat[itx+1][ity+1];
        E2_d[index] += E * E;
        M2_d[index] += lat[itx+1][ity+1] * lat[itx+1][ity+1];
    }
    __syncthreads();
}
/*
* printstate is the function to print the whole matrix.
* Since it prints in parallel, we also print the global
* index of the matrx.
* it prints (x, y, (1 or -1)).
*/
/*
 * Dump every lattice site as "x, y, spin" (spin is +1 or -1).
 * Expects a 2-D launch covering at least N x N threads; out-of-range
 * threads simply print nothing.
 */
__global__ void printstate(int* lattice) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;  // x coordinate
    const int row = blockIdx.y * blockDim.y + threadIdx.y;  // y coordinate
    const bool inside = (col < N) && (row < N);
    if (inside) {
        printf("%d, %d, %d\n", col, row, lattice[col + row * N]);
    }
    __syncthreads();
}
/*
* energy is the function used to calculate the energy between
* (center, up), (center, down), (center, left), (center, right)
*/
__device__ int energy(int up, int down, int left, int right, int center){
double H;
H = - up * center - down * center - left * center - right * center;
return H;
}
/*
* Commandline inputs option
* 1. Tempurature (T)
*
*/
/*
 * Driver for the 2-D Ising simulation.
 * Commandline inputs (all optional):
 *   1. Temperature T (critical temperature is around 2.2)
 *   2. number of warm-up sweeps
 *   3. number of measurement sweeps
 *   4. measurement stride ("warp")
 * Prints: T, <E>, |<M>|, heat capacity, magnetic susceptibility.
 */
int main (int argc, char *argv[]){
    int *lattice;            // host spin lattice, values in {-1, +1}
    int *d_lattice;          // device copy of the lattice
    double *E,  *E_d;        // per-site accumulated energy (host/device)
    double *E2, *E2_d;       // per-site accumulated energy squared
    double *M,  *M_d;        // per-site accumulated magnetization
    double *M2, *M2_d;       // per-site accumulated magnetization squared
    double T = TEMPERATURE;
    int warmsteps = WARM_STEP;
    int nout = MEAS_STEP;
    int warp = WARP;
    int numthreadx = NUM_THREAD_X;
    int numthready = NUM_THREAD_Y;
    int numblocksX = LATTICE_LENGTH / numthreadx;
    int numblocksY = LATTICE_LENGTH / numthready;
    // Optional CLI overrides (atof truncates to int for the step counts,
    // matching the original behavior).
    T = argc > 1 ? atof(argv[1]) : T;
    warmsteps = argc > 2 ? atof(argv[2]) : warmsteps;
    nout = argc > 3 ? atof(argv[3]) : nout;
    warp = argc > 4 ? atof(argv[4]) : warp;
    // Buffer sizes: the lattice is int-sized, the accumulators double-sized.
    const size_t bytes_lattice = LATTICE_2 * sizeof(int);
    const size_t bytes_E = LATTICE_2 * sizeof(double);
    const size_t bytes_M = LATTICE_2 * sizeof(double);
    // Host allocations; lattice starts from a random +-1 configuration and
    // all accumulators start at zero.
    lattice = (int*)malloc(bytes_lattice);
    E  = (double*)malloc(bytes_E);
    M  = (double*)malloc(bytes_M);
    E2 = (double*)malloc(bytes_E);
    M2 = (double*)malloc(bytes_M);
    for(int i = 0; i < LATTICE_2; i++){
        lattice[i] = 2 * (rand() % 2) - 1;
        E[i] = 0.0;
        M[i] = 0.0;
        E2[i] = 0.0;
        M2[i] = 0.0;
    }
    // Launch geometry: one thread per lattice site.
    dim3 grid(numblocksX, numblocksY, 1);
    dim3 thread(numthreadx, numthready, 1);
    // Per-site RNG state for the Metropolis acceptance test.
    curandState *global_state;
    cudaMalloc(&global_state, LATTICE_2 * sizeof(curandState));
    init_rand<<< grid, thread >>> (global_state, unsigned(time(NULL)));
    // beta = 1/(kB*T), used in the acceptance probability exp(-beta*dE).
    double beta = 1.0 / (BOLTZMANN_CONST * 1.0) / T;
    // Device allocations and initial uploads.
    cudaMalloc((void **)&d_lattice, bytes_lattice);
    cudaMalloc((void **)&E_d, bytes_E);
    cudaMalloc((void **)&M_d, bytes_M);
    cudaMalloc((void **)&E2_d, bytes_E);
    cudaMalloc((void **)&M2_d, bytes_M);
    cudaMemcpy(d_lattice, lattice, bytes_lattice, cudaMemcpyHostToDevice);
    cudaMemcpy(E_d, E, bytes_E, cudaMemcpyHostToDevice);
    cudaMemcpy(M_d, M, bytes_M, cudaMemcpyHostToDevice);
    cudaMemcpy(E2_d, E2, bytes_E, cudaMemcpyHostToDevice);
    cudaMemcpy(M2_d, M2, bytes_M, cudaMemcpyHostToDevice);
    // Enlarge the printf buffer so a full-lattice dump is not truncated.
    cudaDeviceSetLimit(cudaLimitPrintfFifoSize, N * N * sizeof(int));
    // Warm-up sweeps: tag 0 means "do not accumulate measurements".
    for (int iter = 0; iter < warmsteps; iter++){
        update<<<grid, thread>>>(d_lattice, beta, E_d, M_d, E2_d, M2_d, 0, global_state);
        cudaDeviceSynchronize();
    }
    // Measurement sweeps: accumulate (tag 1) every `warp`-th step only,
    // to decorrelate samples; cnt2 counts the accumulated sweeps.
    int cnt2 = 0;
    for (int nstep = 0; nstep < nout; nstep++){
        if(nstep % warp == 0){
            cnt2++;
            update<<<grid, thread>>>(d_lattice, beta, E_d, M_d, E2_d, M2_d, 1, global_state);
        }else{
            update<<<grid, thread>>>(d_lattice, beta, E_d, M_d, E2_d, M2_d, 0, global_state);
        }
        cudaDeviceSynchronize();
    }
    double energy = 0.0;
    double magnetization = 0.0;
    double energy2 = 0.0;
    double magnetization2 = 0.0;
    // FIX: the original copied bytes_E (double-sized) into the int-sized
    // `lattice` buffer — a 2x heap overflow. The lattice copy must use
    // bytes_lattice.
    cudaMemcpy(lattice, d_lattice, bytes_lattice, cudaMemcpyDeviceToHost);
    cudaMemcpy(E, E_d, bytes_E, cudaMemcpyDeviceToHost);
    cudaMemcpy(M, M_d, bytes_M, cudaMemcpyDeviceToHost);
    cudaMemcpy(E2, E2_d, bytes_E, cudaMemcpyDeviceToHost);
    cudaMemcpy(M2, M2_d, bytes_M, cudaMemcpyDeviceToHost);
    // Reduce per-site accumulators on the host.
    for(int i = 0; i < LATTICE_2; i++){
        energy += E[i];
        magnetization += M[i];
        energy2 += E2[i];
        magnetization2 += M2[i];
    }
    // Normalize by sample count and lattice size (the /2.0 and /4.0 undo
    // the double counting of bonds in E and E^2 respectively).
    double avg_E = energy / cnt2 / (LATTICE_2 * 1.0) / 2.0;
    double avg_M = magnetization / cnt2 / (LATTICE_2 * 1.0);
    avg_M = avg_M < 0 ? -avg_M : avg_M;
    double avg_E2 = energy2 / cnt2 / (LATTICE_2 * 1.0) / 4.0;
    double avg_M2 = magnetization2 / cnt2 / (LATTICE_2 * 1.0);
    double heat_cap = 1.0 * (avg_E2 - avg_E * avg_E) / T / T;
    double mag_sus = 1.0 * (avg_M2 - avg_M * avg_M) / T;
    printf("%5f %5f %5f %5f %5f\n", T, avg_E, avg_M, heat_cap, mag_sus);
    // Release host and device resources.
    free(lattice);
    free(E);
    free(M);
    free(E2);
    free(M2);
    cudaFree(d_lattice);
    cudaFree(E_d);
    cudaFree(M_d);
    cudaFree(E2_d);
    cudaFree(M2_d);
    cudaFree(global_state);   // FIX: the RNG state array was leaked
    return 0;
}
|
12,557 | #include "includes.h"
/*
 * Map each luminance value in dIn[0..size) to a histogram bin index in
 * [0, binNumber) and store it in out. Assumes lumMin < lumMax and that
 * dIn values lie within [lumMin, lumMax].
 */
__global__ void generate_binID(const float* dIn, int* out, const int binNumber, const float lumMin, const float lumMax, const int size) {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // FIX: the guard was `i > size`, letting thread i == size read and write
    // one element past the end of both arrays.
    if (i >= size)
    {
        return;
    }
    float range = lumMax - lumMin;
    int bin = ((dIn[i] - lumMin) / range) * binNumber;
    // FIX: the maximum value (dIn[i] == lumMax) previously produced
    // bin == binNumber, one past the last valid bin; clamp to the top bin.
    if (bin >= binNumber) bin = binNumber - 1;
    out[i] = bin;
}
12,558 | #include "includes.h"
/*
 * In-place element-wise division a[i] /= b[i]. A small epsilon (1e-6f) is
 * added to every divisor to avoid division by zero.
 */
__global__ void elementwiseDiv(float *a, const float *b, const size_t len)
{
    const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < len) {
        a[tid] = a[tid] / (b[tid] + 1e-6f);
    }
}
12,559 | #include "includes.h"
/*
 * Normalize the next power-iteration vector: d_b[i] = d_temp[i] / *normAb
 * for every i in [0, n), using a grid-stride loop.
 * FIX: the original advanced `stride` but never `index`, so each thread
 * rewrote the same single element (its first index) over and over and the
 * elements beyond one grid's worth of threads were never updated.
 */
__global__ void updateEigenVector(float* d_b, float* d_temp, float* normAb, int n)
{
    int index = threadIdx.x + blockDim.x * blockIdx.x;
    const int stride = blockDim.x * gridDim.x;
    for (; index < n; index += stride) {
        d_b[index] = d_temp[index] / *normAb;
    }
}
12,560 | #include <math.h>
#include <stdio.h>
/* Exchange the buffers held by two matrix pointers (ping-pong swap). */
__host__ void
mat_swap(double **A, double **B) {
    double *held_by_A = *A;
    *A = *B;
    *B = held_by_A;
}
// One Jacobi relaxation sweep over the TOP half of the split domain.
// 5-point stencil: NEW = 0.25*(up + down + left + right + h^2 * f).
// d0_OLD is this device's slab; d1_OLD is the other device's slab, read only
// for the halo row at the slab boundary. `max_it` is unused in the kernel body.
// NOTE(review): row/col start at 1 and stop before size-1, so the outer
// boundary appears to be fixed (Dirichlet) and is never written — confirm
// with the caller.
__global__ void
jacobian_0(double *d0_OLD, double *d1_OLD, double *NEW, double *f, int size, int max_it, double h) {
int row = blockIdx.x * blockDim.x + threadIdx.x + 1;
int col = blockIdx.y * blockDim.y + threadIdx.y + 1;
// Last interior row of this slab: the "down" neighbour is row 0 of the
// other device's slab (d1_OLD).
if (row == size/2 - 1 && col < size - 1) {
NEW[row * size + col] = 0.25 * ( d0_OLD[(row - 1) * size + col] + d1_OLD[0 * size + col] + d0_OLD[row * size + (col-1)]\
+ d0_OLD[row * size + (col+1)] + h * h * f[row * size + col]);
}
// All other interior rows: both vertical neighbours live in this slab.
if (row < size/2 - 1 && col < size - 1) {
NEW[row * size + col] = 0.25 * ( d0_OLD[(row - 1) * size + col] + d0_OLD[(row + 1) * size + col] + d0_OLD[row * size + (col-1)]\
+ d0_OLD[row * size + (col+1)] + h * h * f[row * size + col]);
}
}
// Function taking care of the bottom of the matrix
// One Jacobi relaxation sweep over the BOTTOM half of the split domain.
// Mirror of jacobian_0: d1_OLD is this device's slab, d0_OLD is the other
// device's slab, read only for the halo row at the slab boundary.
// `max_it` is unused in the kernel body.
__global__ void
jacobian_1(double *d0_OLD, double *d1_OLD, double *NEW, double *f, int size, int max_it, \
double h) {
/* initializing iteration variables */
// This time row don't start with 1 because there is no boundaries (since it's the bottom
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y + 1;
// Separate the case when communicating with the other GPU is necessary:
// row 0 of this slab takes its "up" neighbour from the last row of the
// top slab (d0_OLD).
if (row == 0 && col < size - 1) {
NEW[row * size + col] = 0.25 * ( d0_OLD[(size / 2 - 1)* size + col] + d1_OLD[(row + 1) * size + col] + d1_OLD[row * size + (col-1)]\
+ d1_OLD[row * size + (col+1)] + h * h * f[row * size + col]);
}
// Interior rows of the bottom slab: all four neighbours are local.
else if (row < size/2 - 1 && col < size - 1) {
NEW[row * size + col] = 0.25 * ( d1_OLD[(row - 1) * size + col] + d1_OLD[(row + 1) * size + col] + d1_OLD[row * size + (col-1)]\
+ d1_OLD[row * size + (col+1)] + h * h * f[row * size + col]);
}
}
|
12,561 | // #include "../cudakernels.h"
#include <iostream>
#include <vector>
#include <cmath>
using namespace std;
#define CUDA_CALL( call ) \
{ \
cudaError_t err = call; \
if ( cudaSuccess != err) \
fprintf(stderr, "CUDA error for %s in %d of %s : %s.\n", #call , __LINE__ , __FILE__ ,cudaGetErrorString(err));\
}
// returns value of an ELLPack matrix A at (x,y)
// Return the value of ELLPACK matrix A at (x, y), or 0.0 when (x, y) holds
// no stored entry. Row x occupies max_row_size consecutive slots in both
// vValue and vIndex; vIndex gives each slot's column.
__device__
double valueAt(size_t x, size_t y, double* vValue, size_t* vIndex, size_t max_row_size)
{
    const size_t rowStart = x * max_row_size;
    for(size_t slot = 0; slot < max_row_size; ++slot)
    {
        if(vIndex[rowStart + slot] == y)
            return vValue[rowStart + slot];
    }
    return 0.0;
}
// Debug stub for the Galerkin product A_coarse = P^T * A * P: currently it
// only prints P(3,3) to verify the uploaded ELLPACK data; the a_* output
// arguments are not written yet. Intended for a <<<1,1>>> launch.
__global__
void PTAP_GPU( double* value, size_t* index, size_t mrs, // A matrix
double* p_value, size_t* p_index, size_t p_mrs, // P matrix
double* a_value, size_t* a_index, size_t a_mrs // A_ matrix (coarse)
)
{
printf("%e\n", valueAt( 3, 3, p_value, p_index, p_mrs));
}
/*
 * Host driver: uploads the P and A matrices (ELLPACK layout) plus a zeroed
 * coarse matrix, runs the PTAP_GPU debug kernel, and cleans up.
 * FIX: the original leaked all six device allocations and never checked the
 * kernel for asynchronous errors.
 */
int main()
{
    // P (18 rows, 2 entries per row) and A (18 rows, 18 entries per row),
    // both in ELLPACK value/column-index form.
    vector<double> p_value = {1, 0,1, 0,0.5, 0,0.5, 0,1, 0,1, 0,0, 0,0, 0,0.25, 0.25,0.25, 0.25,0.5, 0.5,0.5, 0.5,1, 0,1, 0,0.5, 0,0.5, 0,1, 0,1, 0 };
    vector<size_t> p_index = { 0, 8, 1, 8, 2, 8, 3, 8, 2, 8, 3, 8, 8, 8, 8, 8, 2, 6, 3, 7, 2, 6, 3, 7, 4, 8, 5, 8, 6, 8, 7, 8, 6, 8, 7, 8};
    vector<double> value = { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 830771.2, 0.000000000029802304, -253845.76, -11538.432, -415384.96, 0.000000000029802304, 46153.92, 11538.432, 0, 0, 0, 0, 0.000000000059604672, 830771.2, 11538.432, 46153.92, 0.000000000059604672, -415384.96, -11538.432, -253845.76, 0, 0, 0, 0, -253845.76, 11538.432, 415384.32, -150000, 46153.92, -11538.432, -207692.16, 150000, 0, 0, 0, 0, -11538.432, 46153.92, -150000, 415384.32, 11538.432, -253845.76, 150000, -207692.16, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -415384.96, 0.000000000029802304, 46153.92, 11538.432, 1661536, 0.000000000059604672, -507692.16, -23076.928, -415384.96, 0.000000000029802304, 46153.92, 11538.432, 0.000000000029802304, -415384.96, -11538.432, -253845.76, 0.00000000008940672, 1661536, 23076.928, 92307.84, 0.000000000059604672, -415384.96, -11538.432, -253845.76, 46153.92, -11538.432, -207692.16, 150000, -507692.16, 23076.928, 830771.2, -300000, 46153.92, -11538.432, -207692.16, 150000, 11538.432, -253845.76, 150000, -207692.16, -23076.928, 92307.84, -300000, 830771.2, 11538.432, -253845.76, 150000, -207692.16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -415384.96, 0.000000000029802304, 46153.92, 11538.432, 830771.2, 0.000000000029802304, -253845.76, -11538.432, 0, 0, 0, 0, 0.000000000029802304, -415384.96, -11538.432, -253845.76, 0.000000000029802304, 830771.2, 11538.432, 46153.92, 0, 0, 0, 0, 46153.92, -11538.432, -207692.16, 150000, -253845.76, 11538.432, 415384.32, -150000, 0, 0, 0, 0, 11538.432, -253845.76, 150000, -207692.16, -11538.432, 46153.92, -150000, 415384.32, 0, 0, 0, 0,};
    vector<size_t> index = { 0, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 1, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 2, 4, 5, 8, 10, 11, 18, 18, 18, 18, 18, 18, 3, 4, 5, 9, 10, 11, 18, 18, 18, 18, 18, 18, 2, 3, 4, 5, 8, 9, 10, 11, 18, 18, 18, 18, 2, 3, 4, 5, 8, 9, 10, 11, 18, 18, 18, 18, 6, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 7, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 2, 4, 5, 8, 10, 11, 14, 16, 17, 18, 18, 18, 3, 4, 5, 9, 10, 11, 15, 16, 17, 18, 18, 18, 2, 3, 4, 5, 8, 9, 10, 11, 14, 15, 16, 17, 2, 3, 4, 5, 8, 9, 10, 11, 14, 15, 16, 17, 12, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 13, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 8, 10, 11, 14, 16, 17, 18, 18, 18, 18, 18, 18, 9, 10, 11, 15, 16, 17, 18, 18, 18, 18, 18, 18, 8, 9, 10, 11, 14, 15, 16, 17, 18, 18, 18, 18, 8, 9, 10, 11, 14, 15, 16, 17, 18, 18, 18, 18};
    vector<size_t> numrows = { 8, 18 };   // {coarse rows, fine rows}
    //// P: upload values and column indices.
    size_t p_mrs = 2;
    double* d_p_value;
    size_t* d_p_index;
    CUDA_CALL( cudaMalloc( (void**)&d_p_value, sizeof(double) * numrows[1]*p_mrs) );
    CUDA_CALL( cudaMemcpy(d_p_value, &p_value[0], sizeof(double) * numrows[1]*p_mrs, cudaMemcpyHostToDevice) );
    CUDA_CALL( cudaMalloc( (void**)&d_p_index, sizeof(size_t) * numrows[1]*p_mrs) );
    CUDA_CALL( cudaMemcpy(d_p_index, &p_index[0], sizeof(size_t) * numrows[1]*p_mrs, cudaMemcpyHostToDevice) );
    // A: upload values and column indices.
    size_t mrs = 18;
    double* d_value;
    size_t* d_index;
    CUDA_CALL( cudaMalloc( (void**)&d_value, sizeof(double) * numrows[1]*mrs) );
    CUDA_CALL( cudaMemcpy(d_value, &value[0], sizeof(double) * numrows[1]*mrs, cudaMemcpyHostToDevice) );
    CUDA_CALL( cudaMalloc( (void**)&d_index, sizeof(size_t) * numrows[1]*mrs) );
    CUDA_CALL( cudaMemcpy(d_index, &index[0], sizeof(size_t) * numrows[1]*mrs, cudaMemcpyHostToDevice) );
    // A_ (coarse): zero-initialized output buffers.
    size_t a_mrs = 4;
    double* d_a_value;
    size_t* d_a_index;
    CUDA_CALL( cudaMalloc( (void**)&d_a_value, sizeof(double) * numrows[0] * a_mrs ) );
    CUDA_CALL( cudaMemset( d_a_value, 0, sizeof(double) * numrows[0] * a_mrs) );
    CUDA_CALL( cudaMalloc( (void**)&d_a_index, sizeof(size_t) * numrows[0] * a_mrs) );
    CUDA_CALL( cudaMemset( d_a_index, 0, sizeof(size_t) * numrows[0] * a_mrs) );
    PTAP_GPU<<<1,1>>>( d_value, d_index, mrs, d_p_value, d_p_index, p_mrs, d_a_value, d_a_index, a_mrs );
    // FIX: report kernel launch/execution errors instead of ignoring them.
    CUDA_CALL( cudaGetLastError() );
    CUDA_CALL( cudaDeviceSynchronize() );
    // FIX: release the six device allocations the original leaked.
    cudaFree(d_p_value);
    cudaFree(d_p_index);
    cudaFree(d_value);
    cudaFree(d_index);
    cudaFree(d_a_value);
    cudaFree(d_a_index);
}
12,562 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define SIZE 20
/*
 * Block-wise max reduction over `input`; the global maximum ends up in
 * input[0]. Each block reduces its blockDim.x elements in shared memory,
 * then folds its block maximum into input[0] with atomicMax (input[0]'s
 * original value is itself a candidate, so atomicMax is safe to seed with it).
 * FIXES vs the original:
 *  - The halving loop `s = blockDim.x/2` dropped the middle element for
 *    non-power-of-two block sizes (blockDim.x == 10 lost s_data[4]); the
 *    ceil-half scheme below covers every element. The unused `flag`
 *    bookkeeping (and its shadowed redeclaration) is gone.
 *  - Every block wrote its own block maximum to input[0], a data race that
 *    could leave a non-global maximum behind; atomicMax resolves it.
 *  - The per-comparison debug printf was removed along with the rewrite.
 */
__global__ void max(int *input)
{
    __shared__ int s_data[SIZE];
    int tid = threadIdx.x;
    int i = tid + blockIdx.x*blockDim.x;
    s_data[tid] = input[i];
    __syncthreads();
    // `active` elements remain; pair element tid with tid+half, where
    // half = ceil(active/2) so an odd middle element survives untouched.
    for (int active = blockDim.x; active > 1; )
    {
        int half = (active + 1) / 2;
        if (tid < active - half) {
            s_data[tid] = (s_data[tid] >= s_data[tid + half]) ? s_data[tid] : s_data[tid + half];
        }
        __syncthreads();
        active = half;
    }
    if (tid == 0) atomicMax(&input[0], s_data[0]);
}
/*
 * Fill an array with SIZE random ints, reduce it on the GPU, and print the
 * maximum plus the elapsed time. FIX: the original leaked the host buffer,
 * the device buffer, and both CUDA events.
 */
int main(int argc, char *argv[])
{
    int *input;
    int *d_input;
    cudaEvent_t start, stop;
    float etime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    input = (int*)malloc(sizeof(int) * SIZE);
    srand(time(0));
    // Random values in [0, SIZE), echoed as they are generated.
    for(int j=0;j<SIZE;j++)
    {
        input[j] = rand()%SIZE;
        printf(" %d ",input[j]);
    }printf("\n");
    cudaError_t err;
    err = cudaMalloc((void**)&d_input,sizeof(int)*SIZE);
    if(err != cudaSuccess)
        printf("Error allocating\n");
    cudaMemcpy(d_input,input,sizeof(int)*SIZE, cudaMemcpyHostToDevice);
    // One block per 10 elements; the kernel leaves the result in input[0].
    max<<<SIZE/10,10>>>(d_input);
    cudaMemcpy(input,d_input,sizeof(int)*SIZE, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&etime,start,stop);
    printf(" max:%d ",input[0]);
    printf("\nTime:%f\n",etime);
    // FIX: release everything the original leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_input);
    free(input);
    return 0;
}
|
12,563 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define MAXBLOCKS 1
#define MAXTHREADS 1
// Add the two device integers *a and *b and store the sum in *c.
// Designed for a <<<1,1>>> launch; additional threads would all redundantly
// write the same value.
__global__ void SimpleAddition(int *a, int *b, int *c)
{
*c = *a + *b;
}
/* Minimal host driver: ship two ints to the GPU, add them there, and print
 * the result. */
int main()
{
    int a = 1, b = 2, c = 0;        // host operands and result
    int *d_a, *d_b, *d_c;           // device copies
    // Allocate one int's worth of device memory for each value.
    cudaMalloc((void **)&d_a, sizeof(int));
    cudaMalloc((void **)&d_b, sizeof(int));
    cudaMalloc((void **)&d_c, sizeof(int));
    // Upload the operands (and the zeroed result slot).
    cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, &c, sizeof(int), cudaMemcpyHostToDevice);
    SimpleAddition<<<MAXBLOCKS, MAXTHREADS>>>(d_a, d_b, d_c);
    // The blocking copy-back also synchronizes with the kernel.
    cudaMemcpy(&c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d + %d = %d\n", a, b, c);
    // Release the device memory.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
12,564 | #include "includes.h"
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
// Flatten image coordinates (x, y) into a byte offset: row-major pixel index
// times `margin` (bytes per pixel, e.g. 3 for RGB) plus the channel offset
// `pixelPosition`.
__device__ int getPosition(int x, int y, int width, int margin, int pixelPosition)
{
    const int pixelIndex = y * width + x;
    return pixelIndex * margin + pixelPosition;
}
/*
 * Effect kernel: each block handles a horizontal band of the RGB image and
 * each thread a vertical strip of that band. Pixels whose channel average is
 * below 20 are copied through and a 9x9 black square is stamped around them
 * in the output image.
 */
__global__ void addEffect( unsigned char* output_img, unsigned char* input_img, int width, int height, int nbBlocks)
{
    // Band of rows assigned to this block (clamped to the image).
    int lengthY = (int)(height/nbBlocks)+1;
    int startY = blockIdx.x * lengthY;
    int endY = blockIdx.x * lengthY + lengthY;
    if( endY > height )
        endY = height;
    // Strip of columns assigned to this thread (clamped to the image).
    int lengthX = (int)(width/blockDim.x)+1;
    int startX = threadIdx.x * lengthX;
    int endX = threadIdx.x * lengthX + lengthX;
    if( endX > width )
        endX = width;
    for( int x = startX; x < endX; x++ )
    {
        for( int y = startY; y < endY; y++ )
        {
            int currentIndex = getPosition(x, y, width, 3, 0);
            // Dark pixel: average of the three channels below the threshold.
            if( (input_img[currentIndex] + input_img[currentIndex+1] + input_img[currentIndex+2])/3 < 20)
            {
                output_img[currentIndex] = input_img[currentIndex];
                output_img[currentIndex+1] = input_img[currentIndex+1];
                output_img[currentIndex+2] = input_img[currentIndex+2];
                for( int i = -4; i <= 4; i++ )
                {
                    for( int j = -4; j <= 4; j++ )
                    {
                        // FIX: bounds were `> width` / `> height`, so x+i == width
                        // wrapped onto the first pixel of the next row and
                        // y+j == height indexed past the last row. Valid
                        // coordinates are 0..width-1 and 0..height-1.
                        if( x+i < 0 || x+i >= width || y+j < 0 || y+j >= height )
                            continue;
                        int neighbourIndex = getPosition( x+i, y+j, width, 3, 0);
                        // FIX: `> width*height*3` still allowed
                        // neighbourIndex+2 == width*height*3, one byte past
                        // the end of the buffer.
                        if( neighbourIndex < 0 || neighbourIndex + 2 >= width*height*3)
                            continue;
                        output_img[neighbourIndex] = 0;
                        output_img[neighbourIndex+1] = 0;
                        output_img[neighbourIndex+2] = 0;
                    }
                }
            }
        }
    }
}
12,565 | #include "includes.h"
/* TODO: Your code here */
/* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */
// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
// Sum a (input_n/output_n) x output_n row-major matrix along axis 0:
// output_data[c] = sum over rows of input_data[row * output_n + c].
// One thread per output column.
__global__ void reduced_sum_axis_zero(const float *input_data, float *output_data, int input_n, int output_n) {
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (col >= output_n)
        return;
    const int rows = input_n / output_n;
    float acc = 0.0f;
    for (int row = 0; row < rows; ++row) {
        acc += input_data[row * output_n + col];
    }
    output_data[col] = acc;
}
12,566 | #include "includes.h"
char* concat(char *s1, char *s2);
/*
 * Per-block partial sums for a CG-style iteration: for each row `index`,
 * accumulate r[index]^2 and (A*r)[index]*r[index] (A stored as 3 entries per
 * row in `a` with column indices in `indeces`), then tree-reduce both within
 * the block and write the block totals to r_squared/p_sum[blockIdx.x].
 * Assumes blockDim.x is a power of two and at most 1024. `x` is unused.
 * FIX: the original placed __syncthreads() inside divergent branches
 * (`if (index < size)`, `if (threadIdx.x < s)`, `if (threadIdx.x == 0)`).
 * A barrier not reached by every thread of the block is undefined behavior
 * and can deadlock; all barriers below are at block scope.
 */
__global__ void r_initial_sum(float* a , int * indeces , float* x,float * r ,float * r_squared ,float * p_sum ,int size)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int local_index = threadIdx.x;
    __shared__ float shared_r_squared[1024];
    __shared__ float shared_p_sum[1024];
    // Zero the slots so threads past `size` contribute nothing.
    shared_r_squared[local_index] = 0;
    shared_p_sum[local_index] = 0;
    __syncthreads();
    if (index < size)
    {
        // (A*r)[index]: 3 stored entries per row.
        for (int i = 0 ; i < 3 ; i++)
        {
            shared_p_sum[local_index] += a[3*index + i] * r[indeces[3*index + i]];
        }
        shared_r_squared[local_index] = r[index] * r[index];
        shared_p_sum[local_index] = shared_p_sum[local_index] * r[index];
    }
    __syncthreads();
    // Block-wide tree reduction; the barrier is outside the divergent branch.
    for (unsigned int s = blockDim.x/2 ; s > 0 ; s >>= 1)
    {
        if (threadIdx.x < s)
        {
            shared_r_squared[local_index] = shared_r_squared[local_index] + shared_r_squared[local_index + s];
            shared_p_sum[local_index] = shared_p_sum[local_index] + shared_p_sum[local_index + s];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0)
    {
        r_squared[blockIdx.x] = shared_r_squared[0];
        p_sum[blockIdx.x] = shared_p_sum[0];
    }
}
12,567 | #include <iostream>
#include <cuda_runtime.h>
using namespace std;
/* Demonstrate how the grid size grows as the block size shrinks for a fixed
 * element count: print (grid.x, block.x) for block sizes 1024/512/256/128. */
int main(int argc, char** argv) {
    // define total data elements
    const int nElem = 1024;
    // same sequence of configurations as before, driven by a loop
    const int blockSizes[] = {1024, 512, 256, 128};
    for (int bs : blockSizes) {
        dim3 block(bs);
        dim3 grid((nElem + block.x - 1) / block.x);   // ceil-div
        cout << "grid.x " << grid.x << " block.x " << block.x << endl;
    }
    // reset device before you leave
    cudaDeviceReset();
    return 0;
}
|
12,568 | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <iostream>
#include <map>
#include <vector>
#include <cstring>
#include <tuple>
#define BLOCKSIZE 2;
const int bsize = BLOCKSIZE;
#define NR 6
#define NC 6
#define NTOT (NR*NC)
using namespace std;
struct ebsparsematrix_t
{
size_t nr = NR;
size_t nc = NC;
size_t n=NTOT; // size of matrix, N x N
std::vector<size_t> nzrow; // for each non-zero, the row / global index
std::vector<size_t> nzcol; // for each non-zero, the col index
std::vector<double> entry; // non-zero values for each index
};
struct block_t {
double matrix[bsize][bsize] = {{0.0}}; // A dense 0 padded matrix of the non-zero values
size_t row; // The starting row of the block
size_t col; // The starting col of the block
};
struct ebbcsrmatrix_t
{
size_t blocksize = bsize; // Size of the blocks B*B
size_t nnzb = 0; // Number of non-zero blocks in the BCSR matrix
std::vector<block_t> values; // The vector of blocks
std::vector<size_t> cols;
std::vector<size_t> block_row_ptr;
};
// Converts a COO matrix to a BCSR matrix
// Converts a COO matrix to a BCSR matrix.
// values/cols receive the non-zero blocks in (block-row, block-col) order;
// block_row_ptr becomes a CSR-style offset array with one entry per block
// row plus a trailing total.
// FIX: the original emitted a block_row_ptr entry only for block rows that
// contained at least one block, so any empty block row desynchronized the
// row pointers and made the consumer kernel index row_ptr out of step.
// Building the pointer array by counting blocks per row handles empty rows
// and produces identical output for inputs without them.
void convertToBCSR(ebsparsematrix_t& ebmat, ebbcsrmatrix_t& ebbcsr) {
    std::map<std::pair<int, int>, block_t> blockmap;
    // Scatter every COO non-zero into its dense (blockRow, blockCol) tile.
    for (int n = 0; n < ebmat.entry.size(); ++n)
    {
        const int i = ebmat.nzrow[n];
        const int j = ebmat.nzcol[n];
        const double e = ebmat.entry[n];
        // Block coordinates and position inside the block.
        const int ib = i / ebbcsr.blocksize;
        const int jb = j / ebbcsr.blocksize;
        const int ii = i % ebbcsr.blocksize;
        const int jj = j % ebbcsr.blocksize;
        std::pair<int, int> key = std::pair<int, int>(ib, jb);
        if (blockmap.find(key) != blockmap.end()) {
            // Block already exists: just set the entry.
            blockmap.at(key).matrix[ii][jj] = e;
        }
        else {
            // First non-zero of this block: create it zero-padded.
            block_t newblock;
            newblock.row = ib;
            newblock.col = jb;
            newblock.matrix[ii][jj] = e;
            blockmap.insert({ key, newblock });
        }
    }
    // Emit blocks in map (row-major) order, counting blocks per block row.
    const size_t nBlockRows = (ebmat.nr + ebbcsr.blocksize - 1) / ebbcsr.blocksize;
    std::vector<size_t> rowCounts(nBlockRows, 0);
    std::map<std::pair<int, int>, block_t>::iterator it;
    for (it = blockmap.begin(); it != blockmap.end(); it++) {
        ebbcsr.values.push_back(it->second);
        ebbcsr.cols.push_back(it->second.col*bsize);
        ebbcsr.nnzb++;
        rowCounts[it->first.first]++;
    }
    // Prefix-sum the counts into CSR row offsets (one per block row + total).
    ebbcsr.block_row_ptr.assign(nBlockRows + 1, 0);
    for (size_t rIdx = 0; rIdx < nBlockRows; ++rIdx)
        ebbcsr.block_row_ptr[rIdx + 1] = ebbcsr.block_row_ptr[rIdx] + rowCounts[rIdx];
}
/*
 * BCSR sparse matrix-vector product y = A*x. One thread per
 * (block-row, row-within-block) pair: thread idx serves row `idx % bs` of
 * block row `idx / bs`, accumulating over that block row's dense bs x bs
 * tiles. row_ptr has n_block_rows+1 entries.
 * FIX: the original read row_ptr[block_row] and row_ptr[block_row + 1]
 * BEFORE the bounds check, an out-of-bounds global read for every surplus
 * thread (the host launches 256 threads for far fewer rows).
 */
__global__ void
bcsr_kernel(int n_block_rows, int bs, size_t *col_ids, size_t *row_ptr, block_t* data, double *x, double *y)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = idx % bs;
    const int block_row = idx / bs;
    if (row >= bs || block_row >= n_block_rows)
        return;
    const int first_block = row_ptr[block_row];
    const int last_block = row_ptr[block_row + 1];
    double local_out = 0.0;
    for (int block = first_block; block < last_block; block++)
    {
        // Starting column of this dense tile.
        const int first_col = data[block].col;
        for (int j = 0; j < bs; j++) {
            local_out += x[first_col + j] * data[block].matrix[row][j];
        }
    }
    y[block_row * bs + row] = local_out;
}
/**
* Host main routine
*/
/**
 * Host main routine: build a 6x6 test matrix in COO form, convert it to
 * BCSR, run the SpMV kernel against an all-ones vector, and print y.
 * FIX: the original leaked both host buffers and never checked the kernel
 * launch for errors.
 */
int main(void)
{
    // Launch the CUDA Kernel
    int threadsPerBlock = 16;
    int blocksPerGrid = 16;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    vector<size_t> rows{ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4, 4};
    vector<size_t> cols{ 0, 1, 2, 3, 4, 5, 0, 1, 2, 4, 5, 0, 3, 4, 3, 1, 2};
    vector<double> vals{ 0.7, 0.9, 0.2, 0.3, 0.4, 0.5, 0.6,0.7, 0.8, 0.5,0.1, 0.6, 0.9, 0.5, 0.4, 0.2, 0.3};
    /*Matrix
    0.7 0.9 0.2 0.3 0.4 0.5
    06 0.7 0.8 0 0.5 0
    0.6 0 0 0.9 0.5 0
    0 0 0 0.4 0 0
    0 0.2 0.3 0 0 0
    0 0 0 0 0 0
    */
    ebsparsematrix_t mat;
    mat.nzrow = rows;
    mat.nzcol = cols;
    mat.entry = vals;
    ebbcsrmatrix_t ebbcsr;
    convertToBCSR(mat, ebbcsr);
    int n = mat.nr/bsize;   // number of block rows
    // Device copies of the BCSR column ids, row pointers, and blocks.
    size_t *gpu_cols;
    size_t gpu_col_bytes = ebbcsr.cols.size()*sizeof(size_t);
    cudaMalloc(&gpu_cols, gpu_col_bytes);
    size_t *gpu_row_ptr;
    size_t gpu_row_ptr_bytes = ebbcsr.block_row_ptr.size()*sizeof(size_t);
    cudaMalloc(&gpu_row_ptr, gpu_row_ptr_bytes);
    block_t *gpu_vals;
    size_t gpu_vals_bytes = ebbcsr.values.size()*sizeof(block_t);
    cudaMalloc(&gpu_vals, gpu_vals_bytes);
    // Input vector x (all ones) and output vector y.
    double *d_y;
    double *d_x;
    double* h_x = (double *) malloc(NR*sizeof(double));
    for (int i=0; i<NR; i++) {
        h_x[i] = 1.0;
    }
    cudaMalloc(&d_x, NR*sizeof(double));
    cudaMalloc(&d_y, NR*sizeof(double));
    size_t* r = &ebbcsr.block_row_ptr[0];
    size_t* c = &ebbcsr.cols[0];
    block_t* data = &ebbcsr.values[0];
    //Copy all host variables to device variables
    cudaMemcpy( d_x, h_x, NR*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy( gpu_cols, c, gpu_col_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy( gpu_row_ptr, r, gpu_row_ptr_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy( gpu_vals, data, gpu_vals_bytes, cudaMemcpyHostToDevice);
    //Launch kernel function
    bcsr_kernel<<<blocksPerGrid,threadsPerBlock>>>(n, bsize, gpu_cols, gpu_row_ptr, gpu_vals, d_x, d_y);
    // FIX: surface launch errors instead of silently printing garbage.
    CUDA_CALL( cudaGetLastError() );
    //Copy output from device to host (implicitly synchronizes the stream)
    double* h_y = (double *) malloc(NR*sizeof(double));
    cudaMemcpy( h_y, d_y, NR*sizeof(double), cudaMemcpyDeviceToHost);
    printf("output: \n");
    for (int i=0; i<NR; i++) {
        printf("y[%d] = %f\n", i, h_y[i]);
    }
    printf("Done\n");
    // FIX: free host buffers (leaked before) and all device variables.
    free(h_x);
    free(h_y);
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(gpu_cols);
    cudaFree(gpu_vals);
    cudaFree(gpu_row_ptr);
    return 0;
}
|
12,569 |
#include <stdio.h>
// Deliberately empty kernel: exists only to demonstrate a device launch.
__global__
void mykernel(void){
}
//main
// Entry point: launch the (empty) kernel with 1 block x 1 thread, then print
// from the host. No synchronization is needed since nothing is read back.
int main(void){
    mykernel<<<1, 1>>>();   // minimal launch configuration
    printf("Hello World!\n");
    return 0;
}
|
12,570 | #include <stdint.h>
#include <stdio.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// RAII wrapper around a pair of CUDA events for timing GPU work enqueued on
// the default stream. Usage: Start(); ...launch work...; Stop(); Elapsed().
struct GpuTimer {
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer() {
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer() {
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
// Record the start event; the synchronize ensures prior work has drained so
// it is not charged to this measurement.
void Start() {
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
// Record the stop event (asynchronous; Elapsed() waits for it).
void Stop() { cudaEventRecord(stop, 0); }
// Block until the stop event completes and return the elapsed time in ms.
float Elapsed() {
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Print the first n values of a host array, space-separated on one line.
void printArray(const uint32_t *a, int n) {
    for (int idx = 0; idx < n; ++idx) {
        printf("%2i ", a[idx]);
    }
    printf("\n");
}
// Reference GPU sort using Thrust: copy to device, sort ascending, copy back.
// `nBits` is unused; it only mirrors the signature of the hand-written sorts
// in this file so the driver can call either interchangeably.
void sortByThrust(const uint32_t *in, int n, uint32_t *out, int nBits) {
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
// Per-block histogram of the nBits-wide digit starting at `bit`.
// Each block builds its local histogram in shared memory with atomics, then
// writes it to global memory TRANSPOSED (bin-major: hist[bin*gridDim.x +
// block]) so a single scan over `hist` later yields per-(bin, block) offsets.
// Requires nBins * sizeof(int) bytes of dynamic shared memory; nBins must be
// a power of two.
__global__ void computeHistKernel(uint32_t *in, int n, uint32_t *hist,
                                  int nBins, int bit) {
  extern __shared__ int s_hist[];
  // Cooperatively zero the shared histogram.
  for (int b = threadIdx.x; b < nBins; b += blockDim.x) {
    s_hist[b] = 0;
  }
  __syncthreads();
  // Tally this block's elements.
  const int gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid < n) {
    const int bin = (in[gid] >> bit) & (nBins - 1);
    atomicAdd(&s_hist[bin], 1);
  }
  __syncthreads();
  // Publish in transposed layout.
  for (int b = threadIdx.x; b < nBins; b += blockDim.x) {
    hist[gridDim.x * b + blockIdx.x] = s_hist[b];
  }
}
/*
 * Per-block inclusive scan (Hillis-Steele) over `in`, writing per-element
 * results to `out` and each block's total to blkSums (when non-NULL).
 * Requires blockDim.x * sizeof(uint32_t) bytes of dynamic shared memory.
 * FIX: the original did `if (i >= n) return;` before the __syncthreads()
 * calls below; on a partial last block the tail threads skipped the barrier
 * while the rest waited — undefined behavior. Tail lanes now stay in the
 * kernel, padded with 0 (the scan identity), which also means the last
 * block's blkSums entry is now a correct sum instead of never being written.
 */
__global__ void scanBlkKernel(uint32_t *in, int n, uint32_t *out,
                              uint32_t *blkSums) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  extern __shared__ uint32_t s_in[];
  s_in[threadIdx.x] = (i < n) ? in[i] : 0;   // zero-pad the tail
  __syncthreads();
  for (int stride = 1; stride < blockDim.x; stride *= 2) {
    uint32_t strideVal = 0;
    if (threadIdx.x >= stride) {
      strideVal = s_in[threadIdx.x - stride];
    }
    __syncthreads();
    if (threadIdx.x >= stride) {
      s_in[threadIdx.x] += strideVal;
    }
    __syncthreads();
  }
  if (blkSums && threadIdx.x == blockDim.x - 1) {
    blkSums[blockIdx.x] = s_in[threadIdx.x];
  }
  if (i < n) {
    out[i] = s_in[threadIdx.x];
  }
}
// TODO: You can define necessary functions here
// Second scan phase: add each block's scanned prefix (blkSums[blockIdx.x])
// to every element that block owns.
__global__ void addBlkSums(uint32_t *in, int n, uint32_t *blkSums) {
  const int gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid < n) {
    in[gid] += blkSums[blockIdx.x];
  }
}
/*
 * Radix-sort scatter phase. Each block (1) loads its tile, (2) locally sorts
 * it by the current nBits-wide digit using nBits passes of a 1-bit split,
 * (3) finds each digit's start index within the sorted tile, and (4) writes
 * every element to its global position: the scanned (bin, block) offset from
 * histScan plus the element's rank among equal digits in the tile.
 * Shared memory: 5 * blockDim.x + nBins uint32_t slots (see caller).
 * FIX: the original let tail threads (i >= n) return BEFORE the final
 * __syncthreads(), a divergent barrier (undefined behavior). The barrier now
 * precedes the early exit; everything else is unchanged.
 */
__global__ void scatter(const uint32_t *in, int n, const uint32_t *histScan,
                        uint32_t *out, int nBins, int bit, int nBits) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  uint32_t s_n = blockDim.x;
  //// init smem: carve the dynamic shared buffer into six arrays
  extern __shared__ uint32_t s_data[];
  uint32_t *s_in = (uint32_t *)s_data;
  uint32_t *s_inBin = (uint32_t *)(s_in + s_n);
  uint32_t *s_inBinScan = (uint32_t *)(s_inBin + s_n);
  uint32_t *s_out = (uint32_t *)(s_inBinScan + s_n);
  uint32_t *s_outBin = (uint32_t *)(s_out + s_n);
  uint32_t *s_startIdx = (uint32_t *)(s_outBin + s_n);
  if (i >= n) {
    // Tail threads carry the max digit so they sort to the end of the tile.
    s_inBin[threadIdx.x] = nBins - 1;
  } else {
    s_in[threadIdx.x] = in[i];
    s_inBin[threadIdx.x] = (s_in[threadIdx.x] >> bit) & (nBins - 1);
  }
  __syncthreads();
  //// sort smem using radix sort with 1-bit splits
  for (int b = 0; b < nBits; ++b) {
    // exclusive scan of bit b of each element's digit
    if (threadIdx.x == 0) {
      s_inBinScan[threadIdx.x] = 0;
    } else {
      s_inBinScan[threadIdx.x] = (s_inBin[threadIdx.x - 1] >> b) & 1;
    }
    __syncthreads();
    for (int stride = 1; stride < blockDim.x; stride *= 2) {
      int strideVal;
      if (threadIdx.x >= stride) {
        strideVal = s_inBinScan[threadIdx.x - stride];
      }
      __syncthreads();
      if (threadIdx.x >= stride) {
        s_inBinScan[threadIdx.x] += strideVal;
      }
      __syncthreads();
    }
    // stable split: zeros keep order at the front, ones go after them
    uint32_t rank;
    if ((s_inBin[threadIdx.x] >> b) & 1) {
      const uint32_t nZeros =
          s_n - s_inBinScan[s_n - 1] - ((s_inBin[s_n - 1] >> b) & 1);
      rank = nZeros + s_inBinScan[threadIdx.x];
    } else {
      rank = threadIdx.x - s_inBinScan[threadIdx.x];
    }
    s_outBin[rank] = s_inBin[threadIdx.x];
    s_out[rank] = s_in[threadIdx.x];
    __syncthreads();
    s_inBin[threadIdx.x] = s_outBin[threadIdx.x];
    s_in[threadIdx.x] = s_out[threadIdx.x];
    __syncthreads();
  }
  //// record where each digit value starts inside the sorted tile
  if (threadIdx.x == 0 || s_inBin[threadIdx.x] != s_inBin[threadIdx.x - 1]) {
    s_startIdx[s_inBin[threadIdx.x]] = threadIdx.x;
  }
  // FIX: block-wide barrier BEFORE any thread may exit.
  __syncthreads();
  if (i >= n) {
    return;
  }
  //// number of earlier tile elements with the same digit
  uint32_t preCount = threadIdx.x - s_startIdx[s_inBin[threadIdx.x]];
  //// global scatter
  uint32_t rank =
      histScan[gridDim.x * s_inBin[threadIdx.x] + blockIdx.x] + preCount;
  out[rank] = s_in[threadIdx.x];
}
// Copy a device array into a temporary host buffer and print it.
void printDeviceArray(const uint32_t *d_arr, int n) {
  const size_t nBytes = n * sizeof(uint32_t);
  uint32_t *h_arr = (uint32_t *)malloc(nBytes);
  cudaMemcpy(h_arr, d_arr, nBytes, cudaMemcpyDeviceToHost);
  printArray(h_arr, n);
  free(h_arr);
}
// Compare two host arrays element-wise; report the first mismatch index (and
// the array length) or declare the result correct.
void checkCorrectness(uint32_t *out, uint32_t *correctOut, int n) {
  int i = 0;
  while (i < n) {
    if (out[i] != correctOut[i]) {
      printf("INCORRECT :( %d/%d\n", i, n);
      return;
    }
    ++i;
  }
  printf("CORRECT :)\n");
}
// (Partially) Parallel radix sort: implement parallel histogram and parallel
// scan in counting sort Assume: nBits (k in slides) in {1, 2, 4, 8, 16} Why
// "int * blockSizes"? Because we may want different block sizes for diffrent
// kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
// GPU radix sort (counting sort per nBits-wide digit, LSD first):
//   1) per-block local histograms of the current digit (stored transposed in
//      d_hist so same-digit counts of all blocks are contiguous),
//   2) exclusive scan of the flattened histogram: block-local scan kernel,
//      host-side scan of the block sums, then add-back kernel,
//   3) stable scatter using the scanned histogram as per-(digit, block) bases.
// in/out are host arrays of length n; blockSizes[0]/[1] select the block size
// for the histogram/scatter and scan kernels respectively.
void sortByDevice(const uint32_t *in, int n, uint32_t *out, int nBits,
                  int *blockSizes) {
  // TODO
  const int nBins = 1 << nBits;  // number of distinct digit values per pass
  const dim3 histBlockSize = dim3(blockSizes[0]);
  const int histBlockCount = (n - 1) / histBlockSize.x + 1;
  const dim3 histGridSize = dim3(histBlockCount);
  const dim3 scanBlockSize = dim3(blockSizes[1]);
  // the scan covers nBins counters contributed by every histogram block
  const int scanBlockCount = (nBins * histBlockCount - 1) / scanBlockSize.x + 1;
  const dim3 scanGridSize = dim3(scanBlockCount);
  const size_t ARRAY_BYTES = n * sizeof(uint32_t);
  const size_t HIST_SMEM_BYTES = nBins * sizeof(uint32_t);
  const size_t HIST_BYTES = histBlockCount * HIST_SMEM_BYTES;
  const size_t BLKSUMS_BYTES = scanBlockCount * sizeof(uint32_t);
  const size_t SCAN_SMEM_BYTES = scanBlockSize.x * sizeof(uint32_t);
  // scatter kernel shared memory: 5 per-thread uint32 arrays plus one local
  // histogram (layout defined by the scatter kernel, not visible here)
  const size_t SCATTER_SMEM_BYTES =
      5 * histBlockSize.x * sizeof(uint32_t) + HIST_SMEM_BYTES;
  uint32_t *d_in;
  uint32_t *d_out;
  uint32_t *d_hist; // contains all the transposed local histogram of all blocks
  uint32_t *d_histScan;
  uint32_t *d_blkSums;
  uint32_t *blkSums = (uint32_t *)malloc(BLKSUMS_BYTES);
  CHECK(cudaMalloc(&d_in, ARRAY_BYTES));
  CHECK(cudaMalloc(&d_out, ARRAY_BYTES));
  CHECK(cudaMalloc(&d_hist, HIST_BYTES));
  CHECK(cudaMalloc(&d_histScan, HIST_BYTES));
  CHECK(cudaMalloc(&d_blkSums, BLKSUMS_BYTES));
  CHECK(cudaMemcpy(d_in, in, ARRAY_BYTES, cudaMemcpyHostToDevice));
  /* printf("IN: "); */
  /* printArray(in, n); */
  GpuTimer timer;
  // one counting-sort pass per digit of the 32-bit keys
  for (int bit = 0; bit < 8 * sizeof(uint32_t); bit += nBits) {
    printf("#%d (iteration):\n", bit/nBits + 1);
    //Step 1: Calculate local histogram of each block, transpose and copy to d_hist
    printf(" + Step 1. Local histogram. ");
    timer.Start();
    computeHistKernel<<<histGridSize, histBlockSize, HIST_SMEM_BYTES>>>(
        d_in, n, d_hist, nBins, bit);
    CHECK(cudaGetLastError());
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
    // Step 2: Scan d_hist
    printf(" + Step 2. Exclusive scan. ");
    timer.Start();
    // scan per block
    // exclusive scan trick: zero element 0 and write the scan shifted by one
    CHECK(cudaMemset(d_histScan, 0, sizeof(uint32_t)));
    scanBlkKernel<<<scanGridSize, scanBlockSize, SCAN_SMEM_BYTES>>>(
        d_hist, histBlockCount * nBins - 1, d_histScan + 1, d_blkSums);
    CHECK(cudaGetLastError());
    // scan blksums:
    // block sums are few, so they are scanned sequentially on the host
    CHECK(
        cudaMemcpy(blkSums, d_blkSums, BLKSUMS_BYTES, cudaMemcpyDeviceToHost));
    for (int i = 1; i < scanBlockCount; ++i) {
      blkSums[i] += blkSums[i - 1];
    }
    CHECK(
        cudaMemcpy(d_blkSums, blkSums, BLKSUMS_BYTES, cudaMemcpyHostToDevice));
    // add scanned blkSums
    // skip the first scan block (its base is already correct)
    addBlkSums<<<scanGridSize, scanBlockSize>>>(
        d_histScan + scanBlockSize.x + 1,
        histBlockCount * nBins - scanBlockSize.x - 1, d_blkSums);
    CHECK(cudaGetLastError());
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
    // Step 3: scatter
    printf(" + Step 3. Scatter. ");
    timer.Start();
    scatter<<<histGridSize, histBlockSize, SCATTER_SMEM_BYTES>>>(
        d_in, n, d_histScan, d_out, nBins, bit, nBits);
    CHECK(cudaGetLastError());
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
    // ping-pong buffers for the next digit pass
    uint32_t *tmp = d_in;
    d_in = d_out;
    d_out = tmp;
  }
  // after the final swap the fully sorted keys live in d_in
  cudaMemcpy(out, d_in, ARRAY_BYTES, cudaMemcpyDeviceToHost);
  free(blkSums);
  CHECK(cudaFree(d_in));
  CHECK(cudaFree(d_out));
  CHECK(cudaFree(d_hist));
  CHECK(cudaFree(d_histScan));
  CHECK(cudaFree(d_blkSums));
}
// Radix sort
// Dispatch the radix sort to either the Thrust reference or the hand-written
// device implementation, timing the whole call.
// NOTE(review): the flag is inverted relative to its name — useThrust == false
// runs the Thrust path and useThrust == true runs the custom device path
// (each printf matches the function actually called, and main() passes the
// flag accordingly, so behavior is consistent). Confirm before renaming or
// flipping the condition, as callers depend on the current semantics.
void sort(const uint32_t *in, int n, uint32_t *out, int nBits,
          bool useThrust = false, int *blockSizes = NULL) {
  GpuTimer timer;
  timer.Start();
  if (useThrust == false) {
    printf("\nRadix sort by thrust\n");
    sortByThrust(in, n, out, nBits);
  } else // use device
  {
    printf("\nRadix sort by device\n");
    sortByDevice(in, n, out, nBits, blockSizes);
  }
  timer.Stop();
  printf("Time: %.3f ms\n", timer.Elapsed());
}
// Query CUDA device 0 and print a short capability summary.
void printDeviceInfo() {
  cudaDeviceProp prop;
  CHECK(cudaGetDeviceProperties(&prop, 0));
  printf("**********GPU info**********\n");
  printf("Name: %s\n", prop.name);
  printf("Compute capability: %d.%d\n", prop.major, prop.minor);
  printf("Num SMs: %d\n", prop.multiProcessorCount);
  printf("Max num threads per SM: %d\n", prop.maxThreadsPerMultiProcessor);
  printf("Max num warps per SM: %d\n",
         prop.maxThreadsPerMultiProcessor / prop.warpSize);
  printf("GMEM: %zu byte\n", prop.totalGlobalMem);
  printf("SMEM per SM: %zu byte\n", prop.sharedMemPerMultiprocessor);
  printf("SMEM per block: %zu byte\n", prop.sharedMemPerBlock);
  printf("****************************\n");
}
// Driver: sorts a random array twice (Thrust reference + custom device sort)
// and compares the results. Optional argv: [nBits] [histBlockSize scanBlockSize].
int main(int argc, char **argv) {
  // PRINT OUT DEVICE INFO
  printDeviceInfo();
  // SET UP INPUT SIZE
  int n = (1 << 24) + 1;  // +1 makes n a non-power-of-two to exercise tail handling
  /* n = 17; */
  printf("\nInput size: %d\n", n);
  // ALLOCATE MEMORIES
  size_t bytes = n * sizeof(uint32_t);
  uint32_t *in = (uint32_t *)malloc(bytes);
  uint32_t *out = (uint32_t *)malloc(bytes);        // Device result
  uint32_t *correctOut = (uint32_t *)malloc(bytes); // Thrust result
  // SET UP INPUT DATA
  for (int i = 0; i < n; i++)
    in[i] = rand();
  /* in[i] = rand() % 16; */
  // SET UP NBITS
  int nBits = 4; // Default
  if (argc > 1)
    nBits = atoi(argv[1]);
  printf("\nNum bits per digit: %d\n", nBits);
  // DETERMINE BLOCK SIZES
  int blockSizes[2] = {512, 512}; // One for histogram, one for scan
  if (argc == 4) {
    blockSizes[0] = atoi(argv[2]);
    blockSizes[1] = atoi(argv[3]);
  }
  printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0],
         blockSizes[1]);
  // SORT BY THRUST
  // default flag (false) selects the Thrust path inside sort()
  sort(in, n, correctOut, nBits);
  // SORT BY DEVICE
  // flag=true selects the custom device path inside sort()
  sort(in, n, out, nBits, true, blockSizes);
  checkCorrectness(out, correctOut, n);
  // FREE MEMORIES
  free(in);
  free(out);
  free(correctOut);
  return EXIT_SUCCESS;
}
|
12,571 | //
//#include <cuda_runtime.h>
//#include "ray.cuh"
//#include "ray.h"
//#include <curand_kernel.h>
//#include "CUDAhelpers.h"
//
//const int CUDABLOCK = 16;
//
//static __global__ void raycast_trace_ray_kernel(
// rayCU* rays, CUDAreal3 *colours) {
//
//
// ShadeRec sr(world_ptr->hit_objects(ray));
//
// if (sr.hit_an_object) {
// sr.depth = depth;
// sr.ray = ray;
// if (sr.material_ptr == nullptr) sr.material_ptr = missing_mat;
// RGBColor L = world_ptr->background_color;
// if (noshade)
// L = sr.material_ptr->noshade(sr);
// else
// L = sr.material_ptr->shade(sr);
// if (sr.w->haze && sr.t > sr.w->haze_distance)
// {
// real damping = (sr.t - sr.w->haze_distance) * sr.w->haze_attenuation;
// damping = damping > 1.0 ? 1.0 : damping;
// return damping * sr.w->background_color + ((real)1.0 - damping) * L;
// }
//
// return L;
// }
// else
// return world_ptr->background_color;
//}
//
//
/*RGBColor RayCast::trace_ray(const Ray &ray, const int depth) const
{
if (depth > this->world_ptr->max_depth) return RGBColor();
ShadeRec sr(world_ptr->hit_objects(ray));
if (sr.hit_an_object) {
sr.depth = depth;
sr.ray = ray;
if (sr.material_ptr == nullptr) sr.material_ptr = missing_mat;
RGBColor L = world_ptr->background_color;
if (noshade)
L = sr.material_ptr->noshade(sr);
else
L = sr.material_ptr->shade(sr);
if (sr.w->haze && sr.t > sr.w->haze_distance)
{
real damping = (sr.t - sr.w->haze_distance) * sr.w->haze_attenuation;
damping = damping > 1.0 ? 1.0 : damping;
return damping * sr.w->background_color + ((real)1.0 - damping) * L;
}
return L;
}
else
return world_ptr->background_color;
}*/
|
12,572 | //
// Created by saleh on 7/23/18.
//
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <vector_types.h>
#define BLOCK_SIZE 256
// reduce sum over specific axis
// Reduce-sum of a row-major (dim0, dim1, dim2) tensor over axis 0 (TFF flags):
//   g_odata[j] = sum over i of g_idata[i*dim1*dim2 + j],  j in [0, dim1*dim2)
// Launch: 1D grid of 1D blocks; each thread accumulates 4 consecutive output
// elements. Other flag combinations are not handled (kernel is a no-op).
// Fix vs. original: bounds checks used '<=' where valid flat indices are
// [0, dim1*dim2), so a thread with idx4 == dim1*dim2 read elements belonging
// to the next slice and wrote one element past the end of g_odata.
__global__ void kernel_reduce_sum_3d_try01(const float * __restrict__ g_idata,
                                           float * __restrict__ g_odata,
                                           int dim0, int dim1, int dim2,
                                           int overaxis0, int overaxis1, int overaxis2) {
    // dim0, dim1, dim2 : TTT, TFF, FTF, FFT
    if (overaxis0 == 1 && overaxis1 == 0 && overaxis2 == 0) { // TFF
        // Staging buffer, one slot per handled element. NOTE: each thread only
        // reads back its own 4 slots, so this is per-thread staging, not an
        // inter-thread exchange.
        __shared__ float smem[BLOCK_SIZE * 4];
        unsigned int tid4 = 4 * threadIdx.x;                          // smem base for this thread
        unsigned int idx4 = 4 * (blockIdx.x * blockDim.x + threadIdx.x); // global base
        float tmpSum0 = 0;
        float tmpSum1 = 0;
        float tmpSum2 = 0;
        float tmpSum3 = 0;
        // elements in one dim0 slice == number of output elements
        int offset = dim1 * dim2;
        for (int i = 0; i < dim0; i++) {
            if (idx4 + 0 < offset) tmpSum0 += g_idata[idx4 + i * offset + 0];
            if (idx4 + 1 < offset) tmpSum1 += g_idata[idx4 + i * offset + 1];
            if (idx4 + 2 < offset) tmpSum2 += g_idata[idx4 + i * offset + 2];
            if (idx4 + 3 < offset) tmpSum3 += g_idata[idx4 + i * offset + 3];
        }
        if (idx4 + 0 < offset) smem[tid4 + 0] = tmpSum0;
        if (idx4 + 1 < offset) smem[tid4 + 1] = tmpSum1;
        if (idx4 + 2 < offset) smem[tid4 + 2] = tmpSum2;
        if (idx4 + 3 < offset) smem[tid4 + 3] = tmpSum3;
        __syncthreads();
        if (idx4 + 0 < offset) g_odata[idx4 + 0] = smem[tid4 + 0];
        if (idx4 + 1 < offset) g_odata[idx4 + 1] = smem[tid4 + 1];
        if (idx4 + 2 < offset) g_odata[idx4 + 2] = smem[tid4 + 2];
        if (idx4 + 3 < offset) g_odata[idx4 + 3] = smem[tid4 + 3];
    }
}
// Host launcher for kernel_reduce_sum_3d_try01. Each thread of the kernel
// covers 4 output elements, so the grid is sized ceil(outElems / (4*blockDim)).
// Fix vs. original: the grid was computed as ceil(outElems/blockDim)/4 with a
// second truncating division, which under-provisioned the grid (the tail of
// the output was never computed) whenever outElems was not a multiple of
// 4*BLOCK_SIZE.
void reduce_sum_3d_try01(
        float* g_idata,
        float* g_odata,
        int dim0,
        int dim1,
        int dim2,
        int overaxis0,
        int overaxis1,
        int overaxis2)
{
    dim3 block (BLOCK_SIZE, 1);
    const unsigned int perBlock = 4 * block.x;  // elements handled per block
    dim3 grid_overdim0 ((dim1*dim2 + perBlock - 1) / perBlock, 1);
    dim3 grid_overdim1 ((dim0*dim2 + perBlock - 1) / perBlock, 1);
    dim3 grid_overdim2 ((dim0*dim1 + perBlock - 1) / perBlock, 1);
    dim3 grid = (overaxis0==1) ? (grid_overdim0) : (overaxis1==1 ? grid_overdim1 : grid_overdim2);
    kernel_reduce_sum_3d_try01 <<< grid, block >>> (
            g_idata,
            g_odata,
            dim0, dim1, dim2,
            overaxis0, overaxis1, overaxis2);
}
// Reduce-sum of a row-major (dim0, dim1, dim2) tensor over a single axis,
// one thread per output element:
//   FTF (overaxis1 only): out[d0][d2] = sum over d1 of in[d0][d1][d2]
//   FFT (overaxis2 only): out[d0][d1] = sum over d2 of in[d0][d1][d2]
// Other flag combinations are silently ignored (kernel is a no-op).
// Fixes vs. original:
//  - removed the unconditional debug printf calls left in the hot loops;
//  - out-of-range threads are now rejected with a direct bound on the output
//    index (the old guard reused `src_index` after the loop, which is
//    uninitialized when the reduced dimension is 0, and used `<=` against the
//    element count, permitting a write one past the end);
//  - output coordinates are computed only for in-range threads, avoiding a
//    division by zero when the relevant dim is 0;
//  - dropped the shared-memory staging and __syncthreads: each thread only
//    ever read back its own slot, so the round-trip had no effect.
__global__ void kernel_reduce_sum_3d_try02(
        const float* __restrict__ g_idata,
        float* __restrict__ g_odata,
        int dim0,
        int dim1,
        int dim2,
        int overaxis0,
        int overaxis1,
        int overaxis2)
{
    if (overaxis0 == 0 && overaxis1 == 1 && overaxis2 == 0) { // FTF
        // Output is a (dim0 x dim2) row-major matrix.
        unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
        unsigned int nOut = (unsigned int)(dim0 * dim2);
        if (idx < nOut) {
            int thrd_d0 = idx / dim2;            // output row
            int thrd_d2 = idx - thrd_d0 * dim2;  // output column
            float tmpSum = 0;
            // walk down axis 1; consecutive threads touch consecutive d2
            // addresses, so global loads are coalesced
            for (int i = 0; i < dim1; i++)
                tmpSum += g_idata[thrd_d0 * dim1 * dim2 + i * dim2 + thrd_d2];
            g_odata[thrd_d0 * dim2 + thrd_d2] = tmpSum;
        }
    }
    else if (overaxis0 == 0 && overaxis1 == 0 && overaxis2 == 1) { // FFT
        // Output is a (dim0 x dim1) row-major matrix.
        unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
        unsigned int nOut = (unsigned int)(dim0 * dim1);
        if (idx < nOut) {
            int thrd_d0 = idx / dim1;
            int thrd_d1 = idx - thrd_d0 * dim1;
            float tmpSum = 0;
            // innermost axis: each thread reads a contiguous run of dim2 floats
            for (int i = 0; i < dim2; i++)
                tmpSum += g_idata[thrd_d0 * dim1 * dim2 + thrd_d1 * dim2 + i];
            g_odata[thrd_d0 * dim1 + thrd_d1] = tmpSum;
        }
    }
}
// Host launcher for kernel_reduce_sum_3d_try02: one thread per element of the
// reduced output, grid sized to cover the output of the requested axis.
void reduce_sum_3d_try02(
        float* g_idata,
        float* g_odata,
        int dim0,
        int dim1,
        int dim2,
        int overaxis0,
        int overaxis1,
        int overaxis2)
{
    dim3 block(BLOCK_SIZE, 1);
    // number of output elements depends on which axis is reduced away
    int outElems;
    if (overaxis0 == 1)
        outElems = dim1 * dim2;
    else if (overaxis1 == 1)
        outElems = dim0 * dim2;
    else
        outElems = dim0 * dim1;
    dim3 grid((outElems + block.x - 1) / block.x, 1);
    kernel_reduce_sum_3d_try02 <<< grid, block >>> (
            g_idata,
            g_odata,
            dim0, dim1, dim2,
            overaxis0, overaxis1, overaxis2);
}
// Reduce-sum of a 3D tensor over one axis, dispatching on the overaxis flags.
// WARNING: the launcher passes the dims reversed, so the `dim0` parameter here
// is the caller's dim2 and vice versa (see reduce_sum_3d_try03 below).
__global__ void kernel_reduce_sum_3d_try03(
        const float * __restrict__ g_idata,
        float * __restrict__ g_odata,
        const unsigned int dim0,
        const unsigned int dim1,
        const unsigned int dim2,
        const bool overaxis0,
        const bool overaxis1,
        const bool overaxis2)
{
    // WARNING: dim0 means dim2 and dim2 means dim0 (dims arrive reversed)
    __shared__ float sm[BLOCK_SIZE];
    if (overaxis2 && !overaxis1 && !overaxis0)
    {
        // Case 1 - sums in X-direction
        // each threadblock is responsible for a separate row sum
        unsigned int bidx = blockIdx.x;
        unsigned int tidx = threadIdx.x;
        sm[threadIdx.x] = 0;
        while (tidx < dim0)
        {
            sm[threadIdx.x] += g_idata[bidx*dim0+tidx];
            /*if(bidx==21){
                //dbg
                printf("thid: %04d\tg_index_to_read:%d\n",threadIdx.x,bidx*dim0+tidx);
            }*/
            tidx += blockDim.x;
        } // block-stride loop
        __syncthreads();
        // parallel reduction in shared memory
        // NOTE(review): this halving loop assumes blockDim.x is a power of
        // two — holds for the BLOCK_SIZE=256 launch below; confirm if reused.
        for (int i = blockDim.x>>1; i > 0; i>>=1)
        {
            if (threadIdx.x < i) sm[threadIdx.x] += sm[threadIdx.x + i];
            __syncthreads();
        }
        // thread 0 publishes the block's row sum
        if (!threadIdx.x) g_odata[bidx] = sm[0];
    }
    else if (!overaxis2 && overaxis1 && !overaxis0)
    {
        // Case 2 - sums in Y-direction
        // each thread is responsible for a separate Y-column sum
        unsigned int idx = threadIdx.x+blockDim.x*blockIdx.x;
        if (idx < (dim0*dim2))
        {
            unsigned int tidx = idx%dim0 + (idx/dim0)*(dim0*dim1); //indices over input tensor (begining of axis1 slices)
            float tsum = 0;
            // stride dim0 between consecutive axis-1 elements
            for (unsigned int i = 0; i < dim1; i++)
            {
                //printf("idx: %03d \t\t tidx: %03d\n",idx,tidx);
                tsum += g_idata[tidx];
                tidx += dim0;
            }
            g_odata[idx] = tsum;
        }
    }
    else if (!overaxis2 && !overaxis1 && overaxis0)
    {
        // Case 3 - sums in Z-direction
        // each thread is responsible for a separate Z-column sum
        unsigned int idx = threadIdx.x + blockDim.x*blockIdx.x;
        //printf("%d,%d,%d\n",dbg_blockid,dbg_thid,idx);
        if (idx < (dim0*dim1))
        {
            unsigned int tidx = idx;
            float tsum = 0;
            // stride dim0*dim1 (one full slice) between axis-2 elements
            for (int i = 0; i < dim2; i++)
            {
                //printf("%d,%d,%d,%d,%d\n",dbg_blockid,dbg_thid,idx,tidx,i);
                //printf("idx:%02d, tidx:%02d, i=%02d\n",idx,tidx,i);
                tsum += g_idata[tidx];
                tidx += dim0*dim1;
            }
            g_odata[idx] = tsum;
        }
    }
    else {
        // unsupported flag combination (e.g. multi-axis reductions)
        printf("reduce_sum: ERROR-NOTIMPLEMENTED\n");
    }
}
// Host launcher for kernel_reduce_sum_3d_try03.
// Grid choice: reducing over the innermost axis (overaxis2) uses one block per
// output row (dim0*dim1 blocks, block-wide reduction); the other single-axis
// cases use one thread per output element.
// Note the dims are deliberately passed to the kernel reversed (dim2, dim1,
// dim0) to match the kernel's internal x/y/z convention.
void reduce_sum_3d_try03(
    float* g_idata,
    float* g_odata,
    int dim0,
    int dim1,
    int dim2,
    bool overaxis0,
    bool overaxis1,
    bool overaxis2)
{
    dim3 block (BLOCK_SIZE, 1);
    dim3 grid_overdim0 ((dim1*dim2 + block.x - 1) / block.x, 1);
    dim3 grid_overdim1 ((dim0*dim2 + block.x - 1) / block.x, 1);
    dim3 grid_overdim2 (dim0*dim1 , 1);  // one block per reduced row
    dim3 grid = overaxis0 ? (grid_overdim0) : (overaxis1 ? grid_overdim1 : grid_overdim2);
    //printf("-------------------------------------------------------\n");
    //printf("KERNEL_GRID   : %d\n", grid.x);
    //printf("KERNEL_BLOCK  : %d\n", block.x);
    kernel_reduce_sum_3d_try03 <<<grid, block>>> (
            g_idata, g_odata,
            dim2, dim1, dim0,
            overaxis0, overaxis1, overaxis2);
}
12,573 | /**
* mnist.cu
*
* A demo of the kernel from classifier.cu.
*
* This code reads in the weights and biases for an input layer from
* the file example/weights_1 and a single hidden layer from example/weights_2
* that have been trained for MNIST digit classification.
*
* The input image is stored in example/input_1.
*
* The first row of the weight files are the biases of the layer.
*/
#include <iostream>
#include <string>
#include <fstream>
using namespace std;
/* The weights of the current layer*/
__device__ float d_weights[785*16];
/*
* Matrix multiply. Performs the operation c = ab', where b' is the transpose of b.
*/
/*
 * Matrix multiply helper: accumulates c += a * b', where b' is the transpose
 * of b. One (row, col) output element per thread; dimensions are int2 with .x
 * the shared (inner) dimension.
 * Fixes vs. original: the dimension check used bitwise `&` instead of logical
 * `&&` (a typo, though the result was the same for 0/1 operands), and the
 * function went on to multiply the mismatched matrices after printing the
 * error — it now bails out instead (the check is uniform across all threads,
 * so the early return does not diverge).
 * NOTE(review): c is accumulated into, never assigned — callers must zero the
 * output buffer first (cudaMalloc does not zero memory); verify call sites.
 */
__device__ void mmult(float *a, float *b, float *c, int2 adim, int2 bdim)
{
    int row = threadIdx.x + blockIdx.x*blockDim.x;
    int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(adim.x != bdim.x)
    {
        // report once, from the first thread only
        if(row == 0 && col == 0)
        {
            printf("Error: Incompatible matrix dimensions: [%dx%d] * [%dx%d]\n", adim.y, adim.x, bdim.x, bdim.y);
        }
        return;
    }
    if(row < bdim.y && col < adim.x)
    {
        for(int i = 0; i < adim.x; i++)
        {
            c[row*bdim.x + col] += a[i*adim.y + col] * b[row*adim.y + i];
        }
    }
}
// In-place ReLU over a dim.x-by-dim.y buffer (indexed column-major as
// mtx[col*dim.x + row]); one element per (row, col) thread.
__device__ void relu(float *mtx, int2 dim)
{
    int row = threadIdx.x + blockIdx.x*blockDim.x;
    int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row >= dim.x || col >= dim.y)
        return;
    float v = mtx[col*dim.x + row];
    // multiply by the comparison result: keeps positives, zeroes the rest
    mtx[col*dim.x + row] = v * (v > 0);
}
// One fully-connected layer: out += d_weights * in (bias folded into the
// weight matrix via the constant-1 input element), then in-place ReLU.
// NOTE(review): there is no grid-wide synchronization between mmult and relu,
// and the two index `out` with different layouts — relu in one block can run
// before mmult has finished in another block, which is a potential race.
// Confirm whether the launch configuration used by main() makes this safe.
__global__ void classify(float *in, float *out, int2 in_dim, int2 weight_dim)
{
    mmult(d_weights, in, out, weight_dim, in_dim);
    relu(out, make_int2(weight_dim.y, in_dim.x));
}
/*
 * Load trained weights/biases and one input image from the example/ files,
 * run the two-layer MNIST classifier on the GPU, and print the scores plus
 * the argmax prediction.
 * Fixes vs. original:
 *  - d_hidden was allocated with 17 BYTES instead of 17 floats, so the first
 *    classify launch wrote far past the end of the buffer;
 *  - the bias copy into d_hidden transferred 1 byte instead of sizeof(float);
 *  - d_hidden / d_out_data are now zeroed before use, since mmult accumulates
 *    with += and cudaMalloc does not zero memory;
 *  - the input file is closed and device/host buffers are released at exit.
 */
int main(int argc, char **argv)
{
    const int2 in_dim = make_int2(785, 1);                    // 784 pixels + bias term
    const int in_size = sizeof(float) * in_dim.x * in_dim.y;  // input buffer bytes
    const int2 out_dim = make_int2(1, 10);                    // one score per digit
    const int out_size = sizeof(float) * out_dim.x * out_dim.y;
    const dim3 grid_size(50,16,1);
    const dim3 block_size(16,16,1);
    float h_in_data[785];   // input data on host
    float *d_in_data;       // input data on device
    float h_hidden[17];     // bias + 16 hidden activations
    float *d_hidden;        // hidden layer activations on device
    float h_out_data[10];   // output scores on host
    float *d_out_data;      // output scores on device
    float *weights_1 = new float[785*16]();
    float *weights_2 = new float[17*10]();
    ifstream weightfile;
    weightfile.open("example/weights_1");
    int arrayIdx = 0;
    while(!weightfile.eof())
        weightfile >> weights_1[arrayIdx++];
    weightfile.close();
    weightfile.open("example/weights_2");
    arrayIdx = 0;
    while(!weightfile.eof())
        weightfile >> weights_2[arrayIdx++];
    weightfile.close();
    weightfile.open("example/input_1");
    arrayIdx = 0;
    h_in_data[arrayIdx++] = 1.0f;  // constant bias input
    while(!weightfile.eof())
        weightfile >> h_in_data[arrayIdx++];
    weightfile.close();
    //////////////////////////
    // First Layer
    //////////////////////////
    cudaMemcpyToSymbol(d_weights, weights_1, 785*16*sizeof(float));
    cudaMalloc(&d_in_data, in_size);
    cudaMemcpy(d_in_data, h_in_data, in_size, cudaMemcpyHostToDevice); // Give the GPU our input data.
    cudaMalloc(&d_hidden, 17 * sizeof(float));      // was 17 bytes: OOB writes
    cudaMemset(d_hidden, 0, 17 * sizeof(float));    // mmult accumulates with +=
    classify<<<grid_size, block_size>>>(d_in_data, &d_hidden[1], make_int2(785,1), make_int2(785,16));
    cudaDeviceSynchronize();
    //////////////////////////
    // Second Layer
    //////////////////////////
    cudaMemcpyToSymbol(d_weights, weights_2, 17*10*sizeof(float));
    h_hidden[0] = 1.0f;
    // copy the full float bias into slot 0 (was a 1-byte copy)
    cudaMemcpy(d_hidden, h_hidden, sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc(&d_out_data, out_size);
    cudaMemset(d_out_data, 0, out_size);            // mmult accumulates with +=
    classify<<<grid_size, block_size>>>(d_hidden, d_out_data, make_int2(17,1), make_int2(17,10));
    cudaDeviceSynchronize();
    cudaMemcpy(h_out_data, d_out_data, out_size, cudaMemcpyDeviceToHost); // Retrieve the neuron outputs.
    printf("Results: \n");
    for(int i = 0; i < out_dim.x; i++)
    {
        float max = -1;
        int argmax = 0;
        for(int j = 0; j < out_dim.y; j++)
        {
            float neuron = h_out_data[i*out_dim.y + j];
            if(neuron > max)
            {
                max = neuron;
                argmax = j;
            }
            printf("%f ", neuron);
        }
        printf("\n");
        printf("Predicted number: %d\n\n", argmax);
    }
    // release device and host buffers
    cudaFree(d_in_data);
    cudaFree(d_hidden);
    cudaFree(d_out_data);
    delete[] weights_1;
    delete[] weights_2;
    return 0;
}
|
12,574 | #include<stdio.h>
#include<ctime>
#include<cmath>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#define THREADS 1024
// Zero-fill the padded tail [SIZE, arraySize) so the bitonic network operates
// on a power-of-two-length array. Launched over a 3D grid of 1D blocks.
__global__ void zeroPad(float *a, int arraySize, int SIZE)
{
    // flatten the 3D grid coordinates into a linear block id
    int linearBlock = gridDim.x * gridDim.y * blockIdx.z
                    + gridDim.x * blockIdx.y
                    + blockIdx.x;
    int tid = linearBlock * blockDim.x + threadIdx.x;
    if (tid < SIZE || tid >= arraySize)
        return;
    a[tid] = 0;
}
// One compare-exchange pass of the bitonic sorting network. `step` fixes the
// direction of each subsequence; `stage` fixes the partner distance. Only the
// lower-indexed thread of each pair (within the array bounds) performs the
// exchange, so every pair is touched by exactly one thread.
__global__ void bitonic_sort_step(float *dev_values, int stage, int step, int newSize)
{
    // flatten the 3D grid coordinates into a linear block id
    int linearBlock = gridDim.x * gridDim.y * blockIdx.z
                    + gridDim.x * blockIdx.y
                    + blockIdx.x;
    unsigned int self = linearBlock * blockDim.x + threadIdx.x;
    unsigned int partner = self ^ stage;
    // act only from the lower index of the pair, and only inside the array
    if (partner <= self || self >= newSize)
        return;
    float mine = dev_values[self];
    float theirs = dev_values[partner];
    // (self & step) == 0 -> this subsequence sorts ascending, else descending
    bool ascending = ((self & step) == 0);
    bool misordered = ascending ? (mine > theirs) : (mine < theirs);
    if (misordered) {
        dev_values[self] = theirs;
        dev_values[partner] = mine;
    }
}
// Sort `values` (length SIZE) ascending on the GPU with a bitonic network.
// newSize is SIZE rounded up to a power of two; the extra tail slots are
// zero-filled before sorting. Returns the kernel execution time in ms, or -1
// if a kernel failed.
// NOTE(review): the padded zeros end up in FRONT of the data only when all
// inputs are non-negative; the tail copy (dev_values + newSize - SIZE) relies
// on that — confirm inputs cannot be negative.
// Fix vs. original: when the required block count exceeded 65535, grid2 (and
// grid3) were set to the FULL block count instead of the remainder, which
// both over-launched massively and could itself exceed the 65535
// per-dimension grid limit, failing the launch for large arrays.
float bitonic_sort(float *values, int newSize, int SIZE)
{
    float *dev_values,tgpu = -1;
    clock_t start, end;
    cudaError_t status;
    status = cudaMalloc((void**) &dev_values, newSize * sizeof(float));
    status = cudaMemcpy(dev_values, values, SIZE * sizeof(float), cudaMemcpyHostToDevice);
    dim3 dimBlock(THREADS,1);
    // fold the required block count across the 3 grid dims (each capped at 65535)
    int totalBlocks = ceil((float)newSize/(float)THREADS);
    int grid1 = totalBlocks;
    int grid2 = 1;
    int grid3 = 1;
    if(grid1>65535)
    {
        grid2 = ceil((float)totalBlocks/65535.0f);
        grid1 = 65535;
    }
    if(grid2>65535)
    {
        grid3 = ceil((float)grid2/65535.0f);
        grid2 = 65535;
    }
    dim3 dimGrid(grid1,grid2,grid3);
    int stage, step;
    start = clock();
    //pad extra zeros when the actual array size is not a power of 2
    if(newSize!=SIZE)
    {
        zeroPad<<<dimGrid, dimBlock>>>(dev_values,newSize,SIZE);
        status = cudaDeviceSynchronize();
    }
    //This loop computes each step of the sorting network
    for (step = 2; step <= newSize; step <<= 1) {
        //This computes each stage in a step
        for (stage=step>>1; stage>0; stage=stage>>1) {
            bitonic_sort_step<<<dimGrid, dimBlock>>>(dev_values, stage, step, newSize);
            status = cudaDeviceSynchronize();
        }
    }
    end = clock();
    // Check for errors and exit on any failures
    status = cudaGetLastError();
    if (status != cudaSuccess)
    {
        std::cout << "\n Kernel failed: " << cudaGetErrorString(status) <<
            std::endl;
    }
    //if everything is successfully cpy back the results
    else{
        // if the actual array size is a power of 2 copy the entire array
        if(newSize==SIZE)
            cudaMemcpy(values, dev_values, newSize * sizeof(float), cudaMemcpyDeviceToHost);
        //else discard the zeros that are padded and copy the actual sorted entries of the array
        else
            cudaMemcpy(values, dev_values+(newSize-SIZE), SIZE * sizeof(float), cudaMemcpyDeviceToHost);
        tgpu = ((float)((end - start)*1000))/((float)(CLOCKS_PER_SEC));
    }
    cudaFree(dev_values);
    // Returns the time for which the actual computation was being executed on the device
    return tgpu;
}
12,575 | /*
For DIRECTED GRAPH
*/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <string>
#include <algorithm>
#define MAX_NODE 100000000
#define DEBUG 0
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call (file/line plus the runtime's error string) and,
// unless abort is disabled, terminate with the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
using namespace std;
// Adjacency-list graph node: an id plus two parallel vectors where
// weights[i] is the cost of the edge to Edges[i].
class Node{
public:
    unsigned int val;                    // node identifier
    std::vector<unsigned int> weights;   // edge costs, parallel to Edges
    std::vector<Node*> Edges;            // outgoing neighbours
    // construct a node with the given id
    Node(int val) { this->val = val; }
    // append an outgoing edge to v with weight w
    void addEdge(Node* v, unsigned int w){
        Edges.push_back(v);
        weights.push_back(w);
    }
};
void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c);
void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph,
int* diffOff,int* diffEdges,unsigned int* diffWeight );
void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int& del_size);
void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int E,
int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size,
int* mOffset,int* mEdges,unsigned int* mWeight);
// __device__ volatile int Cx[MAX_NODE];
__device__ volatile int PQ[MAX_NODE];
//K in parallel
// K threads in parallel: thread id owns the id-th priority queue (a binary
// min-heap on Cx, stored in the global PQ array at offset id*ceil(N/K)).
// Pops the minimum from each non-empty queue, sifts the heap back down, marks
// the node closed in openList, and appends it to the shared expandNodes list.
__global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,int* Cx,int* openList,int N,int K){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id<K && PQ_size[id]>0){
        //extract min from PQ
        int front = id* ( (N+K-1)/K );  // start of this thread's heap segment
        int node = PQ[front];
        // restructure the heap: move the last element to the root, then sift down
        PQ[front]=PQ[front+PQ_size[id]-1];
        PQ_size[id]-=1;
        int pqIndex = 0;
        while(2*pqIndex+1 < PQ_size[id]){
            // only a left child exists
            if(2*pqIndex+2 >= PQ_size[id]){
                if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
                    int swap = PQ[front + 2*pqIndex+1];
                    PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
                    PQ[front + pqIndex] = swap;
                    pqIndex = 2*pqIndex+1;
                }
                else
                    break;
            }
            else{
                // both children exist: swap with the smaller offending child
                if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
                    int swap = PQ[front + 2*pqIndex+1];
                    PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
                    PQ[front + pqIndex] = swap;
                    pqIndex = 2*pqIndex+1;
                }
                else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
                    int swap = PQ[front + 2*pqIndex+2];
                    PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
                    PQ[front + pqIndex] = swap;
                    pqIndex = 2*pqIndex+2;
                }
                else{
                    break;
                }
            }
        }
        //removed from openList
        openList[node] = -1;
        //added to expand next (atomic append to the shared list)
        int len = atomicAdd(expandNodes_size,1);
        expandNodes[len]=node;
    }
}
//for K in parallel
// One thread per node in expandNodes: relax every outgoing edge of the node
// (CSR arrays off/edge/W plus, when flagDiff is set, a second "diff" CSR of
// inserted edges). Cx holds g+h costs, Hx the heuristic; a per-node spinlock
// (atomicCAS on lock[]) guards the compare-and-update of Cx/parent. Nodes
// whose cost improved and are not in any queue are flagged in nVFlag for the
// subsequent insertPQ pass; reaching dest sets *flagfound.
// NOTE(review): __syncthreads() is called inside the data-dependent spinlock
// loop — threads of a block can disagree on how many iterations they run, so
// the barrier sits in divergent control flow, which is undefined behaviour
// per the CUDA programming guide. It appears to act here as a backoff/fence;
// confirm on the target architecture before relying on it.
__global__ void A_star_expand(int* off,int* edge,unsigned int* W,int* Hx,int* parent,volatile int* Cx,
        int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
        int N,int E, int K,int dest,int* nVFlag,int* PQ_size,
        int flagDiff,int* diff_off,int* diff_edge,unsigned int* diff_weight,int dE ){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id< *expandNodes_size ){
        int node = expandNodes[id];
        //reach dest
        if(node == dest){
            atomicOr(flagfound,1);
        }
        // expand: CSR row [off[node], off[node+1]) — last row ends at E
        int start = off[node];
        int end = E;
        if(node!=N-1)
            end = off[node+1];
        while(start < end){
            int child = edge[start];
            //deleted edges are encoded as negative ids
            if(child<0){
                start++;
                continue;
            }
            //array L initialized with 0
            //get the lock for child to update C(x)
            //loop till acquire the lock
            bool leaveLoop = false;
            while(leaveLoop==false){
                if(atomicCAS(&lock[child],0,1)==0){
                    //critical section: relax edge node->child
                    // Cx - Hx recovers g(node); compare against child's current f
                    if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
                        Cx[child]  = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
                        __threadfence();  // publish Cx before parent update
                        parent[child] = node;
                        if(openList[child]==-1){
                            nVFlag[child]=1;
                            //add only once
                        }
                    }
                    //end critical section
                    leaveLoop = true;
                    atomicCAS(&lock[child],1,0);  // release the lock
                }
                __syncthreads();  // see NOTE(review) above
            }
            start++;
        }
        //diff expand: same relaxation over the inserted-edges CSR
        if(flagDiff){
            start = diff_off[node];
            end = dE;
            if(node!=N-1)
                end = diff_off[node+1];
            while(start<end){
                int child = diff_edge[start];
                //deleted edges are encoded as negative ids
                if(child<0){
                    start++;
                    continue;
                }
                //array L initialized with 0
                //get the lock for child to update C(x)
                bool leaveLoop = false;
                while(!leaveLoop){
                    if(atomicCAS(&lock[child],0,1)==0){
                        //critical section
                        if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
                            Cx[child]  = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
                            __threadfence();
                            parent[child] = node;
                            if(openList[child]==-1){
                                nVFlag[child]=1;
                                //add only once
                            }
                        }
                        //end critical section
                        leaveLoop = true;
                        atomicCAS(&lock[child],1,0);
                    }
                    __syncthreads();  // see NOTE(review) above
                }
                start++;
            }
        }
        //end diff
    }//end
}
//K in parallel -- O(N)
// K threads in parallel, O(N) each: restore the min-heap property of each
// thread's PQ segment after Cx values were lowered by A_star_expand. For every
// position that violates the property against a child, the smaller child is
// bubbled up toward the root.
__global__ void keepHeapPQ(int* PQ_size,int* Cx,int N,int K){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id < K && PQ_size[id] > 0){
        int front  = id*( (N+K-1)/K );  // start of this thread's heap segment
        int size = PQ_size[id];
        for(int i=front;i<front+size;i++){
            // both children inside the heap
            if(2*i+2 < front+size){
                int cost = Cx[PQ[i]];
                int costLeft = Cx[PQ[2*i+1]];
                int costRight = Cx[PQ[2*i+2]];
                if( cost > costLeft  ||  cost > costRight  ){
                    // pick the cheaper child and sift it up to its place
                    int index ;
                    if(costLeft <= costRight)
                        index = 2*i+1;
                    else
                        index = 2*i+2;
                    while(index > front){
                        if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
                            int swap = PQ[index];
                            PQ[index] = PQ[(index-1)/2];
                            PQ[(index-1)/2] = swap;
                            index = (index-1)/2;
                        }
                        else
                            break;
                    }
                }
            }
            // only the left child exists
            else if(2*i+1 < front+size){
                if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
                    int index = 2*i+1;
                    while(index > front){
                        if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
                            int swap = PQ[index];
                            PQ[index] = PQ[(index-1)/2];
                            PQ[(index-1)/2] = swap;
                            index = (index-1)/2;
                        }
                        else
                            break;
                    }
                }
            }
        }
    }
}
//N threads
// N threads: compact the flagged nodes — every id with nextFlag[id]==1 claims
// a slot in nextV via atomicAdd on the shared counter nvSize.
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id >= N)
        return;
    if(nextFlag[id] != 1)
        return;
    int slot = atomicAdd(nvSize,1);
    nextV[slot] = id;
}
//for K in parallel
// K threads in parallel: distribute the *nVsize newly discovered nodes in
// nextV across the K priority queues round-robin (thread id takes entries
// id, id+K, id+2K, ...). Each insertion appends to the thread's heap segment
// and sifts up on Cx; openList records which queue now owns the node.
__global__ void insertPQ(int* PQS,int* nextV,int* nVsize,int* Cx,int K,int N,int* openList){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id < K){
        int front = id*( (N+K-1)/K );  // start of this thread's heap segment
        int i = id;
        while(i<*nVsize){
            //if not already present in some queue, skip
            if(openList[nextV[i]]!=-1){
                i+=K;
                continue;
            }
            PQ[front+PQS[id]]= nextV[i];
            PQS[id]+=1;
            //add in openList: remember which queue holds the node
            openList[nextV[i]] = id;
            if(PQS[id]>1){
                // sift the new element up to restore the min-heap on Cx
                int index = PQS[id]-1;
                while(index>0){
                    if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
                        int swap = PQ[front+index];
                        PQ[front+index]=PQ[front+ (index-1)/2];
                        PQ[front+ (index-1)/2] = swap;
                        index = (index-1)/2;
                    }
                    else
                        break;
                }
            }
            i += K;
        }
    }
}
//for K in parallel
// K threads in parallel: if any queue's current minimum is still cheaper than
// the destination's cost, clear flagEnd so the A* loop keeps running.
__global__ void checkMIN(int* PQ_size,int* flagEnd,int* Cx,int dest,int N,int K){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id >= K || PQ_size[id] <= 0)
        return;
    // heap root of this thread's segment is its minimum
    int head = PQ[id * ((N+K-1)/K)];
    //check if atleast one min, dont end the a*
    if(Cx[head] < Cx[dest])
        atomicAnd(flagEnd,0);
}
// Single-thread kernel: read the destination's cost into *val so the host can
// copy it back.
__global__ void getCx(int* Cx,int dest,int* val){
    if(blockIdx.x*blockDim.x + threadIdx.x == 0)
        *val = Cx[dest];
}
int main(){
//the K PQ
printf("number of pq:");
int K ;
scanf("%d",&K);
printf("source,destination:");
int startNode,endNode;
scanf("%d %d",&startNode,&endNode);
int num_paths;
printf("number of paths:");
scanf("%d",&num_paths);
FILE* fgraph = fopen("graph.txt","r");
int N,E;
fscanf(fgraph,"%d %d\n",&N,&E);
int* H_offset = (int*)malloc(sizeof(int)*N);
int* H_edges = (int*)malloc(sizeof(int)*E);
unsigned int* H_weight = (unsigned int*)malloc(sizeof(unsigned int)*E);
int* H_hx = (int*)malloc(sizeof(int)*N);
int* H_cx = (int*)malloc(sizeof(int)*N);
int* H_parent = (int*)malloc(sizeof(int)*N);
int* H_parent_old = (int*)malloc(sizeof(int)*N);
int* H_PQ = (int*)malloc(sizeof(int)*N);
int* H_openList = (int*)malloc(sizeof(int)*N);
int* H_PQ_size = (int*)malloc(sizeof(int)*K);
//for cost of endNode
int* H_dest_cost = (int*)malloc(sizeof(int));
memset(H_PQ_size,0,sizeof(int)*K);
memset(H_openList,-1,sizeof(int)*N);
//init cx
for(int i=0;i<N;i++){
H_cx[i]=INT_MAX;
H_parent[i]=-1;
}
for(int i=0;i<E;i++){
fscanf(fgraph,"%d",&H_edges[i]);
}
for(int i=0;i<N;i++){
fscanf(fgraph,"%d",&H_offset[i]);
}
for(int i=0;i<E;i++){
fscanf(fgraph,"%u",&H_weight[i]);
}
FILE* fhx = fopen("Hx.txt","r");
for(int i=0;i<N;i++){
int temp;
fscanf(fhx,"%d",&temp);
if(temp!=-1)
H_hx[i]= temp;
else
H_hx[i] = 0; //to change
}
fclose(fgraph);
fclose(fhx);
printf("[INFO] completed taking input\n");
//init Host var
int* H_flagEnd = (int*)malloc(sizeof(int));
int* H_flagfound = (int*)malloc(sizeof(int));
int* H_a0 = (int*)malloc(sizeof(int));
int* H_nV_size = (int*)malloc(sizeof(int));
int* H_nV = (int*)malloc(sizeof(int)*N);
    //required because multiple threads may try to add the same node concurrently
int* H_nVFlag = (int*)malloc(sizeof(int)*N);
memset(H_nVFlag,-1,sizeof(int)*N);
*H_flagEnd = 0;
*H_flagfound = 0;
*H_a0 = 0;
//insert startNode in PQ[0]
H_cx[startNode]=H_hx[startNode];
H_PQ[0]=startNode;
H_PQ_size[0]=1;
H_openList[startNode]=0;
//create events to record runtime
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//graph struture
int* D_offset;
int* D_edges ;
unsigned int* D_weight;
int* D_hx;
int* D_parent;
    //for reading the ancestor, to avoid taking a lock for write-after-read.
int* D_parent_old;
//Priority queue size
int* D_PQ_size;
//CX
int* D_Cx;
//flag if in openList(contains which PQ)
int* D_openList;
//lock for nodes
int* D_lock;
//Diff structure
int* D_diff_edges;
int* D_diff_offset;
unsigned int* D_diff_weight;
//next nodes flag
int* D_nVFlag;
//next nodes array to insert PQ
int* D_nV;
int* D_nV_size;
//nodes to be expanded ( extracted from PQ )
int* D_expandNodes;
int* D_expandNodes_size;
//flag to end while loop and found the destination
int* D_flagEnd;
int* D_flagfound;
//cost of endNode
int* D_dest_cost;
gpuErrchk ( cudaMalloc(&D_offset,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_edges,sizeof(int)*E) );
gpuErrchk ( cudaMalloc(&D_weight,sizeof(unsigned int)*E) );
gpuErrchk ( cudaMalloc(&D_hx,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_parent,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_parent_old,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_Cx,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_PQ_size,sizeof(int)*K) );
gpuErrchk ( cudaMalloc(&D_openList,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_lock,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_dest_cost,sizeof(int)) );
//for next set of vertices to add in PQ
gpuErrchk ( cudaMalloc(&D_nV,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_nV_size,sizeof(int)) );
gpuErrchk ( cudaMalloc(&D_nVFlag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( cudaMalloc(&D_expandNodes,sizeof(int)*K) ); //changed to K
gpuErrchk ( cudaMalloc(&D_expandNodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( cudaMalloc(&D_flagEnd,sizeof(int)) );
gpuErrchk( cudaMalloc(&D_flagfound,sizeof(int)) );
gpuErrchk ( cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_weight,H_weight,sizeof(unsigned int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_hx,H_hx,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_openList,H_openList,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_Cx,H_cx,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_flagEnd,H_flagEnd,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_flagfound,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemset(D_lock,0,sizeof(int)*N) );
int flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
int numThreads = 512;
int numBlocks = (K+numThreads-1)/numThreads;
int N_numBlocks = (N+numThreads-1)/numThreads;
if(DEBUG)
printf("[INFO] A* started\n");
cudaEventRecord(start);
//DO A* initailly on whole graph
while(*H_flagEnd==0 && flag_PQ_not_empty==1){
//extract min
extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_Cx,D_openList,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_weight,D_hx,D_parent,D_Cx,
D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList,
N,E,K,endNode,D_nVFlag,D_PQ_size,
false,D_diff_offset,D_diff_edges,D_diff_weight,0);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_Cx,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
//gen from flag D_nV
//for N in parallel
setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,D_Cx,K,N,D_openList);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
//cpy flagend and flagEmpty
gpuErrchk( cudaMemcpy(H_flagfound,D_flagfound, sizeof(int),cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) );
//reset nVFlag
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
//reset next insert array
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//check for mins
if( *H_flagfound==1 && flag_PQ_not_empty==1){
//end
gpuErrchk( cudaMemcpy(D_flagEnd,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) );
checkMIN<<< numBlocks,numThreads >>>(D_PQ_size,D_flagEnd,D_Cx,endNode,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
gpuErrchk( cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost) );
}
}
getCx<<<1,1>>>(D_Cx,endNode,D_dest_cost);
gpuErrchk( cudaMemcpy(H_dest_cost,D_dest_cost, sizeof(int),cudaMemcpyDeviceToHost) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("RUN TIME: %f\n",milliseconds);
gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
printf("[OUT] Cost: %d\n",*H_dest_cost);
printf("[OUT] Path(in reverse): ");
if(*H_dest_cost!=INT_MAX){
int p = endNode;
while(H_parent[p]!=-1){
printf("%d ",p);
p = H_parent[p];
}
printf("%d\n",p);
}
else{
printf("not found\n");
}
// FILE* fdiff = fopen("Updates.txt","r");
// int line;
int update_count = 0;
for(int p=0;p<num_paths;p++){
unordered_map<unsigned int,Node*> Graph;
unordered_map<unsigned int,Node*> rev_Graph;
vector<pair<int,int>>deleted_edges;
int insertEdge=0, delEdge=0;
// for(int i=0;i<line;i++){
// int flag;
// int u,v;
// unsigned int w;
// fscanf(fdiff,"%d %d %d %u\n",&flag,&u,&v,&w);
// if(flag==1){
// insertDiff(Graph,u,v,w);
// insertDiff(rev_Graph,v,u,w);
// insertEdge++;
// }
// else if(flag==0){
// deleted_edges.push_back(pair<int,int>(u,v));
// }
// }
if(H_parent[endNode]==-1)
break;
deleted_edges.push_back(pair<int,int>(H_parent[endNode],endNode));
// insertEdge is insertion size
//for diff
int* H_diff_edges = (int*)malloc(sizeof(int)*insertEdge);
int* H_diff_offset = (int*)malloc(sizeof(int)*N);
unsigned int* H_diff_weight = (unsigned int*)malloc(sizeof(unsigned int)*insertEdge);
//reset offset to 0 ..ie no nodes
memset(H_diff_offset,0,sizeof(int)*N);
if(1)
printf("[INFO](%d) insertion:%d\n",update_count,insertEdge);
createDiffGraph(N,Graph,H_diff_offset,H_diff_edges,H_diff_weight);
//start computation for deletion
for(int j=0;j<deleted_edges.size();j++){
int u,v;
u= deleted_edges[j].first;
v= deleted_edges[j].second;
//if deleted adds to delEdge
removeDelEdges(u,v,H_offset,H_edges,N,E,delEdge);
}
//merge graph
int* H_offset_new,*H_edges_new;
unsigned int* H_weight_new;
int E_new = E + insertEdge - delEdge;
H_offset_new = (int*)malloc(sizeof(int)*N);
H_edges_new = (int*)malloc(sizeof(int)*E_new);
H_weight_new = (unsigned int*)malloc(sizeof(unsigned int)*E_new);
mergeDiff(H_offset,H_edges,H_weight,N,E,
H_diff_offset,H_diff_edges,H_diff_weight,insertEdge,delEdge,
H_offset_new,H_edges_new,H_weight_new);
//free pointer
free(H_offset);
free(H_edges);
free(H_weight);
free(H_diff_offset);
free(H_diff_edges);
free(H_diff_weight);
H_offset = H_offset_new;
H_edges = H_edges_new;
H_weight = H_weight_new;
//cudaFree and cpy
cudaFree(D_edges);
cudaFree(D_weight);
gpuErrchk ( cudaMalloc(&D_edges,sizeof(int)*E_new) );
gpuErrchk ( cudaMalloc(&D_weight,sizeof(unsigned int)*E_new) );
gpuErrchk ( cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E_new,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_weight,H_weight,sizeof(unsigned int)*E_new,cudaMemcpyHostToDevice) );
//change E
E = E_new;
//reset everything
memset(H_parent,-1,sizeof(int)*N);
memset(H_openList,-1,sizeof(int)*N);
memset(H_PQ_size,0,sizeof(int)*K);
H_cx[startNode]=H_hx[startNode];
H_PQ[0]=startNode;
H_PQ_size[0]=1;
H_openList[startNode]=0;
*H_flagEnd = 0;
*H_flagfound = 0;
*H_a0 = 0;
gpuErrchk ( cudaMemcpy(D_Cx,H_cx,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_openList,H_openList,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_flagEnd,H_flagEnd,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_flagfound,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
cudaEventRecord(start);
//DO A* initailly on whole graph
while(*H_flagEnd==0 && flag_PQ_not_empty==1){
//extract min
extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_Cx,D_openList,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_weight,D_hx,D_parent,D_Cx,
D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList,
N,E,K,endNode,D_nVFlag,D_PQ_size,
false,D_diff_offset,D_diff_edges,D_diff_weight,0);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_Cx,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
//gen from flag D_nV
//for N in parallel
setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,D_Cx,K,N,D_openList);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
//cpy flagend and flagEmpty
gpuErrchk( cudaMemcpy(H_flagfound,D_flagfound, sizeof(int),cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) );
//reset nVFlag
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
//reset next insert array
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//check for mins
if( *H_flagfound==1 && flag_PQ_not_empty==1){
//end
gpuErrchk( cudaMemcpy(D_flagEnd,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) );
checkMIN<<< numBlocks,numThreads >>>(D_PQ_size,D_flagEnd,D_Cx,endNode,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
gpuErrchk( cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost) );
}
}
getCx<<<1,1>>>(D_Cx,endNode,D_dest_cost);
gpuErrchk( cudaMemcpy(H_dest_cost,D_dest_cost, sizeof(int),cudaMemcpyDeviceToHost) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float mt = 0;
cudaEventElapsedTime(&mt, start, stop);
printf("RUN TIME: %f\n",mt);
milliseconds+=mt;
gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
printf("[OUT] Cost: %d\n",*H_dest_cost);
printf("[OUT] Path(in reverse): ");
if(*H_dest_cost!=INT_MAX){
int p = endNode;
while(H_parent[p]!=-1){
printf("%d ",p);
p = H_parent[p];
}
printf("%d\n",p);
}
else{
printf("not found\n");
}
update_count++;
}
printf("[INFO] total run time %d: %f\n",update_count,milliseconds);
}
void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c){
unordered_map<unsigned int,Node*>:: iterator itr;
itr = Graph.find(a);
if(itr!=Graph.end()){
Node* n = itr->second;
unordered_map<unsigned int,Node*>:: iterator it;
it = Graph.find(b);
if(it!=Graph.end()){
Node* v = it->second;
n->addEdge(v,c);
}
else{
Node* v = new Node(b);
n->addEdge(v,c);
Graph.insert(pair<unsigned int,Node*>(b,v));
}
}
else{
Node* n =new Node(a);
Graph.insert(pair<unsigned int,Node*>(a,n));
unordered_map<unsigned int,Node*>:: iterator it;
it = Graph.find(b);
if(it!=Graph.end()){
Node* v = it->second;
n->addEdge(v,c);
}
else{
Node* v = new Node(b);
n->addEdge(v,c);
Graph.insert(pair<unsigned int,Node*>(b,v));
}
}
}
/*
 * Flatten the diff graph (map node-id -> Node*) into CSR-style arrays:
 * diffOff[i] is the start of node i's adjacency run in diffEdges/diffWeight.
 * Nodes absent from the map contribute zero edges.
 * Note: diffOff has only N entries, so the last node's run is bounded by
 * the total inserted-edge count rather than an (N+1)-th offset.
 */
void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph,
    int* diffOff,int* diffEdges,unsigned int* diffWeight ){

    int edgeCursor = 0;      // next free slot in diffEdges
    int weightCursor = 0;    // next free slot in diffWeight
    int offCursor = 0;       // next slot to fill in diffOff

    diffOff[offCursor++] = 0;
    for(int i=0;i<N;i++){
        unordered_map<unsigned int,Node*>::iterator itr = Graph.find(i);
        if(itr!=Graph.end()){
            Node* node = itr->second;
            // copy this node's adjacency list and the parallel weight list
            for(int j=0;j<node->Edges.size();j++)
                diffEdges[edgeCursor++] = node->Edges[j]->val;
            for(int j=0;j<node->weights.size();j++)
                diffWeight[weightCursor++] = node->weights[j];
        }
        // record the next node's starting offset — identical for present
        // and absent nodes, so it lives outside the branch
        if(offCursor < N)
            diffOff[offCursor++] = edgeCursor;
    }
}
/*
 * Delete edge u -> v from the CSR arrays by overwriting the first
 * matching entry of u's adjacency list with -1 (a tombstone that
 * mergeDiff compacts away later). del_size is incremented only when a
 * matching edge was actually found.
 */
void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int& del_size){
    int first = offset[u];
    // u's list ends at the next node's offset, or at E for the last node
    int last = (u == N-1) ? E : offset[u+1];

    for(int pos = first; pos < last; pos++){
        if(edges[pos] == v){
            edges[pos] = -1;
            del_size++;
            return;
        }
    }
}
/*
 * Merge the base CSR graph with the diff CSR graph into one CSR output
 * (mOffset/mEdges/mWeight), dropping tombstoned edges (value -1) from
 * both inputs. All offset arrays carry N entries, so the last node's
 * runs are bounded by E (base) and insert_size (diff) respectively.
 * Emits a diagnostic if the write cursor ever exceeds the expected
 * merged edge count E + insert_size - del_size.
 */
void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int E,
    int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size,
    int* mOffset,int* mEdges,unsigned int* mWeight){

    const int mergedE = E + insert_size - del_size;  // expected final edge count
    int out = 0;                                     // write cursor into mEdges/mWeight
    mOffset[0] = 0;

    for(int node = 0; node < N; node++){
        // surviving edges of `node` from the base graph
        int lo = offset[node];
        int hi = (node == N-1) ? E : offset[node+1];
        for(int e = lo; e < hi; e++){
            if(edges[e] != -1){
                mEdges[out]  = edges[e];
                mWeight[out] = weight[e];
                out++;
            }
        }

        // surviving edges of `node` from the diff graph
        lo = diff_offset[node];
        hi = (node == N-1) ? insert_size : diff_offset[node+1];
        for(int e = lo; e < hi; e++){
            if(diff_edges[e] != -1){
                mEdges[out]  = diff_edges[e];
                mWeight[out] = diff_weight[e];
                out++;
            }
        }

        if(out > mergedE){
            printf("ERROR: size %d::%d\n",mergedE,out);
        }
        if(node != N-1)
            mOffset[node+1] = out;
    }
}
|
12,576 | #include "includes.h"
// Per-thread partial sums over d_Input using a grid-stride loop:
// thread t accumulates elements t, t+stride, t+2*stride, ... and writes
// its partial into d_Result[t] (one slot per launched thread; the final
// reduction over d_Result happens elsewhere).
__global__ static void reduceKernel(float *d_Result, float *d_Input, int N){
    const int globalId = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride   = gridDim.x * blockDim.x;

    float acc = 0.0f;
    for(int i = globalId; i < N; i += stride){
        acc += d_Input[i];
    }
    d_Result[globalId] = acc;
} |
12,577 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel: runs a fixed chain of
// single-precision arithmetic on the scalar arguments and prints the
// final value of `comp`. The exact literals and evaluation order ARE the
// test payload — do not reorder or algebraically simplify them.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5) {
comp += (-1.0050E-37f + var_2 + +0.0f - expf(-1.5145E-42f));
// var_1 controls the trip count; each iteration overwrites `comp` before
// accumulating, so only the last iteration's value survives the loop.
for (int i=0; i < var_1; ++i) {
comp = -1.4568E-43f + -1.4872E34f;
float tmp_1 = +1.9446E-42f * -1.6293E36f * var_3;
comp += tmp_1 + +1.6737E36f + (var_4 - (-1.0178E8f / (-1.8077E21f * var_5)));
}
// %.17g prints full round-trip precision (comp is promoted to double
// through printf's varargs).
printf("%.17g\n", comp);
}
/*
 * Allocate a float array filled with the value v.
 *
 * The element count defaults to 10 (the original hard-coded size), but
 * callers may now request any size. Returns NULL if allocation fails;
 * the caller owns the buffer and must free() it.
 */
float* initPointer(float v, int count = 10) {
  float *ret = (float*) malloc(sizeof(float) * count);
  if (ret == NULL)          // the original dereferenced a failed malloc
    return NULL;
  for (int i = 0; i < count; ++i)
    ret[i] = v;
  return ret;
}
int main(int argc, char** argv) {
  /* Program variables: six kernel arguments taken from the command line. */
  // Guard against missing arguments — the original indexed argv[1..6]
  // unchecked, which is undefined behavior when fewer are supplied.
  if (argc < 7) {
    fprintf(stderr, "usage: %s comp var_1 var_2 var_3 var_4 var_5\n", argv[0]);
    return 1;
  }
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6);
  // Wait for the kernel (and its printf output) before the process exits.
  cudaDeviceSynchronize();
  return 0;
}
|
12,578 | #include "includes.h"
// In place, over a k x width row-major array with row stride `pitch`:
// array[y][x] = sqrt(array[y][x] + norm[x]).
// Launch layout: 2D grid/blocks, x indexes the column, y the row; threads
// outside the k x width region exit via the guard.
__global__ void add_query_points_norm_and_sqrt(float * array, int width, int pitch, int k, float * norm){
    unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
    if (xIndex<width && yIndex<k)
        // sqrtf keeps the math in single precision; the original called
        // the double-precision sqrt, forcing a float->double->float round
        // trip on every element.
        array[yIndex*pitch + xIndex] = sqrtf(array[yIndex*pitch + xIndex] + norm[xIndex]);
} |
12,579 | #include "includes.h"
// c[i] = a[i] * b with the sign flipped in a checkerboard pattern over an
// sx-wide 2D layout; consecutive element pairs share one lattice site
// (hence the /2). sy and sz are accepted but unused in this kernel.
__global__ void arr_times_const_checkerboard(float*a,float b, float * c, int N, int sx,int sy,int sz)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;  // flat element index
    if (idx >= N) return;                             // tail guard

    int site = idx / 2;        // lattice site index (elements come in pairs)
    int col  = site % sx;      // x position on the sx-wide grid
    int row  = site / sx;      // y position
    // +1 on even (col+row), -1 on odd => checkerboard sign
    float sign = ((col + row) % 2 == 0) ? 1.0f : -1.0f;

    c[idx] = a[idx] * b * sign;
} |
12,580 | // filename: gaxpy2.cu
// a simple CUDA kernel to add two vectors
extern "C" // keep the symbol unmangled so it exports exactly as "gaxpy2"
{
    // c[i] <- a[0]*b[i] + c[i], one element per thread.
    // The index is the thread's linear position within its (possibly 3D)
    // block only, so a single block covers all elements processed here.
    __global__ void gaxpy2(const double *a, const double *b, double *c)
    {
        int idx = threadIdx.x
                + threadIdx.y * blockDim.x
                + threadIdx.z * blockDim.x * blockDim.y;
        c[idx] = a[0] * b[idx] + c[idx];
    }
} |
12,581 | /*
Calculating which points in the specified window of the complex plane
lie inside the Mandelbrot set
Can be easily extended using OpenGL to include graphics
and actually plot the results on screen
*/
#include <iostream>
#include <complex>
#include <cmath>
#include <cstdlib>
// -----------------------------
// declare some global variables
const int MAX_ITER = 200; // maximum number of iterations
const int NX = 1000; // number of points along X inside the window
const int NY = 1000; // number of points along Y inside the window
// -------------------------------------------------------
// define a class for complex numbers and their operations
class dcmplx
{
  public:
    double re; // real component
    double im; // imaginary component

    // Magnitude (absolute value) of the complex number.
    // Uses sqrt directly instead of the original pow(x, 0.5): sqrt is the
    // dedicated, faster, and more accurate square-root routine.
    __device__
    double magnitude()
    {
        return sqrt(re*re + im*im);
    }
};
// ----------------------------------------------------------------------------------
// function to check all points inside the specified window for membership in the set
// this function is called from the host (CPU) but executes on the device (GPU)
// One thread per pixel (i,j) of the NX x NY window; dev_color[i + j*NX]
// receives the iteration at which z diverged (MAX_ITER means the point
// never diverged, i.e. it is taken to be inside the set).
__global__
void Mandelbrot(double xmin, double xmax, double ymin, double ymax, int *dev_color)
{
    double dx = (xmax - xmin)/NX;   // grid spacing along X
    double dy = (ymax - ymin)/NY;   // grid spacing along Y

    // Derive the pixel from the actual launch configuration. The original
    // used the hard-coded form 10*blockIdx + threadIdx, which silently
    // assumed 10x10 thread blocks and wrote far out of bounds under any
    // other launch shape (e.g. a 1000x1000 grid of single-thread blocks).
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;

    // Bounds guard: never write outside the NX x NY color array.
    if (i >= NX || j >= NY) return;

    double x = xmin + (double) i*dx;   // real component of c
    double y = ymin + (double) j*dy;   // imaginary component of c

    dcmplx c;
    c.re = x;
    c.im = y;

    dcmplx z;
    z.re = 0.0;
    z.im = 0.0;

    // Iterate z <- z*z + c until divergence (|z| > 2) or MAX_ITER.
    int iter = 0;
    while(iter<MAX_ITER) {
        iter++;
        dcmplx temp = z;
        z.re = temp.re*temp.re - temp.im*temp.im + c.re;
        z.im = 2.0*temp.re*temp.im + c.im;
        if (z.magnitude() > 2.0) break;
    }

    // Small iter => diverged quickly (outside the set); large iter =>
    // inside. Useful directly as a color index when plotting.
    dev_color[i+j*NX] = iter;
}
// ---------------------------------------
// main program executes on the host (CPU)
int main(int argc, char* argv[])
{
    // allocate and zero the host-side 2D result array
    int *color = new int[NX*NY];
    for(int i=0;i<NX;i++) {
        for(int j=0;j<NY;j++) {
            color[i+j*NX] = 0;
        }
    }

    // allocate the matching device array
    int *dev_color;
    cudaMalloc((void **) &dev_color, NX*NY*sizeof(int));

    // window of the complex plane to scan
    double xmin = -2, xmax = 1, ymin = -1.5, ymax = 1.5;

    // One thread per pixel: 100 x 100 blocks of 10 x 10 threads covers the
    // 1000 x 1000 window exactly, matching the kernel's 10-per-block
    // indexing. (The original launched 1000x1000 single-thread blocks and
    // relaunched the identical kernel 10 times; one correctly shaped
    // launch suffices.)
    dim3 dimGrid (NX/10, NY/10, 1);
    dim3 dimBlock(10, 10, 1);
    Mandelbrot<<<dimGrid,dimBlock>>>(xmin,xmax,ymin,ymax,dev_color);

    // surface any launch error before trusting the results
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "CUDA error: " << cudaGetErrorString(err) << std::endl;
    }

    // copy device -> host; cudaMemcpy synchronizes with the kernel
    cudaMemcpy(color, dev_color, NX*NY*sizeof(int), cudaMemcpyDeviceToHost);

    // release device and host memory (the original leaked both)
    cudaFree(dev_color);
    delete[] color;

    return 0;
}
|
12,582 | // Kernel Call
#include<cuda.h>
#include<stdio.h>
// Empty kernel: exists only to demonstrate a device launch.
__global__ void kernel(void){
}
int main(void)
{
    kernel<<<1,1>>>();
    // Kernel launches are asynchronous: wait for completion so the
    // process does not tear down the CUDA context mid-launch and any
    // launch error is surfaced before exit.
    cudaDeviceSynchronize();
    printf("Hello World \n");
    return 0;
}
|
12,583 |
// GPU kernel
// Each thread accumulates one term of the alternating series into its own
// slot: data_out[i] += (-1)^i / (i + 1).
// NOTE(review): no bounds check against data_size — assumes the launch
// supplies exactly one thread per output element; confirm against callers.
__global__ void summation_kernel(int data_size, float * data_out)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    float sign = (idx % 2 == 0) ? 1.0f : -1.0f;
    data_out[idx] += sign / (idx + 1.0f);
}
// Block-level tree reduction: each block sums blockDim.x elements of
// data_out into data_block[blockIdx.x], using dynamically sized shared
// memory (launch with blockDim.x * sizeof(float) as the smem argument).
// Assumes blockDim.x is a power of two — the halving loop skips the odd
// element otherwise.
// NOTE(review): data_size is a float but is compared against a thread
// index; an integral type would be more natural — confirm callers.
__global__ void reduce(float data_size, float * data_out, float * data_block) {
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// one element per thread, zero for threads past the end of the data
sdata[tid] = (i < data_size) ? data_out[i] : 0;
__syncthreads();
// pairwise tree reduction: halve the active-thread count each pass;
// the barrier is outside the if so every thread reaches it
for(unsigned int s = blockDim.x/2; s > 0; s >>= 1) {
if(tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// thread 0 publishes this block's partial sum
if(tid == 0) data_block[blockIdx.x] = sdata[0];
}
|
12,584 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
int main(void)
{
    std::cout << "Start Thrust\n";

    // 32M random ints, generated sequentially on the host
    const size_t count = 32 << 20;
    thrust::host_vector<int> h_vec(count);
    std::generate(h_vec.begin(), h_vec.end(), rand);

    // host -> device transfer
    thrust::device_vector<int> d_vec = h_vec;

    // GPU sort (846M keys per second on GeForce GTX 480)
    thrust::sort(d_vec.begin(), d_vec.end());

    // device -> host transfer
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());

    std::cout << "Finish Thrust\n";
    return 0;
}
|
12,585 | #include "includes.h"
// Element-wise vector addition c = a + b over SIZE elements, one element
// per thread (SIZE is provided by includes.h).
__global__ void add(int *a, int *b, int *c) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < SIZE) {
        c[i] = a[i] + b[i];
    }
} |
12,586 | // tdfc-cuda backend autocompiled body file
// tdfc version 1.160
// Thu May 26 17:01:38 2011
#include <stdio.h>
// Scale vector cc_x by cc_alpha into cc_x_out, N elements, one per thread.
__global__ void tdfc_scal(float cc_alpha,float* cc_x,float* cc_x_out,int N )
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
    {
        cc_x_out[i] = cc_x[i] * cc_alpha;
    }
} //tdfc_scal
|
12,587 | //
// Created by saleh on 7/16/18.
//
#define DIM 128
#define SMEMDIM 4 // 128/32 = 4
/*
// COPYRIGHT "PROFESSIONAL CUDA C PROGRAMMING - CHAPTER 5 - reduceSmemUnrollShfl"
__global__ void kernel_reduce_sum_allaxes(const float * __restrict__ g_idata, float * __restrict__ g_odata, unsigned int n)
{
// static shared memory
__shared__ float smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
// global index
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// unrolling 4 blocks
float localSum = 0;
if (idx + 3 * blockDim.x < n)
{
float a1 = g_idata[idx];
float a2 = g_idata[idx + blockDim.x];
float a3 = g_idata[idx + 2 * blockDim.x];
float a4 = g_idata[idx + 3 * blockDim.x];
localSum = a1 + a2 + a3 + a4;
}
smem[tid] = localSum;
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
if (blockDim.x >= 64 && tid < 32) smem[tid] += smem[tid + 32];
__syncthreads();
// unrolling warp
localSum = smem[tid];
if (tid < 32)
{
localSum += __shfl_xor(localSum, 16);
localSum += __shfl_xor(localSum, 8);
localSum += __shfl_xor(localSum, 4);
localSum += __shfl_xor(localSum, 2);
localSum += __shfl_xor(localSum, 1);
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = localSum;
}
__global__ void reduceSmem (const float * __restrict__ g_idata, float * __restrict__ g_odata, unsigned int n)
{
__shared__ float smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
// boundary check
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) return;
// convert global data pointer to the local pointer of this block
const float *idata = g_idata + blockIdx.x * blockDim.x;
// set to smem by each threads
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile float *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
// Interleaved Pair Implementation with less divergence
__global__ void reduceInterleaved (const int * __restrict__ g_idata, int * __restrict__ g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
const int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if(idx >= n) return;
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Interleaved Pair Implementation with less divergence
__global__ void reduceInterleavedFloat (const float * __restrict__ g_idata, float * __restrict__ g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
float *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if(idx >= n) return;
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceCompleteUnrollWarps8 (const int * __restrict__ g_idata, int * __restrict__ g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction and complete unroll
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceCompleteUnrollWarps8Float (const float * __restrict__ g_idata, float * __restrict__ g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
float *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
float a1 = g_idata[idx];
float a2 = g_idata[idx + blockDim.x];
float a3 = g_idata[idx + 2 * blockDim.x];
float a4 = g_idata[idx + 3 * blockDim.x];
float b1 = g_idata[idx + 4 * blockDim.x];
float b2 = g_idata[idx + 5 * blockDim.x];
float b3 = g_idata[idx + 6 * blockDim.x];
float b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction and complete unroll
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile float *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
*/
// CHAPTER 05 - reduceInteger.cu PROFESSIONAL CUDA C PROGRAMMING
// 4x-unrolled block sum reduction (Professional CUDA C Programming, ch. 5):
// each thread loads up to four elements spaced blockDim.x apart, the block
// reduces them in static shared memory, and thread 0 writes the block's
// partial sum to g_odata[blockIdx.x].
// Requires blockDim.x == DIM (shared-array size) and blockDim.x >= 64.
__global__ void kernel_reduceSmemUnroll(const float * __restrict__ g_idata, float * __restrict__ g_odata, unsigned int n)
{
    // static shared memory
    __shared__ float smem[DIM];

    // set thread ID
    unsigned int tid = threadIdx.x;

    // global index; each block consumes a tile of 4 * blockDim.x inputs
    unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;

    // Unroll 4 loads, guarding each one individually. The original used a
    // single all-or-nothing guard (idx + 4*blockDim.x <= n) that zeroed the
    // whole 4-element tile at the grid tail, silently dropping in-range
    // elements whenever n is not a multiple of 4 * blockDim.x.
    float tmpSum = 0;
    for (int k = 0; k < 4; k++)
    {
        unsigned int pos = idx + k * blockDim.x;
        if (pos < n) tmpSum += g_idata[pos];
    }

    smem[tid] = tmpSum;
    __syncthreads();

    // in-place reduction in shared memory (barrier outside each guard)
    if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
    __syncthreads();
    if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
    __syncthreads();
    if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
    __syncthreads();
    if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
    __syncthreads();

    // final warp unrolled; volatile prevents register caching.
    // NOTE(review): this relies on implicit warp synchrony — on Volta and
    // newer, consider __syncwarp() between steps; confirm target arch.
    if (tid < 32)
    {
        volatile float *vsmem = smem;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid + 8];
        vsmem[tid] += vsmem[tid + 4];
        vsmem[tid] += vsmem[tid + 2];
        vsmem[tid] += vsmem[tid + 1];
    }

    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
// Host-side wrapper (callable from plain .cpp files): launches the
// unrolled shared-memory sum-reduction kernel. Only grid.x is used,
// since the kernel indexes one-dimensionally.
void reduce_sum_all_axes(dim3 grid, dim3 block, float *g_idata, float *g_odata, unsigned int n){
    kernel_reduceSmemUnroll<<<grid.x, block>>>(g_idata, g_odata, n);
}
|
12,588 | /*
* dgemm_gpu_shared.cu
*
* compile with: make dgemm_gpu_shared
* -> nvcc -O3 -arch=sm_20 -o dgemm_gpu_shared dgemm_gpu_shared.cu -lcudart
*
* Matrices are stored as array in row-major order:
* A[row][col] = A[row * N + col]
*
* Use shared memory to speed up the matrix multiplication. We can reuse
* the memory if we load a block of the matrix and have a thread block
* calculate a sub matrix.
*/
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
// Thread block size: BLOCK_SIZE * BLOCK_SIZE
#define BLOCK_SIZE 16
// Declaration of helper functions (see bottom of file for details)
void checkError (const char* action);
float getGflops (int, float);
/*
* Matrix multiplication kernel called by matrixMulOnDevice()
*/
// Tiled matrix multiply C = A * B for n x n row-major matrices.
// Launch layout: 2D grid of BLOCK_SIZE x BLOCK_SIZE thread blocks, one
// thread per output element. Each iteration of the block loop stages one
// BLOCK_SIZE-wide tile of A and B in shared memory (zero-padded past the
// matrix edge) so every element is read from global memory only once per
// tile instead of once per output element.
__global__ void dgemm_gpu_shared(double* a, double* b, double* c, int n){
// shared-memory tiles, cooperatively filled by the whole block
__shared__ double aSub[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double bSub[BLOCK_SIZE][BLOCK_SIZE];
// global (column, row) of the output element this thread owns
int idxX = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int idxY = blockIdx.y * BLOCK_SIZE + threadIdx.y;
// For the matrix multiplication, we need to multiply all the elements of
// the idxYth row of a with all the elements of the idXth column of b and
// sum up the results.
double sum = 0;
// fixed global offsets of this block's row band in A / column band in B
int blockaY = blockIdx.y * BLOCK_SIZE;
int blockbX = blockIdx.x * BLOCK_SIZE;
// walk the tiles along A's row / B's column
for (int block = 0; block < gridDim.x; ++block){
// upper-left corner of the current tile in A and in B
int blockaX = block * (BLOCK_SIZE);
int blockbY = block * (BLOCK_SIZE);
if (((blockaY + threadIdx.y) < n) && (blockaX + threadIdx.x) < n) {
// each thread copies one element of the A tile into shared memory
aSub[threadIdx.y][threadIdx.x] = a[(blockaY + threadIdx.y) * n + blockaX + threadIdx.x];
} else {
aSub[threadIdx.y][threadIdx.x] = 0;
}
if (((blockbY + threadIdx.y) < n) && (blockbX + threadIdx.x) < n) {
bSub[threadIdx.y][threadIdx.x] = b[(blockbY + threadIdx.y) * n + blockbX + threadIdx.x];
} else {
bSub[threadIdx.y][threadIdx.x] = 0;
}
// barrier: tiles must be fully populated before anyone reads them
__syncthreads();
if ((idxX < n) && (idxY < n))
{
for (int i=0; i < blockDim.x; ++i){ //assumes that we use square blocks
sum += aSub[threadIdx.y][i] * bSub[i][threadIdx.x];
}
}
// barrier: all reads done before the next iteration overwrites the tiles
__syncthreads();
}
// write the finished dot product (only threads inside the matrix)
if ((idxX < n) && (idxY < n)){
c[idxY * n + idxX] = sum;
}
}
/*
 * Matrix multiplication host function called by main().
 * Allocates device buffers, copies a and b to the GPU, launches
 * dgemm_gpu_shared, copies the product back into c and reports timing.
 *
 * Fix: the start event used to be recorded before the host->device copies
 * and the stop event after the device->host copy, so the value printed as
 * "Kernel Execution Time" actually included both transfers.  The events
 * now bracket only the kernel launch.
 */
void matrixMulOnDevice(double* a, double* b, double* c, int n)
{
    int size = n * n * sizeof(double);
    int xGrid, yGrid;
    double *d_a, *d_b, *d_c;
    float time;
    // Events used to time the kernel execution only.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Device buffers for the two inputs and the result.
    cudaMalloc((void **)&d_a, size );
    checkError("cudaMalloc: d_a");
    cudaMalloc((void **)&d_b, size );
    checkError("cudaMalloc: d_b");
    cudaMalloc((void **)&d_c, size );
    checkError("cudaMalloc: d_c");
    // Grid size: ceiling division of n by BLOCK_SIZE in each dimension.
    xGrid = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    yGrid = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 gridDim(xGrid, yGrid);
    dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE);
    printf("Grid: %d, %d; block:%d, %d\n", xGrid , yGrid , BLOCK_SIZE, BLOCK_SIZE);
    // Copy inputs before starting the timer so transfers are not timed.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    checkError("copying data of A from host to device");
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    checkError("copying data of B from host to device");
    // Time only the kernel itself.
    cudaEventRecord( start, 0 );
    dgemm_gpu_shared<<<gridDim, blockDim>>>( d_a, d_b, d_c, n);
    cudaEventRecord( stop, 0 );
    // cudaEventSynchronize blocks until the kernel (and stop event) finish;
    // checkError then surfaces any launch or execution failure.
    cudaEventSynchronize( stop );
    checkError("executing Kernel");
    // Read results from device memory into c (outside the timed region).
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    checkError("cudaMemcpyDeviceToHost");
    cudaEventElapsedTime( &time, start, stop );
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    printf ("\nKernel Execution Time: %f ms (dim C: %d * %d)", time, n, n);
    printf ("\nThis corresponds to: %4.4f GFLOPS", getGflops(n, time));
    // Free device memory
    cudaFree(d_a);
    checkError("Freeing d_a");
    cudaFree(d_b);
    checkError("Freeing d_b");
    cudaFree(d_c);
    checkError("Freeing d_c");
}
// Driver: A = identity, B = index ramp, so C must equal B after the GPU
// multiply; verifies the result and reports max/sum absolute error.
int main(int argc, char** argv)
{
    int n = 1024;
    double *a, *b, *c;
    int row, col;
    double absError, maxAbsError = 0.0, sumAbsError = 0.0;
    // Optional first argument overrides the default matrix dimension.
    if (argc > 1) {
        n = atoi(argv[1]);
    }
    // show banner
    printf ("\n\n Matrix-Multiplication \n");
    printf ( " ==========================================\n");
    printf ( "\n Simple DGEMM implemantation on GPU");
    // echo device data
    int idevice = 0;
    cudaSetDevice(idevice);
    cudaDeviceProp dprops;
    cudaGetDeviceProperties( &dprops, idevice );
    printf ("\n Device name = %s, with compute capability %d.%d \n",
            dprops.name, dprops.major, dprops.minor);
    printf ( "\n Matrix size %d x %d", n, n);
    // Allocate memory for matrices on host.
    // Fix: the allocations used to live inside assert(), whose argument
    // (including the malloc side effect) is removed entirely when compiled
    // with -DNDEBUG; allocate and check explicitly instead.
    a = (double*) malloc (n * n * sizeof(double));
    b = (double*) malloc (n * n * sizeof(double));
    c = (double*) malloc (n * n * sizeof(double));
    if (a == NULL || b == NULL || c == NULL) {
        printf ("\nCannot allocate host memory for the matrices\n");
        return 1;
    }
    // Init matrices A and B: A = E so result will be B
    #pragma omp parallel for private(row, col)
    for (row = 0; row < n; ++row){
        for (col = 0; col < n; col++){
            a[row * n + col] = (row == col) ? 1.0 : 0.0;
            b[row * n + col] = row * n + col;
        }
    }
    // do matrix multiplication on device
    matrixMulOnDevice(a, b, c, n);
    // Compare results: since A is the identity, C should equal B exactly.
    for ( row = 0; row < n; ++row){
        for ( col = 0; col < n; ++col) {
            absError = fabs ( c[row * n + col] - b[row * n + col]);
            sumAbsError += absError;
            if (absError > maxAbsError)
                maxAbsError = absError;
        }
    }
    // Free memory on host
    free (a);
    free (b);
    free (c);
    printf ("\nmaxAbsError: %4.4f, sumAbsError: %4.4f", maxAbsError, sumAbsError);
    if (maxAbsError < 2.0e-5) {
        printf ("\n\nProgram terminated SUCCESSFULLY.\n\n");
    } else {
        printf ("\n\nProgram terminated UNSUCCESSFULLY.\n\n");
    }
    return 0;
}
/*
* Some helper functions
*/
// Compute performance of an n x n x n DGEMM (2*n^3 floating-point
// operations) given the kernel run time in milliseconds, in GFLOPS.
float getGflops (int n, float time) {
    const double flops = 2.0 * (double)n * (double)n * (double)n;
    // time is in ms, so flops / (time * 1e6) == 2e-6 * n^3 / time.
    return (float)(flops / ((double)time * 1.0e6));
}
// Report-and-abort wrapper: if the most recent CUDA runtime call failed,
// print a message naming the attempted `action` and terminate the program.
void checkError (const char* action) {
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    printf ("\nError while '%s': %s\nprogram terminated ...\n\n", action, cudaGetErrorString(status));
    exit (EXIT_FAILURE);
}
|
12,589 | // Source: https://github.com/lzhengchun/matrix-cuda/blob/master/matrix_cuda.cu
/*
* file name: matrix.cu
*
* matrix.cu contains the code that realize some common used matrix operations in CUDA
*
* this is a toy program for learning CUDA, some functions are reusable in other project
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define BLOCK_SIZE 16
// Naive integer GEMM: C (m x k) = A (m x n) * B (n x k), row-major.
// One thread per output element; launch a 2-D grid covering k columns (x)
// and m rows (y).  Threads mapped outside the matrix exit early.
__global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= m || col >= k)
        return;
    int acc = 0;
    // Dot product of row `row` of A with column `col` of B.
    for (int t = 0; t < n; ++t)
        acc += a[row * n + t] * b[t * k + col];
    c[row * k + col] = acc;
}
/*
*********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
*********************************************************************
*/
// Test driver: fills two 2048x2048 integer matrices with pseudo-random
// values, multiplies them on the GPU and reports the elapsed time.
// Fixes: cudaThreadSynchronize() is deprecated (replaced by
// cudaDeviceSynchronize, moved before the D2H copy); kernel launch errors
// are now surfaced via cudaGetLastError(); unused cpu timing local removed.
int main(int argc, char const *argv[])
{
    int m, n, k;
    /* Fixed seed for illustration */
    srand(3333);
    m=n=k=2048;
    // allocate pinned host memory; h_cc is reserved for a CPU reference
    // result (not computed in this version)
    int *h_a, *h_b, *h_c, *h_cc;
    cudaMallocHost((void **) &h_a, sizeof(int)*m*n);
    cudaMallocHost((void **) &h_b, sizeof(int)*n*k);
    cudaMallocHost((void **) &h_c, sizeof(int)*m*k);
    cudaMallocHost((void **) &h_cc, sizeof(int)*m*k);
    // random initialize matrix A
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            h_a[i * n + j] = rand() % 1024;
        }
    }
    // random initialize matrix B
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            h_b[i * k + j] = rand() % 1024;
        }
    }
    float gpu_elapsed_time_ms;
    // one thread per C element, BLOCK_SIZE x BLOCK_SIZE threads per block
    dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // events to measure the whole GPU phase (alloc + copies + kernel)
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // Allocate memory space on the device
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sizeof(int)*m*n);
    cudaMalloc((void **) &d_b, sizeof(int)*n*k);
    cudaMalloc((void **) &d_c, sizeof(int)*m*k);
    // copy matrix A and B from host to device memory
    cudaMemcpy(d_a, h_a, sizeof(int)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int)*n*k, cudaMemcpyHostToDevice);
    gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);
    // a plain launch never reports configuration errors -- check explicitly
    cudaError_t launchStatus = cudaGetLastError();
    if (launchStatus != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(launchStatus));
        return 1;
    }
    // wait for the kernel, then transfer results from device to host
    cudaDeviceSynchronize();
    cudaMemcpy(h_c, d_c, sizeof(int)*m*k, cudaMemcpyDeviceToHost);
    // time counting terminate
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // compute time elapse on GPU computing
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms);
    // free memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_cc);
    return 0;
}
|
12,590 | #include "argmax-accuracy.hh"
#include "graph.hh"
#include "ops-builder.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
// Graph op that computes a classification accuracy score from two
// predecessor ops: labels y (preds()[0]) and predictions y_hat
// (preds()[1]).  Output shape is empty (a scalar), so the result buffer
// holds a single dbl_t; the actual argmax/compare work is done by the
// runtime node created in compile().
ArgmaxAccuracy::ArgmaxAccuracy(Op* y, Op* y_hat)
: Op("argmax_accuracy", Shape{}, {y, y_hat})
{}
// Lower this op into a runtime node: fetch the compiled predecessors,
// allocate the 1-element output tensor and register the result with the
// graph so downstream ops can depend on it.
void ArgmaxAccuracy::compile()
{
auto& g = Graph::instance();
// Compiled forms of the two inputs (data pointers + producing nodes).
auto& cy = g.compiled(preds()[0]);
auto& cy_hat = g.compiled(preds()[1]);
// NOTE(review): assumes both inputs are 2-D with identical shapes; the
// dimensions are read from cy only -- confirm upstream guarantees this.
std::size_t rows = cy.out_shape[0];
std::size_t cols = cy.out_shape[1];
Shape out_shape {};
// Scalar output: a single dbl_t.
dbl_t* out_data = tensor_alloc(1);
auto out_node = rt::Node::op_argmax_acc(cy.out_data, cy_hat.out_data, out_data,
rows, cols,
{cy.out_node, cy_hat.out_node});
g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}
}
|
12,591 | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
// Kernel: store the constant 10 into the device location the caller
// provided (single-thread demo of writing through a device pointer).
__global__
void get(int *output)
{
    *output = 10;
}
// Demo driver: run get() on the device and print the value it wrote.
// Fixes: the malloc result is now checked, and h_num is freed (it used to
// leak on every run).
int main(int argc, char *argv[])
{
    int *h_num;
    int *d_num;
    cudaError_t cudaerr;
    h_num = (int*)malloc(sizeof(int));
    if (h_num == NULL)
    {
        printf("nao pode alocar memoria no host\n");
        return 1;
    }
    cudaerr = cudaMalloc((void **) &d_num, sizeof(int));
    if (cudaerr != cudaSuccess)
        printf("nao pode alocar memoria no device\n");
    get<<<1, 1>>>(d_num);
    // blocking D2H copy: also synchronizes with the kernel above
    cudaerr = cudaMemcpy(h_num, d_num, sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaerr != cudaSuccess)
        printf("nao pode copiar memoria\n");
    printf("%d\n", *h_num);
    cudaFree(d_num);
    free(h_num);
    return 0;
}
|
12,592 |
// Babak Poursartip
// 09/14/2020
// Udemy Cuda
// launch hello world
#include <iostream>
// Kernels are marked __global__ (other qualifiers: __host__, __device__,
// __shared__) and must return void.  Every launched thread prints one
// greeting line.
__global__ void hello_cuda()
{
    printf(" Hello, CUDA is here!\n");
}
int main() {
  std::cout << " starts ...\n";
  // A kernel launch is asynchronous: the host continues immediately after
  // queuing it.  Launch limits: at most 1024 threads per block in x/y,
  // 64 in z, and x*y*z <= 1024; grids allow up to 2^31-1 blocks in x and
  // 65535 in y and z.
  const int nx = 16;  // total threads wanted in x
  const int ny = 4;   // total threads wanted in y
  dim3 block(8, 2);                         // 8 x 2 threads per block
  dim3 grid(nx / block.x, ny / block.y);    // 2 x 2 blocks (nx, ny divide evenly)
  hello_cuda<<<grid, block>>>();
  // Block the host until every previously launched kernel has finished;
  // without this the process could exit before the device printf runs.
  cudaDeviceSynchronize();
  cudaDeviceReset();
  std::cout << " Finished.\n";
  return 0;
}
|
12,593 | #include <stdio.h>
#include <stdlib.h>
#define N 4096
#define block_Size 256
/* Function to integrate, defined as a function on the GPU device:
   f(a) = a^2 + 2a + 3.
   Fix: use float literals (2.0f, 3.0f) -- the original double literals
   promoted the whole expression to double-precision arithmetic. */
__device__ float myfunction(float a)
{
    return a*a + 2.0f*a + 3.0f;
}
/* kernel function to compute the summation used in the trapezoidal rule
for numerical integration: one sample interval per thread */
__global__ void integratorKernel(float *a, float c, float deltaX)
{
// global 1-D thread index; interval i covers [c + i*deltaX, c + (i+1)*deltaX]
int id = blockIdx.x * blockDim.x + threadIdx.x;
// left endpoint of this thread's interval
float x = c + (float)id * deltaX;
// guard: the last block may be only partially filled (N samples total)
if (id<N)
// f(x_i) + f(x_{i+1}); the host sums these and multiplies by deltaX/2
a[id] = myfunction(x)+myfunction(x+deltaX);
}
// Driver: integrate f over [0, 1] with the composite trapezoidal rule
// (N intervals), evaluating the per-interval terms on the GPU and summing
// on the host.  Fixes: the cudaMalloc error message used "/n" instead of
// "\n"; kernel launch errors are now checked before the result copy.
int main( int argc, char* argv[] )
{
    double end = 1.0, start = 0.0;
    // width of each integration interval
    float deltaX = (end-start)/(double) N;
    // error code variable
    cudaError_t errorcode = cudaSuccess;
    // Size of the arrays in bytes
    int size = N*sizeof(float);
    // Allocate array on host and device
    float* a_h = (float *)malloc(size);
    float* a_d;
    if (( errorcode = cudaMalloc((void **)&a_d,size))!= cudaSuccess)
    {
        printf("cudaMalloc(): %s\n", cudaGetErrorString(errorcode));
        exit(1);
    }
    // Do calculation on device
    int grid_Size = N/block_Size + ( N % block_Size == 0 ? 0:1);
    printf("blocks: %d\n", grid_Size);
    printf("block size: %d\n ", block_Size);
    integratorKernel <<< grid_Size, block_Size >>> (a_d, start, deltaX);
    // surface launch-configuration errors explicitly
    if ((errorcode = cudaGetLastError()) != cudaSuccess)
    {
        printf("kernel launch: %s\n", cudaGetErrorString(errorcode));
        exit(1);
    }
    // Copy results from device to host (blocking; syncs with the kernel)
    if((errorcode = cudaMemcpy(a_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost))
       !=cudaSuccess)
    {
        printf("cudaMemcpy(): %s\n", cudaGetErrorString(errorcode));
        exit(1);
    }
    // Add up results: sum of f(x_i)+f(x_{i+1}) terms, then * deltaX/2
    float sum = 0.0;
    for(int i=0; i<N; i++)
        sum += a_h[i];
    sum *= deltaX/2.0;
    printf("The integral is: %f\n", sum);
    // clean up
    free(a_h);
    cudaFree(a_d);
    return 0;
}
|
extern "C" // ensure function name to be left alone
{
// Evaluate the standard normal pdf at each input point:
// y[i] = exp(-x[i]^2 / 2) / sqrt(2*pi).
// NOTE(review): relies on M_PI being defined by an included math header --
// confirm the build defines it (it is not guaranteed by strict ISO C).
__global__ void normal_pdf_gpu(const double *x, double *y, unsigned int n)
{
// assumes a 2-d grid of 1-d blocks
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
if(i<n) y[i] = exp(-0.5*x[i]*x[i])*rsqrt(2.0*M_PI);
}
// Partial-sum reduction: thread i sums its `percore`-element slice of y
// (elements [i*percore, i*percore + percore), clipped to n) into sumptr[i].
// Apply repeatedly to reduce n values down to n_subsums partial sums.
__global__ void sum_gpu(double *y, double *sumptr, unsigned int n,
unsigned int n_subsums, unsigned int percore)
{
// assumes a 2-d grid of 1-d blocks
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x
+ threadIdx.x;
unsigned int j = i * percore; // first element that this thread will take care of
unsigned int k;
// threads beyond the number of partial sums have nothing to do
if (i >= n_subsums)
return;
// NOTE(review): sumptr[i] is in global memory, so this loop performs a
// global read-modify-write per element; a register accumulator written
// once at the end would be cheaper -- left as-is to preserve behavior.
sumptr[i] = 0.0;
for (k = 0; k < percore; k++) {
if (j + k < n)
sumptr[i] += y[j + k];
}
}
// Copy the final reduced value (y[0]) into a 1-element result buffer.
__global__ void get_sum_gpu(double *y, double *sumptr)
// copy the result into a smaller array
{
sumptr[0] = y[0];
}
}
/* vim: set sw=4 sts=4 et : */
|
12,595 | #include "includes.h"
// Elementwise derivative of tanh: output[i] = 1 - tanh(input[i])^2.
// Grid-stride loop, so any launch configuration covers all nThreads
// elements.  Fixes: tanh was the double-precision overload and was
// evaluated twice per element; use single-precision tanhf once.
__global__ void kTanhDerivative(const int nThreads, float const *input, float *output)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < nThreads;
         i += blockDim.x * gridDim.x)
    {
        const float t = tanhf(input[i]);
        output[i] = 1.0f - t * t;
    }
}
12,596 | #include <cuda_runtime.h>
#include <curand_kernel.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <math.h>
#include <ctype.h>
#include <cuda.h>
#include <time.h>
#define DATAMB(bytes) (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))
#define CEIL(a,b) ((a+b-1)/b)
typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int ui;
struct ImgProp{
int Hpixels;
int Vpixels;
uch HeaderInfo[54];
ul Hbytes;
} ip;
typedef struct{
ui i;
ui j;
}pixelCoords;
// buffers for images
uch *TheImg, *CopyImg;
uch *GPUImg, *GPUCopyImg, *GPUptr, *GPUResult, *NoiseMap, *KernelIndices;
double *GPU_PREV_BW, *GPU_CURR_BW;
// noisy pixel locations
pixelCoords *NoisyPixelCoords;
// mutex variables for tracking noisy pixels
ui *GlobalMax, *GlobalMin, *NumNoisyPixelsGPU, *GPUmutexes, *GPU_SAD;
#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX (IPH*IPV)
// Kernel that locates potentially noisy (salt & pepper) pixels using
// impulse noise detection.  One thread per pixel of the B&W image:
// suspected pixels are appended to `locations` (count accumulated in
// *ListLength via atomics) and marked 0 in `noiseMap` (1 = clean).
// A 3-pixel frame around the image is skipped so later convolutions
// never read out of bounds.
__global__
void findNoisyPixels(pixelCoords *locations, uch *ImgSrc, uch *noiseMap, ui*globalMax, ui*globalMin, ui*ListLength, ui Hpixels, ui Vpixels)
{
// 3x3 matrix of pixels around current pixel
//uch mat3x3[8]; // 3 x 3 - 1 = 8
// threads/blocks info and IDs
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
ui MYrow = MYbid / BlkPerRow;
ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
// leave buffer frame around image to avoid 8 edge cases for convolutions
if (MYcol > Hpixels-4 || MYcol < 3 || MYrow > Vpixels-4 || MYrow < 3) return;
ui MYpixIndex = MYrow * Hpixels + MYcol; // pixel index in B&W image
uch pIJ = ImgSrc[MYpixIndex]; // intensity of the pixel under test
uch max = 0;   // local window maximum
uch min = 255; // local window minimum
uch curr;
uch nMax; // presumed "salt" intensity
uch nMin; // presumed "pepper" intensity
// NOTE(review): atomicMax/atomicMin return ui; storing into uch keeps only
// the low 8 bits.  Intensities fit in 8 bits so this works, but it is
// fragile -- confirm intended.
uch oldMax;
uch oldMin;
int row;
int col;
int indx;
// find min and max pixel intensities in current 3x3 window (center excluded)
for (int i = -1; i <= 1; i++){
for (int j = -1; j <= 1; j++){
if(!(j==0 && i==0)){
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
curr = ImgSrc[indx];
if(curr > max)
max = curr;
if(curr < min)
min = curr;
}
}
}
// atomically update global max and min pixel intensities
// NOTE(review): the values observed here depend on thread scheduling, so
// the salt/pepper thresholds below are racy by design -- confirm intended.
oldMax = atomicMax(globalMax, (ui)max);
oldMin = atomicMin(globalMin, (ui)min);
// if the old max wasn't updated, then max is "salt" noise
// otherwise, we must assume that 255 is "salt" noise
if(oldMax == max)
nMax = max;
else
nMax = 255;
// if the old min wasn't updated, then min is "pepper" noise
// otherwise, we must assume that 0 is "pepper" noise
if(oldMin == min)
nMin = min;
else
nMin = 0;
// if the current pixel intensity is equal to min or max,
// then it is likely s&p noise. Mark as such.
if(pIJ == nMin || pIJ == nMax){
// atomicAdd reserves a unique slot in the noisy-pixel list
int listIndex = atomicAdd(ListLength, (ui)1);
locations[listIndex].i = MYrow;
locations[listIndex].j = MYcol;
noiseMap[MYpixIndex] = 0;
}
// if(pIJ == 255 || pIJ == 0){
// ui listIndex = atomicAdd(ListLength, (ui)1);
// locations[listIndex].i = MYrow;
// locations[listIndex].j = MYcol;
// noiseMap[MYpixIndex] = 0;
// }
}
// __device__
// uch Horz[5][5] = { { 0, 0, 0, 0, 0 },
// { 1, 1, 1, 1, 1 },
// { 1, 1, 0, 1, 1 },
// { 1, 1, 1, 1, 1 },
// { 0, 0, 0, 0, 0 } };
// __device__
// uch Vert[5][5] = { { 0, 1, 1, 1, 0 },
// { 0, 1, 1, 1, 0 },
// { 0, 1, 0, 1, 0 },
// { 0, 1, 1, 1, 0 },
// { 0, 1, 1, 1, 0 } };
// __device__
// uch mask45[7][7]={ {0, 0, 0, 0, 1, 0, 0},
// {0, 0, 0, 1, 1, 1, 0},
// {0, 0, 1, 1, 1, 1, 1},
// {0, 1, 1, 0, 1, 1, 0},
// {1, 1, 1, 1, 1, 0, 0},
// {0, 1, 1, 1, 0, 0, 0},
// {0, 0, 1, 0, 0, 0, 0}};
// __device__
// uch mask135[7][7]={ {0, 0, 1, 0, 0, 0, 0},
// {0, 1, 1, 1, 0, 0, 0},
// {1, 1, 1, 1, 1, 0, 0},
// {0, 1, 1, 0, 1, 1, 0},
// {0, 0, 1, 1, 1, 1, 1},
// {0, 0, 0, 1, 1, 1, 0},
// {0, 0, 0, 0, 1, 0, 0}};
//3x3 standard mask
__constant__
double mask0[3][3] = { {0.1036, 0.1464, 0.1036},
{0.1464, 0, 0.1464},
{0.1036, 0.1464, 0.1036}};
// horizontal 5x5 mask
__constant__
double mask1[5][5] = { {0, 0, 0, 0, 0 },
{0.0465, 0.0735, 0.1040, 0.0735, 0.0465 },
{0.0520, 0.1040, 0, 0.1040, 0.0520 },
{0.0465, 0.0735, 0.1040, 0.0735, 0.0465 },
{0, 0, 0, 0, 0 }};
//vertical 5x5 mask
__constant__
double mask2[5][5] = { {0, 0.0465, 0.0520, 0.0465, 0},
{0, 0.0735, 0.1040, 0.0735, 0},
{0, 0.1040, 0, 0.1040, 0},
{0, 0.0735, 0.1040, 0.0735, 0},
{0, 0.0465, 0.0520, 0.0465, 0}};
//45 degree 7x7 mask
__constant__
double mask3[7][7] = { {0, 0, 0, 0, 0.0251, 0, 0 },
{0, 0, 0, 0.0397, 0.0355, 0.0281, 0 },
{0, 0, 0.0562, 0.0794, 0.0562, 0.0355, 0.0251 },
{0, 0.0397, 0.0794, 0, 0.0794, 0.0397, 0 },
{0.0251, 0.0355, 0.0562, 0.0794, 0.0562, 0, 0 },
{0, 0.0281, 0.0355, 0.0397, 0, 0, 0 },
{0, 0, 0.0251, 0, 0, 0, 0 }};
//135 degree 7x7 mask
__constant__
double mask4[7][7] = { {0, 0, 0.0251, 0, 0, 0, 0 },
{0, 0.0281, 0.0355, 0.0397, 0, 0, 0 },
{0.0251, 0.0355, 0.0562, 0.0794, 0.0562, 0, 0 },
{0, 0.0397, 0.0794, 0, 0.0794, 0.0397, 0 },
{0, 0, 0.0562, 0.0794, 0.0562, 0.0355, 0.0251 },
{0, 0, 0, 0.0397, 0.0355, 0.0281, 0 },
{0, 0, 0, 0, 0.0251, 0, 0 }};
// Kernel that determines the appropriate inpainting mask for every noisy
// pixel, based on the noise-free pixels around it.  One thread per entry
// of `locations`; the chosen mask index is written to kernelIndices:
//   0 = plain 3x3, 1 = horizontal 5x5, 2 = vertical 5x5,
//   3 = 45-degree 7x7, 4 = 135-degree 7x7.
// Fixes over the original: (a) the bounds guard used '>' so thread
// ListLength read past the end of the list; (b) the sum / deviation
// accumulators were initialized once instead of per mask, so masks 2..4
// inherited the running totals of earlier masks and the comparison was
// meaningless; (c) chosenMask could be left uninitialized.
__global__
void determineMasks(pixelCoords *locations, uch *ImgSrc, uch *noiseMap, uch *kernelIndices, ui ListLength, ui Hpixels, ui R) {
    // threads/blocks info and IDs
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    // valid list indices are 0..ListLength-1
    if (MYgtid >= ListLength) return;
    // noise-free neighbor values for the four directional masks (up to 14
    // samples each) plus their four lengths, packed into one local array
    uch noiseFreeLists[60];
    uch *maskA = noiseFreeLists;      // horizontal 5x5 samples
    uch *maskB = maskA + 14;          // vertical 5x5 samples
    uch *maskC = maskB + 14;          // 45-degree 7x7 samples
    uch *maskD = maskC + 14;          // 135-degree 7x7 samples
    uch *listLengths = maskD + 14;
    uch *currMask;
    uch currListLength;
    // control and tracking variables
    int i, j, row, col, indx, maskAIndx = 0, maskBIndx = 0, maskCIndx = 0, maskDIndx = 0;
    int chosenMask = 0;               // default to the 3x3 mask
    float minStdDev = 1000000.0f, currStdDev, sum, mean, standardDeviation;
    // obtain current noisy pixel indices
    pixelCoords currCoord = locations[MYgtid];
    ui MYrow = currCoord.i;
    ui MYcol = currCoord.j;
    // collect noise-free neighbors covered by the two 5x5 masks
    for (i = -2; i <= 2; i++){
        for (j = -2; j <= 2; j++){
            row = MYrow + i;
            col = MYcol + j;
            indx = row*Hpixels + col;
            if(noiseMap[indx]){
                // horizontal 5x5 mask cell enabled?
                if(mask1[i+2][j+2]) {
                    maskA[maskAIndx] = ImgSrc[indx];
                    maskAIndx++;
                }
                // vertical 5x5 mask cell enabled?
                if(mask2[i+2][j+2]) {
                    maskB[maskBIndx] = ImgSrc[indx];
                    maskBIndx++;
                }
            }
        }
    }
    // collect noise-free neighbors covered by the two 7x7 masks
    for (i = -3; i <= 3; i++){
        for (j = -3; j <= 3; j++){
            row = MYrow + i;
            col = MYcol + j;
            indx = row*Hpixels + col;
            if(noiseMap[indx]){
                // 45-degree 7x7 mask cell enabled?
                if(mask3[i+3][j+3]) {
                    maskC[maskCIndx] = ImgSrc[indx];
                    maskCIndx++;
                }
                // 135-degree 7x7 mask cell enabled?
                if(mask4[i+3][j+3]) {
                    maskD[maskDIndx] = ImgSrc[indx];
                    maskDIndx++;
                }
            }
        }
    }
    // too few noise-free samples in any direction -> fall back to the
    // 3x3 convolution; this mitigates promoting false edges
    if(maskAIndx < R || maskBIndx < R || maskCIndx < R || maskDIndx < R)
        chosenMask = 0;
    else {
        // assign list lengths for smoother access
        listLengths[0] = maskAIndx;
        listLengths[1] = maskBIndx;
        listLengths[2] = maskCIndx;
        listLengths[3] = maskDIndx;
        // choose the directional mask (index 1..4) whose noise-free
        // samples show the smallest normalized squared deviation
        for(i = 0; i < 4; i++) {
            currListLength = listLengths[i];
            currMask = maskA + (i*14);
            // reset the accumulators for every mask
            sum = 0.0f;
            standardDeviation = 0.0f;
            // mean of this mask's samples (currListLength >= R >= 1 here)
            for(j = 0; j < currListLength; j++)
                sum += (float)currMask[j];
            mean = sum / currListLength;
            // sum of squared deviations from the mean
            for(j = 0; j < currListLength; j++)
                standardDeviation += ((float)currMask[j] - mean) * ((float)currMask[j] - mean);
            // normalized by list length; only used for comparisons
            currStdDev = standardDeviation / currListLength;
            if(currStdDev < minStdDev) {
                chosenMask = i + 1;
                minStdDev = currStdDev;
            }
        }
    }
    // record the mask index that was chosen
    kernelIndices[MYgtid] = chosenMask;
}
// Inpainting convolutions: for each noisy pixel, convolve its neighborhood
// in ImgBW with the mask selected by determineMasks (index in kernalI) and
// write the result into ImgCurr.  One thread per noisy pixel; all masks
// have a zero center, so the noisy pixel itself never contributes.
__global__
void Convolute(double *ImgCurr, double *ImgBW, pixelCoords *pc, uch *kernalI, ui numNoisy, ui Hpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
if (MYgtid >= numNoisy) return; // index out of range
// current noisy pixel coordinates and its chosen mask index
ui i=pc[MYgtid].i,j=pc[MYgtid].j,m=kernalI[MYgtid];
// absolute pixel index
ui MYpixIndex = i * Hpixels + j;
int a,b,row,col,index;
double C = 0.0; // convolution accumulator
// Each case runs the same loop with a different mask and radius:
// 0 -> 3x3 mask0, 1 -> 5x5 mask1 (horizontal), 2 -> 5x5 mask2 (vertical),
// 3 -> 7x7 mask3 (45 deg), default -> 7x7 mask4 (135 deg).
switch(m)
{
case 0: for (a = -1; a <= 1; a++){
for (b = -1; b <= 1; b++){
row = i + a;
col = j + b;
index = row*Hpixels + col;
C += (ImgBW[index] * mask0[a + 1][b + 1]);
}
}
ImgCurr[MYpixIndex] = C;
break;
case 1: for (a = -2; a <= 2; a++){
for (b = -2; b <= 2; b++){
row = i + a;
col = j + b;
index = row*Hpixels + col;
C += (ImgBW[index] * mask1[a + 2][b + 2]);
}
}
ImgCurr[MYpixIndex] = C;
break;
case 2: for (a = -2; a <= 2; a++){
for (b = -2; b <= 2; b++){
row = i + a;
col = j + b;
index = row*Hpixels + col;
C += (ImgBW[index] * mask2[a + 2][b + 2]);
}
}
ImgCurr[MYpixIndex] = C;
break;
case 3: for (a = -3; a <= 3; a++){
for (b = -3; b <= 3; b++){
row = i + a;
col = j + b;
index = row*Hpixels + col;
C += (ImgBW[index] * mask3[a + 3][b + 3]);
}
}
ImgCurr[MYpixIndex] = C;
break;
default: for (a = -3; a <= 3; a++){
for (b = -3; b <= 3; b++){
row = i + a;
col = j + b;
index = row*Hpixels + col;
C += (ImgBW[index] * mask4[a + 3][b + 3]);
}
}
// assign convolution sum to current noisy pixel index
ImgCurr[MYpixIndex] = C;
break;
}
}
// Sum of absolute differences over the noisy pixels -- the reconstruction
// progress metric.  Each thread handles one noisy pixel; the rounded
// per-pixel |prev - current| is accumulated atomically into *sad.
// Fix: the original added 0.5 before taking the absolute value, which
// rounds negative differences toward zero (e.g. a change of -1.2 counted
// as 0 instead of 1); the magnitude is now taken first.
__global__
void SAD(ui *sad, double *prev, double *current, pixelCoords *pc, ui numNoisy, ui Hpixels, ui Vpixels)
{
    // thread IDs
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    if (MYgtid >= numNoisy) return; // index out of range
    ui i=pc[MYgtid].i, j=pc[MYgtid].j; // current noisy pixel coordinates
    ui MYpixIndex = i * Hpixels + j; // absolute index
    // magnitude of the change, rounded to the nearest integer
    int absDiff = (int)(fabs(prev[MYpixIndex] - current[MYpixIndex]) + 0.5);
    atomicAdd(sad, (ui)absDiff); // update global sum
}
// Kernel that calculates a B&W image from an RGB (BMP, BGR byte order)
// image, producing both an 8-bit copy (ImgBW) and a double-precision copy
// (ImgfpBW).  One thread per pixel; source rows are padded to 4-byte
// multiples as required by the BMP format.
__global__
void BWKernel(uch *ImgBW, uch *ImgGPU, double *ImgfpBW, ui Hpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
double R, G, B;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
// BMP rows are padded up to a multiple of 4 bytes
ui RowBytes = (Hpixels * 3 + 3) & (~3);
ui MYrow = MYbid / BlkPerRow;
ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
// BMP stores channels in B, G, R order
B = (double)ImgGPU[MYsrcIndex];
G = (double)ImgGPU[MYsrcIndex + 1];
R = (double)ImgGPU[MYsrcIndex + 2];
// unweighted channel average, kept in both integer and double form
ImgBW[MYpixIndex] = (uch)((R+G+B)/3.0);
ImgfpBW[MYpixIndex] = (R+G+B)/3.0;
}
// Kernel that expands the double-typed B&W image into a 24-bit grayscale
// RGB image (intensity replicated into all three channels) so the result
// can be written out as a Windows BMP.  One thread per pixel.
__global__
void RGBKernel(uch *ImgRGB, double *ImgBW, ui Hpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
// destination rows are padded to 4-byte multiples (BMP requirement)
ui RowBytes = (Hpixels * 3 + 3) & (~3);
ui MYrow = MYbid / BlkPerRow;
ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYdstIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
// NOTE(review): double -> uch conversion truncates and does not clamp;
// assumes upstream convolutions keep values within [0, 255] -- confirm.
uch pixInt = ImgBW[MYpixIndex];
// replicate the gray intensity into B, G and R
ImgRGB[MYdstIndex] = pixInt;
ImgRGB[MYdstIndex+1] = pixInt;
ImgRGB[MYdstIndex+2] = pixInt;
}
// Selective copy: one thread per entry of the noisy-pixel list `pc`.
// Each thread copies that pixel's value from ImgSrc into NPDst at the same
// absolute position (both are full-image buffers).
__global__
void NoisyPixCopy(double *NPDst, double *ImgSrc, pixelCoords *pc, ui NoisyPixelListLength, ui Hpixels)
{
    const ui gtid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gtid >= NoisyPixelListLength)
        return; // outside the allocated memory
    const pixelCoords where = pc[gtid];
    const ui at = where.i * Hpixels + where.j;
    NPDst[at] = ImgSrc[at];
}
// Kernel that copies an image from one part of GPU memory (ImgSrc) to
// another (ImgDst), one double element per thread; FS is the element count.
// Fix: the original guard was 'MYgtid > FS', which let thread FS through
// and wrote one element past the end of both buffers.
__global__
void PixCopy(double *ImgDst, double *ImgSrc, ui FS)
{
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    // valid indices are 0..FS-1 (matches the >= guards of the sibling kernels)
    if (MYgtid >= FS) return; // outside the allocated memory
    ImgDst[MYgtid] = ImgSrc[MYgtid];
}
/*
// helper function that wraps CUDA API calls, reports any error and exits
void chkCUDAErr(cudaError_t error_id)
{
if (error_id != CUDA_SUCCESS)
{
printf("CUDA ERROR :::%\n", cudaGetErrorString(error_id));
exit(EXIT_FAILURE);
}
}
*/
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocates memory for the image, fills the global `ip` descriptor from
// the 54-byte header, and returns the pixel buffer (NULL if malloc fails).
// Fix: both fread calls used to ignore their return values, so a truncated
// or corrupt file silently produced garbage dimensions/pixels.
uch *ReadBMPlin(char* fn)
{
    static uch *Img;
    FILE* f = fopen(fn, "rb");
    if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); }
    uch HeaderInfo[54];
    // read the 54-byte header; a short read means the file is not a BMP
    if (fread(HeaderInfo, sizeof(uch), 54, f) != 54){
        printf("\n\n%s is not a valid BMP file\n\n", fn);
        fclose(f);
        exit(EXIT_FAILURE);
    }
    // extract image height and width from header
    int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
    int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
    // rows are padded to 4-byte multiples
    int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes;
    //save header for re-use
    memcpy(ip.HeaderInfo, HeaderInfo,54);
    printf("\n Input File name: %17s (%u x %u) File Size=%u", fn,
           ip.Hpixels, ip.Vpixels, IMAGESIZE);
    // allocate memory to store the main image (1 Dimensional array)
    Img = (uch *)malloc(IMAGESIZE);
    if (Img == NULL) return Img; // Cannot allocate memory
    // read the image from disk; warn on a truncated pixel section
    if (fread(Img, sizeof(uch), IMAGESIZE, f) != (size_t)IMAGESIZE){
        printf("\n\n%s: short read of pixel data\n\n", fn);
    }
    fclose(f);
    return Img;
}
// Write the 1D linear-memory stored image to disk as a BMP: the 54-byte
// header captured by ReadBMPlin followed by the raw (padded) pixel rows.
void WriteBMPlin(uch *Img, char* fn)
{
    FILE* out = fopen(fn, "wb");
    if (out == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); }
    // header first, then the pixel payload
    fwrite(ip.HeaderInfo, sizeof(uch), 54, out);
    fwrite(Img, sizeof(uch), IMAGESIZE, out);
    printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
    fclose(out);
}
int main(int argc, char **argv)
{
float /*totalTime, tfrCPUtoGPU, tfrGPUtoCPU,*/ kernelExecutionTime; // GPU code run times
cudaError_t cudaStatus;
cudaEvent_t time1, time2;//, time3, time4;
char InputFileName[255], OutputFileName[255], ProgName[255];
ui BlkPerRow, ThrPerBlk=256, NumBlocks, /* GPUDataTransfer,*/ NumBlocksNP;
cudaDeviceProp GPUprop;
ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100];
ui GPUtotalBufferSize, R = 5, T = 5, NumNoisyPixelsCPU, mutexInit[4] = {0, 255, 0, 0};
ui CPU_SAD;
strcpy(ProgName, "randNoiseRemoval");
switch (argc){
case 6: ThrPerBlk = atoi(argv[5]);
case 5: R = atoi(argv[4]);
case 4: T = atoi(argv[3]);
case 3: strcpy(InputFileName, argv[1]);
strcpy(OutputFileName, argv[2]);
break;
default: printf("\n\nUsage: %s InputFilename OutputFilename [T] [R] [ThrPerBlk]", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 5", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 5 5",ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 5 5 128",ProgName);
printf("\n\nT = reconstruction threshold, R = mask selection threshold\n\n");
exit(EXIT_FAILURE);
}
if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) {
printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk);
exit(EXIT_FAILURE);
}
// Create CPU memory to store the input and output images
TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated
if (TheImg == NULL){
printf("Cannot allocate memory for the input image...\n");
exit(EXIT_FAILURE);
}
CopyImg = (uch *)malloc(IMAGESIZE);
if (CopyImg == NULL){
free(TheImg);
printf("Cannot allocate memory for the input image...\n");
exit(EXIT_FAILURE);
}
// Choose which GPU to run on, change this on a multi-GPU system.
int NumGPUs = 0;
cudaGetDeviceCount(&NumGPUs);
if (NumGPUs == 0){
printf("\nNo CUDA Device is available\n");
exit(EXIT_FAILURE);
}
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
exit(EXIT_FAILURE);
}
cudaGetDeviceProperties(&GPUprop, 0);
SupportedKBlocks = (ui)GPUprop.maxGridSize[0] * (ui)GPUprop.maxGridSize[1] * (ui)GPUprop.maxGridSize[2] / 1024;
SupportedMBlocks = SupportedKBlocks / 1024;
sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks >= 5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks >= 5) ? 'M' : 'K');
MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock;
cudaEventCreate(&time1);
cudaEventCreate(&time2);
// cudaEventCreate(&time3);
// cudaEventCreate(&time4);
/*
>>> GPU STORAGE DETAILS >>>
GPUImage: IMAGESIZE
GPUCopyImage(BW) : IMAGEPIX
NoisyPixelCoords: IMAGEPIX*sizeof(pixelCoords)
NoiseMap : IMAGEPIX
KernelIndices : IMAGEPIX
GlobalMax : sizeof(ui)
GlobalMin : sizeof(ui)
NumNoisyPixelsGPU : sizeof(ui)
GPU_PREV_BW : sizeof(double) * IMAGEPIX
GPU_CURR_BW : sizeof(double) * IMAGEPIX
GPU_SAD : sizeof(ui)
***********************
*/
// allocate sufficient memory on the GPU to hold all above items
GPUtotalBufferSize = IMAGESIZE+(IMAGEPIX*sizeof(pixelCoords))+IMAGEPIX*3+sizeof(ui)*4+2*(sizeof(double)*IMAGEPIX);
cudaStatus = cudaMalloc((void**)&GPUptr, GPUtotalBufferSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory for buffers");
exit(EXIT_FAILURE);
}
// setup buffer pointers for functions
GPUImg = (uch *)GPUptr;
GPUCopyImg = GPUImg + IMAGESIZE;
NoiseMap = GPUCopyImg + IMAGEPIX; // add the previous image/array of noisy pixel intensities
KernelIndices = NoiseMap + IMAGEPIX;
NoisyPixelCoords = (pixelCoords*)(KernelIndices + IMAGEPIX);
GPU_PREV_BW = (double*)(NoisyPixelCoords+IMAGEPIX);
GPU_CURR_BW = GPU_PREV_BW + IMAGEPIX;
GlobalMax = (ui*)(GPU_CURR_BW + IMAGEPIX);
GlobalMin = GlobalMax+1;
NumNoisyPixelsGPU = GlobalMin+1;
GPU_SAD = NumNoisyPixelsGPU+1;
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy for input image CPU to GPU failed!");
exit(EXIT_FAILURE);
}
// Copy mutex initializations from CPU to GPU
cudaStatus = cudaMemcpy(GlobalMax, mutexInit, 4*sizeof(ui), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy for mutex initializations CPU to GPU failed!");
exit(EXIT_FAILURE);
}
// assume pixels are not noisy by default
cudaStatus = cudaMemset (NoiseMap, 1, IMAGEPIX );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset for Noise Map failed!");
exit(EXIT_FAILURE);
}
cudaEventRecord(time1, 0); // Time stamp at the start of the GPU transfer
BlkPerRow = CEIL(ip.Hpixels, ThrPerBlk);
NumBlocks = IPV*BlkPerRow;
BWKernel <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, GPU_CURR_BW, IPH);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n\n cudaDeviceSynchronize for B&WKernel returned error code %d after launching the kernel!\n", cudaStatus);
exit(EXIT_FAILURE);
}
findNoisyPixels <<< NumBlocks, ThrPerBlk >>> (NoisyPixelCoords, GPUCopyImg, NoiseMap, GlobalMax, GlobalMin, NumNoisyPixelsGPU, IPH, IPV);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n\ncudaDeviceSynchronize for findNoisyPixels returned error code %d after launching the kernel!\n", cudaStatus);
exit(EXIT_FAILURE);
}
//cudaEventRecord(time3, 0);
cudaStatus = cudaMemcpy(&NumNoisyPixelsCPU, NumNoisyPixelsGPU, sizeof(ui), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy of NumNoisyPixels, GPU to CPU failed!");
exit(EXIT_FAILURE);
}
// only schedule as many threads are needed for NoisyPixelListLength
NumBlocksNP = CEIL(NumNoisyPixelsCPU, ThrPerBlk);
determineMasks <<< NumBlocksNP, ThrPerBlk >>> (NoisyPixelCoords, GPUCopyImg, NoiseMap, KernelIndices, NumNoisyPixelsCPU, IPH, R);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n\ncudaDeviceSynchronize for determineMasks returned error code %d after launching the kernel!\n", cudaStatus);
exit(EXIT_FAILURE);
}
PixCopy <<< NumBlocks, ThrPerBlk >>> (GPU_PREV_BW, GPU_CURR_BW, IMAGEPIX);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n\ncudaDeviceSynchronize for PixCopy returned error code %d after launching the kernel!\n", cudaStatus);
exit(EXIT_FAILURE);
}
int t=1;
// progress tracking
do{
// reset SAD (sum of absolute pixel differences)
cudaStatus = cudaMemset (GPU_SAD, 0, sizeof(ui) );
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset for GPU_SAD failed!");
exit(EXIT_FAILURE);
}
Convolute <<< NumBlocksNP, ThrPerBlk >>> (GPU_CURR_BW, GPU_PREV_BW, NoisyPixelCoords, KernelIndices, NumNoisyPixelsCPU, IPH);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n\n cudaDeviceSynchronize for Convolute returned error code %d after launching the kernel!\n", cudaStatus);
exit(EXIT_FAILURE);
}
SAD <<< NumBlocksNP, ThrPerBlk >>> (GPU_SAD, GPU_PREV_BW, GPU_CURR_BW, NoisyPixelCoords, NumNoisyPixelsCPU, IPH, IPV);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n\n cudaDeviceSynchronize for SAD returned error code %d after launching the kernel!\n", cudaStatus);
exit(EXIT_FAILURE);
}
NoisyPixCopy <<< NumBlocksNP, ThrPerBlk >>> (GPU_PREV_BW, GPU_CURR_BW, NoisyPixelCoords, NumNoisyPixelsCPU, IPH);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n\n cudaDeviceSynchronize for NoisyPixCopy returned error code %d after launching the kernel!\n", cudaStatus);
exit(EXIT_FAILURE);
}
// CudaMemcpy the SAD from GPU to CPU
cudaStatus = cudaMemcpy(&CPU_SAD, GPU_SAD, sizeof(ui), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy of SAD from GPU to CPU failed!");
exit(EXIT_FAILURE);
}
// must convert floating point B&W back to unsigned char format
NumBlocks = IPV*BlkPerRow;
RGBKernel <<< NumBlocks, ThrPerBlk >>> (GPUImg, GPU_CURR_BW, IPH);
GPUResult = GPUImg;
//Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = cudaMemcpy(CopyImg, GPUResult, IMAGESIZE, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy GPU to CPU failed!");
exit(EXIT_FAILURE);
}
sprintf(OutputFileName, "%s%d%s", "stages/stage", t++, ".bmp");
WriteBMPlin(CopyImg, OutputFileName); // Write the flipped image back to disk
} while(CPU_SAD > T);
cudaEventRecord(time2, 0); // Time stamp after the CPU --> GPU tfr is done
//GPUDataTransfer = GPUtotalBufferSize;
//cudaEventRecord(time4, 0);
cudaEventSynchronize(time1);
cudaEventSynchronize(time2);
//cudaEventSynchronize(time3);
//cudaEventSynchronize(time4);
//cudaEventElapsedTime(&totalTime, time1, time4);
//cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
cudaEventElapsedTime(&kernelExecutionTime, time1, time2);
//cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
cudaStatus = cudaDeviceSynchronize();
//checkError(cudaGetLastError()); // screen for errors in kernel launches
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!");
free(TheImg);
free(CopyImg);
exit(EXIT_FAILURE);
}
printf("\n\n--------------------------------------------------------------------------\n");
printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n",
GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk);
printf("--------------------------------------------------------------------------\n");
printf("%s %s %s %d %d %u [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName,
T, R, ThrPerBlk, NumBlocks, BlkPerRow);
// printf("--------------------------------------------------------------------------\n");
// printf("CPU->GPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrCPUtoGPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrCPUtoGPU));
printf("Kernel Execution =%7.2f ms\n", kernelExecutionTime);//, DATAMB(GPUDataTransfer), DATABW(GPUDataTransfer, kernelExecutionTime)); ... %4d MB ... %6.2f GB/s
// printf("GPU->CPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU));
// printf("--------------------------------------------------------------------------\n");
// printf("Total time elapsed =%7.2f ms %4d MB ... %6.2f GB/s\n", totalTime, DATAMB((2 * IMAGESIZE + GPUDataTransfer)), DATABW((2 * IMAGESIZE + GPUDataTransfer), totalTime));
printf("--------------------------------------------------------------------------\n\n");
// Deallocate CPU, GPU memory and destroy events.
cudaFree(GPUptr);
cudaEventDestroy(time1);
cudaEventDestroy(time2);
// cudaEventDestroy(time3);
// cudaEventDestroy(time4);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
free(TheImg);
free(CopyImg);
exit(EXIT_FAILURE);
}
free(TheImg);
free(CopyImg);
return(EXIT_SUCCESS);
}
|
// Computes the Euclidean (L2) norm of each row of a row-major matrix.
//
// input  : [num_rows x dim] row-major matrix in global memory
// output : per-row norms; output[id] = ||input[id*dim .. id*dim+dim-1]||_2
// dim    : row length
//
// One thread per row; there is no bounds guard, so the launch must supply
// exactly one thread per output element (num_rows total) — callers should
// confirm their grid covers no more than the allocated rows.
__global__ void norm2(float *output, float * input, int dim) {
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    // Accumulate in a register; the original zeroed output[id] in global
    // memory first, a redundant extra global-memory write.
    float acc = 0.0f;
    for (int i = 0; i < dim; i++) {
        const float v = input[id * dim + i];
        acc += v * v;
    }
    // BUG FIX: use sqrtf — the double-precision sqrt() forced a
    // float->double->float round trip in a pure-float kernel.
    output[id] = sqrtf(acc);
}
12,598 | #include <assert.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
const int N = 16384;
const int THREADS_PER_BLOCK = 512;
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// One thread per element; the guard handles the tail when the grid
// overshoots n.
__global__ void add_threads_blocks (int *a, int *b, int *c, int n) {
    // BUG FIX: the index was computed as threadIdx.x * blockIdx.x *
    // threadIdx.x, which maps many threads onto duplicate (or zero)
    // indices and leaves most of c unwritten. The canonical flat global
    // index is blockIdx.x * blockDim.x + threadIdx.x.
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < n) {
        c[index] = a[index] + b[index];
    }
}
// Driver: fills two host vectors with pseudo-random ints, adds them on the
// GPU, and reports the launch configuration used.
int main(void) {
    const size_t bytes = N * sizeof(int);

    // Deterministic input so runs are reproducible.
    srand(1);

    // Host-side buffers.
    int *host_a = (int *) malloc(bytes);
    int *host_b = (int *) malloc(bytes);
    int *host_c = (int *) malloc(bytes);
    for (int i = 0; i < N; ++i) {
        host_a[i] = rand();
        host_b[i] = rand();
    }

    // Device-side buffers.
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **) &dev_a, bytes);
    cudaMalloc((void **) &dev_b, bytes);
    cudaMalloc((void **) &dev_c, bytes);

    cudaMemcpy(dev_a, host_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, host_b, bytes, cudaMemcpyHostToDevice);

    // Ceil-divide so the grid covers all N elements.
    const int blocks = (N + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK;
    add_threads_blocks<<<blocks, THREADS_PER_BLOCK>>>(dev_a, dev_b, dev_c, N);

    // Blocking copy on the default stream also waits for the kernel.
    cudaMemcpy(host_c, dev_c, bytes, cudaMemcpyDeviceToHost);

    printf("Suma con %d hebras con %d hebras por bloque!\n", N, THREADS_PER_BLOCK);

    free(host_a); free(host_b); free(host_c);
    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
    return 0;
}
|
12,599 | //pass
//--blockDim=64 --gridDim=64 --no-inline
#include "cuda.h"
// No-op device helper; accepts an int and does nothing with it.
// NOTE(review): this file carries GPUVerify annotations (//pass,
// --blockDim/--gridDim), so presumably the empty body exists only to
// exercise the verifier's device-call handling — confirm against the suite.
__device__ void f(int x) {
}
// Kernel whose sole action is a call to the no-op device helper with a
// constant argument.
__global__ void foo() {
f(2);
}
|
12,600 | #include <stdio.h>
// CUDA runtime
#include <cuda_runtime.h>
int DIM_LIM = 10;
int MAT_COUNT = 20;
int SEED = 15; //seed for rand
// Device-resident matrix: owns a [row x col] row-major buffer of doubles
// in GPU global memory.
//
// Ownership model: the allocating constructor owns d_data and frees it in
// the destructor; copies made via the copy constructor share the pointer
// and are flagged with isCopy so they do NOT free it.
class matrix {
public:
    int row;        // number of rows, y
    int col;        // number of columns, x
    double* d_data; // device pointer to row * col doubles (row-major)
    bool isCopy;    // true => shallow copy, does not own d_data
    // Allocates an uninitialized columns-x-rows device buffer.
    // BUG FIX: isCopy was never initialized here, so the destructor read an
    // indeterminate bool and could either leak d_data or free a buffer it
    // did not own. Initializers also reordered to declaration order
    // (row, col) to match actual C++ initialization order.
    matrix(int columns, int rows) :
        row(rows), col(columns), isCopy(false)
    {cudaMalloc(&d_data, sizeof(double) * columns * rows );}
    // Shallow copy: default-assigns all members (shares d_data), then marks
    // this object as non-owning.
    matrix( const matrix& _orig ) { *this = _orig; isCopy = true;}
    ~matrix(){if(!isCopy) cudaFree(d_data);}
    // Element accessor (device code only): x = column index, y = row index.
    __device__ double& getData(int x, int y){
        return d_data[y * col + x]; //vertical position * row length + pos in row
    }
};
// Fills a matrix's device buffer with the constant 5.0: stages the values
// in a host buffer, then performs a single host-to-device copy.
void init_matrix(matrix * mat){
    const size_t count = (size_t)mat->row * (size_t)mat->col;
    // Heap-allocate the staging buffer. The original used a C-style VLA
    // (double arr[x_dim][y_dim]), which is not standard C++ and risks stack
    // overflow for large matrices; since every element is the same value,
    // a flat buffer produces an identical device image.
    double* staging = new double[count];
    for (size_t i = 0; i < count; i++) {
        staging[i] = 5;
    }
    cudaMemcpy(mat->d_data, staging, count * sizeof(double), cudaMemcpyHostToDevice);
    delete[] staging;
}
// Debug kernel: prints a whole matrix from a single device thread.
// Intended for a <<<1,1>>> launch; emits the dimensions, then the elements
// row by row, then a blank line.
__global__ void d_printMat(matrix *mat)
{
    const int width  = mat->col;
    const int height = mat->row;
    printf("Dim x %d, Dim y %d\n", width, height);
    for (int r = 0; r < height; r++) {
        for (int c = 0; c < width; c++) {
            printf("%lf ", mat->getData(c, r));
        }
        printf("\n");
    }
    printf("\n");
}
// Single-thread matrix multiply: result = mat_a * mat_b, computed serially
// by whichever thread runs this kernel (intended for a <<<1,1>>> launch).
// Prints a diagnostic and computes nothing if the inner dimensions
// (mat_a->row vs mat_b->col) disagree.
__global__ void d_multMat(matrix *mat_a, matrix *mat_b, matrix *result)
{
    //input: [a x b] * [b x c] = [a x c]
    int dim_a = mat_a->col;
    int dim_b = mat_a->row;
    int dim_c = mat_b->row;
    if(mat_a->row != mat_b->col){
        printf("does not match!");
    }
    else {
        // BUG FIX: the accumulator was declared int, silently truncating
        // every double product before it was stored. Accumulate in double.
        double tmp = 0;
        for(int x=0; x < dim_a; x++){
            for(int y=0; y < dim_c; y++){
                tmp = 0;
                for(int z=0; z < dim_b; z++){
                    tmp += mat_a->getData(x,z) * mat_b->getData(z,y);
                }
                result->getData(x,y)=tmp;
            }
        }
    }
}
// Threaded matrix multiply: result = mat_a * mat_b with one thread per
// output element; thread (x, y) of the block computes result(x, y).
// The block must be at least dim_a x dim_c threads (extra threads are
// guarded out). Prints a diagnostic and computes nothing if the inner
// dimensions disagree.
__global__ void d_multMat_thd(matrix *mat_a, matrix *mat_b, matrix *result)
{
    //input: [a x b] * [b x c] = [a x c]
    int dim_a = mat_a->col;
    int dim_b = mat_a->row;
    int dim_c = mat_b->row;
    int idx = threadIdx.x;
    int idy = threadIdx.y;
    // BUG FIX: the accumulator was declared int, silently truncating every
    // double product before the store. Accumulate in double.
    double tmp = 0;
    if(mat_a->row != mat_b->col)
    {
        printf("does not match!");
    }
    else
    {
        if(idx < dim_a){
            if(idy < dim_c){
                for(int z=0; z < dim_b; z++){
                    tmp += mat_a->getData(idx,z) * mat_b->getData(z,idy);
                }
                result->getData(idx,idy) = tmp;
            }
        }
    }
}
// Driver: builds a chain of random-sized constant matrices on the GPU,
// multiplies each adjacent pair with the threaded kernel, and prints every
// product from device code.
int main(){
    // Draw MAT_COUNT+1 random dimensions in [1, DIM_LIM]; matrix i is
    // dim[i] columns by dim[i+1] rows so consecutive matrices chain.
    srand(SEED); //init random gen
    int dim[MAT_COUNT + 1]; //stores matrix sizes
    for(int z = 0; z <= MAT_COUNT; z++){
        dim[z] = rand()%DIM_LIM + 1;//random between 1 and limit
    }
    // Build host-side matrix objects (each owns a device data buffer) and
    // a device-resident copy of each object so kernels can dereference it.
    matrix *mat_arr[MAT_COUNT];
    matrix *d_mat_arr[MAT_COUNT];
    for(int i=0; i<MAT_COUNT; i++){
        mat_arr[i] = new matrix(dim[i],dim[i+1]);
        init_matrix(mat_arr[i]);
        cudaMalloc(&d_mat_arr[i], sizeof(matrix));
        cudaMemcpy(d_mat_arr[i], mat_arr[i], sizeof(matrix), cudaMemcpyHostToDevice);
    }
    cudaDeviceSynchronize();
    // Multiply each adjacent pair and print the product.
    for(int i = 0; i < MAT_COUNT-1; i++){
        int dimxn = mat_arr[i]->col;
        int dimyn = mat_arr[i+1]->row;
        matrix *result = new matrix(dimxn,dimyn);
        matrix *d_result;
        cudaMalloc(&d_result, sizeof(matrix));
        cudaMemcpy(d_result, result, sizeof(matrix), cudaMemcpyHostToDevice);
        cudaDeviceSynchronize();
        dim3 numBlocks(1);
        // DIM_LIM is 10, so a single 10x10 block covers any matrix here.
        dim3 threadsPerBlock(10,10);
        d_multMat_thd<<<numBlocks,threadsPerBlock>>>(d_mat_arr[i],d_mat_arr[i+1],d_result);
        cudaDeviceSynchronize();
        d_printMat<<<1,1>>>(d_result);
        cudaDeviceSynchronize();
        cudaFree(d_result); //free device copy of the object
        delete result; //free host copy; destructor frees the device data buffer
    }
    // BUG FIX: the input matrices and their device-side object copies were
    // never released, leaking MAT_COUNT host objects plus their device
    // buffers and MAT_COUNT device allocations.
    for(int i=0; i<MAT_COUNT; i++){
        cudaFree(d_mat_arr[i]);
        delete mat_arr[i];
    }
    printf("finished!");
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.